Merge branch 'master' into redis-pubsub-fix

This commit is contained in:
Yaron Schneider 2022-08-29 07:00:16 -07:00 committed by GitHub
commit edd2c9d5a9
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
398 changed files with 20008 additions and 4222 deletions

View File

@ -11,5 +11,5 @@ coverage:
# See https://docs.codecov.io/docs/commit-status#disabling-a-status.
default: false
comment:
# Delete old comment and post new one for new coverage information.
behavior: new
# Update old comment with new coverage information if the PR is changed. Avoids triggering multiple emails.
behavior: once

View File

@ -216,6 +216,7 @@ KEYVAULT_NAME_VAR_NAME="AzureKeyVaultName"
RESOURCE_GROUP_NAME_VAR_NAME="AzureResourceGroupName"
SERVICE_BUS_CONNECTION_STRING_VAR_NAME="AzureServiceBusConnectionString"
SERVICE_BUS_NAMESPACE_VAR_NAME="AzureServiceBusNamespace"
SQL_SERVER_NAME_VAR_NAME="AzureSqlServerName"
SQL_SERVER_DB_NAME_VAR_NAME="AzureSqlServerDbName"
@ -613,6 +614,9 @@ echo "Configuring Service Bus test settings ..."
SERVICE_BUS_CONNECTION_STRING="$(az servicebus namespace authorization-rule keys list --name RootManageSharedAccessKey --namespace-name "${SERVICE_BUS_NAME}" --resource-group "${RESOURCE_GROUP_NAME}" --query "primaryConnectionString" --output tsv)"
echo export ${SERVICE_BUS_CONNECTION_STRING_VAR_NAME}=\"${SERVICE_BUS_CONNECTION_STRING}\" >> "${ENV_CONFIG_FILENAME}"
az keyvault secret set --name "${SERVICE_BUS_CONNECTION_STRING_VAR_NAME}" --vault-name "${KEYVAULT_NAME}" --value "${SERVICE_BUS_CONNECTION_STRING}"
SERVICE_BUS_NAMESPACE="${SERVICE_BUS_NAME}.servicebus.windows.net"
echo export ${SERVICE_BUS_NAMESPACE_VAR_NAME}=\"${SERVICE_BUS_NAMESPACE}\" >> "${ENV_CONFIG_FILENAME}"
az keyvault secret set --name "${SERVICE_BUS_NAMESPACE_VAR_NAME}" --vault-name "${KEYVAULT_NAME}" --value "${SERVICE_BUS_NAMESPACE}"
# ----------------------------------
# Populate SQL Server test settings
@ -726,6 +730,9 @@ az role assignment create --assignee "${CERTIFICATION_SPAUTH_SP_PRINCIPAL_ID}" -
# IOT hub used in eventhubs certification test
az role assignment create --assignee "${CERTIFICATION_SPAUTH_SP_PRINCIPAL_ID}" --role "Owner" --scope "/subscriptions/${SUB_ID}/resourceGroups/${RESOURCE_GROUP_NAME}/providers/Microsoft.Devices/IotHubs/${IOT_HUB_NAME}"
az role assignment create --assignee "${CERTIFICATION_SPAUTH_SP_PRINCIPAL_ID}" --role "IoT Hub Data Contributor" --scope "/subscriptions/${SUB_ID}/resourceGroups/${RESOURCE_GROUP_NAME}/providers/Microsoft.Devices/IotHubs/${IOT_HUB_NAME}"
# Azure Service Bus
ASB_ID=$(az servicebus namespace show --resource-group "${RESOURCE_GROUP_NAME}" --name "${SERVICE_BUS_NAME}" --query "id" -otsv)
az role assignment create --assignee "${CERTIFICATION_SPAUTH_SP_PRINCIPAL_ID}" --role "Azure Service Bus Data Owner" --scope "${ASB_ID}"
# Now export the service principal information
CERTIFICATION_TENANT_ID="$(az ad sp list --display-name "${CERTIFICATION_SPAUTH_SP_NAME}" --query "[].appOwnerTenantId" --output tsv)"

View File

@ -0,0 +1,7 @@
# Docker Compose definition used by the conformance-test workflow to run a
# local memcached instance.
version: '2'
services:
  memcached:
    image: docker.io/memcached:1.6
    ports:
      # Expose container port 11211 on the same host port.
      - '11211:11211'

View File

@ -0,0 +1,9 @@
# Docker Compose definition used by the conformance-test workflow to run a
# local RethinkDB instance.
version: '2'
services:
  rethinkdb:
    image: rethinkdb:2.4
    ports:
      # Container port 8080 is exposed on host port 8081.
      - 8081:8080
      # Expose container port 28015 on the same host port.
      - 28015:28015
      # Expose container port 29015 on the same host port.
      - 29015:29015

View File

@ -25,24 +25,11 @@ on:
- release-*
jobs:
pre_job:
name: Skip Duplicate Actions
runs-on: ubuntu-latest
outputs:
should_skip: ${{ steps.skip_check.outputs.should_skip }}
steps:
- id: skip_check
uses: fkirc/skip-duplicate-actions@v3.4.0
with:
cancel_others: 'true'
paths_ignore: '["**.md", ".codecov.yaml", ".github/workflows/dapr-automerge.yml"]'
# Based on whether this is a PR or a scheduled run, we will run a different
# subset of the certification tests. This allows all the tests not requiring
# secrets to be executed on pull requests.
generate-matrix:
runs-on: ubuntu-latest
needs: pre_job
if: needs.pre_job.outputs.should_skip != 'true' || github.event_name == 'repository_dispatch'
steps:
- name: Install yq
run: |
@ -59,6 +46,9 @@ jobs:
- state.postgresql
- bindings.alicloud.dubbo
- bindings.kafka
- secretstores.local.env
- secretstores.local.file
- bindings.rabbitmq
EOF
)
echo "::set-output name=pr-components::$PR_COMPONENTS"
@ -101,12 +91,16 @@ jobs:
required-secrets: AzureEventHubsBindingsConnectionString,AzureBlobStorageAccount,AzureBlobStorageAccessKey,AzureEventHubsBindingsHub,AzureEventHubsBindingsNamespace,AzureEventHubsBindingsConsumerGroup,AzureCertificationServicePrincipalClientId,AzureCertificationTenantId,AzureCertificationServicePrincipalClientSecret,AzureResourceGroupName,AzureCertificationSubscriptionId,AzureEventHubsBindingsContainer,AzureIotHubEventHubConnectionString,AzureIotHubName,AzureIotHubBindingsConsumerGroup
- component: pubsub.azure.eventhubs
required-secrets: AzureEventHubsPubsubTopicActiveConnectionString,AzureEventHubsPubsubNamespace,AzureEventHubsPubsubNamespaceConnectionString,AzureBlobStorageAccount,AzureBlobStorageAccessKey,AzureEventHubsPubsubContainer,AzureIotHubName,AzureIotHubEventHubConnectionString,AzureCertificationTenantId,AzureCertificationServicePrincipalClientId,AzureCertificationServicePrincipalClientSecret,AzureResourceGroupName,AzureCertificationSubscriptionId
- component: pubsub.azure.servicebus
required-secrets: AzureServiceBusConnectionString,AzureServiceBusNamespace, AzureCertificationTenantId,AzureCertificationServicePrincipalClientId,AzureCertificationServicePrincipalClientSecret
- component: bindings.azure.blobstorage
required-secrets: AzureBlobStorageAccount,AzureBlobStorageAccessKey,AzureBlobStorageContainer,AzureCertificationTenantId,AzureCertificationServicePrincipalClientId,AzureCertificationServicePrincipalClientSecret
- component: bindings.azure.storagequeues
required-secrets: AzureBlobStorageAccount, AzureBlobStorageAccessKey
- component: state.azure.tablestorage
required-secrets: AzureBlobStorageAccount, AzureBlobStorageAccessKey, AzureCertificationTenantId, AzureCertificationServicePrincipalClientId, AzureCertificationServicePrincipalClientSecret
- component: state.azure.blobstorage
required-secrets: AzureBlobStorageContainer,AzureBlobStorageAccount, AzureBlobStorageAccessKey, AzureCertificationTenantId, AzureCertificationServicePrincipalClientId, AzureCertificationServicePrincipalClientSecret
EOF
)
echo "::set-output name=cloud-components::$CRON_COMPONENTS"
@ -156,11 +150,13 @@ jobs:
export TEST_OUTPUT_FILE_PREFIX=$GITHUB_WORKSPACE/test_report
echo "TEST_OUTPUT_FILE_PREFIX=$TEST_OUTPUT_FILE_PREFIX" >> $GITHUB_ENV
- name: Configure certification test path
- name: Configure certification test and source path
run: |
TEST_COMPONENT=$(echo ${{ matrix.component }} | sed -E 's/\./\//g')
export TEST_PATH="${PROJECT_PATH}/tests/certification/${TEST_COMPONENT}"
echo "TEST_PATH=$TEST_PATH" >> $GITHUB_ENV
export SOURCE_PATH="github.com/dapr/components-contrib/${TEST_COMPONENT}"
echo "SOURCE_PATH=$SOURCE_PATH" >> $GITHUB_ENV
- uses: Azure/login@v1
with:
@ -194,18 +190,19 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: '^1.18'
go-version: '^1.19'
- name: Download Go dependencies
working-directory: ${{ env.TEST_PATH }}
run: |
go mod download
go install gotest.tools/gotestsum@latest
go install github.com/axw/gocov/gocov@v1.1.0
- name: Check that go mod tidy is up-to-date
working-directory: ${{ env.TEST_PATH }}
run: |
go mod tidy -compat=1.18
go mod tidy -compat=1.19
git diff --exit-code ./go.mod
git diff --exit-code ./go.sum
@ -218,8 +215,7 @@ jobs:
set +e
gotestsum --jsonfile ${{ env.TEST_OUTPUT_FILE_PREFIX }}_certification.json \
--junitfile ${{ env.TEST_OUTPUT_FILE_PREFIX }}_certification.xml --format standard-quiet -- \
-count=1 -timeout=15m
-coverprofile=cover.out -covermode=set -coverpkg=${{ env.SOURCE_PATH }}
status=$?
echo "Completed certification tests for ${{ matrix.component }} ... "
if test $status -ne 0; then
@ -228,6 +224,12 @@ jobs:
fi
set -e
COVERAGE_REPORT=$(gocov convert cover.out | gocov report)
COVERAGE_LINE=$(echo $COVERAGE_REPORT | grep -oP '(?<=Total Coverage:).*') # example: "80.00% (40/50)"
COVERAGE_PERCENTAGE=$(echo $COVERAGE_LINE | grep -oP '([0-9\.]*)' | head -n 1) # example "80.00"
echo "COVERAGE_LINE=$COVERAGE_LINE" >> $GITHUB_ENV
echo "COMPONENT_PERCENTAGE=$COVERAGE_PERCENTAGE" >> $GITHUB_ENV
# Fail the step if we found no test to run
if grep -q "\[no test files\]" ${{ env.TEST_OUTPUT_FILE_PREFIX }}_certification.json ; then
echo "::error:: No certification test file was found for component ${{ matrix.component }}"
@ -245,6 +247,27 @@ jobs:
exit 1
fi
- name: Prepare Cert Coverage Info
run: |
mkdir -p tmp/cov_files
SOURCE_PATH_LINEAR=$(echo ${{ env.SOURCE_PATH }} |sed 's#/#\.#g') # converts slashes to dots in this string, so that it doesn't consider them sub-folders
echo "${{ env.COVERAGE_LINE }}" >> tmp/cov_files/$SOURCE_PATH_LINEAR.txt
- name: Upload Cert Coverage Artifact
uses: actions/upload-artifact@v3
with:
name: certtest_cov
path: tmp/cov_files
retention-days: 1
- name: Component Coverage Discord Notification
env:
DISCORD_WEBHOOK: ${{ secrets.DISCORD_MONITORING_WEBHOOK_URL }}
uses: Ilshidur/action-discord@0c4b27844ba47cb1c7bee539c8eead5284ce9fa9
continue-on-error: true
with:
args: 'Cert Test Coverage for {{ SOURCE_PATH }} is {{ COVERAGE_LINE }}'
# Upload logs for test analytics to consume
- name: Upload test results
if: always()
@ -252,3 +275,41 @@ jobs:
with:
name: ${{ matrix.component }}_certification_test
path: ${{ env.TEST_OUTPUT_FILE_PREFIX }}_certification.*
post_job:
name: Notify Total coverage
runs-on: ubuntu-latest
needs: certification
if: always()
steps:
- name: Download Cert Coverage Artifact
uses: actions/download-artifact@v3
continue-on-error: true
id: download
with:
name: certtest_cov
path: tmp/cov_files
- name: Calculate total coverage
run: |
ls "${{steps.download.outputs.download-path}}" | while read f; do
while read LINE;
do
ratio=$(echo $LINE | cut -d "(" -f2 | cut -d ")" -f1)
tempNumerator=$(echo $ratio | cut -d'/' -f1)
tempDenominator=$(echo $ratio | cut -d'/' -f2)
export numerator=$(($numerator+$tempNumerator))
export denominator=$(($denominator+$tempDenominator))
totalPer=$(awk "BEGIN { print (($numerator / $denominator) * 100) }")
echo "totalPer=$totalPer" >> $GITHUB_ENV
done < ${{steps.download.outputs.download-path}}/$f
done
continue-on-error: true
- name: Final Coverage Discord Notification
env:
DISCORD_WEBHOOK: ${{ secrets.DISCORD_MONITORING_WEBHOOK_URL }}
uses: Ilshidur/action-discord@0c4b27844ba47cb1c7bee539c8eead5284ce9fa9
continue-on-error: true
with:
args: 'Total Coverage for Certification Tests is {{ totalPer }}%'

View File

@ -29,11 +29,11 @@ jobs:
name: Build ${{ matrix.target_os }}_${{ matrix.target_arch }} binaries
runs-on: ${{ matrix.os }}
env:
GOVER: 1.18
GOVER: "1.19"
GOOS: ${{ matrix.target_os }}
GOARCH: ${{ matrix.target_arch }}
GOPROXY: https://proxy.golang.org
GOLANGCI_LINT_VER: v1.45.2
GOLANGCI_LINT_VER: "v1.48.0"
strategy:
matrix:
os: [ubuntu-latest, windows-latest, macOS-latest]
@ -51,12 +51,6 @@ jobs:
- os: macOS-latest
target_arch: arm
steps:
- name: Check if need skip
id: skip_check
uses: fkirc/skip-duplicate-actions@v3.4.0
with:
cancel_others: 'true'
paths_ignore: '["**.md", ".codecov.yaml", ".github/workflows/dapr-automerge.yml"]'
- name: Set up Go ${{ env.GOVER }}
if: ${{ steps.skip_check.outputs.should_skip != 'true' }}
uses: actions/setup-go@v2
@ -65,19 +59,52 @@ jobs:
- name: Check out code into the Go module directory
if: ${{ steps.skip_check.outputs.should_skip != 'true' }}
uses: actions/checkout@v2
- name: Cache Go modules (Linux)
if: matrix.target_os == 'linux'
uses: actions/cache@v3
with:
path: |
~/.cache/go-build
~/go/pkg/mod
key: ${{ matrix.target_os }}-${{ matrix.target_arch }}-go-${{ env.GOVER }}-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ matrix.target_os }}-${{ matrix.target_arch }}-go-${{ env.GOVER }}-
- name: Cache Go modules (Windows)
if: matrix.target_os == 'windows'
uses: actions/cache@v3
with:
path: |
~\AppData\Local\go-build
~\go\pkg\mod
key: ${{ matrix.target_os }}-${{ matrix.target_arch }}-go-${{ env.GOVER }}-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ matrix.target_os }}-${{ matrix.target_arch }}-go-${{ env.GOVER }}-
- name: Cache Go modules (macOS)
if: matrix.target_os == 'darwin'
uses: actions/cache@v3
with:
path: |
~/Library/Caches/go-build
~/go/pkg/mod
key: ${{ matrix.target_os }}-${{ matrix.target_arch }}-go-${{ env.GOVER }}-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ matrix.target_os }}-${{ matrix.target_arch }}-go-${{ env.GOVER }}-
- name: Run golangci-lint
if: matrix.target_arch == 'amd64' && matrix.target_os == 'linux' && steps.skip_check.outputs.should_skip != 'true'
uses: golangci/golangci-lint-action@v3.1.0
uses: golangci/golangci-lint-action@v3.2.0
with:
version: ${{ env.GOLANGCI_LINT_VER }}
skip-cache: true
args: --timeout 15m
- name: Run go mod tidy check diff
if: matrix.target_arch == 'amd64' && matrix.target_os == 'linux' && steps.skip_check.outputs.should_skip != 'true'
run: make modtidy-all check-diff
- name: Run make test
env:
COVERAGE_OPTS: "-coverprofile=coverage.txt -covermode=atomic"
IPFS_TEST: "1"
if: matrix.target_arch != 'arm' && steps.skip_check.outputs.should_skip != 'true'
run: make test
- name: Codecov
if: matrix.target_arch == 'amd64' && matrix.target_os == 'linux'
uses: codecov/codecov-action@v1
uses: codecov/codecov-action@v3

View File

@ -25,24 +25,11 @@ on:
- release-*
jobs:
pre_job:
name: Skip Duplicate Actions
runs-on: ubuntu-latest
outputs:
should_skip: ${{ steps.skip_check.outputs.should_skip }}
steps:
- id: skip_check
uses: fkirc/skip-duplicate-actions@v3.4.0
with:
cancel_others: 'true'
paths_ignore: '["**.md", ".codecov.yaml", ".github/workflows/dapr-automerge.yml"]'
# Based on whether this is a PR or a scheduled run, we will run a different
# subset of the conformance tests. This allows all the tests not requiring
# secrets to be executed on pull requests.
generate-matrix:
runs-on: ubuntu-latest
needs: pre_job
if: needs.pre_job.outputs.should_skip != 'true' || github.event_name == 'repository_dispatch'
steps:
- name: Install yq
run: |
@ -59,6 +46,7 @@ jobs:
- bindings.mqtt-mosquitto
- bindings.mqtt-vernemq
- bindings.redis
- bindings.rabbitmq
- pubsub.aws.snssqs
- pubsub.hazelcast
- pubsub.in-memory
@ -74,12 +62,14 @@ jobs:
- secretstores.localenv
- secretstores.localfile
- state.cassandra
- state.memcached
- state.mongodb
- state.mysql
- state.postgresql
- state.redis
- state.sqlserver
- state.cockroachdb
- state.rethinkdb
EOF
)
echo "::set-output name=pr-components::$PR_COMPONENTS"
@ -134,6 +124,8 @@ jobs:
required-certs: AzureKeyVaultSecretStoreCert
- component: secretstores.azure.keyvault.serviceprincipal
required-secrets: AzureKeyVaultName,AzureKeyVaultSecretStoreTenantId,AzureKeyVaultSecretStoreServicePrincipalClientId,AzureKeyVaultSecretStoreServicePrincipalClientSecret
- component: bindings.azure.cosmosdb
required-secrets: AzureCosmosDBMasterKey,AzureCosmosDBUrl,AzureCosmosDB,AzureCosmosDBCollection
EOF
)
echo "::set-output name=cron-components::$CRON_COMPONENTS"
@ -247,6 +239,10 @@ jobs:
- name: Start kafka
run: docker-compose -f ./.github/infrastructure/docker-compose-kafka.yml -p kafka up -d
if: contains(matrix.component, 'kafka')
- name: Start memcached
run: docker-compose -f ./.github/infrastructure/docker-compose-memcached.yml -p memcached up -d
if: contains(matrix.component, 'memcached')
- name: Start natsstreaming
run: docker-compose -f ./.github/infrastructure/docker-compose-natsstreaming.yml -p natsstreaming up -d
@ -311,6 +307,11 @@ jobs:
docker-compose -f ./.github/infrastructure/docker-compose-cockroachdb.yml -p cockroachdb up -d
if: contains(matrix.component, 'cockroachdb')
- name: Start rethinkdb
run: |
docker-compose -f ./.github/infrastructure/docker-compose-rethinkdb.yml -p rethinkdb up -d
if: contains(matrix.component, 'rethinkdb')
- name: Setup KinD test data
if: contains(matrix.component, 'kubernetes')
run: |
@ -320,7 +321,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: '^1.18'
go-version: '^1.19'
- name: Download Go dependencies
run: |

View File

@ -45,7 +45,12 @@ jobs:
"shubham1172",
"skyao",
"msfussell",
"Taction"
"Taction",
"RyanLettieri",
"DeepanshuA",
"yash-nisar",
"addjuarez",
"tmacam",
];
const payload = context.payload;
const issue = context.issue;

View File

@ -4,7 +4,7 @@ run:
concurrency: 4
# timeout for analysis, e.g. 30s, 5m, default is 1m
deadline: 5m
deadline: 15m
# exit code when at least one issue was found, default is 1
issues-exit-code: 1
@ -244,6 +244,7 @@ linters:
- nestif
- nlreturn
- exhaustive
- exhaustruct
- noctx
- gci
- golint
@ -259,7 +260,6 @@ linters:
- godot
- cyclop
- varnamelen
- gosec
- errorlint
- forcetypeassert
- ifshort
@ -271,3 +271,8 @@ linters:
- wastedassign
- containedctx
- gosimple
- nonamedreturns
- asasalint
- rowserrcheck
- sqlclosecheck
- structcheck

View File

@ -22,40 +22,76 @@ export GOSUMDB ?= sum.golang.org
GIT_COMMIT = $(shell git rev-list -1 HEAD)
GIT_VERSION = $(shell git describe --always --abbrev=7 --dirty)
# By default, disable CGO_ENABLED. See the details on https://golang.org/cmd/cgo
CGO ?= 0
CGO ?= 0
LOCAL_ARCH := $(shell uname -m)
ifeq ($(LOCAL_ARCH),x86_64)
TARGET_ARCH_LOCAL=amd64
TARGET_ARCH_LOCAL=amd64
else ifeq ($(shell echo $(LOCAL_ARCH) | head -c 5),armv8)
TARGET_ARCH_LOCAL=arm64
TARGET_ARCH_LOCAL=arm64
else ifeq ($(shell echo $(LOCAL_ARCH) | head -c 4),armv)
TARGET_ARCH_LOCAL=arm
TARGET_ARCH_LOCAL=arm
else
TARGET_ARCH_LOCAL=amd64
TARGET_ARCH_LOCAL=amd64
endif
export GOARCH ?= $(TARGET_ARCH_LOCAL)
LOCAL_OS := $(shell uname)
ifeq ($(LOCAL_OS),Linux)
TARGET_OS_LOCAL = linux
TARGET_OS_LOCAL = linux
else ifeq ($(LOCAL_OS),Darwin)
TARGET_OS_LOCAL = darwin
TARGET_OS_LOCAL = darwin
else
TARGET_OS_LOCAL ?= windows
TARGET_OS_LOCAL ?= windows
endif
export GOOS ?= $(TARGET_OS_LOCAL)
ifeq ($(GOOS),windows)
BINARY_EXT_LOCAL:=.exe
GOLANGCI_LINT:=golangci-lint.exe
# Workaround for https://github.com/golang/go/issues/40795
BUILDMODE:=-buildmode=exe
FINDBIN := where
BINARY_EXT_LOCAL:=.exe
GOLANGCI_LINT:=golangci-lint.exe
# Workaround for https://github.com/golang/go/issues/40795
BUILDMODE:=-buildmode=exe
else
BINARY_EXT_LOCAL:=
GOLANGCI_LINT:=golangci-lint
FINDBIN := which
BINARY_EXT_LOCAL:=
GOLANGCI_LINT:=golangci-lint
endif
# Get linter versions
LINTER_BINARY := $(shell $(FINDBIN) $(GOLANGCI_LINT))
export GH_LINT_VERSION := $(shell grep 'GOLANGCI_LINT_VER:' .github/workflows/components-contrib.yml | xargs | cut -d" " -f2)
ifeq (,$(LINTER_BINARY))
INSTALLED_LINT_VERSION := "v0.0.0"
else
INSTALLED_LINT_VERSION=v$(shell $(LINTER_BINARY) version | grep -Eo '([0-9]+\.)+[0-9]+' - || "")
endif
################################################################################
# Linter targets #
################################################################################
.PHONY: verify-linter-installed
verify-linter-installed:
@if [ -z $(LINTER_BINARY) ]; then \
echo "[!] golangci-lint not installed"; \
echo "[!] You can install it from https://golangci-lint.run/usage/install/"; \
echo "[!] or by running"; \
echo "[!] curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin $(GH_LINT_VERSION)"; \
exit 1; \
fi;
.PHONY: verify-linter-version
verify-linter-version:
@if [ "$(GH_LINT_VERSION)" != "$(INSTALLED_LINT_VERSION)" ]; then \
echo "[!] Your locally installed version of golangci-lint is different from the pipeline"; \
echo "[!] This will likely cause linting issues for you locally"; \
echo "[!] Yours: $(INSTALLED_LINT_VERSION)"; \
echo "[!] Theirs: $(GH_LINT_VERSION)"; \
echo "[!] Upgrade: curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin $(GH_LINT_VERSION)"; \
sleep 3; \
fi;
################################################################################
# Target: test #
################################################################################
@ -67,7 +103,7 @@ test:
# Target: lint #
################################################################################
.PHONY: lint
lint:
lint: verify-linter-installed verify-linter-version
# Due to https://github.com/golangci/golangci-lint/issues/580, we need to add --fix for windows
$(GOLANGCI_LINT) run --timeout=20m
@ -79,7 +115,7 @@ MODFILES := $(shell find . -name go.mod)
define modtidy-target
.PHONY: modtidy-$(1)
modtidy-$(1):
cd $(shell dirname $(1)); go mod tidy -compat=1.18; cd -
cd $(shell dirname $(1)); go mod tidy -compat=1.19; cd -
endef
# Generate modtidy target action for each go.mod file

View File

@ -53,12 +53,12 @@ type outgoingWebhook struct {
handler bindings.Handler
}
var webhooks = struct { //nolint: gochecknoglobals
var webhooks = struct { //nolint:gochecknoglobals
sync.RWMutex
m map[string]*outgoingWebhook
}{m: make(map[string]*outgoingWebhook)}
func NewDingTalkWebhook(l logger.Logger) *DingTalkWebhook {
func NewDingTalkWebhook(l logger.Logger) bindings.InputOutputBinding {
// See guidance on proper HTTP client settings here:
// https://medium.com/@nate510/don-t-use-go-s-default-http-client-4804cb19f779
dialer := &net.Dialer{ //nolint:exhaustivestruct
@ -162,7 +162,7 @@ func (t *DingTalkWebhook) sendMessage(ctx context.Context, req *bindings.InvokeR
ctx, cancel := context.WithTimeout(ctx, defaultHTTPClientTimeout)
defer cancel()
httpReq, err := http.NewRequestWithContext(ctx, "POST", postURL, bytes.NewReader(msg))
httpReq, err := http.NewRequestWithContext(ctx, http.MethodPost, postURL, bytes.NewReader(msg))
if err != nil {
return fmt.Errorf("dingtalk webhook error: new request failed. %w", err)
}

View File

@ -15,7 +15,7 @@ package webhook
import (
"context"
"io/ioutil"
"io"
"net/http"
"net/http/httptest"
"sync/atomic"
@ -26,6 +26,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/dapr/components-contrib/bindings"
"github.com/dapr/components-contrib/metadata"
"github.com/dapr/kit/logger"
)
@ -43,17 +44,17 @@ func TestPublishMsg(t *testing.T) { //nolint:paralleltest
t.Errorf("Expected request to '/test', got '%s'", r.URL.EscapedPath())
}
body, err := ioutil.ReadAll(r.Body)
body, err := io.ReadAll(r.Body)
require.Nil(t, err)
assert.Equal(t, msg, string(body))
}))
defer ts.Close()
m := bindings.Metadata{Name: "test", Properties: map[string]string{
m := bindings.Metadata{Base: metadata.Base{Name: "test", Properties: map[string]string{
"url": ts.URL + "/test",
"secret": "",
"id": "x",
}}
}}}
d := NewDingTalkWebhook(logger.NewLogger("test"))
err := d.Init(m)
@ -67,14 +68,14 @@ func TestPublishMsg(t *testing.T) { //nolint:paralleltest
func TestBindingReadAndInvoke(t *testing.T) { //nolint:paralleltest
msg := "{\"type\": \"text\",\"text\": {\"content\": \"hello\"}}"
m := bindings.Metadata{
m := bindings.Metadata{Base: metadata.Base{
Name: "test",
Properties: map[string]string{
"url": "/test",
"secret": "",
"id": "x",
},
}
}}
d := NewDingTalkWebhook(logger.NewLogger("test"))
err := d.Init(m)

View File

@ -25,12 +25,12 @@ import (
)
const (
metadataRpcGroup = "group"
metadataRpcVersion = "version"
metadataRpcInterface = "interfaceName"
metadataRpcMethodName = "methodName"
metadataRpcProviderHostname = "providerHostname"
metadataRpcProviderPort = "providerPort"
metadataRPCGroup = "group"
metadataRPCVersion = "version"
metadataRPCInterface = "interfaceName"
metadataRPCMethodName = "methodName"
metadataRPCProviderHostname = "providerHostname"
metadataRPCProviderPort = "providerPort"
)
type dubboContext struct {
@ -47,12 +47,12 @@ type dubboContext struct {
func newDubboContext(metadata map[string]string) *dubboContext {
dubboMetadata := &dubboContext{}
dubboMetadata.group = metadata[metadataRpcGroup]
dubboMetadata.interfaceName = metadata[metadataRpcInterface]
dubboMetadata.version = metadata[metadataRpcVersion]
dubboMetadata.method = metadata[metadataRpcMethodName]
dubboMetadata.hostname = metadata[metadataRpcProviderHostname]
dubboMetadata.port = metadata[metadataRpcProviderPort]
dubboMetadata.group = metadata[metadataRPCGroup]
dubboMetadata.interfaceName = metadata[metadataRPCInterface]
dubboMetadata.version = metadata[metadataRPCVersion]
dubboMetadata.method = metadata[metadataRPCMethodName]
dubboMetadata.hostname = metadata[metadataRPCProviderHostname]
dubboMetadata.port = metadata[metadataRPCProviderPort]
dubboMetadata.inited = false
return dubboMetadata
}

View File

@ -37,7 +37,7 @@ type DubboOutputBinding struct {
var dubboBinding *DubboOutputBinding
func NewDubboOutput(logger logger.Logger) *DubboOutputBinding {
func NewDubboOutput(logger logger.Logger) bindings.OutputBinding {
if dubboBinding == nil {
dubboBinding = &DubboOutputBinding{
ctxCache: make(map[string]*dubboContext),

View File

@ -78,10 +78,10 @@ func TestInvoke(t *testing.T) {
// 3. invoke dapr dubbo output binding, get rsp bytes
rsp, err := output.Invoke(context.Background(), &bindings.InvokeRequest{
Metadata: map[string]string{
metadataRpcProviderPort: dubboPort,
metadataRpcProviderHostname: localhostIP,
metadataRpcMethodName: methodName,
metadataRpcInterface: providerInterfaceName,
metadataRPCProviderPort: dubboPort,
metadataRPCProviderHostname: localhostIP,
metadataRPCMethodName: methodName,
metadataRPCInterface: providerInterfaceName,
},
Data: reqData,
Operation: bindings.GetOperation,

View File

@ -11,6 +11,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
//nolint:nosnakecase
package dubbo
import (

View File

@ -54,12 +54,12 @@ type Nacos struct {
watches []configParam
servers []constant.ServerConfig
logger logger.Logger
configClient config_client.IConfigClient
configClient config_client.IConfigClient //nolint:nosnakecase
readHandler func(ctx context.Context, response *bindings.ReadResponse) ([]byte, error)
}
// NewNacos returns a new Nacos instance.
func NewNacos(logger logger.Logger) *Nacos {
func NewNacos(logger logger.Logger) bindings.OutputBinding {
return &Nacos{
logger: logger,
watchesLock: sync.Mutex{},

View File

@ -25,15 +25,16 @@ import (
"github.com/stretchr/testify/require"
"github.com/dapr/components-contrib/bindings"
"github.com/dapr/components-contrib/metadata"
"github.com/dapr/kit/logger"
)
func TestInputBindingRead(t *testing.T) { //nolint:paralleltest
m := bindings.Metadata{Name: "test", Properties: nil}
m := bindings.Metadata{Base: metadata.Base{Name: "test", Properties: nil}}
var err error
m.Properties, err = getNacosLocalCacheMetadata()
require.NoError(t, err)
n := NewNacos(logger.NewLogger("test"))
n := NewNacos(logger.NewLogger("test")).(*Nacos)
err = n.Init(m)
require.NoError(t, err)
var count int32
@ -68,7 +69,7 @@ func getNacosLocalCacheMetadata() (map[string]string, error) {
}
cfgFile := path.Join(tmpDir, fmt.Sprintf("%s@@%s@@", dataID, group))
file, err := os.OpenFile(cfgFile, os.O_RDWR|os.O_CREATE, os.ModePerm)
file, err := os.OpenFile(cfgFile, os.O_RDWR|os.O_CREATE, os.ModePerm) //nolint:nosnakecase
if err != nil || file == nil {
return nil, fmt.Errorf("open %s failed. %w", cfgFile, err)
}

View File

@ -40,7 +40,7 @@ type ossMetadata struct {
}
// NewAliCloudOSS returns a new instance.
func NewAliCloudOSS(logger logger.Logger) *AliCloudOSS {
func NewAliCloudOSS(logger logger.Logger) bindings.OutputBinding {
return &AliCloudOSS{logger: logger}
}

View File

@ -90,11 +90,12 @@ func (a *AliCloudRocketMQ) Read(ctx context.Context, handler bindings.Handler) e
if topicStr == "" {
continue
}
mqType, mqExpression, topic, err := parseTopic(topicStr)
if err != nil {
var mqType, mqExpression, topic string
if mqType, mqExpression, topic, err = parseTopic(topicStr); err != nil {
return err
}
if err := consumer.Subscribe(
if err = consumer.Subscribe(
topic,
mqc.MessageSelector{
Type: mqc.ExpressionType(mqType),
@ -106,7 +107,7 @@ func (a *AliCloudRocketMQ) Read(ctx context.Context, handler bindings.Handler) e
}
}
if err := consumer.Start(); err != nil {
if err = consumer.Start(); err != nil {
return fmt.Errorf("binding-rocketmq: consumer start failed. %w", err)
}
@ -121,7 +122,7 @@ func (a *AliCloudRocketMQ) Read(ctx context.Context, handler bindings.Handler) e
innerErr := consumer.Shutdown()
if innerErr != nil && !errors.Is(innerErr, context.Canceled) {
a.logger.Warnf("binding-rocketmq: error while shutting down consumer: %v")
a.logger.Warnf("binding-rocketmq: error while shutting down consumer: %v", innerErr)
}
}()

View File

@ -0,0 +1,127 @@
package sls
import (
"context"
"encoding/json"
"fmt"
"time"
sls "github.com/aliyun/aliyun-log-go-sdk"
"github.com/aliyun/aliyun-log-go-sdk/producer"
"github.com/dapr/components-contrib/bindings"
"github.com/dapr/kit/config"
"github.com/dapr/kit/logger"
)
// AliCloudSlsLogstorage is an output binding that writes log entries to
// Alibaba Cloud SLS (Simple Log Service) log storage via the SLS producer.
type AliCloudSlsLogstorage struct {
	logger   logger.Logger
	producer *producer.Producer
	metadata SlsLogstorageMetadata
}

// SlsLogstorageMetadata holds the component configuration properties
// declared in the component yaml file.
type SlsLogstorageMetadata struct {
	Endpoint        string `json:"endpoint"`
	AccessKeyID     string `json:"accessKeyID"`
	AccessKeySecret string `json:"accessKeySecret"`
}

// Callback receives asynchronous send results from the SLS producer.
type Callback struct {
	s *AliCloudSlsLogstorage
}
// Init parses the component metadata and starts the SLS log producer.
func (s *AliCloudSlsLogstorage) Init(metadata bindings.Metadata) error {
	meta, err := s.parseMeta(metadata)
	if err != nil {
		return err
	}
	s.metadata = *meta

	// Feed the endpoint/credentials from the component yaml file into the
	// default producer configuration.
	cfg := producer.GetDefaultProducerConfig()
	cfg.Endpoint = meta.Endpoint
	cfg.AccessKeyID = meta.AccessKeyID
	cfg.AccessKeySecret = meta.AccessKeySecret

	s.producer = producer.InitProducer(cfg)
	s.producer.Start()
	return nil
}
// NewAliCloudSlsLogstorage returns a new Alibaba Cloud SLS log-storage
// output binding.
func NewAliCloudSlsLogstorage(logger logger.Logger) bindings.OutputBinding {
	logger.Debug("initialized Sls log storage binding component")
	return &AliCloudSlsLogstorage{logger: logger}
}
// Invoke sends a single log entry to SLS. The request metadata must carry the
// target "project", "logstore", "topic" and "source"; the request data is a
// JSON object of string key/value pairs forming the log content.
func (s *AliCloudSlsLogstorage) Invoke(ctx context.Context, req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
	// verify the metadata properties required to route the log entry
	for _, prop := range []string{"project", "logstore", "topic", "source"} {
		if req.Metadata[prop] == "" {
			return nil, fmt.Errorf("SLS binding error: %s property not supplied", prop)
		}
	}

	log, err := s.parseLog(req)
	if err != nil {
		s.logger.Info(err)
		return nil, err
	}

	s.logger.Debug(log)
	// Wire the binding into the callback so Callback.Fail can log through
	// s.logger. A zero-value Callback (as previously constructed) leaves s nil
	// and makes Fail panic with a nil-pointer dereference on any send failure.
	callBack := &Callback{s: s}
	err = s.producer.SendLogWithCallBack(req.Metadata["project"], req.Metadata["logstore"], req.Metadata["topic"], req.Metadata["source"], log, callBack)
	if err != nil {
		s.logger.Info(err)
		return nil, err
	}
	return nil, nil
}
// parseLog decodes the request payload (a flat JSON object of string pairs)
// into an SLS log entry stamped with the current Unix time.
func (s *AliCloudSlsLogstorage) parseLog(req *bindings.InvokeRequest) (*sls.Log, error) {
	logInfo := map[string]string{}
	if err := json.Unmarshal(req.Data, &logInfo); err != nil {
		return nil, err
	}
	return producer.GenerateLog(uint32(time.Now().Unix()), logInfo), nil
}
// parseMeta decodes the component metadata properties into an
// SlsLogstorageMetadata value.
func (s *AliCloudSlsLogstorage) parseMeta(metadata bindings.Metadata) (*SlsLogstorageMetadata, error) {
	m := SlsLogstorageMetadata{}
	if err := config.Decode(metadata.Properties, &m); err != nil {
		return nil, err
	}
	return &m, nil
}
// Operations lists the operations this binding supports (create only).
func (s *AliCloudSlsLogstorage) Operations() []bindings.OperationKind {
	supported := []bindings.OperationKind{bindings.CreateOperation}
	return supported
}
// Success is invoked by the SLS producer when a log batch is delivered;
// nothing needs to happen on success.
func (callback *Callback) Success(result *producer.Result) {
}

// Fail is invoked by the SLS producer when delivering a log batch fails; it
// logs the error code (at debug level) and the error message.
// NOTE(review): callback.s must be non-nil here or the logger calls panic —
// confirm every constructed Callback has s populated.
func (callback *Callback) Fail(result *producer.Result) {
	msg := "unknown reason"
	if result.GetErrorMessage() != "" {
		msg = result.GetErrorMessage()
	}
	if result.GetErrorCode() != "" {
		callback.s.logger.Debug("Failed error code:", result.GetErrorCode())
	}
	callback.s.logger.Info("Log storage failed:", msg)
}

View File

@ -0,0 +1,59 @@
package sls
import (
"encoding/json"
"testing"
"github.com/stretchr/testify/assert"
"github.com/dapr/components-contrib/bindings"
)
// TestSlsLogstorageMetadata verifies that properties from the component yaml
// file are decoded into SlsLogstorageMetadata.
func TestSlsLogstorageMetadata(t *testing.T) {
	m := bindings.Metadata{}
	m.Properties = map[string]string{
		"Endpoint":        "ENDPOINT",
		"AccessKeyID":     "ACCESSKEYID",
		"AccessKeySecret": "ACCESSKEYSECRET",
	}

	binding := AliCloudSlsLogstorage{}
	meta, err := binding.parseMeta(m)

	assert.Nil(t, err)
	assert.Equal(t, "ENDPOINT", meta.Endpoint)
	assert.Equal(t, "ACCESSKEYID", meta.AccessKeyID)
	assert.Equal(t, "ACCESSKEYSECRET", meta.AccessKeySecret)
}
// TestParseLog verifies that parseLog converts a JSON request payload
// into an SLS log whose key/value pairs match the original document.
func TestParseLog(t *testing.T) {
	aliCloudSlsLogstorage := AliCloudSlsLogstorage{}
	d, err := json.Marshal(map[string]string{
		"log1": "LOG1",
		"log2": "LOG2",
	})
	// Previously both errors below were discarded; a marshalling or
	// parsing failure would have nil-panicked in the loop instead of
	// failing the test with a useful message.
	assert.Nil(t, err)
	log := bindings.InvokeRequest{
		Data: d,
		Metadata: map[string]string{
			"project":  "PROJECT",
			"logstore": "LOGSTORE",
			"topic":    "TOPIC",
			"source":   "SOURCE",
		},
	}
	parseLog, err := aliCloudSlsLogstorage.parseLog(&log)
	assert.Nil(t, err)
	for _, v := range parseLog.Contents {
		switch *v.Key {
		case "log1":
			assert.Equal(t, "LOG1", *v.Value)
		case "log2":
			assert.Equal(t, "LOG2", *v.Value)
		}
	}
}

View File

@ -51,7 +51,7 @@ type AliCloudTableStore struct {
metadata tablestoreMetadata
}
func NewAliCloudTableStore(log logger.Logger) *AliCloudTableStore {
func NewAliCloudTableStore(log logger.Logger) bindings.OutputBinding {
return &AliCloudTableStore{
logger: log,
client: nil,
@ -262,11 +262,11 @@ func (s *AliCloudTableStore) create(req *bindings.InvokeRequest, resp *bindings.
TableName: s.getTableName(req.Metadata),
PrimaryKey: &tablestore.PrimaryKey{PrimaryKeys: pks},
Columns: columns,
ReturnType: tablestore.ReturnType_RT_NONE,
ReturnType: tablestore.ReturnType_RT_NONE, //nolint:nosnakecase
TransactionId: nil,
}
change.SetCondition(tablestore.RowExistenceExpectation_IGNORE)
change.SetCondition(tablestore.RowExistenceExpectation_IGNORE) //nolint:nosnakecase
putRequest := &tablestore.PutRowRequest{
PutRowChange: &change,
@ -301,7 +301,7 @@ func (s *AliCloudTableStore) delete(req *bindings.InvokeRequest, resp *bindings.
TableName: s.getTableName(req.Metadata),
PrimaryKey: &tablestore.PrimaryKey{PrimaryKeys: pks},
}
change.SetCondition(tablestore.RowExistenceExpectation_IGNORE)
change.SetCondition(tablestore.RowExistenceExpectation_IGNORE) //nolint:nosnakecase
deleteReq := &tablestore.DeleteRowRequest{DeleteRowChange: change}
_, err = s.client.DeleteRow(deleteReq)

View File

@ -22,6 +22,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/dapr/components-contrib/bindings"
"github.com/dapr/components-contrib/metadata"
"github.com/dapr/kit/logger"
)
@ -47,9 +48,9 @@ func TestDataEncodeAndDecode(t *testing.T) {
aliCloudTableStore := NewAliCloudTableStore(logger.NewLogger("test"))
metadata := bindings.Metadata{
metadata := bindings.Metadata{Base: metadata.Base{
Properties: getTestProperties(),
}
}}
aliCloudTableStore.Init(metadata)
// test create

View File

@ -65,7 +65,7 @@ type APNS struct {
}
// NewAPNS will create a new APNS output binding.
func NewAPNS(logger logger.Logger) *APNS {
func NewAPNS(logger logger.Logger) bindings.OutputBinding {
return &APNS{
logger: logger,
client: &http.Client{},

View File

@ -16,7 +16,7 @@ package apns
import (
"bytes"
"context"
"io/ioutil"
"io"
"net/http"
"strings"
"testing"
@ -25,6 +25,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/dapr/components-contrib/bindings"
"github.com/dapr/components-contrib/metadata"
"github.com/dapr/kit/logger"
)
@ -41,136 +42,136 @@ func TestInit(t *testing.T) {
testLogger := logger.NewLogger("test")
t.Run("uses the development service", func(t *testing.T) {
metadata := bindings.Metadata{
metadata := bindings.Metadata{Base: metadata.Base{
Properties: map[string]string{
developmentKey: "true",
keyIDKey: testKeyID,
teamIDKey: testTeamID,
privateKeyKey: testPrivateKey,
},
}
binding := NewAPNS(testLogger)
}}
binding := NewAPNS(testLogger).(*APNS)
err := binding.Init(metadata)
assert.Nil(t, err)
assert.Equal(t, developmentPrefix, binding.urlPrefix)
})
t.Run("uses the production service", func(t *testing.T) {
metadata := bindings.Metadata{
metadata := bindings.Metadata{Base: metadata.Base{
Properties: map[string]string{
developmentKey: "false",
keyIDKey: testKeyID,
teamIDKey: testTeamID,
privateKeyKey: testPrivateKey,
},
}
binding := NewAPNS(testLogger)
}}
binding := NewAPNS(testLogger).(*APNS)
err := binding.Init(metadata)
assert.Nil(t, err)
assert.Equal(t, productionPrefix, binding.urlPrefix)
})
t.Run("defaults to the production service", func(t *testing.T) {
metadata := bindings.Metadata{
metadata := bindings.Metadata{Base: metadata.Base{
Properties: map[string]string{
keyIDKey: testKeyID,
teamIDKey: testTeamID,
privateKeyKey: testPrivateKey,
},
}
binding := NewAPNS(testLogger)
}}
binding := NewAPNS(testLogger).(*APNS)
err := binding.Init(metadata)
assert.Nil(t, err)
assert.Equal(t, productionPrefix, binding.urlPrefix)
})
t.Run("invalid development value", func(t *testing.T) {
metadata := bindings.Metadata{
metadata := bindings.Metadata{Base: metadata.Base{
Properties: map[string]string{
developmentKey: "True",
keyIDKey: testKeyID,
teamIDKey: testTeamID,
privateKeyKey: testPrivateKey,
},
}
binding := NewAPNS(testLogger)
}}
binding := NewAPNS(testLogger).(*APNS)
err := binding.Init(metadata)
assert.Error(t, err, "invalid value for development parameter: True")
})
t.Run("the key ID is required", func(t *testing.T) {
metadata := bindings.Metadata{
metadata := bindings.Metadata{Base: metadata.Base{
Properties: map[string]string{
teamIDKey: testTeamID,
privateKeyKey: testPrivateKey,
},
}
binding := NewAPNS(testLogger)
}}
binding := NewAPNS(testLogger).(*APNS)
err := binding.Init(metadata)
assert.Error(t, err, "the key-id parameter is required")
})
t.Run("valid key ID", func(t *testing.T) {
metadata := bindings.Metadata{
metadata := bindings.Metadata{Base: metadata.Base{
Properties: map[string]string{
keyIDKey: testKeyID,
teamIDKey: testTeamID,
privateKeyKey: testPrivateKey,
},
}
binding := NewAPNS(testLogger)
}}
binding := NewAPNS(testLogger).(*APNS)
err := binding.Init(metadata)
assert.Nil(t, err)
assert.Equal(t, testKeyID, binding.authorizationBuilder.keyID)
})
t.Run("the team ID is required", func(t *testing.T) {
metadata := bindings.Metadata{
metadata := bindings.Metadata{Base: metadata.Base{
Properties: map[string]string{
keyIDKey: testKeyID,
privateKeyKey: testPrivateKey,
},
}
binding := NewAPNS(testLogger)
}}
binding := NewAPNS(testLogger).(*APNS)
err := binding.Init(metadata)
assert.Error(t, err, "the team-id parameter is required")
})
t.Run("valid team ID", func(t *testing.T) {
metadata := bindings.Metadata{
metadata := bindings.Metadata{Base: metadata.Base{
Properties: map[string]string{
keyIDKey: testKeyID,
teamIDKey: testTeamID,
privateKeyKey: testPrivateKey,
},
}
binding := NewAPNS(testLogger)
}}
binding := NewAPNS(testLogger).(*APNS)
err := binding.Init(metadata)
assert.Nil(t, err)
assert.Equal(t, testTeamID, binding.authorizationBuilder.teamID)
})
t.Run("the private key is required", func(t *testing.T) {
metadata := bindings.Metadata{
metadata := bindings.Metadata{Base: metadata.Base{
Properties: map[string]string{
keyIDKey: testKeyID,
teamIDKey: testTeamID,
},
}
binding := NewAPNS(testLogger)
}}
binding := NewAPNS(testLogger).(*APNS)
err := binding.Init(metadata)
assert.Error(t, err, "the private-key parameter is required")
})
t.Run("valid private key", func(t *testing.T) {
metadata := bindings.Metadata{
metadata := bindings.Metadata{Base: metadata.Base{
Properties: map[string]string{
keyIDKey: testKeyID,
teamIDKey: testTeamID,
privateKeyKey: testPrivateKey,
},
}
binding := NewAPNS(testLogger)
}}
binding := NewAPNS(testLogger).(*APNS)
err := binding.Init(metadata)
assert.Nil(t, err)
assert.NotNil(t, binding.authorizationBuilder.privateKey)
@ -179,7 +180,7 @@ func TestInit(t *testing.T) {
func TestOperations(t *testing.T) {
testLogger := logger.NewLogger("test")
testBinding := NewAPNS(testLogger)
testBinding := NewAPNS(testLogger).(*APNS)
operations := testBinding.Operations()
assert.Equal(t, 1, len(operations))
assert.Equal(t, bindings.CreateOperation, operations[0])
@ -316,7 +317,7 @@ func TestInvoke(t *testing.T) {
return &http.Response{
StatusCode: http.StatusBadRequest,
Body: ioutil.NopCloser(strings.NewReader(body)),
Body: io.NopCloser(strings.NewReader(body)),
}
})
_, err := testBinding.Invoke(context.TODO(), successRequest)
@ -325,15 +326,15 @@ func TestInvoke(t *testing.T) {
}
func makeTestBinding(t *testing.T, log logger.Logger) *APNS {
testBinding := NewAPNS(log)
bindingMetadata := bindings.Metadata{
testBinding := NewAPNS(log).(*APNS)
bindingMetadata := bindings.Metadata{Base: metadata.Base{
Properties: map[string]string{
developmentKey: "true",
keyIDKey: testKeyID,
teamIDKey: testTeamID,
privateKeyKey: testPrivateKey,
},
}
}}
err := testBinding.Init(bindingMetadata)
assert.Nil(t, err)

View File

@ -65,7 +65,8 @@ func (a *authorizationBuilder) generateAuthorizationHeader() (string, error) {
a.logger.Debug("Authorization token expired; generating new token")
now := time.Now()
claims := jwt.StandardClaims{
// TODO: Use jwt.RegisteredClaims instead of jwt.StandardClaims.
claims := jwt.StandardClaims{ //nolint:staticcheck
IssuedAt: time.Now().Unix(),
Issuer: a.teamID,
}

View File

@ -15,7 +15,7 @@ limitations under the License.
// send push notifications to Apple devices and Mac computers using Apple's
// Push Notification Service (APNS).
//
// Configuring the Binding
// # Configuring the Binding
//
// To use the APNS output binding, you will need to create the binding
// configuration and add it to your components directory. The binding
@ -31,37 +31,37 @@ limitations under the License.
//
// A sample configuration file for the APNS binding is shown below:
//
// apiVersion: dapr.io/v1alpha1
// kind: Component
// metadata:
// name: apns
// namespace: default
// spec:
// type: bindings.apns
// metadata:
// - name: development
// value: false
// - name: key-id
// value: PUT-KEY-ID-HERE
// - name: team-id
// value: PUT-APPLE-TEAM-ID-HERE
// - name: private-key
// secretKeyRef:
// name: apns-secrets
// key: private-key
// apiVersion: dapr.io/v1alpha1
// kind: Component
// metadata:
// name: apns
// namespace: default
// spec:
// type: bindings.apns
// metadata:
// - name: development
// value: false
// - name: key-id
// value: PUT-KEY-ID-HERE
// - name: team-id
// value: PUT-APPLE-TEAM-ID-HERE
// - name: private-key
// secretKeyRef:
// name: apns-secrets
// key: private-key
//
// If using Kubernetes, a sample secret configuration may look like this:
//
// apiVersion: v1
// kind: Secret
// metadata:
// name: apns-secrets
// namespace: default
// stringData:
// private-key: |
// -----BEGIN PRIVATE KEY-----
// KEY-DATA-GOES-HERE
// -----END PRIVATE KEY-----
// apiVersion: v1
// kind: Secret
// metadata:
// name: apns-secrets
// namespace: default
// stringData:
// private-key: |
// -----BEGIN PRIVATE KEY-----
// KEY-DATA-GOES-HERE
// -----END PRIVATE KEY-----
//
// The development parameter can be either "true" or "false". The development
// parameter controls which APNS service is used. If development is set to
@ -70,7 +70,7 @@ limitations under the License.
// be used to send push notifications. If not specified, the production service
// will be chosen by default.
//
// Push Notification Format
// # Push Notification Format
//
// The APNS binding is a pass-through wrapper over the Apple Push Notification
// Service. The APNS binding will send the request directly to the APNS service
@ -81,14 +81,14 @@ limitations under the License.
// Requests sent to the APNS binding should be a JSON object. A simple push
// notification appears below:
//
// {
// "aps": {
// "alert": {
// "title": "New Updates!",
// "body": "New updates are now available for your review."
// }
// }
// }
// {
// "aps": {
// "alert": {
// "title": "New Updates!",
// "body": "New updates are now available for your review."
// }
// }
// }
//
// The aps child object contains the push notification details that are used
// by the Apple Push Notification Service and target devices to route and show
@ -124,27 +124,27 @@ limitations under the License.
// notifications from a chat room may have the same identifier causing them
// to show up together in the device's notifications list.
//
// Sending a Push Notification Using the APNS Binding
// # Sending a Push Notification Using the APNS Binding
//
// A simple request to the APNS binding looks like this:
//
// {
// "data": {
// "aps": {
// "alert": {
// "title": "New Updates!",
// "body": "New updates are available for your review."
// }
// }
// },
// "metadata": {
// "device-token": "PUT-DEVICE-TOKEN-HERE",
// "apns-push-type": "alert",
// "apns-priority": "10",
// "apns-topic": "com.example.helloworld"
// },
// "operation": "create"
// }
// {
// "data": {
// "aps": {
// "alert": {
// "title": "New Updates!",
// "body": "New updates are available for your review."
// }
// }
// },
// "metadata": {
// "device-token": "PUT-DEVICE-TOKEN-HERE",
// "apns-push-type": "alert",
// "apns-priority": "10",
// "apns-topic": "com.example.helloworld"
// },
// "operation": "create"
// }
//
// The device-token metadata field is required and should contain the token
// for the device that will receive the push notification. Only one device
@ -158,9 +158,9 @@ limitations under the License.
// the apns-id metadata value, then the Apple Push Notification Serivce will
// generate a unique ID and will return it.
//
// {
// "messageID": "12345678-1234-1234-1234-1234567890AB"
// }
// {
// "messageID": "12345678-1234-1234-1234-1234567890AB"
// }
//
// If the push notification could not be sent due to an authentication error
// or payload error, the error code returned by Apple will be returned. For

View File

@ -23,7 +23,7 @@ import (
"github.com/mitchellh/mapstructure"
"github.com/dapr/components-contrib/bindings"
aws_auth "github.com/dapr/components-contrib/internal/authentication/aws"
awsAuth "github.com/dapr/components-contrib/internal/authentication/aws"
"github.com/dapr/kit/logger"
)
@ -44,7 +44,7 @@ type dynamoDBMetadata struct {
}
// NewDynamoDB returns a new DynamoDB instance.
func NewDynamoDB(logger logger.Logger) *DynamoDB {
func NewDynamoDB(logger logger.Logger) bindings.OutputBinding {
return &DynamoDB{logger: logger}
}
@ -104,7 +104,7 @@ func (d *DynamoDB) getDynamoDBMetadata(spec bindings.Metadata) (*dynamoDBMetadat
}
func (d *DynamoDB) getClient(metadata *dynamoDBMetadata) (*dynamodb.DynamoDB, error) {
sess, err := aws_auth.GetClient(metadata.AccessKey, metadata.SecretKey, metadata.SessionToken, metadata.Region, metadata.Endpoint)
sess, err := awsAuth.GetClient(metadata.AccessKey, metadata.SecretKey, metadata.SessionToken, metadata.Region, metadata.Endpoint)
if err != nil {
return nil, err
}

View File

@ -30,7 +30,7 @@ import (
"github.com/vmware/vmware-go-kcl/clientlibrary/worker"
"github.com/dapr/components-contrib/bindings"
aws_auth "github.com/dapr/components-contrib/internal/authentication/aws"
awsAuth "github.com/dapr/components-contrib/internal/authentication/aws"
"github.com/dapr/kit/logger"
)
@ -82,7 +82,7 @@ type recordProcessor struct {
}
// NewAWSKinesis returns a new AWS Kinesis instance.
func NewAWSKinesis(logger logger.Logger) *AWSKinesis {
func NewAWSKinesis(logger logger.Logger) bindings.InputOutputBinding {
return &AWSKinesis{logger: logger}
}
@ -313,7 +313,7 @@ func (a *AWSKinesis) waitUntilConsumerExists(ctx aws.Context, input *kinesis.Des
}
func (a *AWSKinesis) getClient(metadata *kinesisMetadata) (*kinesis.Kinesis, error) {
sess, err := aws_auth.GetClient(metadata.AccessKey, metadata.SecretKey, metadata.SessionToken, metadata.Region, metadata.Endpoint)
sess, err := awsAuth.GetClient(metadata.AccessKey, metadata.SecretKey, metadata.SessionToken, metadata.Region, metadata.Endpoint)
if err != nil {
return nil, err
}

View File

@ -32,7 +32,7 @@ import (
"github.com/google/uuid"
"github.com/dapr/components-contrib/bindings"
aws_auth "github.com/dapr/components-contrib/internal/authentication/aws"
awsAuth "github.com/dapr/components-contrib/internal/authentication/aws"
"github.com/dapr/components-contrib/internal/utils"
"github.com/dapr/kit/logger"
)
@ -84,7 +84,7 @@ type listPayload struct {
}
// NewAWSS3 returns a new AWSS3 instance.
func NewAWSS3(logger logger.Logger) *AWSS3 {
func NewAWSS3(logger logger.Logger) bindings.OutputBinding {
return &AWSS3{logger: logger}
}
@ -313,7 +313,7 @@ func (s *AWSS3) parseMetadata(metadata bindings.Metadata) (*s3Metadata, error) {
}
func (s *AWSS3) getSession(metadata *s3Metadata) (*session.Session, error) {
sess, err := aws_auth.GetClient(metadata.AccessKey, metadata.SecretKey, metadata.SessionToken, metadata.Region, metadata.Endpoint)
sess, err := awsAuth.GetClient(metadata.AccessKey, metadata.SecretKey, metadata.SessionToken, metadata.Region, metadata.Endpoint)
if err != nil {
return nil, err
}

View File

@ -143,7 +143,7 @@ func TestMergeWithRequestMetadata(t *testing.T) {
}
func TestGetOption(t *testing.T) {
s3 := NewAWSS3(logger.NewLogger("s3"))
s3 := NewAWSS3(logger.NewLogger("s3")).(*AWSS3)
s3.metadata = &s3Metadata{}
t.Run("return error if key is missing", func(t *testing.T) {
@ -154,7 +154,7 @@ func TestGetOption(t *testing.T) {
}
func TestDeleteOption(t *testing.T) {
s3 := NewAWSS3(logger.NewLogger("s3"))
s3 := NewAWSS3(logger.NewLogger("s3")).(*AWSS3)
s3.metadata = &s3Metadata{}
t.Run("return error if key is missing", func(t *testing.T) {

View File

@ -23,7 +23,7 @@ import (
"github.com/aws/aws-sdk-go/aws"
aws_auth "github.com/dapr/components-contrib/internal/authentication/aws"
awsAuth "github.com/dapr/components-contrib/internal/authentication/aws"
"github.com/aws/aws-sdk-go/service/ses"
@ -56,7 +56,7 @@ type sesMetadata struct {
}
// NewAWSSES creates a new AWSSES binding instance.
func NewAWSSES(logger logger.Logger) *AWSSES {
func NewAWSSES(logger logger.Logger) bindings.OutputBinding {
return &AWSSES{logger: logger}
}
@ -192,7 +192,7 @@ func (metadata sesMetadata) mergeWithRequestMetadata(req *bindings.InvokeRequest
}
func (a *AWSSES) getClient(metadata *sesMetadata) (*ses.SES, error) {
sess, err := aws_auth.GetClient(metadata.AccessKey, metadata.SecretKey, metadata.SessionToken, metadata.Region, "")
sess, err := awsAuth.GetClient(metadata.AccessKey, metadata.SecretKey, metadata.SessionToken, metadata.Region, "")
if err != nil {
return nil, fmt.Errorf("SES binding error: error creating AWS session %w", err)
}

View File

@ -21,7 +21,7 @@ import (
"github.com/aws/aws-sdk-go/service/sns"
"github.com/dapr/components-contrib/bindings"
aws_auth "github.com/dapr/components-contrib/internal/authentication/aws"
awsAuth "github.com/dapr/components-contrib/internal/authentication/aws"
"github.com/dapr/kit/logger"
)
@ -48,7 +48,7 @@ type dataPayload struct {
}
// NewAWSSNS creates a new AWSSNS binding instance.
func NewAWSSNS(logger logger.Logger) *AWSSNS {
func NewAWSSNS(logger logger.Logger) bindings.OutputBinding {
return &AWSSNS{logger: logger}
}
@ -84,7 +84,7 @@ func (a *AWSSNS) parseMetadata(metadata bindings.Metadata) (*snsMetadata, error)
}
func (a *AWSSNS) getClient(metadata *snsMetadata) (*sns.SNS, error) {
sess, err := aws_auth.GetClient(metadata.AccessKey, metadata.SecretKey, metadata.SessionToken, metadata.Region, metadata.Endpoint)
sess, err := awsAuth.GetClient(metadata.AccessKey, metadata.SecretKey, metadata.SessionToken, metadata.Region, metadata.Endpoint)
if err != nil {
return nil, err
}

View File

@ -22,7 +22,7 @@ import (
"github.com/aws/aws-sdk-go/service/sqs"
"github.com/dapr/components-contrib/bindings"
aws_auth "github.com/dapr/components-contrib/internal/authentication/aws"
awsAuth "github.com/dapr/components-contrib/internal/authentication/aws"
"github.com/dapr/kit/logger"
)
@ -44,7 +44,7 @@ type sqsMetadata struct {
}
// NewAWSSQS returns a new AWS SQS instance.
func NewAWSSQS(logger logger.Logger) *AWSSQS {
func NewAWSSQS(logger logger.Logger) bindings.InputOutputBinding {
return &AWSSQS{logger: logger}
}
@ -149,7 +149,7 @@ func (a *AWSSQS) parseSQSMetadata(metadata bindings.Metadata) (*sqsMetadata, err
}
func (a *AWSSQS) getClient(metadata *sqsMetadata) (*sqs.SQS, error) {
sess, err := aws_auth.GetClient(metadata.AccessKey, metadata.SecretKey, metadata.SessionToken, metadata.Region, metadata.Endpoint)
sess, err := awsAuth.GetClient(metadata.AccessKey, metadata.SecretKey, metadata.SessionToken, metadata.Region, metadata.Endpoint)
if err != nil {
return nil, err
}

View File

@ -105,7 +105,7 @@ type listPayload struct {
}
// NewAzureBlobStorage returns a new Azure Blob Storage instance.
func NewAzureBlobStorage(logger logger.Logger) *AzureBlobStorage {
func NewAzureBlobStorage(logger logger.Logger) bindings.OutputBinding {
return &AzureBlobStorage{logger: logger}
}

View File

@ -26,7 +26,7 @@ import (
func TestParseMetadata(t *testing.T) {
m := bindings.Metadata{}
blobStorage := NewAzureBlobStorage(logger.NewLogger("test"))
blobStorage := NewAzureBlobStorage(logger.NewLogger("test")).(*AzureBlobStorage)
t.Run("parse all metadata", func(t *testing.T) {
m.Properties = map[string]string{
@ -83,7 +83,7 @@ func TestParseMetadata(t *testing.T) {
}
func TestGetOption(t *testing.T) {
blobStorage := NewAzureBlobStorage(logger.NewLogger("test"))
blobStorage := NewAzureBlobStorage(logger.NewLogger("test")).(*AzureBlobStorage)
t.Run("return error if blobName is missing", func(t *testing.T) {
r := bindings.InvokeRequest{}
@ -95,7 +95,7 @@ func TestGetOption(t *testing.T) {
}
func TestDeleteOption(t *testing.T) {
blobStorage := NewAzureBlobStorage(logger.NewLogger("test"))
blobStorage := NewAzureBlobStorage(logger.NewLogger("test")).(*AzureBlobStorage)
t.Run("return error if blobName is missing", func(t *testing.T) {
r := bindings.InvokeRequest{}

View File

@ -20,8 +20,7 @@ import (
"strings"
"time"
"github.com/a8m/documentdb"
backoff "github.com/cenkalti/backoff/v4"
"github.com/Azure/azure-sdk-for-go/sdk/data/azcosmos"
"github.com/dapr/components-contrib/bindings"
"github.com/dapr/components-contrib/internal/authentication/azure"
@ -30,8 +29,7 @@ import (
// CosmosDB allows performing state operations on collections.
type CosmosDB struct {
client *documentdb.DocumentDB
collection string
client *azcosmos.ContainerClient
partitionKey string
logger logger.Logger
@ -45,10 +43,11 @@ type cosmosDBCredentials struct {
PartitionKey string `json:"partitionKey"`
}
const statusTooManyRequests = "429" // RFC 6585, 4
// Value used for timeout durations
const timeoutValue = 30
// NewCosmosDB returns a new CosmosDB instance.
func NewCosmosDB(logger logger.Logger) *CosmosDB {
func NewCosmosDB(logger logger.Logger) bindings.OutputBinding {
return &CosmosDB{logger: logger}
}
@ -62,57 +61,43 @@ func (c *CosmosDB) Init(metadata bindings.Metadata) error {
c.partitionKey = m.PartitionKey
// Create the client; first, try authenticating with a master key, if present
var config *documentdb.Config
var client *azcosmos.Client
if m.MasterKey != "" {
config = documentdb.NewConfig(&documentdb.Key{
Key: m.MasterKey,
})
cred, keyErr := azcosmos.NewKeyCredential(m.MasterKey)
if keyErr != nil {
return keyErr
}
client, err = azcosmos.NewClientWithKey(m.URL, cred, nil)
if err != nil {
return err
}
} else {
// Fallback to using Azure AD
env, errB := azure.NewEnvironmentSettings("cosmosdb", metadata.Properties)
if errB != nil {
return errB
env, errEnv := azure.NewEnvironmentSettings("cosmosdb", metadata.Properties)
if errEnv != nil {
return errEnv
}
spt, errB := env.GetServicePrincipalToken()
if errB != nil {
return errB
token, errToken := env.GetTokenCredential()
if errToken != nil {
return errToken
}
config = documentdb.NewConfigWithServicePrincipal(spt)
}
// disable the identification hydrator (which autogenerates IDs if missing from the request)
// so we aren't forced to use a struct by the upstream SDK
// this allows us to provide the most flexibility in the request document sent to this binding
config.IdentificationHydrator = nil
config.WithAppIdentifier("dapr-" + logger.DaprVersion)
c.client = documentdb.New(m.URL, config)
// Retries initializing the client if a TooManyRequests error is encountered
err = retryOperation(func() (err error) {
collLink := fmt.Sprintf("dbs/%s/colls/%s/", m.Database, m.Collection)
coll, err := c.client.ReadCollection(collLink)
client, err = azcosmos.NewClient(m.URL, token, nil)
if err != nil {
if isTooManyRequestsError(err) {
return err
}
return backoff.Permanent(err)
} else if coll == nil || coll.Self == "" {
return backoff.Permanent(
fmt.Errorf("collection %s in database %s for CosmosDB state store not found. This must be created before Dapr uses it", m.Collection, m.Database),
)
return err
}
}
c.collection = coll.Self
return nil
}, func(err error, d time.Duration) {
c.logger.Warnf("CosmosDB binding initialization failed: %v; retrying in %s", err, d)
}, 5*time.Minute)
// Create a container client
dbContainer, err := client.NewContainer(m.Database, m.Collection)
if err != nil {
return err
}
return nil
c.client = dbContainer
ctx, cancel := context.WithTimeout(context.Background(), timeoutValue*time.Second)
_, err = c.client.Read(ctx, nil)
cancel()
return err
}
func (c *CosmosDB) parseMetadata(metadata bindings.Metadata) (*cosmosDBCredentials, error) {
@ -135,7 +120,7 @@ func (c *CosmosDB) Operations() []bindings.OperationKind {
return []bindings.OperationKind{bindings.CreateOperation}
}
func (c *CosmosDB) Invoke(_ context.Context, req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
func (c *CosmosDB) Invoke(ctx context.Context, req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
switch req.Operation {
case bindings.CreateOperation:
var obj interface{}
@ -144,41 +129,34 @@ func (c *CosmosDB) Invoke(_ context.Context, req *bindings.InvokeRequest) (*bind
return nil, err
}
val, err := c.getPartitionKeyValue(c.partitionKey, obj)
pkString, err := c.getPartitionKeyValue(c.partitionKey, obj)
if err != nil {
return nil, err
}
pk := azcosmos.NewPartitionKeyString(pkString)
err = retryOperation(func() error {
_, innerErr := c.client.CreateDocument(c.collection, obj, documentdb.PartitionKey(val))
if innerErr != nil {
if isTooManyRequestsError(innerErr) {
return innerErr
}
return backoff.Permanent(innerErr)
}
return nil
}, func(err error, d time.Duration) {
c.logger.Warnf("CosmosDB binding Invoke request failed: %v; retrying in %s", err, d)
}, 20*time.Second)
_, err = c.client.CreateItem(ctx, pk, req.Data, nil)
if err != nil {
return nil, err
}
return nil, nil
default:
return nil, fmt.Errorf("operation kind %s not supported", req.Operation)
}
}
func (c *CosmosDB) getPartitionKeyValue(key string, obj interface{}) (interface{}, error) {
val, err := c.lookup(obj.(map[string]interface{}), strings.Split(key, "."))
func (c *CosmosDB) getPartitionKeyValue(key string, obj interface{}) (string, error) {
valI, err := c.lookup(obj.(map[string]interface{}), strings.Split(key, "."))
if err != nil {
return nil, fmt.Errorf("missing partitionKey field %s from request body - %w", c.partitionKey, err)
return "", fmt.Errorf("missing partitionKey field %s from request body - %w", c.partitionKey, err)
}
val, ok := valI.(string)
if !ok {
return "", fmt.Errorf("partition key is not a string")
}
if val == "" {
return nil, fmt.Errorf("partitionKey field %s from request body is empty", c.partitionKey)
return "", fmt.Errorf("partitionKey field %s from request body is empty", c.partitionKey)
}
return val, nil
@ -209,24 +187,3 @@ func (c *CosmosDB) lookup(m map[string]interface{}, ks []string) (val interface{
return c.lookup(m, ks[1:])
}
func retryOperation(operation backoff.Operation, notify backoff.Notify, maxElapsedTime time.Duration) error {
bo := backoff.NewExponentialBackOff()
bo.InitialInterval = 2 * time.Second
bo.MaxElapsedTime = maxElapsedTime
return backoff.RetryNotify(operation, bo, notify)
}
func isTooManyRequestsError(err error) bool {
if err == nil {
return false
}
if requestError, ok := err.(*documentdb.RequestError); ok {
if requestError.Code == statusTooManyRequests {
return true
}
}
return false
}

View File

@ -54,7 +54,7 @@ type cosmosDBGremlinAPICredentials struct {
}
// NewCosmosDBGremlinAPI returns a new CosmosDBGremlinAPI instance.
func NewCosmosDBGremlinAPI(logger logger.Logger) *CosmosDBGremlinAPI {
func NewCosmosDBGremlinAPI(logger logger.Logger) bindings.OutputBinding {
return &CosmosDBGremlinAPI{logger: logger}
}

View File

@ -18,7 +18,7 @@ import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"io"
"time"
"github.com/Azure/azure-sdk-for-go/services/eventgrid/mgmt/2021-12-01/eventgrid"
@ -58,7 +58,7 @@ type azureEventGridMetadata struct {
}
// NewAzureEventGrid returns a new Azure Event Grid instance.
func NewAzureEventGrid(logger logger.Logger) *AzureEventGrid {
func NewAzureEventGrid(logger logger.Logger) bindings.InputOutputBinding {
return &AzureEventGrid{logger: logger}
}
@ -279,7 +279,7 @@ func (a *AzureEventGrid) createSubscription(ctx context.Context) error {
res := result.FutureAPI.Response()
if res.StatusCode != fasthttp.StatusCreated {
bodyBytes, err := ioutil.ReadAll(res.Body)
bodyBytes, err := io.ReadAll(res.Body)
if err != nil {
a.logger.Debugf("Failed reading error body when creating or updating Event Grid subscription: %v", err)

View File

@ -18,6 +18,7 @@ import (
"errors"
"fmt"
"strconv"
"strings"
"time"
"github.com/Azure/azure-amqp-common-go/v3/aad"
@ -145,7 +146,7 @@ func (m azureEventHubsMetadata) partitioned() bool {
}
// NewAzureEventHubs returns a new Azure Event hubs instance.
func NewAzureEventHubs(logger logger.Logger) *AzureEventHubs {
func NewAzureEventHubs(logger logger.Logger) bindings.InputOutputBinding {
return &AzureEventHubs{logger: logger}
}
@ -154,6 +155,28 @@ func validate(connectionString string) error {
return err
}
// getStoragePrefixString builds the blob-name prefix used for this
// binding's checkpoint/lease blobs: "dapr-<hub>-<consumerGroup>-".
func (a *AzureEventHubs) getStoragePrefixString() (string, error) {
	hubName, err := a.validateAndGetHubName()
	if err != nil {
		return "", err
	}
	// The trailing empty element yields the "-" suffix after joining.
	parts := []string{"dapr", hubName, a.metadata.consumerGroup, ""}
	return strings.Join(parts, "-"), nil
}
// validateAndGetHubName returns the configured Event Hub name, falling
// back to the EntityPath embedded in the connection string when the
// metadata does not name the hub explicitly.
func (a *AzureEventHubs) validateAndGetHubName() (string, error) {
	if name := a.metadata.eventHubName; name != "" {
		return name, nil
	}
	parsed, err := conn.ParsedConnectionFromStr(a.metadata.connectionString)
	if err != nil {
		return "", err
	}
	return parsed.HubName, nil
}
// Init performs metadata init.
func (a *AzureEventHubs) Init(metadata bindings.Metadata) error {
m, err := parseMetadata(metadata)
@ -360,7 +383,13 @@ func (a *AzureEventHubs) RegisterPartitionedEventProcessor(ctx context.Context,
// RegisterEventProcessor - receive eventhub messages by eventprocessor
// host by balancing partitions.
func (a *AzureEventHubs) RegisterEventProcessor(ctx context.Context, handler bindings.Handler) error {
leaserCheckpointer, err := storage.NewStorageLeaserCheckpointer(a.storageCredential, a.metadata.storageAccountName, a.metadata.storageContainerName, *a.azureEnvironment)
storagePrefix, err := a.getStoragePrefixString()
if err != nil {
return err
}
leaserPrefixOpt := storage.WithPrefixInBlobPath(storagePrefix)
leaserCheckpointer, err := storage.NewStorageLeaserCheckpointer(a.storageCredential, a.metadata.storageAccountName, a.metadata.storageContainerName, *a.azureEnvironment, leaserPrefixOpt)
if err != nil {
return err
}

View File

@ -17,15 +17,51 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/dapr/components-contrib/bindings"
"github.com/dapr/components-contrib/metadata"
"github.com/dapr/kit/logger"
)
var testLogger = logger.NewLogger("test")
// TestGetStoragePrefixString checks that the checkpoint blob prefix is
// built from the hub name and consumer group when the hub name is
// supplied directly in the component metadata.
func TestGetStoragePrefixString(t *testing.T) {
	props := map[string]string{"storageAccountName": "fake", "storageAccountKey": "fake", "consumerGroup": "default", "storageContainerName": "test", "eventHub": "hubName", "eventHubNamespace": "fake"}
	metadata := bindings.Metadata{Base: metadata.Base{Properties: props}}
	m, err := parseMetadata(metadata)
	require.NoError(t, err)
	aeh := &AzureEventHubs{logger: testLogger, metadata: m}
	// Previously the error was discarded; assert it so a prefix failure
	// is reported instead of comparing against an empty string.
	actual, err := aeh.getStoragePrefixString()
	require.NoError(t, err)
	assert.Equal(t, "dapr-hubName-default-", actual)
}
func TestGetStoragePrefixStringWithHubNameFromConnectionString(t *testing.T) {
connectionString := "Endpoint=sb://fake.servicebus.windows.net/;SharedAccessKeyName=fakeKey;SharedAccessKey=key;EntityPath=hubName"
props := map[string]string{"storageAccountName": "fake", "storageAccountKey": "fake", "consumerGroup": "default", "storageContainerName": "test", "connectionString": connectionString}
metadata := bindings.Metadata{Base: metadata.Base{Properties: props}}
m, err := parseMetadata(metadata)
require.NoError(t, err)
aeh := &AzureEventHubs{logger: testLogger, metadata: m}
actual, _ := aeh.getStoragePrefixString()
assert.Equal(t, "dapr-hubName-default-", actual)
}
func TestParseMetadata(t *testing.T) {
t.Run("test valid configuration", func(t *testing.T) {
props := map[string]string{connectionString: "fake", consumerGroup: "mygroup", storageAccountName: "account", storageAccountKey: "key", storageContainerName: "container"}
bindingsMetadata := bindings.Metadata{Properties: props}
bindingsMetadata := bindings.Metadata{Base: metadata.Base{Properties: props}}
m, err := parseMetadata(bindingsMetadata)
@ -77,7 +113,7 @@ func TestParseMetadata(t *testing.T) {
for _, c := range invalidConfigTestCases {
t.Run(c.name, func(t *testing.T) {
bindingsMetadata := bindings.Metadata{Properties: c.config}
bindingsMetadata := bindings.Metadata{Base: metadata.Base{Properties: c.config}}
_, err := parseMetadata(bindingsMetadata)
assert.Error(t, err)
assert.Equal(t, err.Error(), c.errMsg)

View File

@ -10,7 +10,8 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
"github.com/dapr/components-contrib/bindings"
contrib_metadata "github.com/dapr/components-contrib/metadata"
"github.com/dapr/components-contrib/internal/utils"
contribMetadata "github.com/dapr/components-contrib/metadata"
)
type serviceBusQueuesMetadata struct {
@ -25,6 +26,7 @@ type serviceBusQueuesMetadata struct {
LockRenewalInSec int `json:"lockRenewalInSec"`
MaxConcurrentHandlers int `json:"maxConcurrentHandlers"`
ttl time.Duration
DisableEntityManagement bool `json:"disableEntityManagement"`
}
const (
@ -39,6 +41,7 @@ const (
maxActiveMessages = "maxActiveMessages"
lockRenewalInSec = "lockRenewalInSec"
maxConcurrentHandlers = "maxConcurrentHandlers"
disableEntityManagement = "disableEntityManagement"
// Default time to live for queues, which is 14 days. The same way Azure Portal does.
defaultMessageTimeToLive = time.Hour * 24 * 14
@ -64,6 +67,9 @@ const (
// Default rate of retriable errors per second
defaultMaxRetriableErrorsPerSec = 10
// By default entity management is enabled
defaultDisableEntityManagement = false
errorMessagePrefix = "azure service bus error:"
)
@ -82,7 +88,7 @@ func (a *AzureServiceBusQueues) parseMetadata(metadata bindings.Metadata) (*serv
return nil, errors.New("connectionString and namespaceName are mutually exclusive")
}
ttl, ok, err := contrib_metadata.TryGetTTL(metadata.Properties)
ttl, ok, err := contribMetadata.TryGetTTL(metadata.Properties)
if err != nil {
return nil, err
}
@ -162,5 +168,10 @@ func (a *AzureServiceBusQueues) parseMetadata(metadata bindings.Metadata) (*serv
m.MaxRetriableErrorsPerSec = to.Ptr(mRetriableErrorsPerSec)
}
m.DisableEntityManagement = defaultDisableEntityManagement
if val, ok := metadata.Properties[disableEntityManagement]; ok && val != "" {
m.DisableEntityManagement = utils.IsTruthy(val)
}
return &m, nil
}

View File

@ -103,7 +103,7 @@ func TestParseMetadata(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
m := bindings.Metadata{}
m.Properties = tt.properties
a := NewAzureServiceBusQueues(logger.NewLogger("test"))
a := NewAzureServiceBusQueues(logger.NewLogger("test")).(*AzureServiceBusQueues)
meta, err := a.parseMetadata(m)
assert.Nil(t, err)
assert.Equal(t, tt.expectedConnectionString, meta.ConnectionString)
@ -137,7 +137,7 @@ func TestParseMetadataWithInvalidTTL(t *testing.T) {
m := bindings.Metadata{}
m.Properties = tt.properties
a := NewAzureServiceBusQueues(logger.NewLogger("test"))
a := NewAzureServiceBusQueues(logger.NewLogger("test")).(*AzureServiceBusQueues)
_, err := a.parseMetadata(m)
assert.NotNil(t, err)
})
@ -183,7 +183,7 @@ func TestParseMetadataConnectionStringAndNamespaceNameExclusivity(t *testing.T)
t.Run(tt.name, func(t *testing.T) {
m := bindings.Metadata{}
m.Properties = tt.properties
a := NewAzureServiceBusQueues(logger.NewLogger("test"))
a := NewAzureServiceBusQueues(logger.NewLogger("test")).(*AzureServiceBusQueues)
meta, err := a.parseMetadata(m)
if tt.expectedErr {
assert.NotNil(t, err)

View File

@ -28,7 +28,7 @@ import (
"github.com/dapr/components-contrib/bindings"
azauth "github.com/dapr/components-contrib/internal/authentication/azure"
impl "github.com/dapr/components-contrib/internal/component/azure/servicebus"
contrib_metadata "github.com/dapr/components-contrib/metadata"
contribMetadata "github.com/dapr/components-contrib/metadata"
"github.com/dapr/kit/logger"
)
@ -52,7 +52,7 @@ type AzureServiceBusQueues struct {
}
// NewAzureServiceBusQueues returns a new AzureServiceBusQueues instance.
func NewAzureServiceBusQueues(logger logger.Logger) *AzureServiceBusQueues {
func NewAzureServiceBusQueues(logger logger.Logger) bindings.InputOutputBinding {
return &AzureServiceBusQueues{
senderLock: sync.RWMutex{},
logger: logger,
@ -76,9 +76,11 @@ func (a *AzureServiceBusQueues) Init(metadata bindings.Metadata) (err error) {
return err
}
a.adminClient, err = sbadmin.NewClientFromConnectionString(a.metadata.ConnectionString, nil)
if err != nil {
return err
if !a.metadata.DisableEntityManagement {
a.adminClient, err = sbadmin.NewClientFromConnectionString(a.metadata.ConnectionString, nil)
if err != nil {
return err
}
}
} else {
settings, innerErr := azauth.NewEnvironmentSettings(azauth.AzureServiceBusResourceName, metadata.Properties)
@ -98,37 +100,40 @@ func (a *AzureServiceBusQueues) Init(metadata bindings.Metadata) (err error) {
return innerErr
}
a.adminClient, innerErr = sbadmin.NewClient(a.metadata.NamespaceName, token, nil)
if innerErr != nil {
return innerErr
if !a.metadata.DisableEntityManagement {
a.adminClient, innerErr = sbadmin.NewClient(a.metadata.NamespaceName, token, nil)
if innerErr != nil {
return innerErr
}
}
}
ctx, cancel := context.WithTimeout(context.Background(), a.timeout)
defer cancel()
getQueueRes, err := a.adminClient.GetQueue(ctx, a.metadata.QueueName, nil)
if err != nil {
return err
}
if getQueueRes == nil {
// Need to create the queue
ttlDur := contrib_metadata.Duration{
Duration: a.metadata.ttl,
}
if a.adminClient != nil {
ctx, cancel := context.WithTimeout(context.Background(), a.timeout)
defer cancel()
_, err = a.adminClient.CreateQueue(ctx, a.metadata.QueueName, &sbadmin.CreateQueueOptions{
Properties: &sbadmin.QueueProperties{
DefaultMessageTimeToLive: to.Ptr(ttlDur.ToISOString()),
},
})
getQueueRes, err := a.adminClient.GetQueue(ctx, a.metadata.QueueName, nil)
if err != nil {
return err
}
if getQueueRes == nil {
// Need to create the queue
ttlDur := contribMetadata.Duration{
Duration: a.metadata.ttl,
}
ctx, cancel := context.WithTimeout(context.Background(), a.timeout)
defer cancel()
_, err = a.adminClient.CreateQueue(ctx, a.metadata.QueueName, &sbadmin.CreateQueueOptions{
Properties: &sbadmin.QueueProperties{
DefaultMessageTimeToLive: to.Ptr(ttlDur.ToISOString()),
},
})
if err != nil {
return err
}
}
a.ctx, a.cancel = context.WithCancel(context.Background())
}
a.ctx, a.cancel = context.WithCancel(context.Background())
return nil
}
@ -155,7 +160,8 @@ func (a *AzureServiceBusQueues) Invoke(ctx context.Context, req *bindings.Invoke
}
msg := &servicebus.Message{
Body: req.Data,
Body: req.Data,
ApplicationProperties: make(map[string]interface{}),
}
if val, ok := req.Metadata[id]; ok && val != "" {
msg.MessageID = &val
@ -163,7 +169,17 @@ func (a *AzureServiceBusQueues) Invoke(ctx context.Context, req *bindings.Invoke
if val, ok := req.Metadata[correlationID]; ok && val != "" {
msg.CorrelationID = &val
}
ttl, ok, err := contrib_metadata.TryGetTTL(req.Metadata)
// Include incoming metadata in the message to be used when it is read.
for k, v := range req.Metadata {
// Don't include the values that are saved in MessageID or CorrelationID.
if k == id || k == correlationID {
continue
}
msg.ApplicationProperties[k] = v
}
ttl, ok, err := contribMetadata.TryGetTTL(req.Metadata)
if err != nil {
return nil, err
}
@ -262,6 +278,13 @@ func (a *AzureServiceBusQueues) getHandlerFunc(handler bindings.Handler) impl.Ha
metadata[label] = *msg.Subject
}
// Passthrough any custom metadata to the handler.
for key, val := range msg.ApplicationProperties {
if stringVal, ok := val.(string); ok {
metadata[key] = stringVal
}
}
_, err := handler(a.ctx, &bindings.ReadResponse{
Data: msg.Body,
Metadata: metadata,

View File

@ -17,7 +17,7 @@ import (
"bytes"
"context"
"fmt"
"io/ioutil"
"io"
"net/http"
"strings"
"time"
@ -58,7 +58,7 @@ func init() {
}
// NewSignalR creates a new output binding for Azure SignalR.
func NewSignalR(logger logger.Logger) *SignalR {
func NewSignalR(logger logger.Logger) bindings.OutputBinding {
return &SignalR{
logger: logger,
httpClient: httpClient,
@ -197,7 +197,7 @@ func (s *SignalR) resolveAPIURL(req *bindings.InvokeRequest) (string, error) {
}
func (s *SignalR) sendMessageToSignalR(ctx context.Context, url string, token string, data []byte) error {
httpReq, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(data))
httpReq, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewBuffer(data))
if err != nil {
return err
}
@ -213,7 +213,7 @@ func (s *SignalR) sendMessageToSignalR(ctx context.Context, url string, token st
defer resp.Body.Close()
// Read the body regardless to drain it and ensure the connection can be reused
body, err := ioutil.ReadAll(resp.Body)
body, err := io.ReadAll(resp.Body)
if err != nil {
return err
}
@ -263,7 +263,8 @@ func (s *SignalR) getToken(ctx context.Context, url string) (token string, err e
}
token = at.Token
} else {
claims := &jwt.StandardClaims{
// TODO: Use jwt.RegisteredClaims instead
claims := &jwt.StandardClaims{ //nolint:staticcheck
ExpiresAt: time.Now().Add(15 * time.Minute).Unix(),
Audience: url,
}

View File

@ -17,7 +17,7 @@ import (
"context"
"errors"
"fmt"
"io/ioutil"
"io"
"net/http"
"strings"
"sync/atomic"
@ -186,7 +186,7 @@ func TestConfigurationValid(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := NewSignalR(logger.NewLogger("test"))
s := NewSignalR(logger.NewLogger("test")).(*SignalR)
err := s.parseMetadata(tt.properties)
assert.Nil(t, err)
assert.Equal(t, tt.expectedEndpoint, s.endpoint)
@ -256,7 +256,7 @@ func TestInvalidConfigurations(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := NewSignalR(logger.NewLogger("test"))
s := NewSignalR(logger.NewLogger("test")).(*SignalR)
err := s.parseMetadata(tt.properties)
assert.NotNil(t, err)
})
@ -284,10 +284,10 @@ func (t *mockTransport) RoundTrip(req *http.Request) (*http.Response, error) {
func TestWriteShouldFail(t *testing.T) {
httpTransport := &mockTransport{
response: &http.Response{StatusCode: 200, Body: ioutil.NopCloser(strings.NewReader(""))},
response: &http.Response{StatusCode: http.StatusOK, Body: io.NopCloser(strings.NewReader(""))},
}
s := NewSignalR(logger.NewLogger("test"))
s := NewSignalR(logger.NewLogger("test")).(*SignalR)
s.endpoint = "https://fake.service.signalr.net"
s.accessKey = "G7+nIt9n48+iYSltPRf1v8kE+MupFfEt/9NSNTKOdzA="
s.httpClient = &http.Client{
@ -335,10 +335,10 @@ func TestWriteShouldFail(t *testing.T) {
func TestWriteShouldSucceed(t *testing.T) {
httpTransport := &mockTransport{
response: &http.Response{StatusCode: 200, Body: ioutil.NopCloser(strings.NewReader(""))},
response: &http.Response{StatusCode: http.StatusOK, Body: io.NopCloser(strings.NewReader(""))},
}
s := NewSignalR(logger.NewLogger("test"))
s := NewSignalR(logger.NewLogger("test")).(*SignalR)
s.endpoint = "https://fake.service.signalr.net"
s.accessKey = "fakekey"
s.httpClient = &http.Client{

View File

@ -26,8 +26,7 @@ import (
"github.com/dapr/components-contrib/bindings"
azauth "github.com/dapr/components-contrib/internal/authentication/azure"
"github.com/dapr/components-contrib/internal/utils"
contrib_metadata "github.com/dapr/components-contrib/metadata"
mdutils "github.com/dapr/components-contrib/metadata"
contribMetadata "github.com/dapr/components-contrib/metadata"
"github.com/dapr/kit/logger"
)
@ -179,7 +178,7 @@ type storageQueuesMetadata struct {
}
// NewAzureStorageQueues returns a new AzureStorageQueues instance.
func NewAzureStorageQueues(logger logger.Logger) *AzureStorageQueues {
func NewAzureStorageQueues(logger logger.Logger) bindings.InputOutputBinding {
return &AzureStorageQueues{helper: NewAzureQueueHelper(logger), logger: logger}
}
@ -197,25 +196,25 @@ func parseMetadata(metadata bindings.Metadata) (*storageQueuesMetadata, error) {
var m storageQueuesMetadata
// AccountKey is parsed in azauth
if val, ok := mdutils.GetMetadataProperty(metadata.Properties, azauth.StorageAccountNameKeys...); ok && val != "" {
if val, ok := contribMetadata.GetMetadataProperty(metadata.Properties, azauth.StorageAccountNameKeys...); ok && val != "" {
m.AccountName = val
} else {
return nil, fmt.Errorf("missing or empty %s field from metadata", azauth.StorageAccountNameKeys[0])
}
if val, ok := mdutils.GetMetadataProperty(metadata.Properties, azauth.StorageQueueNameKeys...); ok && val != "" {
if val, ok := contribMetadata.GetMetadataProperty(metadata.Properties, azauth.StorageQueueNameKeys...); ok && val != "" {
m.QueueName = val
} else {
return nil, fmt.Errorf("missing or empty %s field from metadata", azauth.StorageQueueNameKeys[0])
}
if val, ok := mdutils.GetMetadataProperty(metadata.Properties, azauth.StorageEndpointKeys...); ok && val != "" {
if val, ok := contribMetadata.GetMetadataProperty(metadata.Properties, azauth.StorageEndpointKeys...); ok && val != "" {
m.QueueEndpoint = val
}
m.DecodeBase64 = utils.IsTruthy(metadata.Properties["decodeBase64"])
ttl, ok, err := contrib_metadata.TryGetTTL(metadata.Properties)
ttl, ok, err := contribMetadata.TryGetTTL(metadata.Properties)
if err != nil {
return nil, err
}
@ -232,7 +231,7 @@ func (a *AzureStorageQueues) Operations() []bindings.OperationKind {
func (a *AzureStorageQueues) Invoke(ctx context.Context, req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
ttlToUse := a.metadata.ttl
ttl, ok, err := contrib_metadata.TryGetTTL(req.Metadata)
ttl, ok, err := contribMetadata.TryGetTTL(req.Metadata)
if err != nil {
return nil, err
}

View File

@ -287,7 +287,7 @@ func TestReadQueueNoMessage(t *testing.T) {
}
func TestParseMetadata(t *testing.T) {
var oneSecondDuration time.Duration = time.Second
oneSecondDuration := time.Second
testCases := []struct {
name string
@ -295,7 +295,7 @@ func TestParseMetadata(t *testing.T) {
// Account key is parsed in azauth
// expectedAccountKey string
expectedQueueName string
expectedQueueEndpointUrl string
expectedQueueEndpointURL string
expectedTTL *time.Duration
}{
{
@ -303,21 +303,21 @@ func TestParseMetadata(t *testing.T) {
properties: map[string]string{"storageAccessKey": "myKey", "queue": "queue1", "storageAccount": "devstoreaccount1"},
// expectedAccountKey: "myKey",
expectedQueueName: "queue1",
expectedQueueEndpointUrl: "",
expectedQueueEndpointURL: "",
},
{
name: "Accout, key, and endpoint",
properties: map[string]string{"accountKey": "myKey", "queueName": "queue1", "storageAccount": "someAccount", "queueEndpointUrl": "https://foo.example.com:10001"},
// expectedAccountKey: "myKey",
expectedQueueName: "queue1",
expectedQueueEndpointUrl: "https://foo.example.com:10001",
expectedQueueEndpointURL: "https://foo.example.com:10001",
},
{
name: "Empty TTL",
properties: map[string]string{"storageAccessKey": "myKey", "queue": "queue1", "storageAccount": "devstoreaccount1", metadata.TTLMetadataKey: ""},
// expectedAccountKey: "myKey",
expectedQueueName: "queue1",
expectedQueueEndpointUrl: "",
expectedQueueEndpointURL: "",
},
{
name: "With TTL",
@ -325,7 +325,7 @@ func TestParseMetadata(t *testing.T) {
// expectedAccountKey: "myKey",
expectedQueueName: "queue1",
expectedTTL: &oneSecondDuration,
expectedQueueEndpointUrl: "",
expectedQueueEndpointURL: "",
},
}
@ -340,7 +340,7 @@ func TestParseMetadata(t *testing.T) {
// assert.Equal(t, tt.expectedAccountKey, meta.AccountKey)
assert.Equal(t, tt.expectedQueueName, meta.QueueName)
assert.Equal(t, tt.expectedTTL, meta.ttl)
assert.Equal(t, tt.expectedQueueEndpointUrl, meta.QueueEndpoint)
assert.Equal(t, tt.expectedQueueEndpointURL, meta.QueueEndpoint)
})
}
}

20
bindings/bindings.go Normal file
View File

@ -0,0 +1,20 @@
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bindings
// InputOutputBinding is the interface for components that implement both the
// InputBinding and OutputBinding interfaces, i.e. bindings that can both
// receive and send data.
type InputOutputBinding interface {
	InputBinding
	OutputBinding
}

View File

@ -43,7 +43,7 @@ type commercetoolsMetadata struct {
scopes string
}
func NewCommercetools(logger logger.Logger) *Binding {
func NewCommercetools(logger logger.Logger) bindings.OutputBinding {
return &Binding{logger: logger}
}

View File

@ -36,7 +36,7 @@ type Binding struct {
}
// NewCron returns a new Cron event input binding.
func NewCron(logger logger.Logger) *Binding {
func NewCron(logger logger.Logger) bindings.InputOutputBinding {
return &Binding{
logger: logger,
parser: cron.NewParser(
@ -47,8 +47,9 @@ func NewCron(logger logger.Logger) *Binding {
// Init initializes the Cron binding
// Examples from https://godoc.org/github.com/robfig/cron:
// "15 * * * * *" - Every 15 sec
// "0 30 * * * *" - Every 30 min
//
// "15 * * * * *" - Every 15 sec
// "0 30 * * * *" - Every 30 min
func (b *Binding) Init(metadata bindings.Metadata) error {
b.name = metadata.Name
s, f := metadata.Properties["schedule"]
@ -85,12 +86,12 @@ func (b *Binding) Read(ctx context.Context, handler bindings.Handler) error {
b.logger.Debugf("name: %s, next run: %v", b.name, time.Until(c.Entry(id).Next))
go func() {
// Wait for a context to be canceled or a message on the stopCh
// Wait for a context to be canceled
select {
case <-b.runningCtx.Done():
// Do nothing
case <-ctx.Done():
b.runningCancel()
b.resetContext()
}
b.logger.Debugf("name: %s, stopping schedule: %s", b.name, b.schedule)
c.Stop()

View File

@ -40,7 +40,7 @@ func getNewCron() *Binding {
l.SetOutputLevel(logger.DebugLevel)
}
return NewCron(l)
return NewCron(l).(*Binding)
}
// go test -v -timeout 15s -count=1 ./bindings/cron/.

View File

@ -20,7 +20,6 @@ import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/url"
"strconv"
@ -79,7 +78,7 @@ type createResponse struct {
}
// NewGCPStorage returns a new GCP storage instance.
func NewGCPStorage(logger logger.Logger) *GCPStorage {
func NewGCPStorage(logger logger.Logger) bindings.OutputBinding {
return &GCPStorage{logger: logger}
}
@ -214,9 +213,9 @@ func (g *GCPStorage) get(ctx context.Context, req *bindings.InvokeRequest) (*bin
}
defer rc.Close()
data, err := ioutil.ReadAll(rc)
data, err := io.ReadAll(rc)
if err != nil {
return nil, fmt.Errorf("gcp bucketgcp bucket binding error: ioutil.ReadAll: %v", err)
return nil, fmt.Errorf("gcp bucketgcp bucket binding error: io.ReadAll: %v", err)
}
if metadata.EncodeBase64 {

View File

@ -54,7 +54,7 @@ type pubSubMetadata struct {
}
// NewGCPPubSub returns a new GCPPubSub instance.
func NewGCPPubSub(logger logger.Logger) *GCPPubSub {
func NewGCPPubSub(logger logger.Logger) bindings.InputOutputBinding {
return &GCPPubSub{logger: logger}
}

View File

@ -53,7 +53,7 @@ type GraphQL struct {
}
// NewGraphQL returns a new GraphQL binding instance.
func NewGraphQL(logger logger.Logger) *GraphQL {
func NewGraphQL(logger logger.Logger) bindings.OutputBinding {
return &GraphQL{logger: logger}
}
@ -112,7 +112,7 @@ func (gql *GraphQL) Invoke(ctx context.Context, req *bindings.InvokeRequest) (*b
var graphqlResponse interface{}
switch req.Operation { // nolint: exhaustive
switch req.Operation { //nolint:exhaustive
case QueryOperation:
if err := gql.runRequest(ctx, commandQuery, req, &graphqlResponse); err != nil {
return nil, err

View File

@ -18,7 +18,6 @@ import (
"context"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"strconv"
@ -33,6 +32,7 @@ import (
)
// HTTPSource is a binding for an http url endpoint invocation
//
//revive:disable-next-line
type HTTPSource struct {
metadata httpMetadata
@ -46,7 +46,7 @@ type httpMetadata struct {
}
// NewHTTP returns a new HTTPSource.
func NewHTTP(logger logger.Logger) *HTTPSource {
func NewHTTP(logger logger.Logger) bindings.OutputBinding {
return &HTTPSource{logger: logger}
}
@ -148,7 +148,7 @@ func (h *HTTPSource) Invoke(ctx context.Context, req *bindings.InvokeRequest) (*
// Read the response body. For empty responses (e.g. 204 No Content)
// `b` will be an empty slice.
b, err := ioutil.ReadAll(resp.Body)
b, err := io.ReadAll(resp.Body)
if err != nil {
return nil, err
}

View File

@ -15,7 +15,7 @@ package http_test
import (
"context"
"io/ioutil"
"io"
"net/http"
"net/http/httptest"
"strings"
@ -25,12 +25,13 @@ import (
"github.com/stretchr/testify/require"
"github.com/dapr/components-contrib/bindings"
binding_http "github.com/dapr/components-contrib/bindings/http"
bindingHttp "github.com/dapr/components-contrib/bindings/http"
"github.com/dapr/components-contrib/metadata"
"github.com/dapr/kit/logger"
)
func TestOperations(t *testing.T) {
opers := (*binding_http.HTTPSource)(nil).Operations()
opers := (*bindingHttp.HTTPSource)(nil).Operations()
assert.Equal(t, []bindings.OperationKind{
bindings.CreateOperation,
"get",
@ -53,7 +54,7 @@ func TestInit(t *testing.T) {
input := req.Method
if req.Body != nil {
defer req.Body.Close()
b, _ := ioutil.ReadAll(req.Body)
b, _ := io.ReadAll(req.Body)
if len(b) > 0 {
input = string(b)
}
@ -64,19 +65,19 @@ func TestInit(t *testing.T) {
}
w.Header().Set("Content-Type", "text/plain")
if input == "internal server error" {
w.WriteHeader(500)
w.WriteHeader(http.StatusInternalServerError)
}
w.Write([]byte(strings.ToUpper(input)))
}),
)
defer s.Close()
m := bindings.Metadata{
m := bindings.Metadata{Base: metadata.Base{
Properties: map[string]string{
"url": s.URL,
},
}
hs := binding_http.NewHTTP(logger.NewLogger("test"))
}}
hs := bindingHttp.NewHTTP(logger.NewLogger("test"))
err := hs.Init(m)
require.NoError(t, err)

View File

@ -18,7 +18,7 @@ import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"io"
"strconv"
"github.com/google/uuid"
@ -70,7 +70,7 @@ type listPayload struct {
}
// NewHuaweiOBS returns a new Huawei OBS instance.
func NewHuaweiOBS(logger logger.Logger) *HuaweiOBS {
func NewHuaweiOBS(logger logger.Logger) bindings.OutputBinding {
return &HuaweiOBS{logger: logger}
}
@ -228,7 +228,7 @@ func (o *HuaweiOBS) get(ctx context.Context, req *bindings.InvokeRequest) (*bind
}
}()
data, err := ioutil.ReadAll(out.Body)
data, err := io.ReadAll(out.Body)
if err != nil {
return nil, fmt.Errorf("obs binding error. error reading obs object content: %w", err)
}

View File

@ -18,7 +18,7 @@ import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"io"
"strings"
"testing"
"testing/iotest"
@ -61,7 +61,7 @@ func (m *MockHuaweiOBSService) ListObjects(ctx context.Context, input *obs.ListO
}
func TestParseMetadata(t *testing.T) {
obs := NewHuaweiOBS(logger.NewLogger("test"))
obs := NewHuaweiOBS(logger.NewLogger("test")).(*HuaweiOBS)
t.Run("Has correct metadata", func(t *testing.T) {
m := bindings.Metadata{}
@ -374,7 +374,7 @@ func TestGetOperation(t *testing.T) {
},
Metadata: map[string]string{},
},
Body: ioutil.NopCloser(strings.NewReader("Hello Dapr")),
Body: io.NopCloser(strings.NewReader("Hello Dapr")),
}, nil
},
},
@ -447,7 +447,7 @@ func TestGetOperation(t *testing.T) {
},
Metadata: map[string]string{},
},
Body: ioutil.NopCloser(iotest.ErrReader(errors.New("unexpected data reading error"))),
Body: io.NopCloser(iotest.ErrReader(errors.New("unexpected data reading error"))),
}, nil
},
},
@ -667,7 +667,7 @@ func TestInvoke(t *testing.T) {
},
Metadata: map[string]string{},
},
Body: ioutil.NopCloser(strings.NewReader("Hello Dapr")),
Body: io.NopCloser(strings.NewReader("Hello Dapr")),
}, nil
},
},

View File

@ -59,7 +59,7 @@ type influxMetadata struct {
}
// NewInflux returns a new kafka binding instance.
func NewInflux(logger logger.Logger) *Influx {
func NewInflux(logger logger.Logger) bindings.OutputBinding {
return &Influx{logger: logger}
}

View File

@ -22,6 +22,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/dapr/components-contrib/bindings"
"github.com/dapr/components-contrib/metadata"
"github.com/dapr/kit/logger"
)
@ -46,13 +47,13 @@ func TestOperations(t *testing.T) {
}
func TestInflux_Init(t *testing.T) {
influx := NewInflux(logger.NewLogger("test"))
influx := NewInflux(logger.NewLogger("test")).(*Influx)
assert.Nil(t, influx.queryAPI)
assert.Nil(t, influx.writeAPI)
assert.Nil(t, influx.metadata)
assert.Nil(t, influx.client)
m := bindings.Metadata{Properties: map[string]string{"Url": "a", "Token": "a", "Org": "a", "Bucket": "a"}}
m := bindings.Metadata{Base: metadata.Base{Properties: map[string]string{"Url": "a", "Token": "a", "Org": "a", "Bucket": "a"}}}
err := influx.Init(m)
assert.Nil(t, err)

View File

@ -36,6 +36,6 @@ func PingInpBinding(inputBinding InputBinding) error {
if inputBindingWithPing, ok := inputBinding.(health.Pinger); ok {
return inputBindingWithPing.Ping()
} else {
return fmt.Errorf("Ping is not implemented by this input binding")
return fmt.Errorf("ping is not implemented by this input binding")
}
}

254
bindings/ipfs/ipfs.go Normal file
View File

@ -0,0 +1,254 @@
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ipfs
import (
"context"
"fmt"
"net"
"net/http"
"os"
"path/filepath"
"strings"
"sync"
"time"
ipfsHttpclient "github.com/ipfs/go-ipfs-http-client"
ipfsIcore "github.com/ipfs/interface-go-ipfs-core"
ipfsConfig "github.com/ipfs/kubo/config"
ipfsCore "github.com/ipfs/kubo/core"
ipfsCoreapi "github.com/ipfs/kubo/core/coreapi"
ipfsLibp2p "github.com/ipfs/kubo/core/node/libp2p"
ipfsLoader "github.com/ipfs/kubo/plugin/loader"
ipfsRepo "github.com/ipfs/kubo/repo"
ipfsFsrepo "github.com/ipfs/kubo/repo/fsrepo"
"github.com/multiformats/go-multiaddr"
"github.com/dapr/components-contrib/bindings"
"github.com/dapr/kit/logger"
)
const swarmKeyFile = "swarm.key"
var (
loadPluginsOnce sync.Once
httpClient *http.Client
)
// init builds the shared HTTP client used for all connections to external
// IPFS APIs, bounding connection establishment with a 5-second dial timeout.
func init() {
	dialer := &net.Dialer{Timeout: 5 * time.Second}
	httpClient = &http.Client{
		Transport: &http.Transport{Dial: dialer.Dial},
	}
}
// IPFSBinding is a binding for interacting with an IPFS network.
// It either runs an embedded IPFS node or connects to an external IPFS API,
// depending on the component metadata (see Init).
type IPFSBinding struct {
	metadata ipfsMetadata       // parsed component metadata
	ipfsAPI  ipfsIcore.CoreAPI  // API used for all operations (embedded or external)
	ipfsNode *ipfsCore.IpfsNode // embedded node; nil when using an external API
	ipfsRepo ipfsRepo.Repo      // embedded node's repo; closed in Close if set
	ctx      context.Context    // lifetime of the binding; canceled in Close
	cancel   context.CancelFunc // cancels ctx
	logger   logger.Logger
}
// NewIPFSBinding returns a new IPFSBinding.
func NewIPFSBinding(logger logger.Logger) bindings.OutputBinding {
	b := &IPFSBinding{}
	b.logger = logger
	return b
}
// Init initializes the binding from its metadata. When the externalAPI
// metadata property is empty, an embedded IPFS node is started; otherwise
// the binding connects to the external IPFS API, interpreting a value with
// a leading '/' as a multiaddr and anything else as an HTTP(S) URL.
func (b *IPFSBinding) Init(metadata bindings.Metadata) (err error) {
	b.ctx, b.cancel = context.WithCancel(context.Background())
	err = b.metadata.FromMap(metadata.Properties)
	if err != nil {
		return err
	}
	if b.metadata.ExternalAPI == "" {
		// IPFS plugins may only be loaded once per process, even if several
		// components create embedded nodes.
		var onceErr error
		loadPluginsOnce.Do(func() {
			onceErr = setupPlugins("")
		})
		if onceErr != nil {
			return onceErr
		}
		err = b.createNode()
		if err != nil {
			return fmt.Errorf("failed to start IPFS node: %v", err)
		}
	} else {
		// Leading '/' means the address is a multiaddr; otherwise it's a URL.
		if b.metadata.ExternalAPI[0] == '/' {
			var maddr multiaddr.Multiaddr
			maddr, err = multiaddr.NewMultiaddr(b.metadata.ExternalAPI)
			if err != nil {
				return fmt.Errorf("failed to parse external API multiaddr: %v", err)
			}
			b.ipfsAPI, err = ipfsHttpclient.NewApiWithClient(maddr, httpClient)
		} else {
			b.ipfsAPI, err = ipfsHttpclient.NewURLApiWithClient(b.metadata.ExternalAPI, httpClient)
		}
		if err != nil {
			return fmt.Errorf("failed to initialize external IPFS API: %v", err)
		}
		b.logger.Infof("Using IPFS APIs at %s", b.metadata.ExternalAPI)
	}
	return nil
}
// Close cancels the binding's context and shuts down the embedded IPFS node
// and repo, if present. Shutdown errors are logged but not returned, so
// Close is best-effort and always reports success.
func (b *IPFSBinding) Close() (err error) {
	if b.cancel != nil {
		b.cancel()
		b.cancel = nil
	}
	if b.ipfsNode != nil {
		err = b.ipfsNode.Close()
		if err != nil {
			b.logger.Errorf("Error while closing IPFS node: %v", err)
		}
		b.ipfsNode = nil
	}
	if b.ipfsRepo != nil {
		err = b.ipfsRepo.Close()
		if err != nil {
			b.logger.Errorf("Error while closing IPFS repo: %v", err)
		}
		b.ipfsRepo = nil
	}
	return nil
}
// createNode initializes the local IPFS repo if needed (writing the swarm key
// for private networks, when configured), opens the repo, starts an embedded
// IPFS node with the routing mode selected by metadata, and creates the
// CoreAPI on top of the node.
func (b *IPFSBinding) createNode() (err error) {
	// Init the repo if needed
	if !ipfsFsrepo.IsInitialized(b.metadata.RepoPath) {
		var cfg *ipfsConfig.Config
		cfg, err = b.metadata.IPFSConfig()
		if err != nil {
			return err
		}
		err = ipfsFsrepo.Init(b.metadata.RepoPath, cfg)
		if err != nil {
			return err
		}
		// A swarm key makes the node join a private IPFS network.
		if b.metadata.SwarmKey != "" {
			skPath := filepath.Join(b.metadata.RepoPath, swarmKeyFile)
			err = os.WriteFile(skPath, []byte(b.metadata.SwarmKey), 0o600)
			if err != nil {
				return fmt.Errorf("error writing swarm key to file '%s': %v", skPath, err)
			}
		}
		b.logger.Infof("Initialized a new IPFS repo at path %s", b.metadata.RepoPath)
	}
	// Open the repo
	// NOTE(review): the opened repo is never stored in b.ipfsRepo, so the
	// repo-closing branch in Close never runs; presumably the node owns and
	// closes the repo — confirm.
	repo, err := ipfsFsrepo.Open(b.metadata.RepoPath)
	if err != nil {
		return err
	}
	b.logger.Infof("Opened IPFS repo at path %s", b.metadata.RepoPath)
	// Create the node
	nodeOptions := &ipfsCore.BuildCfg{
		Online: true,
		Repo:   repo,
	}
	// Routing mode comes from the "routing" metadata property (case-insensitive);
	// empty defaults to full DHT.
	r := strings.ToLower(b.metadata.Routing)
	switch r {
	case "", "dht":
		nodeOptions.Routing = ipfsLibp2p.DHTOption
	case "dhtclient":
		nodeOptions.Routing = ipfsLibp2p.DHTClientOption
	case "dhtserver":
		nodeOptions.Routing = ipfsLibp2p.DHTServerOption
	case "none":
		nodeOptions.Routing = ipfsLibp2p.NilRouterOption
	default:
		return fmt.Errorf("invalid value for metadata property 'routing'")
	}
	b.ipfsNode, err = ipfsCore.NewNode(b.ctx, nodeOptions)
	if err != nil {
		return err
	}
	b.logger.Infof("Started IPFS node %s", b.ipfsNode.Identity)
	// Init API
	b.ipfsAPI, err = ipfsCoreapi.NewCoreAPI(b.ipfsNode)
	if err != nil {
		return err
	}
	return nil
}
// Operations returns the supported operations for this binding. The generic
// Dapr operation names are aliases for the IPFS-native ones: create=add,
// list=ls, delete=pin-rm.
func (b *IPFSBinding) Operations() []bindings.OperationKind {
	ops := []bindings.OperationKind{
		bindings.GetOperation,
		bindings.CreateOperation, // alias for "add"
		bindings.ListOperation,   // alias for "ls"
		bindings.DeleteOperation, // alias for "pin-rm"
	}
	return append(ops, "add", "ls", "pin-add", "pin-rm", "pin-ls")
}
// Invoke dispatches the requested IPFS operation. The generic create/list/
// delete operations map to the IPFS-native add/ls/pin-rm operations. An
// unrecognized operation yields an empty response rather than an error.
// (The previous comment claiming this "performs an HTTP request to the
// configured HTTP endpoint" appears copied from the HTTP binding.)
func (b *IPFSBinding) Invoke(ctx context.Context, req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
	switch req.Operation {
	case bindings.GetOperation:
		return b.getOperation(ctx, req)
	case "add", bindings.CreateOperation:
		return b.addOperation(ctx, req)
	case "ls", bindings.ListOperation:
		return b.lsOperation(ctx, req)
	case "pin-add":
		return b.pinAddOperation(ctx, req)
	case "pin-ls":
		return b.pinLsOperation(ctx, req)
	case "pin-rm", bindings.DeleteOperation:
		return b.pinRmOperation(ctx, req)
	}
	return &bindings.InvokeResponse{
		Data:     nil,
		Metadata: nil,
	}, nil
}
// setupPlugins loads, initializes, and injects the IPFS plugin set required
// by the embedded node.
// NOTE(review): externalPluginsPath is currently ignored — the loader is
// always created with an empty path, so external plugins are never loaded.
// Confirm whether the parameter should be forwarded to NewPluginLoader.
func setupPlugins(externalPluginsPath string) error {
	plugins, err := ipfsLoader.NewPluginLoader("")
	if err != nil {
		return fmt.Errorf("error loading plugins: %w", err)
	}
	if err := plugins.Initialize(); err != nil {
		return fmt.Errorf("error initializing plugins: %w", err)
	}
	// Fixed copy-paste: this message previously duplicated the one above,
	// making Initialize and Inject failures indistinguishable.
	if err := plugins.Inject(); err != nil {
		return fmt.Errorf("error injecting plugins: %w", err)
	}
	return nil
}

425
bindings/ipfs/ipfs_test.go Normal file
View File

@ -0,0 +1,425 @@
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ipfs
import (
"context"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"log"
"os"
"reflect"
"sort"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/dapr/components-contrib/bindings"
"github.com/dapr/components-contrib/internal/utils"
"github.com/dapr/components-contrib/metadata"
"github.com/dapr/kit/logger"
)
// TestMain gates the whole suite behind the IPFS_TEST environment variable,
// since these tests start a real IPFS node and need network access.
func TestMain(m *testing.M) {
	enabled := utils.IsTruthy(os.Getenv("IPFS_TEST"))
	if enabled {
		os.Exit(m.Run())
	}
	log.Println("IPFS_TEST env var is not set to a truthy value; skipping tests")
	os.Exit(0)
}
// TestSingleNodeGlobalNetwork exercises the binding end-to-end: it initializes
// a single local node in a throwaway repo and runs the get, ls, pin, and
// create operations against well-known public CIDs.
// NOTE(review): the subtests share the binding instance and pin state and rely
// on running in declaration order; the test also needs network access to
// retrieve the public documents — confirm this is acceptable in CI.
func TestSingleNodeGlobalNetwork(t *testing.T) {
	var b *IPFSBinding
	// Fresh repo per run, so the node starts with no pins.
	repoPath := t.TempDir()
	// CIDS contained in the QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG folder
	folderCids := []string{
		"QmZTR5bcpQD7cFgTorqxZDYaew1Wqgfbd2ud9QqGPAkK2V",
		"QmYCvbfNbCwFR45HiNP45rwJgvatpiW38D961L5qAhUM5Y",
		"QmY5heUM5qgRubMDD1og9fhCPA6QdkMp3QCwd4s7gJsyE7",
		"QmdncfsVm2h5Kqq9hPmU7oAVX2zTSVP3L869tgTbPYnsha",
		"QmPZ9gcCEpqKTo6aq61g2nXGUhM4iCL3ewB6LDXZCtioEB",
		"QmTumTjvcYCAvRRwQ8sDRxh8ezmrcr88YFU7iYNroGGTBZ",
	}
	// Sorted so it can be compared with listings via reflect.DeepEqual.
	sort.Strings(folderCids)
	t.Run("init node", func(t *testing.T) {
		b = NewIPFSBinding(logger.NewLogger("tests")).(*IPFSBinding)
		err := b.Init(bindings.Metadata{Base: metadata.Base{
			Properties: map[string]string{
				"repoPath": repoPath,
				"routing":  "dhtclient",
			},
		}})
		require.NoError(t, err)
	})
	t.Run("get operation", func(t *testing.T) {
		t.Run("empty path", func(t *testing.T) {
			_, err := b.Invoke(context.Background(), &bindings.InvokeRequest{
				Operation: "get",
			})
			if assert.Error(t, err) {
				assert.Equal(t, err.Error(), "metadata property 'path' is empty")
			}
		})
		t.Run("retrieve document by CID", func(t *testing.T) {
			data := getDocument(t, b, "QmPZ9gcCEpqKTo6aq61g2nXGUhM4iCL3ewB6LDXZCtioEB")
			compareHash(t, data, "a48161fca5edd15f4649bb928c10769216fccdf317265fc75d747c1e6892f53c")
		})
		t.Run("retrieve document by IPLD", func(t *testing.T) {
			// Same document, but different addressing method
			data := getDocument(t, b, "/ipfs/QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG/readme")
			compareHash(t, data, "a48161fca5edd15f4649bb928c10769216fccdf317265fc75d747c1e6892f53c")
		})
		t.Run("cannot retrieve folder", func(t *testing.T) {
			ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
			defer cancel()
			_, err := b.Invoke(ctx, &bindings.InvokeRequest{
				Operation: "get",
				Metadata: map[string]string{
					"path": "/ipfs/QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG",
				},
			})
			if assert.Error(t, err) {
				assert.Equal(t, err.Error(), "path does not represent a file")
			}
		})
		// Retrieve files also to speed up pinning later
		t.Run("retrieve files", func(t *testing.T) {
			for _, e := range folderCids {
				getDocument(t, b, e)
			}
			getDocument(t, b, "QmdytmR4wULMd3SLo6ePF4s3WcRHWcpnJZ7bHhoj3QB13v")
		})
	})
	t.Run("ls operation", func(t *testing.T) {
		t.Run("empty path", func(t *testing.T) {
			_, err := b.Invoke(context.Background(), &bindings.InvokeRequest{
				Operation: "ls",
			})
			if assert.Error(t, err) {
				assert.Equal(t, err.Error(), "metadata property 'path' is empty")
			}
		})
		// Asserts the listing's CIDs match the known folder contents.
		testLsOperationResponse := func(t *testing.T, list lsOperationResponse) {
			cids := make([]string, len(list))
			for i, e := range list {
				cids[i] = e.Cid
			}
			sort.Strings(cids)
			assert.True(t, reflect.DeepEqual(cids, folderCids), "received='%v' expected='%v'", cids, folderCids)
		}
		t.Run("list by CID", func(t *testing.T) {
			list := listPath(t, b, "QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG")
			require.NotEmpty(t, list)
			testLsOperationResponse(t, list)
		})
		t.Run("list by IPLD", func(t *testing.T) {
			list := listPath(t, b, "/ipfs/QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG")
			require.NotEmpty(t, list)
			testLsOperationResponse(t, list)
		})
	})
	t.Run("pin operations", func(t *testing.T) {
		t.Run("pin-ls: nothing is pinned on a new node", func(t *testing.T) {
			list := listPins(t, b)
			assert.Empty(t, list)
		})
		t.Run("pin-add: pin file by CID", func(t *testing.T) {
			res, err := b.Invoke(context.Background(), &bindings.InvokeRequest{
				Operation: "pin-add",
				Metadata: map[string]string{
					"path": "QmPZ9gcCEpqKTo6aq61g2nXGUhM4iCL3ewB6LDXZCtioEB",
				},
			})
			require.NoError(t, err)
			require.NotNil(t, res)
			require.Empty(t, res.Data)
		})
		t.Run("pin-ls: list added pin", func(t *testing.T) {
			list := listPins(t, b)
			assert.Len(t, list, 1)
			assert.Equal(t, "QmPZ9gcCEpqKTo6aq61g2nXGUhM4iCL3ewB6LDXZCtioEB", list[0].Cid)
		})
		t.Run("pin-add: pin file by IPLD", func(t *testing.T) {
			res, err := b.Invoke(context.Background(), &bindings.InvokeRequest{
				Operation: "pin-add",
				Metadata: map[string]string{
					"path": "/ipfs/QmdytmR4wULMd3SLo6ePF4s3WcRHWcpnJZ7bHhoj3QB13v",
				},
			})
			require.NoError(t, err)
			require.NotNil(t, res)
			require.Empty(t, res.Data)
		})
		t.Run("pin-add: recursively pin folder", func(t *testing.T) {
			res, err := b.Invoke(context.Background(), &bindings.InvokeRequest{
				Operation: "pin-add",
				Metadata: map[string]string{
					"path": "QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG",
				},
			})
			require.NoError(t, err)
			require.NotNil(t, res)
			require.Empty(t, res.Data)
		})
		// Add the folder to the list of expected items
		expect := make([]string, len(folderCids)+2)
		expect[0] = "QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG"
		expect[1] = "QmdytmR4wULMd3SLo6ePF4s3WcRHWcpnJZ7bHhoj3QB13v"
		copy(expect[2:], folderCids)
		sort.Strings(expect)
		t.Run("pin-ls: list added folder", func(t *testing.T) {
			cids := listPinnedCids(t, b)
			assert.True(t, reflect.DeepEqual(cids, expect), "received='%v' expected='%v'", cids, expect)
		})
		t.Run("pin-rm: remove file", func(t *testing.T) {
			res, err := b.Invoke(context.Background(), &bindings.InvokeRequest{
				Operation: "pin-rm",
				Metadata: map[string]string{
					"path": "QmdytmR4wULMd3SLo6ePF4s3WcRHWcpnJZ7bHhoj3QB13v",
				},
			})
			require.NoError(t, err)
			require.NotNil(t, res)
			require.Empty(t, res.Data)
		})
		// Remove the un-pinned file
		i := 0
		for _, e := range expect {
			if e != "QmdytmR4wULMd3SLo6ePF4s3WcRHWcpnJZ7bHhoj3QB13v" {
				expect[i] = e
				i++
			}
		}
		expect = expect[:i]
		t.Run("pin-ls: updated after removed file", func(t *testing.T) {
			cids := listPinnedCids(t, b)
			assert.True(t, reflect.DeepEqual(cids, expect), "received='%v' expected='%v'", cids, expect)
		})
		t.Run("pin-rm: recursively remove folder", func(t *testing.T) {
			res, err := b.Invoke(context.Background(), &bindings.InvokeRequest{
				Operation: "pin-rm",
				Metadata: map[string]string{
					"path": "QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG",
				},
			})
			require.NoError(t, err)
			require.NotNil(t, res)
			require.Empty(t, res.Data)
		})
		t.Run("pin-rm: remove explicitly-pinned file", func(t *testing.T) {
			res, err := b.Invoke(context.Background(), &bindings.InvokeRequest{
				Operation: "pin-rm",
				Metadata: map[string]string{
					"path": "QmPZ9gcCEpqKTo6aq61g2nXGUhM4iCL3ewB6LDXZCtioEB",
				},
			})
			require.NoError(t, err)
			require.NotNil(t, res)
			require.Empty(t, res.Data)
		})
		t.Run("pin-ls: updated list is empty", func(t *testing.T) {
			list := listPins(t, b)
			assert.Empty(t, list)
		})
	})
	t.Run("create operation", func(t *testing.T) {
		expectPins := []string{}
		t.Run("add with default options", func(t *testing.T) {
			path := addDocument(t, b,
				[]byte("Quel ramo del lago di Como, che volge a mezzogiorno"),
				nil,
			)
			assert.Equal(t, "/ipfs/QmRW7jvkePyaAFvtapaqZ9kNkziUrmkhi4ue5oNpXS2qUx", path)
			expectPins = append(expectPins, "QmRW7jvkePyaAFvtapaqZ9kNkziUrmkhi4ue5oNpXS2qUx")
		})
		t.Run("add with CID v1", func(t *testing.T) {
			path := addDocument(t, b,
				[]byte("Quel ramo del lago di Como, che volge a mezzogiorno"),
				map[string]string{"cidVersion": "1"},
			)
			assert.Equal(t, "/ipfs/bafkreidhuwuwgycmsbj4sesi3pm6vpxpbm6byt3twex7sc2nadaxksnqeq", path)
			expectPins = append(expectPins, "bafkreidhuwuwgycmsbj4sesi3pm6vpxpbm6byt3twex7sc2nadaxksnqeq")
		})
		t.Run("added files are pinned", func(t *testing.T) {
			cids := listPinnedCids(t, b)
			assert.True(t, reflect.DeepEqual(cids, expectPins), "received='%v' expected='%v'", cids, expectPins)
		})
		t.Run("add without pinning", func(t *testing.T) {
			path := addDocument(t, b,
				[]byte("😁🐶"),
				map[string]string{"pin": "false"},
			)
			assert.Equal(t, "/ipfs/QmWsLpV1UUD26qHaEJqXfHazSRRZVX82M51EQ87UT7ryiR", path)
		})
		t.Run("pinned documents haven't changed", func(t *testing.T) {
			cids := listPinnedCids(t, b)
			assert.True(t, reflect.DeepEqual(cids, expectPins), "received='%v' expected='%v'", cids, expectPins)
		})
		t.Run("add inline", func(t *testing.T) {
			path := addDocument(t, b,
				[]byte("😁🐶"),
				map[string]string{"inline": "true"},
			)
			assert.Equal(t, "/ipfs/bafyaaeakbyeaeeqi6cpzrapqt6ilmgai", path)
		})
	})
	// Shut down the node if it was started.
	if b != nil {
		b.Close()
	}
}
// getDocument invokes the "get" operation for path and returns the payload.
// Fails the test if the operation errors or returns no data; bounded by a
// 30-second timeout.
func getDocument(t *testing.T, b *IPFSBinding, path string) []byte {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	req := &bindings.InvokeRequest{
		Operation: "get",
		Metadata:  map[string]string{"path": path},
	}
	res, err := b.Invoke(ctx, req)
	require.NoError(t, err)
	require.NotNil(t, res)
	require.NotEmpty(t, res.Data)
	return res.Data
}
// listPath invokes the "ls" operation for path and returns the decoded
// listing. Fails the test on error, empty data, or invalid JSON.
func listPath(t *testing.T, b *IPFSBinding, path string) lsOperationResponse {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	req := &bindings.InvokeRequest{
		Operation: "ls",
		Metadata:  map[string]string{"path": path},
	}
	res, err := b.Invoke(ctx, req)
	require.NoError(t, err)
	require.NotNil(t, res)
	require.NotEmpty(t, res.Data)
	list := lsOperationResponse{}
	require.NoError(t, json.Unmarshal(res.Data, &list))
	return list
}
// listPins invokes the "pin-ls" operation and returns the decoded pin list.
// Fails the test on error, empty data, or invalid JSON.
func listPins(t *testing.T, b *IPFSBinding) pinLsOperationResponse {
	req := &bindings.InvokeRequest{Operation: "pin-ls"}
	res, err := b.Invoke(context.Background(), req)
	require.NoError(t, err)
	require.NotNil(t, res)
	require.NotEmpty(t, res.Data)
	list := pinLsOperationResponse{}
	require.NoError(t, json.Unmarshal(res.Data, &list))
	return list
}
// listPinnedCids returns the sorted CIDs of all pins on the node, requiring
// that at least one pin exists.
func listPinnedCids(t *testing.T, b *IPFSBinding) []string {
	list := listPins(t, b)
	require.NotEmpty(t, list)
	cids := make([]string, 0, len(list))
	for _, e := range list {
		cids = append(cids, e.Cid)
	}
	sort.Strings(cids)
	return cids
}
// addDocument invokes the "create" operation with the given data and metadata
// and returns the IPFS path of the new document. Fails the test on error or
// a malformed/empty response.
func addDocument(t *testing.T, b *IPFSBinding, data []byte, metadata map[string]string) string {
	req := &bindings.InvokeRequest{
		Operation: "create",
		Data:      data,
		Metadata:  metadata,
	}
	res, err := b.Invoke(context.Background(), req)
	require.NoError(t, err)
	require.NotNil(t, res)
	require.NotEmpty(t, res.Data)
	var o addOperationResponse
	require.NoError(t, json.Unmarshal(res.Data, &o))
	require.NotEmpty(t, o.Path)
	return o.Path
}
// compareHash asserts that the SHA-256 digest of data equals the expected
// lowercase hex string.
func compareHash(t *testing.T, data []byte, expect string) {
	require.NotEmpty(t, data)
	sum := sha256.Sum256(data)
	assert.Equal(t, expect, hex.EncodeToString(sum[:]))
}

130
bindings/ipfs/metadata.go Normal file
View File

@ -0,0 +1,130 @@
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ipfs
import (
"fmt"
"io"
"strings"
ipfsOptions "github.com/ipfs/interface-go-ipfs-core/options"
ipfsConfig "github.com/ipfs/kubo/config"
ipfsFsrepo "github.com/ipfs/kubo/repo/fsrepo"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/dapr/kit/config"
)
// ipfsMetadata holds the component configuration parsed from the Dapr
// component metadata properties.
type ipfsMetadata struct {
	// Path where to store the IPFS repository.
	// It will be initialized automatically if needed.
	// Defaults to the "best known path" set by IPFS.
	RepoPath string `mapstructure:"repoPath"`
	// If set, uses an external IPFS daemon, connecting to its APIs.
	// Can be a HTTP(S) address or a multi-address.
	// If set, a local node will not be initialized.
	ExternalAPI string `mapstructure:"externalAPI"`

	// The options below can only be set when a new local repo is being initialized.

	// List of bootstrap nodes, as a comma-separated string.
	// If empty, defaults to the official bootstrap nodes provided by the IPFS project.
	// Users should not modify this unless they're using a private cluster.
	BootstrapNodes string `mapstructure:"bootstrapNodes"`
	// Swarm key to use for connecting to private IPFS networks.
	// If empty, the node will connect to the default, public IPFS network.
	// Generate with https://github.com/Kubuxu/go-ipfs-swarm-key-gen
	// When using a swarm key, users should also configure the bootstrap nodes.
	SwarmKey string `mapstructure:"swarmKey"`
	// Routing mode: "dht" (default), "dhtclient", "dhtserver", or "none"
	// (the accepted set enforced by IPFSConfig).
	Routing string `mapstructure:"routing"`
	// Max local storage used.
	// Default: the default value used by go-ipfs (currently, "10GB").
	StorageMax string `mapstructure:"storageMax"`
	// Watermark for running garbage collection, 0-100 (as a percentage).
	// Default: the default value used by go-ipfs (currently, 90).
	StorageGCWatermark int64 `mapstructure:"storageGCWatermark"`
	// Interval for running garbage collection.
	// Default: the default value used by go-ipfs (currently, "1h").
	StorageGCPeriod string `mapstructure:"storageGCPeriod"`
}
// FromMap populates the metadata struct from the component's properties map,
// falling back to the IPFS "best known path" when no repo path is configured.
func (m *ipfsMetadata) FromMap(mp map[string]string) error {
	if len(mp) > 0 {
		if err := config.Decode(mp, m); err != nil {
			return err
		}
	}
	if m.RepoPath != "" {
		return nil
	}
	path, err := ipfsFsrepo.BestKnownPath()
	if err != nil {
		return fmt.Errorf("error determining the best known repo path: %v", err)
	}
	m.RepoPath = path
	return nil
}
// IPFSConfig builds the go-ipfs configuration object used when a brand-new
// local repository is initialized; it is not consulted for existing repos.
// A fresh Ed25519 identity is generated for the node.
func (m *ipfsMetadata) IPFSConfig() (*ipfsConfig.Config, error) {
	identity, err := ipfsConfig.CreateIdentity(io.Discard, []ipfsOptions.KeyGenerateOption{
		ipfsOptions.Key.Type(ipfsOptions.Ed25519Key),
	})
	if err != nil {
		return nil, err
	}
	cfg, err := ipfsConfig.InitWithIdentity(identity)
	if err != nil {
		return nil, err
	}
	// Custom bootstrap nodes (comma-separated multi-addresses).
	if m.BootstrapNodes != "" {
		peers, perr := ipfsConfig.ParseBootstrapPeers(strings.Split(m.BootstrapNodes, ","))
		if perr != nil {
			return nil, fmt.Errorf("invalid value for metadata property 'bootstrapNodes': %v", perr)
		}
		cfg.SetBootstrapPeers(peers)
	}
	// Routing mode; an empty value means "dht".
	switch r := strings.ToLower(m.Routing); r {
	case "":
		cfg.Routing.Type = ipfsConfig.NewOptionalString("dht")
	case "dht", "dhtclient", "dhtserver", "none":
		cfg.Routing.Type = ipfsConfig.NewOptionalString(r)
	default:
		return nil, fmt.Errorf("invalid value for metadata property 'routing'")
	}
	// Datastore limits and GC tuning; zero values keep go-ipfs defaults.
	if m.StorageMax != "" {
		cfg.Datastore.StorageMax = m.StorageMax
	}
	if m.StorageGCWatermark != 0 {
		cfg.Datastore.StorageGCWatermark = m.StorageGCWatermark
	}
	if m.StorageGCPeriod != "" {
		cfg.Datastore.GCPeriod = m.StorageGCPeriod
	}
	return cfg, nil
}

View File

@ -0,0 +1,113 @@
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ipfs
import (
"context"
"encoding/json"
"errors"
"fmt"
ipfsFiles "github.com/ipfs/go-ipfs-files"
ipfsOptions "github.com/ipfs/interface-go-ipfs-core/options"
"github.com/multiformats/go-multihash"
"github.com/mitchellh/mapstructure"
"github.com/dapr/components-contrib/bindings"
)
// Handler for the "add" operation: stores req.Data as a new file on IPFS and
// returns the resolved path as a JSON payload.
func (b *IPFSBinding) addOperation(ctx context.Context, req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
	if len(req.Data) == 0 {
		return nil, errors.New("data is empty")
	}
	reqMetadata := &addRequestMetadata{}
	if err := reqMetadata.FromMap(req.Metadata); err != nil {
		return nil, err
	}
	opts, err := reqMetadata.UnixfsAddOptions()
	if err != nil {
		return nil, err
	}
	resolved, err := b.ipfsAPI.Unixfs().Add(ctx, ipfsFiles.NewBytesFile(req.Data), opts...)
	if err != nil {
		return nil, err
	}
	enc, err := json.Marshal(addOperationResponse{Path: resolved.String()})
	if err != nil {
		return nil, err
	}
	return &bindings.InvokeResponse{
		Data:     enc,
		Metadata: nil,
	}, nil
}
// addOperationResponse is the JSON payload returned by the "add" operation.
type addOperationResponse struct {
	// IPFS path of the newly-added document.
	Path string `json:"path"`
}

// addRequestMetadata collects the optional metadata properties recognized by
// the "add" operation. Pointer fields distinguish "unset" from zero values.
type addRequestMetadata struct {
	// CID version to use for the new object.
	CidVersion *int `mapstructure:"cidVersion"`
	// Whether to pin the added document (defaults to true when unset; see UnixfsAddOptions).
	Pin *bool `mapstructure:"pin"`
	// Name of the multihash function to use (looked up in multihash.Names).
	Hash *string `mapstructure:"hash"`
	// Forwarded to ipfsOptions.Unixfs.Inline.
	Inline *bool `mapstructure:"inline"`
	// Forwarded to ipfsOptions.Unixfs.InlineLimit.
	InlineLimit *int `mapstructure:"inlineLimit"`
}
// FromMap decodes the request metadata map into the struct, using weak typing
// so string values can populate numeric and boolean fields.
func (m *addRequestMetadata) FromMap(mp map[string]string) error {
	if len(mp) == 0 {
		return nil
	}
	return mapstructure.WeakDecode(mp, m)
}
// UnixfsAddOptions converts the request metadata into go-ipfs Unixfs add
// options. Pinning is enabled unless explicitly turned off.
func (m *addRequestMetadata) UnixfsAddOptions() ([]ipfsOptions.UnixfsAddOption, error) {
	opts := make([]ipfsOptions.UnixfsAddOption, 0, 5)
	if m.CidVersion != nil {
		opts = append(opts, ipfsOptions.Unixfs.CidVersion(*m.CidVersion))
	}
	pin := true
	if m.Pin != nil {
		pin = *m.Pin
	}
	opts = append(opts, ipfsOptions.Unixfs.Pin(pin))
	if m.Hash != nil {
		hash, ok := multihash.Names[*m.Hash]
		if !ok {
			return nil, fmt.Errorf("invalid hash %s", *m.Hash)
		}
		opts = append(opts, ipfsOptions.Unixfs.Hash(hash))
	}
	if m.Inline != nil {
		opts = append(opts, ipfsOptions.Unixfs.Inline(*m.Inline))
	}
	if m.InlineLimit != nil {
		opts = append(opts, ipfsOptions.Unixfs.InlineLimit(*m.InlineLimit))
	}
	return opts, nil
}

View File

@ -0,0 +1,81 @@
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ipfs
import (
"context"
"errors"
"fmt"
"io"
ipfsFiles "github.com/ipfs/go-ipfs-files"
ipfsPath "github.com/ipfs/interface-go-ipfs-core/path"
"github.com/mitchellh/mapstructure"
"github.com/dapr/components-contrib/bindings"
)
// Handler for the "get" operation: retrieves the file at the path given in
// the request metadata and returns its contents. Fails when the path does
// not resolve to a regular file (e.g. a folder).
func (b *IPFSBinding) getOperation(ctx context.Context, req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
	reqMetadata := &getRequestMetadata{}
	if err := reqMetadata.FromMap(req.Metadata); err != nil {
		return nil, err
	}
	if reqMetadata.Path == "" {
		return nil, errors.New("metadata property 'path' is empty")
	}
	p := ipfsPath.New(reqMetadata.Path)
	if err := p.IsValid(); err != nil {
		return nil, fmt.Errorf("invalid value for metadata property 'path': %v", err)
	}
	node, err := b.ipfsAPI.Unixfs().Get(ctx, p)
	if err != nil {
		return nil, err
	}
	defer node.Close()
	f, ok := node.(ipfsFiles.File)
	if !ok {
		return nil, errors.New("path does not represent a file")
	}
	data, err := io.ReadAll(f)
	if err != nil {
		return nil, err
	}
	return &bindings.InvokeResponse{
		Data:     data,
		Metadata: nil,
	}, nil
}
// getRequestMetadata holds the metadata properties for the "get" operation.
type getRequestMetadata struct {
	// IPFS path or CID of the document to retrieve (required).
	Path string `mapstructure:"path"`
}

// FromMap decodes the request metadata map into the struct using weak typing.
func (m *getRequestMetadata) FromMap(mp map[string]string) (err error) {
	if len(mp) > 0 {
		err = mapstructure.WeakDecode(mp, m)
		if err != nil {
			return err
		}
	}
	return nil
}

View File

@ -0,0 +1,92 @@
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ipfs
import (
"context"
"encoding/json"
"errors"
"fmt"
ipfsPath "github.com/ipfs/interface-go-ipfs-core/path"
"github.com/mitchellh/mapstructure"
"github.com/dapr/components-contrib/bindings"
)
// Handler for the "ls" operation, which lists the entries at a given path.
// (The previous comment was a copy-paste from the "get" handler.)
func (b *IPFSBinding) lsOperation(ctx context.Context, req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
	reqMetadata := &lsRequestMetadata{}
	err := reqMetadata.FromMap(req.Metadata)
	if err != nil {
		return nil, err
	}
	if reqMetadata.Path == "" {
		return nil, errors.New("metadata property 'path' is empty")
	}
	p := ipfsPath.New(reqMetadata.Path)
	err = p.IsValid()
	if err != nil {
		return nil, fmt.Errorf("invalid value for metadata property 'path': %v", err)
	}
	ls, err := b.ipfsAPI.Unixfs().Ls(ctx, p)
	if err != nil {
		return nil, err
	}
	// Drain the channel, aborting on the first entry-level error.
	res := lsOperationResponse{}
	for e := range ls {
		if e.Err != nil {
			return nil, e.Err
		}
		res = append(res, lsOperationResponseItem{
			Name: e.Name,
			Size: e.Size,
			Type: e.Type.String(),
			Cid:  e.Cid.String(),
		})
	}
	// Surface marshaling failures instead of silently returning empty data.
	j, err := json.Marshal(res)
	if err != nil {
		return nil, err
	}
	return &bindings.InvokeResponse{
		Data:     j,
		Metadata: nil,
	}, nil
}
// lsOperationResponseItem describes a single entry returned by "ls".
type lsOperationResponseItem struct {
	Name string `json:"name,omitempty"`
	Size uint64 `json:"size,omitempty"`
	Cid  string `json:"cid,omitempty"`
	Type string `json:"type,omitempty"`
}

// lsOperationResponse is the JSON payload returned by the "ls" operation.
type lsOperationResponse []lsOperationResponseItem

// lsRequestMetadata holds the metadata properties for the "ls" operation.
type lsRequestMetadata struct {
	// IPFS path or CID to list (required).
	Path string `mapstructure:"path"`
}
// FromMap decodes the request metadata map into the struct, using weak typing
// so string values can populate non-string fields.
func (m *lsRequestMetadata) FromMap(mp map[string]string) error {
	if len(mp) == 0 {
		return nil
	}
	return mapstructure.WeakDecode(mp, m)
}

View File

@ -0,0 +1,84 @@
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ipfs
import (
"context"
"errors"
"fmt"
ipfsOptions "github.com/ipfs/interface-go-ipfs-core/options"
ipfsPath "github.com/ipfs/interface-go-ipfs-core/path"
"github.com/mitchellh/mapstructure"
"github.com/dapr/components-contrib/bindings"
)
// Handler for the "pin-add" operation: pins the object at the path given in
// the request metadata. Responds with an empty payload on success.
func (b *IPFSBinding) pinAddOperation(ctx context.Context, req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
	reqMetadata := &pinAddRequestMetadata{}
	if err := reqMetadata.FromMap(req.Metadata); err != nil {
		return nil, err
	}
	if reqMetadata.Path == "" {
		return nil, errors.New("metadata property 'path' is empty")
	}
	p := ipfsPath.New(reqMetadata.Path)
	if err := p.IsValid(); err != nil {
		return nil, fmt.Errorf("invalid value for metadata property 'path': %v", err)
	}
	opts, err := reqMetadata.PinAddOptions()
	if err != nil {
		return nil, err
	}
	if err := b.ipfsAPI.Pin().Add(ctx, p, opts...); err != nil {
		return nil, err
	}
	return &bindings.InvokeResponse{
		Data:     nil,
		Metadata: nil,
	}, nil
}
// pinAddRequestMetadata holds the metadata properties for the "pin-add" operation.
type pinAddRequestMetadata struct {
	// IPFS path or CID to pin (required).
	Path string `mapstructure:"path"`
	// Whether to pin recursively; defaults to true when unset (see PinAddOptions).
	Recursive *bool `mapstructure:"recursive"`
}

// FromMap decodes the request metadata map into the struct using weak typing.
func (m *pinAddRequestMetadata) FromMap(mp map[string]string) (err error) {
	if len(mp) > 0 {
		err = mapstructure.WeakDecode(mp, m)
		if err != nil {
			return err
		}
	}
	return nil
}
// PinAddOptions converts the request metadata into pin-add options.
// Pinning is recursive unless explicitly disabled.
func (m *pinAddRequestMetadata) PinAddOptions() ([]ipfsOptions.PinAddOption, error) {
	recursive := true
	if m.Recursive != nil {
		recursive = *m.Recursive
	}
	return []ipfsOptions.PinAddOption{
		ipfsOptions.Pin.Recursive(recursive),
	}, nil
}

View File

@ -0,0 +1,105 @@
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ipfs
import (
"context"
"encoding/json"
"fmt"
"strings"
ipfsOptions "github.com/ipfs/interface-go-ipfs-core/options"
"github.com/mitchellh/mapstructure"
"github.com/dapr/components-contrib/bindings"
)
// Handler for the "pin-ls" operation, which lists the objects pinned on the
// node as a JSON payload. (The previous comment incorrectly said "removes a
// pin" — a copy-paste from the "pin-rm" handler.)
func (b *IPFSBinding) pinLsOperation(ctx context.Context, req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
	reqMetadata := &pinLsRequestMetadata{}
	err := reqMetadata.FromMap(req.Metadata)
	if err != nil {
		return nil, err
	}
	opts, err := reqMetadata.PinLsOptions()
	if err != nil {
		return nil, err
	}
	ls, err := b.ipfsAPI.Pin().Ls(ctx, opts...)
	if err != nil {
		return nil, err
	}
	// Drain the channel, aborting on the first entry-level error.
	res := pinLsOperationResponse{}
	for e := range ls {
		err = e.Err()
		if err != nil {
			return nil, err
		}
		res = append(res, pinLsOperationResponseItem{
			Type: e.Type(),
			Cid:  e.Path().Cid().String(),
		})
	}
	// Surface marshaling failures instead of silently returning empty data.
	j, err := json.Marshal(res)
	if err != nil {
		return nil, err
	}
	return &bindings.InvokeResponse{
		Data:     j,
		Metadata: nil,
	}, nil
}
// pinLsOperationResponseItem describes a single pin returned by "pin-ls".
type pinLsOperationResponseItem struct {
	Cid  string `json:"cid,omitempty"`
	Type string `json:"type,omitempty"`
}

// pinLsOperationResponse is the JSON payload returned by the "pin-ls" operation.
type pinLsOperationResponse []pinLsOperationResponseItem

// pinLsRequestMetadata holds the metadata properties for the "pin-ls" operation.
type pinLsRequestMetadata struct {
	// Pin type filter: "direct", "recursive", "indirect", or "all"
	// (case-insensitive; see PinLsOptions). Lists all pins when unset.
	Type *string `mapstructure:"type"`
}

// FromMap decodes the request metadata map into the struct using weak typing.
func (m *pinLsRequestMetadata) FromMap(mp map[string]string) (err error) {
	if len(mp) > 0 {
		err = mapstructure.WeakDecode(mp, m)
		if err != nil {
			return err
		}
	}
	return nil
}
// PinLsOptions converts the request metadata into pin-ls options.
// When no type filter is specified, all pins are listed.
func (m *pinLsRequestMetadata) PinLsOptions() ([]ipfsOptions.PinLsOption, error) {
	if m.Type == nil {
		return []ipfsOptions.PinLsOption{ipfsOptions.Pin.Ls.All()}, nil
	}
	var opt ipfsOptions.PinLsOption
	switch strings.ToLower(*m.Type) {
	case "direct":
		opt = ipfsOptions.Pin.Ls.Direct()
	case "recursive":
		opt = ipfsOptions.Pin.Ls.Recursive()
	case "indirect":
		opt = ipfsOptions.Pin.Ls.Indirect()
	case "all":
		opt = ipfsOptions.Pin.Ls.All()
	default:
		return nil, fmt.Errorf("invalid value for metadata property 'type'")
	}
	return []ipfsOptions.PinLsOption{opt}, nil
}

View File

@ -0,0 +1,84 @@
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ipfs
import (
"context"
"errors"
"fmt"
ipfsOptions "github.com/ipfs/interface-go-ipfs-core/options"
ipfsPath "github.com/ipfs/interface-go-ipfs-core/path"
"github.com/mitchellh/mapstructure"
"github.com/dapr/components-contrib/bindings"
)
// Handler for the "pin-rm" operation, which removes a pin
// from the local node. Responds with an empty payload on success.
func (b *IPFSBinding) pinRmOperation(ctx context.Context, req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
	// Parse the request metadata; the 'path' property is required.
	reqMetadata := &pinRmRequestMetadata{}
	err := reqMetadata.FromMap(req.Metadata)
	if err != nil {
		return nil, err
	}
	if reqMetadata.Path == "" {
		return nil, errors.New("metadata property 'path' is empty")
	}
	// Validate the path before calling the pin API.
	p := ipfsPath.New(reqMetadata.Path)
	err = p.IsValid()
	if err != nil {
		return nil, fmt.Errorf("invalid value for metadata property 'path': %v", err)
	}
	opts, err := reqMetadata.PinRmOptions()
	if err != nil {
		return nil, err
	}
	err = b.ipfsAPI.Pin().Rm(ctx, p, opts...)
	if err != nil {
		return nil, err
	}
	return &bindings.InvokeResponse{
		Data:     nil,
		Metadata: nil,
	}, nil
}
// pinRmRequestMetadata holds the metadata properties for the "pin-rm" operation.
type pinRmRequestMetadata struct {
	// IPFS path or CID to un-pin (required).
	Path string `mapstructure:"path"`
	// Whether to remove the pin recursively; defaults to true when unset (see PinRmOptions).
	Recursive *bool `mapstructure:"recursive"`
}

// FromMap decodes the request metadata map into the struct using weak typing.
func (m *pinRmRequestMetadata) FromMap(mp map[string]string) (err error) {
	if len(mp) > 0 {
		err = mapstructure.WeakDecode(mp, m)
		if err != nil {
			return err
		}
	}
	return nil
}
// PinRmOptions converts the request metadata into pin-rm options.
// Removal is recursive unless explicitly disabled.
func (m *pinRmRequestMetadata) PinRmOptions() ([]ipfsOptions.PinRmOption, error) {
	recursive := true
	if m.Recursive != nil {
		recursive = *m.Recursive
	}
	return []ipfsOptions.PinRmOption{
		ipfsOptions.Pin.RmRecursive(recursive),
	}, nil
}

View File

@ -38,7 +38,7 @@ type Binding struct {
}
// NewKafka returns a new kafka binding instance.
func NewKafka(logger logger.Logger) *Binding {
func NewKafka(logger logger.Logger) bindings.InputOutputBinding {
k := kafka.NewKafka(logger)
// in kafka binding component, disable consumer retry by default
k.DefaultConsumeRetryEnabled = false

View File

@ -31,10 +31,10 @@ import (
)
type kubernetesInput struct {
kubeClient kubernetes.Interface
namespace string
resyncPeriodInSec time.Duration
logger logger.Logger
kubeClient kubernetes.Interface
namespace string
resyncPeriod time.Duration
logger logger.Logger
}
type EventResponse struct {
@ -68,9 +68,9 @@ func (k *kubernetesInput) parseMetadata(metadata bindings.Metadata) error {
intval, err := strconv.ParseInt(val, 10, 64)
if err != nil {
k.logger.Warnf("invalid resyncPeriodInSec %s; %v; defaulting to 10s", val, err)
k.resyncPeriodInSec = time.Second * 10
k.resyncPeriod = time.Second * 10
} else {
k.resyncPeriodInSec = time.Second * time.Duration(intval)
k.resyncPeriod = time.Second * time.Duration(intval)
}
}
@ -84,11 +84,11 @@ func (k *kubernetesInput) Read(ctx context.Context, handler bindings.Handler) er
k.namespace,
fields.Everything(),
)
var resultChan chan EventResponse = make(chan EventResponse)
resultChan := make(chan EventResponse)
_, controller := cache.NewInformer(
watchlist,
&v1.Event{},
k.resyncPeriodInSec,
k.resyncPeriod,
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
if obj != nil {

View File

@ -34,7 +34,7 @@ func TestParseMetadata(t *testing.T) {
i.parseMetadata(m)
assert.Equal(t, nsName, i.namespace, "The namespaces should be the same.")
assert.Equal(t, resyncPeriod, i.resyncPeriodInSec, "The resyncPeriod should be the same.")
assert.Equal(t, resyncPeriod, i.resyncPeriod, "The resyncPeriod should be the same.")
})
t.Run("parse metadata no namespace", func(t *testing.T) {
m := bindings.Metadata{}
@ -55,6 +55,6 @@ func TestParseMetadata(t *testing.T) {
assert.Nil(t, err, "Expected err to be nil.")
assert.Equal(t, nsName, i.namespace, "The namespaces should be the same.")
assert.Equal(t, time.Second*10, i.resyncPeriodInSec, "The resyncPeriod should be the same.")
assert.Equal(t, time.Second*10, i.resyncPeriod, "The resyncPeriod should be the same.")
})
}

View File

@ -19,7 +19,7 @@ import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"io"
"os"
"path/filepath"
"strconv"
@ -52,7 +52,7 @@ type createResponse struct {
}
// NewLocalStorage returns a new LocalStorage instance.
func NewLocalStorage(logger logger.Logger) *LocalStorage {
func NewLocalStorage(logger logger.Logger) bindings.OutputBinding {
return &LocalStorage{logger: logger}
}
@ -153,7 +153,7 @@ func (ls *LocalStorage) get(filename string, req *bindings.InvokeRequest) (*bind
return nil, err
}
b, err := ioutil.ReadAll(f)
b, err := io.ReadAll(f)
if err != nil {
ls.logger.Debugf("%s", err)

View File

@ -25,7 +25,7 @@ import (
func TestParseMetadata(t *testing.T) {
m := bindings.Metadata{}
m.Properties = map[string]string{"rootPath": "/files"}
localStorage := NewLocalStorage(logger.NewLogger("test"))
localStorage := NewLocalStorage(logger.NewLogger("test")).(*LocalStorage)
meta, err := localStorage.parseMetadata(m)
assert.Nil(t, err)
assert.Equal(t, "/files", meta.RootPath)

View File

@ -13,8 +13,9 @@ limitations under the License.
package bindings
import "github.com/dapr/components-contrib/metadata"
// Metadata represents a set of binding specific properties.
type Metadata struct {
Name string
Properties map[string]string `json:"properties"`
metadata.Base `json:",inline"`
}

View File

@ -69,7 +69,7 @@ type MQTT struct {
}
// NewMQTT returns a new MQTT instance.
func NewMQTT(logger logger.Logger) *MQTT {
func NewMQTT(logger logger.Logger) bindings.InputOutputBinding {
return &MQTT{logger: logger}
}

View File

@ -11,6 +11,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/dapr/components-contrib/bindings"
mdata "github.com/dapr/components-contrib/metadata"
"github.com/dapr/kit/logger"
)
@ -21,7 +22,7 @@ const (
// listener 1883
// allow_anonymous true
// And run:
// nolint:misspell
//nolint:misspell
// docker run -d -v mosquitto.conf:/mosquitto/config/mosquitto.conf --name test-mqtt -p 1883:1883 eclipse-mosquitto:2
// In that case the connection string will be: tcp://127.0.0.1:1883
testMQTTConnectionStringEnvKey = "DAPR_TEST_MQTT_URL"
@ -47,7 +48,7 @@ func TestInvokeWithTopic(t *testing.T) {
const msgCustomized = "hello from customized"
dataCustomized := []byte(msgCustomized)
metadata := bindings.Metadata{
metadata := bindings.Metadata{Base: mdata.Base{
Name: "testQueue",
Properties: map[string]string{
"consumerID": uuid.NewString(),
@ -58,11 +59,11 @@ func TestInvokeWithTopic(t *testing.T) {
"cleanSession": "true",
"backOffMaxRetries": "0",
},
}
}}
logger := logger.NewLogger("test")
r := NewMQTT(logger)
r := NewMQTT(logger).(*MQTT)
err := r.Init(metadata)
assert.Nil(t, err)

View File

@ -23,6 +23,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/dapr/components-contrib/bindings"
mdata "github.com/dapr/components-contrib/metadata"
"github.com/dapr/kit/logger"
)
@ -41,9 +42,9 @@ func TestParseMetadata(t *testing.T) {
t.Run("metadata is correct", func(t *testing.T) {
fakeProperties := getFakeProperties()
fakeMetaData := bindings.Metadata{
fakeMetaData := bindings.Metadata{Base: mdata.Base{
Properties: fakeProperties,
}
}}
m, err := parseMQTTMetaData(fakeMetaData)
@ -57,7 +58,7 @@ func TestParseMetadata(t *testing.T) {
t.Run("missing topic", func(t *testing.T) {
fakeProperties := getFakeProperties()
fakeMetaData := bindings.Metadata{Name: "binging-test", Properties: fakeProperties}
fakeMetaData := bindings.Metadata{Base: mdata.Base{Name: "binging-test", Properties: fakeProperties}}
fakeMetaData.Properties["topic"] = ""
_, err := parseMQTTMetaData(fakeMetaData)
@ -67,7 +68,7 @@ func TestParseMetadata(t *testing.T) {
t.Run("missing consumerID", func(t *testing.T) {
fakeProperties := getFakeProperties()
fakeMetaData := bindings.Metadata{Name: "binging-test", Properties: fakeProperties}
fakeMetaData := bindings.Metadata{Base: mdata.Base{Name: "binging-test", Properties: fakeProperties}}
fakeMetaData.Properties["consumerID"] = ""
_, err := parseMQTTMetaData(fakeMetaData)
@ -78,7 +79,7 @@ func TestParseMetadata(t *testing.T) {
t.Run("url is not given", func(t *testing.T) {
fakeProperties := getFakeProperties()
fakeMetaData := bindings.Metadata{Name: "binging-test", Properties: fakeProperties}
fakeMetaData := bindings.Metadata{Base: mdata.Base{Name: "binging-test", Properties: fakeProperties}}
fakeMetaData.Properties[mqttURL] = ""
m, err := parseMQTTMetaData(fakeMetaData)
@ -91,7 +92,7 @@ func TestParseMetadata(t *testing.T) {
t.Run("qos and retain is not given", func(t *testing.T) {
fakeProperties := getFakeProperties()
fakeMetaData := bindings.Metadata{Name: "binging-test", Properties: fakeProperties}
fakeMetaData := bindings.Metadata{Base: mdata.Base{Name: "binging-test", Properties: fakeProperties}}
fakeMetaData.Properties[mqttQOS] = ""
fakeMetaData.Properties[mqttRetain] = ""
@ -107,7 +108,7 @@ func TestParseMetadata(t *testing.T) {
t.Run("invalid clean session field", func(t *testing.T) {
fakeProperties := getFakeProperties()
fakeMetaData := bindings.Metadata{Name: "binging-test", Properties: fakeProperties}
fakeMetaData := bindings.Metadata{Base: mdata.Base{Name: "binging-test", Properties: fakeProperties}}
fakeMetaData.Properties[mqttCleanSession] = "randomString"
m, err := parseMQTTMetaData(fakeMetaData)
@ -120,7 +121,7 @@ func TestParseMetadata(t *testing.T) {
t.Run("invalid ca certificate", func(t *testing.T) {
fakeProperties := getFakeProperties()
fakeMetaData := bindings.Metadata{Name: "binging-test", Properties: fakeProperties}
fakeMetaData := bindings.Metadata{Base: mdata.Base{Name: "binging-test", Properties: fakeProperties}}
fakeMetaData.Properties[mqttCACert] = "randomNonPEMBlockCA"
_, err := parseMQTTMetaData(fakeMetaData)
@ -130,7 +131,7 @@ func TestParseMetadata(t *testing.T) {
t.Run("valid ca certificate", func(t *testing.T) {
fakeProperties := getFakeProperties()
fakeMetaData := bindings.Metadata{Name: "binging-test", Properties: fakeProperties}
fakeMetaData := bindings.Metadata{Base: mdata.Base{Name: "binging-test", Properties: fakeProperties}}
fakeMetaData.Properties[mqttCACert] = "-----BEGIN CERTIFICATE-----\nMIICyDCCAbACCQDb8BtgvbqW5jANBgkqhkiG9w0BAQsFADAmMQswCQYDVQQGEwJJ\nTjEXMBUGA1UEAwwOZGFwck1xdHRUZXN0Q0EwHhcNMjAwODEyMDY1MzU4WhcNMjUw\nODEyMDY1MzU4WjAmMQswCQYDVQQGEwJJTjEXMBUGA1UEAwwOZGFwck1xdHRUZXN0\nQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDEXte1GBxFJaygsEnK\nHV2AxazZW6Vppv+i50AuURHcaGo0i8G5CTfHzSKrYtTFfBskUspl+2N8GPV5c8Eb\ng+PP6YFn1wiHVz+wRSk3BD35DcGOT2o4XsJw5tiAzJkbpAOYCYl7KAM+BtOf41uC\nd6TdqmawhRGtv1ND2WtyJOT6A3KcUfjhL4TFEhWoljPJVay4TQoJcZMAImD/Xcxw\n6urv6wmUJby3/RJ3I46ZNH3zxEw5vSq1TuzuXxQmfPJG0ZPKJtQZ2nkZ3PNZe4bd\nNUa83YgQap7nBhYdYMMsQyLES2qy3mPcemBVoBWRGODel4PMEcsQiOhAyloAF2d3\nhd+LAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAK13X5JYBy78vHYoP0Oq9fe5XBbL\nuRM8YLnet9b/bXTGG4SnCCOGqWz99swYK7SVyR5l2h8SAoLzeNV61PtaZ6fHrbar\noxSL7BoRXOhMH6LQATadyvwlJ71uqlagqya7soaPK09TtfzeebLT0QkRCWT9b9lQ\nDBvBVCaFidynJL1ts21m5yUdIY4JSu4sGZGb4FRGFdBv/hD3wH8LAkOppsSv3C/Q\nkfkDDSQzYbdMoBuXmafvi3He7Rv+e6Tj9or1rrWdx0MIKlZPzz4DOe5Rh112uRB9\n7xPHJt16c+Ya3DKpchwwdNcki0vFchlpV96HK8sMCoY9kBzPhkEQLdiBGv4=\n-----END CERTIFICATE-----\n"
m, err := parseMQTTMetaData(fakeMetaData)
@ -146,7 +147,7 @@ func TestParseMetadata(t *testing.T) {
t.Run("invalid client certificate", func(t *testing.T) {
fakeProperties := getFakeProperties()
fakeMetaData := bindings.Metadata{Name: "binging-test", Properties: fakeProperties}
fakeMetaData := bindings.Metadata{Base: mdata.Base{Name: "binging-test", Properties: fakeProperties}}
fakeMetaData.Properties[mqttClientCert] = "randomNonPEMBlockClientCert"
_, err := parseMQTTMetaData(fakeMetaData)
@ -156,7 +157,7 @@ func TestParseMetadata(t *testing.T) {
t.Run("valid client certificate", func(t *testing.T) {
fakeProperties := getFakeProperties()
fakeMetaData := bindings.Metadata{Name: "binging-test", Properties: fakeProperties}
fakeMetaData := bindings.Metadata{Base: mdata.Base{Name: "binging-test", Properties: fakeProperties}}
fakeMetaData.Properties[mqttClientCert] = "-----BEGIN CERTIFICATE-----\nMIICzDCCAbQCCQDBKDMS3SHsDzANBgkqhkiG9w0BAQUFADAmMQswCQYDVQQGEwJJ\nTjEXMBUGA1UEAwwOZGFwck1xdHRUZXN0Q0EwHhcNMjAwODEyMDY1NTE1WhcNMjEw\nODA3MDY1NTE1WjAqMQswCQYDVQQGEwJJTjEbMBkGA1UEAwwSZGFwck1xdHRUZXN0\nQ2xpZW50MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5IDfsGI2pb4W\nt3CjckrKuNeTrgmla3sXxSI5wfDgLGd/XkNu++M6yi9ABaBiYChpxbylqIeAn/HT\n3r/nhcb+bldMtEkU9tODHy/QDhvN2UGFjRsMfzO9p1oMpTnRdJCHYinE+oqVced5\nHI+UEofAU+1eiIXqJGKrdfn4gvaHst4QfVPvui8WzJq9TMkEhEME+5hs3VKyKZr2\nqjIxzr7nLVod3DBf482VjxRI06Ip3fPvNuMWwzj2G+Rj8PMcBjoKeCLQL9uQh7f1\nTWHuACqNIrmFEUQWdGETnRjHWIvw0NEL40+Ur2b5+7/hoqnTzReJ3XUe1jM3l44f\nl0rOf4hu2QIDAQABMA0GCSqGSIb3DQEBBQUAA4IBAQAT9yoIeX0LTsvx7/b+8V3a\nkP+j8u97QCc8n5xnMpivcMEk5cfqXX5Llv2EUJ9kBsynrJwT7ujhTJXSA/zb2UdC\nKH8PaSrgIlLwQNZMDofbz6+zPbjStkgne/ZQkTDIxY73sGpJL8LsQVO9p2KjOpdj\nSf9KuJhLzcHolh7ry3ZrkOg+QlMSvseeDRAxNhpkJrGQ6piXoUiEeKKNa0rWTMHx\nIP1Hqj+hh7jgqoQR48NL2jNng7I64HqTl6Mv2fiNfINiw+5xmXTB0QYkGU5NvPBO\naKcCRcGlU7ND89BogQPZsl/P04tAuQqpQWffzT4sEEOyWSVGda4N2Ys3GSQGBv8e\n-----END CERTIFICATE-----\n"
m, err := parseMQTTMetaData(fakeMetaData)
@ -172,7 +173,7 @@ func TestParseMetadata(t *testing.T) {
t.Run("invalid client certificate key", func(t *testing.T) {
fakeProperties := getFakeProperties()
fakeMetaData := bindings.Metadata{Name: "binging-test", Properties: fakeProperties}
fakeMetaData := bindings.Metadata{Base: mdata.Base{Name: "binging-test", Properties: fakeProperties}}
fakeMetaData.Properties[mqttClientKey] = "randomNonPEMBlockClientKey"
_, err := parseMQTTMetaData(fakeMetaData)
@ -182,7 +183,7 @@ func TestParseMetadata(t *testing.T) {
t.Run("valid client certificate key", func(t *testing.T) {
fakeProperties := getFakeProperties()
fakeMetaData := bindings.Metadata{Name: "binging-test", Properties: fakeProperties}
fakeMetaData := bindings.Metadata{Base: mdata.Base{Name: "binging-test", Properties: fakeProperties}}
fakeMetaData.Properties[mqttClientKey] = "-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEA5IDfsGI2pb4Wt3CjckrKuNeTrgmla3sXxSI5wfDgLGd/XkNu\n++M6yi9ABaBiYChpxbylqIeAn/HT3r/nhcb+bldMtEkU9tODHy/QDhvN2UGFjRsM\nfzO9p1oMpTnRdJCHYinE+oqVced5HI+UEofAU+1eiIXqJGKrdfn4gvaHst4QfVPv\nui8WzJq9TMkEhEME+5hs3VKyKZr2qjIxzr7nLVod3DBf482VjxRI06Ip3fPvNuMW\nwzj2G+Rj8PMcBjoKeCLQL9uQh7f1TWHuACqNIrmFEUQWdGETnRjHWIvw0NEL40+U\nr2b5+7/hoqnTzReJ3XUe1jM3l44fl0rOf4hu2QIDAQABAoIBAQCVMINb4TP20P55\n9IPyqlxjhPT563hijXK+lhMJyiBDPavOOs7qjLikq2bshYPVbm1o2jt6pkXXqAeB\n5t/d20fheQQurYyPfxecNBZuL78duwbcUy28m2aXLlcVRYO4zGhoMgdW4UajoNLV\nT/UIiDONWGyhTHXMHdP+6h9UOmvs3o4b225AuLrw9n6QO5I1Se8lcfOTIqR1fy4O\nGsUWEQPdW0X3Dhgpx7kDIuBTAQzbjD31PCR1U8h2wsCeEe6hPCrsMbo/D019weol\ndi40tbWR1/oNz0+vro2d9YDPJkXN0gmpT51Z4YJoexZBdyzO5z4DMSdn5yczzt6p\nQq8LsXAFAoGBAPYXRbC4OxhtuC+xr8KRkaCCMjtjUWFbFWf6OFgUS9b5uPz9xvdY\nXo7wBP1zp2dS8yFsdIYH5Six4Z5iOuDR4sVixzjabhwedL6bmS1zV5qcCWeASKX1\nURgSkfMmC4Tg3LBgZ9YxySFcVRjikxljkS3eK7Mp7Xmj5afe7qV73TJfAoGBAO20\nTtw2RGe02xnydZmmwf+NpQHOA9S0JsehZA6NRbtPEN/C8bPJIq4VABC5zcH+tfYf\nzndbDlGhuk+qpPA590rG5RSOUjYnQFq7njdSfFyok9dXSZQTjJwFnG2oy0LmgjCe\nROYnbCzD+a+gBKV4xlo2M80OLakQ3zOwPT0xNRnHAoGATLEj/tbrU8mdxP9TDwfe\nom7wyKFDE1wXZ7gLJyfsGqrog69y+lKH5XPXmkUYvpKTQq9SARMkz3HgJkPmpXnD\nelA2Vfl8pza2m1BShF+VxZErPR41hcLV6vKemXAZ1udc33qr4YzSaZskygSSYy8s\nZ2b9p3BBmc8CGzbWmKvpW3ECgYEAn7sFLxdMWj/+5221Nr4HKPn+wrq0ek9gq884\n1Ep8bETSOvrdvolPQ5mbBKJGsLC/h5eR/0Rx18sMzpIF6eOZ2GbU8z474mX36cCf\nrd9A8Gbbid3+9IE6gHGIz2uYwujw3UjNVbdyCpbahvjJhoQlDePUZVu8tRpAUpSA\nYklZvGsCgYBuIlOFTNGMVUnwfzrcS9a/31LSvWTZa8w2QFjsRPMYFezo2l4yWs4D\nPEpeuoJm+Gp6F6ayjoeyOw9mvMBH5hAZr4WjbiU6UodzEHREAsLAzCzcRyIpnDE6\nPW1c3j60r8AHVufkWTA+8B9WoLC5MqcYTV3beMGnNGGqS2PeBom63Q==\n-----END RSA PRIVATE KEY-----\n"
m, err := parseMQTTMetaData(fakeMetaData)
@ -198,7 +199,7 @@ func TestParseMetadata(t *testing.T) {
topic := "/topic/where/the/data/is/from"
logger := logger.NewLogger("test")
m := NewMQTT(logger)
m := NewMQTT(logger).(*MQTT)
m.ctx, m.cancel = context.WithCancel(context.Background())
m.handleMessage(context.Background(), func(ctx context.Context, r *bindings.ReadResponse) ([]byte, error) {

View File

@ -21,7 +21,7 @@ import (
"database/sql/driver"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"reflect"
"strconv"
"time"
@ -76,7 +76,7 @@ type Mysql struct {
}
// NewMysql returns a new MySQL output binding.
func NewMysql(logger logger.Logger) *Mysql {
func NewMysql(logger logger.Logger) bindings.OutputBinding {
return &Mysql{logger: logger}
}
@ -155,7 +155,7 @@ func (m *Mysql) Invoke(ctx context.Context, req *bindings.InvokeRequest) (*bindi
},
}
switch req.Operation { // nolint: exhaustive
switch req.Operation { //nolint:exhaustive
case execOperation:
r, err := m.exec(ctx, s)
if err != nil {
@ -263,7 +263,7 @@ func initDB(url, pemPath string) (*sql.DB, error) {
if pemPath != "" {
rootCertPool := x509.NewCertPool()
pem, err := ioutil.ReadFile(pemPath)
pem, err := os.ReadFile(pemPath)
if err != nil {
return nil, errors.Wrapf(err, "Error reading PEM file from %s", pemPath)
}

View File

@ -24,6 +24,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/dapr/components-contrib/bindings"
"github.com/dapr/components-contrib/metadata"
"github.com/dapr/kit/logger"
)
@ -72,8 +73,8 @@ func TestMysqlIntegration(t *testing.T) {
t.SkipNow()
}
b := NewMysql(logger.NewLogger("test"))
m := bindings.Metadata{Properties: map[string]string{connectionURLKey: url}}
b := NewMysql(logger.NewLogger("test")).(*Mysql)
m := bindings.Metadata{Base: metadata.Base{Properties: map[string]string{connectionURLKey: url}}}
if err := b.Init(m); err != nil {
t.Fatal(err)
}

View File

@ -180,7 +180,7 @@ func mockDatabase(t *testing.T) (*Mysql, sqlmock.Sqlmock, error) {
t.Fatalf("an error '%s' was not expected when opening a stub database connection", err)
}
m := NewMysql(logger.NewLogger("test"))
m := NewMysql(logger.NewLogger("test")).(*Mysql)
m.db = db
return m, mock, err

View File

@ -32,6 +32,6 @@ func PingOutBinding(outputBinding OutputBinding) error {
if outputBindingWithPing, ok := outputBinding.(health.Pinger); ok {
return outputBindingWithPing.Ping()
} else {
return fmt.Errorf("Ping is not implemented by this output binding")
return fmt.Errorf("ping is not implemented by this output binding")
}
}

View File

@ -43,7 +43,7 @@ type Postgres struct {
}
// NewPostgres returns a new PostgreSQL output binding.
func NewPostgres(logger logger.Logger) *Postgres {
func NewPostgres(logger logger.Logger) bindings.OutputBinding {
return &Postgres{logger: logger}
}
@ -107,7 +107,7 @@ func (p *Postgres) Invoke(ctx context.Context, req *bindings.InvokeRequest) (res
},
}
switch req.Operation { // nolint: exhaustive
switch req.Operation { //nolint:exhaustive
case execOperation:
r, err := p.exec(ctx, sql)
if err != nil {
@ -154,13 +154,13 @@ func (p *Postgres) query(ctx context.Context, sql string) (result []byte, err er
return nil, errors.Wrapf(err, "error executing %s", sql)
}
rs := make([]interface{}, 0)
rs := make([]any, 0)
for rows.Next() {
val, rowErr := rows.Values()
if rowErr != nil {
return nil, errors.Wrapf(rowErr, "error parsing result: %v", rows.Err())
}
rs = append(rs, val)
rs = append(rs, val) //nolint:asasalint
}
if result, err = json.Marshal(rs); err != nil {

View File

@ -23,6 +23,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/dapr/components-contrib/bindings"
"github.com/dapr/components-contrib/metadata"
"github.com/dapr/kit/logger"
)
@ -61,8 +62,8 @@ func TestPostgresIntegration(t *testing.T) {
}
// live DB test
b := NewPostgres(logger.NewLogger("test"))
m := bindings.Metadata{Properties: map[string]string{connectionURLKey: url}}
b := NewPostgres(logger.NewLogger("test")).(*Postgres)
m := bindings.Metadata{Base: metadata.Base{Properties: map[string]string{connectionURLKey: url}}}
if err := b.Init(m); err != nil {
t.Fatal(err)
}

View File

@ -43,7 +43,7 @@ type postmarkMetadata struct {
}
// NewPostmark returns a new Postmark bindings instance.
func NewPostmark(logger logger.Logger) *Postmark {
func NewPostmark(logger logger.Logger) bindings.OutputBinding {
return &Postmark{logger: logger}
}

View File

@ -25,7 +25,7 @@ import (
"github.com/dapr/components-contrib/bindings"
"github.com/dapr/components-contrib/internal/utils"
contrib_metadata "github.com/dapr/components-contrib/metadata"
contribMetadata "github.com/dapr/components-contrib/metadata"
"github.com/dapr/kit/logger"
)
@ -65,7 +65,7 @@ type rabbitMQMetadata struct {
}
// NewRabbitMQ returns a new rabbitmq instance.
func NewRabbitMQ(logger logger.Logger) *RabbitMQ {
func NewRabbitMQ(logger logger.Logger) bindings.InputOutputBinding {
return &RabbitMQ{logger: logger}
}
@ -110,13 +110,13 @@ func (r *RabbitMQ) Invoke(ctx context.Context, req *bindings.InvokeRequest) (*bi
Body: req.Data,
}
contentType, ok := contrib_metadata.TryGetContentType(req.Metadata)
contentType, ok := contribMetadata.TryGetContentType(req.Metadata)
if ok {
pub.ContentType = contentType
}
ttl, ok, err := contrib_metadata.TryGetTTL(req.Metadata)
ttl, ok, err := contribMetadata.TryGetTTL(req.Metadata)
if err != nil {
return nil, err
}
@ -128,7 +128,7 @@ func (r *RabbitMQ) Invoke(ctx context.Context, req *bindings.InvokeRequest) (*bi
pub.Expiration = strconv.FormatInt(ttl.Milliseconds(), 10)
}
priority, ok, err := contrib_metadata.TryGetPriority(req.Metadata)
priority, ok, err := contribMetadata.TryGetPriority(req.Metadata)
if err != nil {
return nil, err
}
@ -196,7 +196,7 @@ func (r *RabbitMQ) parseMetadata(metadata bindings.Metadata) error {
m.MaxPriority = &maxPriority
}
ttl, ok, err := contrib_metadata.TryGetTTL(metadata.Properties)
ttl, ok, err := contribMetadata.TryGetTTL(metadata.Properties)
if err != nil {
return err
}

View File

@ -30,7 +30,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/dapr/components-contrib/bindings"
contrib_metadata "github.com/dapr/components-contrib/metadata"
contribMetadata "github.com/dapr/components-contrib/metadata"
"github.com/dapr/kit/logger"
)
@ -72,17 +72,17 @@ func TestQueuesWithTTL(t *testing.T) {
metadata := bindings.Metadata{
Name: "testQueue",
Properties: map[string]string{
"queueName": queueName,
"host": rabbitmqHost,
"deleteWhenUnused": strconv.FormatBool(exclusive),
"durable": strconv.FormatBool(durable),
contrib_metadata.TTLMetadataKey: strconv.FormatInt(ttlInSeconds, 10),
"queueName": queueName,
"host": rabbitmqHost,
"deleteWhenUnused": strconv.FormatBool(exclusive),
"durable": strconv.FormatBool(durable),
contribMetadata.TTLMetadataKey: strconv.FormatInt(ttlInSeconds, 10),
},
}
logger := logger.NewLogger("test")
r := NewRabbitMQ(logger)
r := NewRabbitMQ(logger).(*RabbitMQ)
err := r.Init(metadata)
assert.Nil(t, err)
@ -139,7 +139,7 @@ func TestPublishingWithTTL(t *testing.T) {
logger := logger.NewLogger("test")
rabbitMQBinding1 := NewRabbitMQ(logger)
rabbitMQBinding1 := NewRabbitMQ(logger).(*RabbitMQ)
err := rabbitMQBinding1.Init(metadata)
assert.Nil(t, err)
@ -156,7 +156,7 @@ func TestPublishingWithTTL(t *testing.T) {
writeRequest := bindings.InvokeRequest{
Data: []byte(tooLateMsgContent),
Metadata: map[string]string{
contrib_metadata.TTLMetadataKey: strconv.Itoa(ttlInSeconds),
contribMetadata.TTLMetadataKey: strconv.Itoa(ttlInSeconds),
},
}
@ -170,7 +170,7 @@ func TestPublishingWithTTL(t *testing.T) {
assert.False(t, ok)
// Getting before it is expired, should return it
rabbitMQBinding2 := NewRabbitMQ(logger)
rabbitMQBinding2 := NewRabbitMQ(logger).(*RabbitMQ)
err = rabbitMQBinding2.Init(metadata)
assert.Nil(t, err)
@ -178,7 +178,7 @@ func TestPublishingWithTTL(t *testing.T) {
writeRequest = bindings.InvokeRequest{
Data: []byte(testMsgContent),
Metadata: map[string]string{
contrib_metadata.TTLMetadataKey: strconv.Itoa(ttlInSeconds * 1000),
contribMetadata.TTLMetadataKey: strconv.Itoa(ttlInSeconds * 1000),
},
}
	_, err = rabbitMQBinding2.Invoke(context.Background(), &writeRequest)
@ -204,18 +204,18 @@ func TestExclusiveQueue(t *testing.T) {
metadata := bindings.Metadata{
Name: "testQueue",
Properties: map[string]string{
"queueName": queueName,
"host": rabbitmqHost,
"deleteWhenUnused": strconv.FormatBool(exclusive),
"durable": strconv.FormatBool(durable),
"exclusive": strconv.FormatBool(exclusive),
contrib_metadata.TTLMetadataKey: strconv.FormatInt(ttlInSeconds, 10),
"queueName": queueName,
"host": rabbitmqHost,
"deleteWhenUnused": strconv.FormatBool(exclusive),
"durable": strconv.FormatBool(durable),
"exclusive": strconv.FormatBool(exclusive),
contribMetadata.TTLMetadataKey: strconv.FormatInt(ttlInSeconds, 10),
},
}
logger := logger.NewLogger("test")
r := NewRabbitMQ(logger)
r := NewRabbitMQ(logger).(*RabbitMQ)
err := r.Init(metadata)
assert.Nil(t, err)
@ -267,7 +267,7 @@ func TestPublishWithPriority(t *testing.T) {
logger := logger.NewLogger("test")
r := NewRabbitMQ(logger)
r := NewRabbitMQ(logger).(*RabbitMQ)
err := r.Init(metadata)
assert.Nil(t, err)
@ -283,7 +283,7 @@ func TestPublishWithPriority(t *testing.T) {
const middlePriorityMsgContent = "middle"
	_, err = r.Invoke(context.Background(), &bindings.InvokeRequest{
Metadata: map[string]string{
contrib_metadata.PriorityMetadataKey: "5",
contribMetadata.PriorityMetadataKey: "5",
},
Data: []byte(middlePriorityMsgContent),
})
@ -292,7 +292,7 @@ func TestPublishWithPriority(t *testing.T) {
const lowPriorityMsgContent = "low"
	_, err = r.Invoke(context.Background(), &bindings.InvokeRequest{
Metadata: map[string]string{
contrib_metadata.PriorityMetadataKey: "1",
contribMetadata.PriorityMetadataKey: "1",
},
Data: []byte(lowPriorityMsgContent),
})
@ -301,7 +301,7 @@ func TestPublishWithPriority(t *testing.T) {
const highPriorityMsgContent = "high"
	_, err = r.Invoke(context.Background(), &bindings.InvokeRequest{
Metadata: map[string]string{
contrib_metadata.PriorityMetadataKey: "10",
contribMetadata.PriorityMetadataKey: "10",
},
Data: []byte(highPriorityMsgContent),
})

View File

@ -36,7 +36,7 @@ type Redis struct {
}
// NewRedis returns a new redis bindings instance.
func NewRedis(logger logger.Logger) *Redis {
func NewRedis(logger logger.Logger) bindings.OutputBinding {
return &Redis{logger: logger}
}

View File

@ -43,7 +43,7 @@ type StateConfig struct {
}
// NewRethinkDBStateChangeBinding returns a new RethinkDB actor event input binding.
func NewRethinkDBStateChangeBinding(logger logger.Logger) *Binding {
func NewRethinkDBStateChangeBinding(logger logger.Logger) bindings.InputBinding {
return &Binding{
logger: logger,
}

View File

@ -22,6 +22,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/dapr/components-contrib/bindings"
"github.com/dapr/components-contrib/metadata"
"github.com/dapr/kit/logger"
)
@ -41,7 +42,7 @@ func getNewRethinkActorBinding() *Binding {
l.SetOutputLevel(logger.DebugLevel)
}
return NewRethinkDBStateChangeBinding(l)
return NewRethinkDBStateChangeBinding(l).(*Binding)
}
/*
@ -63,10 +64,10 @@ func TestBinding(t *testing.T) {
testDuration = d
}
m := bindings.Metadata{
m := bindings.Metadata{Base: metadata.Base{
Name: "test",
Properties: getTestMetadata(),
}
}}
assert.NotNil(t, m.Properties)
b := getNewRethinkActorBinding()

View File

@ -56,7 +56,7 @@ type Metadata struct {
}
// NewSMTP returns a new smtp binding instance.
func NewSMTP(logger logger.Logger) *Mailer {
func NewSMTP(logger logger.Logger) bindings.OutputBinding {
return &Mailer{logger: logger}
}

View File

@ -56,7 +56,7 @@ type sendGridRestError struct {
}
// NewSendGrid returns a new SendGrid bindings instance.
func NewSendGrid(logger logger.Logger) *SendGrid {
func NewSendGrid(logger logger.Logger) bindings.OutputBinding {
return &SendGrid{logger: logger}
}

View File

@ -49,7 +49,7 @@ type twilioMetadata struct {
timeout time.Duration
}
func NewSMS(logger logger.Logger) *SMS {
func NewSMS(logger logger.Logger) bindings.OutputBinding {
return &SMS{
logger: logger,
httpClient: &http.Client{
@ -112,7 +112,7 @@ func (t *SMS) Invoke(ctx context.Context, req *bindings.InvokeRequest) (*binding
vDr := *strings.NewReader(v.Encode())
twilioURL := fmt.Sprintf("%s%s/Messages.json", twilioURLBase, t.metadata.accountSid)
httpReq, err := http.NewRequestWithContext(ctx, "POST", twilioURL, &vDr)
httpReq, err := http.NewRequestWithContext(ctx, http.MethodPost, twilioURL, &vDr)
if err != nil {
return nil, err
}

View File

@ -16,7 +16,7 @@ package sms
import (
"context"
"errors"
"io/ioutil"
"io"
"net/http"
"strings"
"sync/atomic"
@ -68,14 +68,14 @@ func TestParseDuration(t *testing.T) {
func TestWriteShouldSucceed(t *testing.T) {
httpTransport := &mockTransport{
response: &http.Response{StatusCode: 200, Body: ioutil.NopCloser(strings.NewReader(""))},
response: &http.Response{StatusCode: http.StatusOK, Body: io.NopCloser(strings.NewReader(""))},
}
m := bindings.Metadata{}
m.Properties = map[string]string{
"toNumber": "toNumber", "fromNumber": "fromNumber",
"accountSid": "accountSid", "authToken": "authToken",
}
tw := NewSMS(logger.NewLogger("test"))
tw := NewSMS(logger.NewLogger("test")).(*SMS)
tw.httpClient = &http.Client{
Transport: httpTransport,
}
@ -105,14 +105,14 @@ func TestWriteShouldSucceed(t *testing.T) {
func TestWriteShouldFail(t *testing.T) {
httpTransport := &mockTransport{
response: &http.Response{StatusCode: 200, Body: ioutil.NopCloser(strings.NewReader(""))},
response: &http.Response{StatusCode: http.StatusOK, Body: io.NopCloser(strings.NewReader(""))},
}
m := bindings.Metadata{}
m.Properties = map[string]string{
"fromNumber": "fromNumber",
"accountSid": "accountSid", "authToken": "authToken",
}
tw := NewSMS(logger.NewLogger("test"))
tw := NewSMS(logger.NewLogger("test")).(*SMS)
tw.httpClient = &http.Client{
Transport: httpTransport,
}

View File

@ -36,7 +36,7 @@ type Binding struct {
}
// NewTwitter returns a new Twitter event input binding.
func NewTwitter(logger logger.Logger) *Binding {
func NewTwitter(logger logger.Logger) bindings.InputOutputBinding {
return &Binding{logger: logger}
}

View File

@ -58,7 +58,7 @@ func getRuntimeMetadata() map[string]string {
// go test -v -count=1 ./bindings/twitter/.
func TestInit(t *testing.T) {
m := getTestMetadata()
tw := NewTwitter(logger.NewLogger("test"))
tw := NewTwitter(logger.NewLogger("test")).(*Binding)
err := tw.Init(m)
assert.Nilf(t, err, "error initializing valid metadata properties")
}
@ -66,7 +66,7 @@ func TestInit(t *testing.T) {
// TestReadError excutes the Read method and fails before the Twitter API call
// go test -v -count=1 -run TestReadError ./bindings/twitter/.
func TestReadError(t *testing.T) {
tw := NewTwitter(logger.NewLogger("test"))
tw := NewTwitter(logger.NewLogger("test")).(*Binding)
m := getTestMetadata()
err := tw.Init(m)
assert.Nilf(t, err, "error initializing valid metadata properties")
@ -90,7 +90,7 @@ func TestRead(t *testing.T) {
m.Properties = getRuntimeMetadata()
// add query
m.Properties["query"] = "microsoft"
tw := NewTwitter(logger.NewLogger("test"))
tw := NewTwitter(logger.NewLogger("test")).(*Binding)
tw.logger.SetOutputLevel(logger.DebugLevel)
err := tw.Init(m)
assert.Nilf(t, err, "error initializing read")
@ -126,7 +126,7 @@ func TestInvoke(t *testing.T) {
}
m := bindings.Metadata{}
m.Properties = getRuntimeMetadata()
tw := NewTwitter(logger.NewLogger("test"))
tw := NewTwitter(logger.NewLogger("test")).(*Binding)
tw.logger.SetOutputLevel(logger.DebugLevel)
err := tw.Init(m)
assert.Nilf(t, err, "error initializing Invoke")

View File

@ -20,16 +20,17 @@ import (
"github.com/stretchr/testify/assert"
"github.com/dapr/components-contrib/bindings"
"github.com/dapr/components-contrib/metadata"
"github.com/dapr/kit/logger"
)
func TestParseMetadata(t *testing.T) {
m := bindings.Metadata{Properties: map[string]string{
m := bindings.Metadata{Base: metadata.Base{Properties: map[string]string{
"gatewayAddr": "172.0.0.1:1234",
"gatewayKeepAlive": "5s",
"caCertificatePath": "/cert/path",
"usePlaintextConnection": "true",
}}
}}}
client := ClientFactoryImpl{logger: logger.NewLogger("test")}
meta, err := client.parseMetadata(m)
assert.NoError(t, err)
@ -49,7 +50,7 @@ func TestGatewayAddrMetadataIsMandatory(t *testing.T) {
}
func TestParseMetadataDefaultValues(t *testing.T) {
m := bindings.Metadata{Properties: map[string]string{"gatewayAddr": "172.0.0.1:1234"}}
m := bindings.Metadata{Base: metadata.Base{Properties: map[string]string{"gatewayAddr": "172.0.0.1:1234"}}}
client := ClientFactoryImpl{logger: logger.NewLogger("test")}
meta, err := client.parseMetadata(m)
assert.NoError(t, err)

Some files were not shown because too many files have changed in this diff Show More