Merge branch 'master' into content-type
commit 4f27152ec9
@@ -16,7 +16,7 @@ resource cosmosDb 'Microsoft.DocumentDB/databaseAccounts@2021-04-15' = {
  tags: confTestTags
  properties: {
    consistencyPolicy: {
-     defaultConsistencyLevel: 'Session'
+     defaultConsistencyLevel: 'Strong' // Needed by conformance test state.go
    }
    locations: [
      {
@@ -7,9 +7,14 @@ param eventHubsNamespaceName string
param rgLocation string = resourceGroup().location
param confTestTags object = {}

var eventHubName = '${eventHubsNamespaceName}-topic'
var eventHubPolicyName = '${eventHubName}-policy'
var eventHubConsumerGroupName = '${eventHubName}-cg'
var eventHubBindingsName = '${eventHubsNamespaceName}-bindings-topic'
var eventHubBindingsPolicyName = '${eventHubBindingsName}-policy'
var eventHubBindingsConsumerGroupName = '${eventHubBindingsName}-cg'

var eventHubPubsubName = '${eventHubsNamespaceName}-pubsub-topic'
var eventHubPubsubPolicyName = '${eventHubPubsubName}-policy'
var eventHubPubsubConsumerGroupName = '${eventHubPubsubName}-cg'

resource eventHubsNamespace 'Microsoft.EventHub/namespaces@2017-04-01' = {
  name: eventHubsNamespaceName
@@ -18,10 +23,10 @@ resource eventHubsNamespace 'Microsoft.EventHub/namespaces@2017-04-01' = {
  sku: {
    name: 'Standard' // For > 1 consumer group
  }
  resource eventHub 'eventhubs' = {
    name: eventHubName
    resource eventHubPolicy 'authorizationRules' = {
      name: eventHubPolicyName
  resource eventHubBindings 'eventhubs' = {
    name: eventHubBindingsName
    resource eventHubBindingsPolicy 'authorizationRules' = {
      name: eventHubBindingsPolicyName
      properties: {
        rights: [
          'Send'
@@ -29,12 +34,31 @@ resource eventHubsNamespace 'Microsoft.EventHub/namespaces@2017-04-01' = {
        ]
      }
    }
    resource consumerGroup 'consumergroups' = {
      name: eventHubConsumerGroupName
    resource eventHubBindingsConsumerGroup 'consumergroups' = {
      name: eventHubBindingsConsumerGroupName
    }
  }
  resource eventHubPubsub 'eventhubs' = {
    name: eventHubPubsubName
    resource eventHubPubsubPolicy 'authorizationRules' = {
      name: eventHubPubsubPolicyName
      properties: {
        rights: [
          'Send'
          'Listen'
        ]
      }
    }
    resource eventHubPubsubConsumerGroup 'consumergroups' = {
      name: eventHubPubsubConsumerGroupName
    }
  }
}

output eventHubName string = eventHubsNamespace::eventHub.name
output eventHubPolicyName string = eventHubsNamespace::eventHub::eventHubPolicy.name
output eventHubConsumerGroupName string = eventHubsNamespace::eventHub::consumerGroup.name
output eventHubBindingsName string = eventHubsNamespace::eventHubBindings.name
output eventHubBindingsPolicyName string = eventHubsNamespace::eventHubBindings::eventHubBindingsPolicy.name
output eventHubBindingsConsumerGroupName string = eventHubsNamespace::eventHubBindings::eventHubBindingsConsumerGroup.name

output eventHubPubsubName string = eventHubsNamespace::eventHubPubsub.name
output eventHubPubsubPolicyName string = eventHubsNamespace::eventHubPubsub::eventHubPubsubPolicy.name
output eventHubPubsubConsumerGroupName string = eventHubsNamespace::eventHubPubsub::eventHubPubsubConsumerGroup.name
@@ -110,9 +110,12 @@ output cosmosDbSqlName string = cosmosDb.outputs.cosmosDbSqlName
output cosmosDbSqlContainerName string = cosmosDb.outputs.cosmosDbSqlContainerName
output eventGridTopicName string = eventGridTopic.name
output eventHubsNamespace string = eventHubsNamespace.name
output eventHubName string = eventHubsNamespace.outputs.eventHubName
output eventHubPolicyName string = eventHubsNamespace.outputs.eventHubPolicyName
output eventHubConsumerGroupName string = eventHubsNamespace.outputs.eventHubConsumerGroupName
output eventHubBindingsName string = eventHubsNamespace.outputs.eventHubBindingsName
output eventHubBindingsPolicyName string = eventHubsNamespace.outputs.eventHubBindingsPolicyName
output eventHubBindingsConsumerGroupName string = eventHubsNamespace.outputs.eventHubBindingsConsumerGroupName
output eventHubPubsubName string = eventHubsNamespace.outputs.eventHubPubsubName
output eventHubPubsubPolicyName string = eventHubsNamespace.outputs.eventHubPubsubPolicyName
output eventHubPubsubConsumerGroupName string = eventHubsNamespace.outputs.eventHubPubsubConsumerGroupName
output keyVaultName string = keyVault.name
output serviceBusName string = serviceBus.name
output storageName string = storage.name
@@ -151,8 +151,12 @@ EVENT_GRID_SUB_ID_VAR_NAME="AzureEventGridSubscriptionId"
EVENT_GRID_TENANT_ID_VAR_NAME="AzureEventGridTenantId"
EVENT_GRID_TOPIC_ENDPOINT_VAR_NAME="AzureEventGridTopicEndpoint"

EVENT_HUBS_CONNECTION_STRING_VAR_NAME="AzureEventHubsConnectionString"
EVENT_HUBS_CONSUMER_GROUP_VAR_NAME="AzureEventHubsConsumerGroup"
EVENT_HUBS_BINDINGS_CONNECTION_STRING_VAR_NAME="AzureEventHubsBindingsConnectionString"
EVENT_HUBS_BINDINGS_CONSUMER_GROUP_VAR_NAME="AzureEventHubsBindingsConsumerGroup"
EVENT_HUBS_BINDINGS_CONTAINER_VAR_NAME="AzureEventHubsBindingsContainer"
EVENT_HUBS_PUBSUB_CONNECTION_STRING_VAR_NAME="AzureEventHubsPubsubConnectionString"
EVENT_HUBS_PUBSUB_CONSUMER_GROUP_VAR_NAME="AzureEventHubsPubsubConsumerGroup"
EVENT_HUBS_PUBSUB_CONTAINER_VAR_NAME="AzureEventHubsPubsubContainer"

KEYVAULT_CERT_NAME="AzureKeyVaultSecretStoreCert"
KEYVAULT_CLIENT_ID_VAR_NAME="AzureKeyVaultSecretStoreClientId"
@@ -198,7 +202,7 @@ echo "Building conf-test-azure.bicep to ${ARM_TEMPLATE_FILE} ..."
az bicep build --file conf-test-azure.bicep --outfile "${ARM_TEMPLATE_FILE}"

echo "Creating azure deployment ${DEPLOY_NAME} in ${DEPLOY_LOCATION} and resource prefix ${PREFIX}-* ..."
-az deployment sub create --name "${DEPLOY_NAME}" --location "${DEPLOY_LOCATION}" --template-file "${ARM_TEMPLATE_FILE}" --p namePrefix="${PREFIX}" -p adminId="${ADMIN_ID}" -p certAuthSpId="${CERT_AUTH_SP_ID}" -p sdkAuthSpId="${SDK_AUTH_SP_ID}" -p rgLocation="${DEPLOY_LOCATION}"
+az deployment sub create --name "${DEPLOY_NAME}" --location "${DEPLOY_LOCATION}" --template-file "${ARM_TEMPLATE_FILE}" -p namePrefix="${PREFIX}" -p adminId="${ADMIN_ID}" -p certAuthSpId="${CERT_AUTH_SP_ID}" -p sdkAuthSpId="${SDK_AUTH_SP_ID}" -p rgLocation="${DEPLOY_LOCATION}"

# Query the deployed resource names from the bicep deployment outputs
RESOURCE_GROUP_NAME="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.confTestRgName.value" | sed -E 's/[[:space:]]|\"//g')"
@@ -219,12 +223,18 @@ EVENT_GRID_TOPIC_NAME="$(az deployment sub show --name "${DEPLOY_NAME}" --query
echo "INFO: EVENT_GRID_TOPIC_NAME=${EVENT_GRID_TOPIC_NAME}"
EVENT_HUBS_NAMESPACE="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.eventHubsNamespace.value" | sed -E 's/[[:space:]]|\"//g')"
echo "INFO: EVENT_HUBS_NAMESPACE=${EVENT_HUBS_NAMESPACE}"
EVENT_HUB_NAME="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.eventHubName.value" | sed -E 's/[[:space:]]|\"//g')"
echo "INFO: EVENT_HUB_NAME=${EVENT_HUB_NAME}"
EVENT_HUB_POLICY_NAME="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.eventHubPolicyName.value" | sed -E 's/[[:space:]]|\"//g')"
echo "INFO: EVENT_HUB_POLICY_NAME=${EVENT_HUB_POLICY_NAME}"
EVENT_HUBS_CONSUMER_GROUP_NAME="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.eventHubConsumerGroupName.value" | sed -E 's/[[:space:]]|\"//g')"
echo "INFO: EVENT_HUBS_CONSUMER_GROUP_NAME=${EVENT_HUBS_CONSUMER_GROUP_NAME}"
EVENT_HUB_BINDINGS_NAME="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.eventHubBindingsName.value" | sed -E 's/[[:space:]]|\"//g')"
echo "INFO: EVENT_HUB_BINDINGS_NAME=${EVENT_HUB_BINDINGS_NAME}"
EVENT_HUB_BINDINGS_POLICY_NAME="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.eventHubBindingsPolicyName.value" | sed -E 's/[[:space:]]|\"//g')"
echo "INFO: EVENT_HUB_BINDINGS_POLICY_NAME=${EVENT_HUB_BINDINGS_POLICY_NAME}"
EVENT_HUBS_BINDINGS_CONSUMER_GROUP_NAME="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.eventHubBindingsConsumerGroupName.value" | sed -E 's/[[:space:]]|\"//g')"
echo "INFO: EVENT_HUBS_BINDINGS_CONSUMER_GROUP_NAME=${EVENT_HUBS_BINDINGS_CONSUMER_GROUP_NAME}"
EVENT_HUB_PUBSUB_NAME="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.eventHubPubsubName.value" | sed -E 's/[[:space:]]|\"//g')"
echo "INFO: EVENT_HUB_PUBSUB_NAME=${EVENT_HUB_PUBSUB_NAME}"
EVENT_HUB_PUBSUB_POLICY_NAME="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.eventHubPubsubPolicyName.value" | sed -E 's/[[:space:]]|\"//g')"
echo "INFO: EVENT_HUB_PUBSUB_POLICY_NAME=${EVENT_HUB_PUBSUB_POLICY_NAME}"
EVENT_HUBS_PUBSUB_CONSUMER_GROUP_NAME="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.eventHubPubsubConsumerGroupName.value" | sed -E 's/[[:space:]]|\"//g')"
echo "INFO: EVENT_HUBS_PUBSUB_CONSUMER_GROUP_NAME=${EVENT_HUBS_PUBSUB_CONSUMER_GROUP_NAME}"

# Update service principal credentials and roles for created resources
echo "Creating ${CERT_AUTH_SP_NAME} certificate ..."
@@ -404,12 +414,28 @@ az keyvault secret set --name "${SERVICE_BUS_CONNECTION_STRING_VAR_NAME}" --vaul
# Populate Event Hubs test settings
# ----------------------------------
echo "Configuring Event Hub test settings ..."
EVENT_HUBS_CONNECTION_STRING="$(az eventhubs eventhub authorization-rule keys list --name "${EVENT_HUB_POLICY_NAME}" --namespace-name "${EVENT_HUBS_NAMESPACE}" --eventhub-name "${EVENT_HUB_NAME}" --resource-group "${RESOURCE_GROUP_NAME}" --query "primaryConnectionString" | sed -E 's/[[:space:]]|\"//g')"
echo export ${EVENT_HUBS_CONNECTION_STRING_VAR_NAME}=\"${EVENT_HUBS_CONNECTION_STRING}\" >> "${ENV_CONFIG_FILENAME}"
az keyvault secret set --name "${EVENT_HUBS_CONNECTION_STRING_VAR_NAME}" --vault-name "${KEYVAULT_NAME}" --value "${EVENT_HUBS_CONNECTION_STRING}"

echo export ${EVENT_HUBS_CONSUMER_GROUP_VAR_NAME}=\"${EVENT_HUBS_CONSUMER_GROUP_NAME}\" >> "${ENV_CONFIG_FILENAME}"
az keyvault secret set --name "${EVENT_HUBS_CONSUMER_GROUP_VAR_NAME}" --vault-name "${KEYVAULT_NAME}" --value "${EVENT_HUBS_CONSUMER_GROUP_NAME}"
EVENT_HUBS_BINDINGS_CONNECTION_STRING="$(az eventhubs eventhub authorization-rule keys list --name "${EVENT_HUB_BINDINGS_POLICY_NAME}" --namespace-name "${EVENT_HUBS_NAMESPACE}" --eventhub-name "${EVENT_HUB_BINDINGS_NAME}" --resource-group "${RESOURCE_GROUP_NAME}" --query "primaryConnectionString" | sed -E 's/[[:space:]]|\"//g')"
echo export ${EVENT_HUBS_BINDINGS_CONNECTION_STRING_VAR_NAME}=\"${EVENT_HUBS_BINDINGS_CONNECTION_STRING}\" >> "${ENV_CONFIG_FILENAME}"
az keyvault secret set --name "${EVENT_HUBS_BINDINGS_CONNECTION_STRING_VAR_NAME}" --vault-name "${KEYVAULT_NAME}" --value "${EVENT_HUBS_BINDINGS_CONNECTION_STRING}"

echo export ${EVENT_HUBS_BINDINGS_CONSUMER_GROUP_VAR_NAME}=\"${EVENT_HUBS_BINDINGS_CONSUMER_GROUP_NAME}\" >> "${ENV_CONFIG_FILENAME}"
az keyvault secret set --name "${EVENT_HUBS_BINDINGS_CONSUMER_GROUP_VAR_NAME}" --vault-name "${KEYVAULT_NAME}" --value "${EVENT_HUBS_BINDINGS_CONSUMER_GROUP_NAME}"

EVENT_HUBS_BINDINGS_CONTAINER_NAME="${PREFIX}-eventhubs-bindings-container"
echo export ${EVENT_HUBS_BINDINGS_CONTAINER_VAR_NAME}=\"${EVENT_HUBS_BINDINGS_CONTAINER_NAME}\" >> "${ENV_CONFIG_FILENAME}"
az keyvault secret set --name "${EVENT_HUBS_BINDINGS_CONTAINER_VAR_NAME}" --vault-name "${KEYVAULT_NAME}" --value "${EVENT_HUBS_BINDINGS_CONTAINER_NAME}"

EVENT_HUBS_PUBSUB_CONNECTION_STRING="$(az eventhubs eventhub authorization-rule keys list --name "${EVENT_HUB_PUBSUB_POLICY_NAME}" --namespace-name "${EVENT_HUBS_NAMESPACE}" --eventhub-name "${EVENT_HUB_PUBSUB_NAME}" --resource-group "${RESOURCE_GROUP_NAME}" --query "primaryConnectionString" | sed -E 's/[[:space:]]|\"//g')"
echo export ${EVENT_HUBS_PUBSUB_CONNECTION_STRING_VAR_NAME}=\"${EVENT_HUBS_PUBSUB_CONNECTION_STRING}\" >> "${ENV_CONFIG_FILENAME}"
az keyvault secret set --name "${EVENT_HUBS_PUBSUB_CONNECTION_STRING_VAR_NAME}" --vault-name "${KEYVAULT_NAME}" --value "${EVENT_HUBS_PUBSUB_CONNECTION_STRING}"

echo export ${EVENT_HUBS_PUBSUB_CONSUMER_GROUP_VAR_NAME}=\"${EVENT_HUBS_PUBSUB_CONSUMER_GROUP_NAME}\" >> "${ENV_CONFIG_FILENAME}"
az keyvault secret set --name "${EVENT_HUBS_PUBSUB_CONSUMER_GROUP_VAR_NAME}" --vault-name "${KEYVAULT_NAME}" --value "${EVENT_HUBS_PUBSUB_CONSUMER_GROUP_NAME}"

EVENT_HUBS_PUBSUB_CONTAINER_NAME="${PREFIX}-eventhubs-pubsub-container"
echo export ${EVENT_HUBS_PUBSUB_CONTAINER_VAR_NAME}=\"${EVENT_HUBS_PUBSUB_CONTAINER_NAME}\" >> "${ENV_CONFIG_FILENAME}"
az keyvault secret set --name "${EVENT_HUBS_PUBSUB_CONTAINER_VAR_NAME}" --vault-name "${KEYVAULT_NAME}" --value "${EVENT_HUBS_PUBSUB_CONTAINER_NAME}"

echo "INFO: setup-azure-conf-test completed."
echo "INFO: Remember to \`source ${ENV_CONFIG_FILENAME}\` before running local conformance tests."
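For reference, the AzureEventHubs* names exported above are the keys the conformance tests later read back after `source`-ing the generated env file. A minimal Go sketch of that lookup — the package and helper names here are hypothetical, not part of this change:

    // Hypothetical helper: fetch the Event Hubs pubsub settings that
    // setup-azure-conf-test exports (and mirrors into Key Vault).
    package conftest

    import (
    	"fmt"
    	"os"
    )

    func eventHubsPubsubSettings() (connString, consumerGroup, container string, err error) {
    	connString = os.Getenv("AzureEventHubsPubsubConnectionString")
    	consumerGroup = os.Getenv("AzureEventHubsPubsubConsumerGroup")
    	container = os.Getenv("AzureEventHubsPubsubContainer")
    	if connString == "" || consumerGroup == "" || container == "" {
    		err = fmt.Errorf("missing Event Hubs pubsub settings; did you source the env config file?")
    	}
    	return
    }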
@@ -0,0 +1,14 @@
version: '2'
services:
  standalone:
    image: influxdb:latest
    container_name: influxdb
    ports:
      - "8086:8086"
    environment:
      - DOCKER_INFLUXDB_INIT_MODE=setup
      - DOCKER_INFLUXDB_INIT_USERNAME=conf-test-user
      - DOCKER_INFLUXDB_INIT_PASSWORD=conf-test-password
      - DOCKER_INFLUXDB_INIT_ORG=dapr-conf-test
      - DOCKER_INFLUXDB_INIT_BUCKET=dapr-conf-test-bucket
      - DOCKER_INFLUXDB_INIT_ADMIN_TOKEN=${INFLUX_TOKEN}
@@ -0,0 +1,13 @@
version: '2'
services:
  db:
    image: mysql
    command: --default-authentication-plugin=mysql_native_password
    restart: always
    environment:
      MYSQL_ROOT_PASSWORD: root
      MYSQL_DATABASE: dapr_state_store
      MYSQL_USER: dapr
      MYSQL_PASSWORD: example
    ports:
      - "3306:3306"
@@ -0,0 +1,9 @@
version: '2'
services:
  sqlserver:
    image: mcr.microsoft.com/mssql/server:2019-GA-ubuntu-16.04
    ports:
      - "1433:1433"
    environment:
      ACCEPT_EULA: Y
      SA_PASSWORD: "Pass@Word1"
@@ -43,6 +43,7 @@ jobs:
        run: |
          PR_COMPONENTS=$(yq -I0 --tojson eval - << EOF
          - bindings.http
          - bindings.influx
          - bindings.kafka
          - bindings.redis
          - bindings.mqtt-mosquitto
@@ -62,6 +63,8 @@ jobs:
          - secretstores.localfile
          - state.mongodb
          - state.redis
          - state.sqlserver
          - state.mysql
          EOF
          )
          echo "::set-output name=pr-components::$PR_COMPONENTS"
@@ -191,6 +194,10 @@ jobs:
          mongodb-replica-set: test-rs
        if: contains(matrix.component, 'mongodb')

      - name: Start sqlserver
        run: docker-compose -f ./.github/infrastructure/docker-compose-sqlserver.yml -p sqlserver up -d
        if: contains(matrix.component, 'sqlserver')

      - name: Start kafka
        run: docker-compose -f ./.github/infrastructure/docker-compose-kafka.yml -p kafka up -d
        if: contains(matrix.component, 'kafka')
@@ -223,6 +230,18 @@ jobs:
        run: docker-compose -f ./.github/infrastructure/docker-compose-rabbitmq.yml -p rabbitmq up -d
        if: contains(matrix.component, 'rabbitmq')

      - name: Start influxdb
        run: |
          export INFLUX_TOKEN=$(openssl rand -base64 32)
          echo "INFLUX_TOKEN=$INFLUX_TOKEN" >> $GITHUB_ENV
          docker-compose -f ./.github/infrastructure/docker-compose-influxdb.yml -p influxdb up -d
        if: contains(matrix.component, 'influx')

      - name: Start mysql
        run: |
          docker-compose -f ./.github/infrastructure/docker-compose-mysql.yml -p mysql up -d
        if: contains(matrix.component, 'mysql')

      - name: Start KinD
        uses: helm/kind-action@v1.0.0
        if: contains(matrix.component, 'kubernetes')
@@ -258,7 +277,8 @@ jobs:
          echo "Running tests for Test${KIND_UPPER}Conformance/${KIND}/${NAME} ... "

          set +e
-         gotestsum --jsonfile ${{ env.TEST_OUTPUT_FILE_PREFIX }}_conformance.json --format standard-verbose -- \
+         gotestsum --jsonfile ${{ env.TEST_OUTPUT_FILE_PREFIX }}_conformance.json \
+           --junitfile ${{ env.TEST_OUTPUT_FILE_PREFIX }}_conformance.xml --format standard-verbose -- \
            -p 2 -count=1 -timeout=15m -tags=conftests ./tests/conformance --run="Test${KIND_UPPER}Conformance/${NAME}"

          status=$?
@@ -299,10 +319,10 @@ jobs:
            exit 1
          fi

-     # Upload logs for dashboard like dapr/dapr E2E tests
+     # Upload logs for test analytics to consume
      - name: Upload test results
        if: always()
        uses: actions/upload-artifact@master
        with:
-         name: ${{ matrix.component }}_conformance_test.json
-         path: ${{ env.TEST_OUTPUT_FILE_PREFIX }}_conformance.json
+         name: ${{ matrix.component }}_conformance_test
+         path: ${{ env.TEST_OUTPUT_FILE_PREFIX }}_conformance.*
Makefile
@@ -53,7 +53,7 @@ endif
################################################################################
.PHONY: test
test:
-	go test ./... $(COVERAGE_OPTS) $(BUILDMODE)
+	CGO_ENABLED=$(CGO) go test ./... $(COVERAGE_OPTS) $(BUILDMODE)

################################################################################
# Target: lint                                                                 #
@@ -83,11 +83,11 @@ check-diff:
################################################################################
.PHONY: conf-tests
conf-tests:
-	@go test -v -tags=conftests -count=1 ./tests/conformance
+	CGO_ENABLED=$(CGO) go test -v -tags=conftests -count=1 ./tests/conformance

################################################################################
# Target: e2e-tests-zeebe                                                      #
################################################################################
.PHONY: e2e-tests-zeebe
e2e-tests-zeebe:
-	@go test -v -tags=e2etests -count=1 ./tests/e2e/bindings/zeebe/...
+	CGO_ENABLED=$(CGO) go test -v -tags=e2etests -count=1 ./tests/e2e/bindings/zeebe/...
@@ -38,6 +38,9 @@ func NewEnvironmentSettings(resourceName string, values map[string]string) (Envi
	case "storage":
		// Azure Storage (data plane)
		es.Resource = azureEnv.ResourceIdentifiers.Storage
	case "cosmosdb":
		// Azure Cosmos DB (data plane)
		es.Resource = "https://" + azureEnv.CosmosDBDNSSuffix
	default:
		return es, errors.New("invalid resource name: " + resourceName)
	}
@@ -146,6 +146,9 @@ func TestGetMSI(t *testing.T) {
}

func TestFallbackToMSI(t *testing.T) {
	os.Setenv("MSI_ENDPOINT", "test")
	defer os.Unsetenv("MSI_ENDPOINT")

	settings, err := NewEnvironmentSettings(
		"keyvault",
		map[string]string{
@@ -153,6 +156,7 @@ func TestFallbackToMSI(t *testing.T) {
			"vaultName": "vaultName",
		},
	)

	assert.NoError(t, err)

	spt, err := settings.GetServicePrincipalToken()
@@ -162,6 +166,9 @@ func TestFallbackToMSI(t *testing.T) {
}

func TestAuthorizorWithMSI(t *testing.T) {
	os.Setenv("MSI_ENDPOINT", "test")
	defer os.Unsetenv("MSI_ENDPOINT")

	settings, err := NewEnvironmentSettings(
		"keyvault",
		map[string]string{
@@ -180,6 +187,9 @@ func TestAuthorizorWithMSI(t *testing.T) {
}

func TestAuthorizorWithMSIAndUserAssignedID(t *testing.T) {
	os.Setenv("MSI_ENDPOINT", "test")
	defer os.Unsetenv("MSI_ENDPOINT")

	settings, err := NewEnvironmentSettings(
		"keyvault",
		map[string]string{
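The three new tests above share one shape: set MSI_ENDPOINT, omit service-principal credentials, and expect the settings to fall back to managed identity. A minimal sketch of that path, using only the identifiers from this package (error handling elided):

    // Sketch: with MSI_ENDPOINT set and no clientId/clientSecret in the
    // metadata, the returned token comes from the managed-identity endpoint
    // rather than from a service principal.
    func sketchFallbackToMSI() {
    	os.Setenv("MSI_ENDPOINT", "test")
    	defer os.Unsetenv("MSI_ENDPOINT")

    	settings, _ := NewEnvironmentSettings("keyvault", map[string]string{
    		"vaultName": "vaultName", // intentionally no service-principal credentials
    	})
    	spt, err := settings.GetServicePrincipalToken()
    	_, _ = spt, err
    }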
@@ -0,0 +1,341 @@
// ------------------------------------------------------------
// Copyright (c) Microsoft Corporation and Dapr Contributors.
// Licensed under the MIT License.
// ------------------------------------------------------------

package tablestore

import (
	"encoding/json"
	"strings"
	"time"

	"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore"

	"github.com/dapr/components-contrib/bindings"
	"github.com/dapr/kit/logger"

	"github.com/pkg/errors"
)

const (
	tableName   = "tableName"
	columnToGet = "columnToGet"
	primaryKeys = "primaryKeys"

	invokeStartTimeKey = "start-time"
	invokeEndTimeKey   = "end-time"
	invokeDurationKey  = "duration"
)

type tablestoreMetadata struct {
	Endpoint     string `json:"endpoint"`
	AccessKeyID  string `json:"accessKeyID"`
	AccessKey    string `json:"accessKey"`
	InstanceName string `json:"instanceName"`
	TableName    string `json:"tableName"`
}

type AliCloudTableStore struct {
	logger   logger.Logger
	client   *tablestore.TableStoreClient
	metadata tablestoreMetadata
}

func NewAliCloudTableStore(log logger.Logger) *AliCloudTableStore {
	return &AliCloudTableStore{
		logger: log,
		client: nil,
	}
}

func (s *AliCloudTableStore) Init(metadata bindings.Metadata) error {
	m, err := s.parseMetadata(metadata)
	if err != nil {
		return err
	}

	s.metadata = *m
	s.client = tablestore.NewClient(m.Endpoint, m.InstanceName, m.AccessKeyID, m.AccessKey)

	return nil
}

func (s *AliCloudTableStore) Invoke(req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
	if req == nil {
		return nil, errors.Errorf("invoke request required")
	}

	startTime := time.Now().UTC()
	resp := &bindings.InvokeResponse{
		Metadata: map[string]string{
			invokeStartTimeKey: startTime.Format(time.RFC3339Nano),
		},
	}

	switch req.Operation {
	case bindings.GetOperation:
		err := s.get(req, resp)
		if err != nil {
			return nil, err
		}
	case bindings.ListOperation:
		err := s.list(req, resp)
		if err != nil {
			return nil, err
		}
	case bindings.CreateOperation:
		err := s.create(req, resp)
		if err != nil {
			return nil, err
		}
	case bindings.DeleteOperation:
		err := s.delete(req, resp)
		if err != nil {
			return nil, err
		}
	default:
		return nil, errors.Errorf("invalid operation type: %s. Expected %s, %s, %s, or %s",
			req.Operation, bindings.GetOperation, bindings.ListOperation, bindings.CreateOperation, bindings.DeleteOperation)
	}

	endTime := time.Now().UTC()
	resp.Metadata[invokeEndTimeKey] = endTime.Format(time.RFC3339Nano)
	resp.Metadata[invokeDurationKey] = endTime.Sub(startTime).String()

	return resp, nil
}

func (s *AliCloudTableStore) Operations() []bindings.OperationKind {
	return []bindings.OperationKind{bindings.CreateOperation, bindings.DeleteOperation, bindings.GetOperation, bindings.ListOperation}
}

func (s *AliCloudTableStore) parseMetadata(metadata bindings.Metadata) (*tablestoreMetadata, error) {
	b, err := json.Marshal(metadata.Properties)
	if err != nil {
		return nil, err
	}

	var m tablestoreMetadata
	err = json.Unmarshal(b, &m)
	if err != nil {
		return nil, err
	}

	return &m, nil
}

func (s *AliCloudTableStore) get(req *bindings.InvokeRequest, resp *bindings.InvokeResponse) error {
	columns := strings.Split(req.Metadata[columnToGet], ",")
	pkNames := strings.Split(req.Metadata[primaryKeys], ",")
	pks := make([]*tablestore.PrimaryKeyColumn, len(pkNames))

	data := make(map[string]interface{})
	err := json.Unmarshal(req.Data, &data)
	if err != nil {
		return err
	}

	for idx, pkName := range pkNames {
		pks[idx] = &tablestore.PrimaryKeyColumn{
			ColumnName: pkName,
			Value:      data[pkName],
		}
	}

	criteria := &tablestore.SingleRowQueryCriteria{
		TableName:    s.getTableName(req.Metadata),
		PrimaryKey:   &tablestore.PrimaryKey{PrimaryKeys: pks},
		ColumnsToGet: columns,
		MaxVersion:   1,
	}
	getRowReq := &tablestore.GetRowRequest{
		SingleRowQueryCriteria: criteria,
	}
	getRowResp, err := s.client.GetRow(getRowReq)
	if err != nil {
		return err
	}

	ret, err := s.unmarshal(getRowResp.PrimaryKey.PrimaryKeys, getRowResp.Columns)
	if err != nil {
		return err
	}

	if ret == nil {
		resp.Data = nil

		return nil
	}

	resp.Data, err = json.Marshal(ret)

	return err
}

func (s *AliCloudTableStore) list(req *bindings.InvokeRequest, resp *bindings.InvokeResponse) error {
	columns := strings.Split(req.Metadata[columnToGet], ",")
	pkNames := strings.Split(req.Metadata[primaryKeys], ",")

	var data []map[string]interface{}
	err := json.Unmarshal(req.Data, &data)
	if err != nil {
		return err
	}

	criteria := &tablestore.MultiRowQueryCriteria{
		TableName:    s.getTableName(req.Metadata),
		ColumnsToGet: columns,
		MaxVersion:   1,
	}

	for _, item := range data {
		pk := &tablestore.PrimaryKey{}
		for _, pkName := range pkNames {
			pk.AddPrimaryKeyColumn(pkName, item[pkName])
		}
		criteria.AddRow(pk)
	}

	getRowRequest := &tablestore.BatchGetRowRequest{}
	getRowRequest.MultiRowQueryCriteria = append(getRowRequest.MultiRowQueryCriteria, criteria)
	getRowResp, err := s.client.BatchGetRow(getRowRequest)
	if err != nil {
		return err
	}

	var ret []interface{}

	for _, criteria := range getRowRequest.MultiRowQueryCriteria {
		for _, row := range getRowResp.TableToRowsResult[criteria.TableName] {
			rowData, rowErr := s.unmarshal(row.PrimaryKey.PrimaryKeys, row.Columns)
			if rowErr != nil {
				return rowErr
			}
			ret = append(ret, rowData)
		}
	}

	resp.Data, err = json.Marshal(ret)

	return err
}

func (s *AliCloudTableStore) create(req *bindings.InvokeRequest, resp *bindings.InvokeResponse) error {
	data := make(map[string]interface{})
	err := json.Unmarshal(req.Data, &data)
	if err != nil {
		return err
	}
	pkNames := strings.Split(req.Metadata[primaryKeys], ",")
	pks := make([]*tablestore.PrimaryKeyColumn, len(pkNames))
	columns := make([]tablestore.AttributeColumn, len(data)-len(pkNames))

	for idx, pk := range pkNames {
		pks[idx] = &tablestore.PrimaryKeyColumn{
			ColumnName: pk,
			Value:      data[pk],
		}
	}

	idx := 0
	for key, val := range data {
		if !contains(pkNames, key) {
			columns[idx] = tablestore.AttributeColumn{
				ColumnName: key,
				Value:      val,
			}
			idx++
		}
	}

	change := tablestore.PutRowChange{
		TableName:     s.getTableName(req.Metadata),
		PrimaryKey:    &tablestore.PrimaryKey{PrimaryKeys: pks},
		Columns:       columns,
		ReturnType:    tablestore.ReturnType_RT_NONE,
		TransactionId: nil,
	}

	change.SetCondition(tablestore.RowExistenceExpectation_IGNORE)

	putRequest := &tablestore.PutRowRequest{
		PutRowChange: &change,
	}

	_, err = s.client.PutRow(putRequest)

	if err != nil {
		return err
	}

	return nil
}

func (s *AliCloudTableStore) delete(req *bindings.InvokeRequest, resp *bindings.InvokeResponse) error {
	pkNames := strings.Split(req.Metadata[primaryKeys], ",")
	pks := make([]*tablestore.PrimaryKeyColumn, len(pkNames))
	data := make(map[string]interface{})
	err := json.Unmarshal(req.Data, &data)
	if err != nil {
		return err
	}

	for idx, pkName := range pkNames {
		pks[idx] = &tablestore.PrimaryKeyColumn{
			ColumnName: pkName,
			Value:      data[pkName],
		}
	}

	change := &tablestore.DeleteRowChange{
		TableName:  s.getTableName(req.Metadata),
		PrimaryKey: &tablestore.PrimaryKey{PrimaryKeys: pks},
	}
	change.SetCondition(tablestore.RowExistenceExpectation_IGNORE)
	deleteReq := &tablestore.DeleteRowRequest{DeleteRowChange: change}
	_, err = s.client.DeleteRow(deleteReq)

	if err != nil {
		return err
	}

	return nil
}

func (s *AliCloudTableStore) unmarshal(pks []*tablestore.PrimaryKeyColumn, columns []*tablestore.AttributeColumn) (map[string]interface{}, error) {
	if pks == nil && columns == nil {
		return nil, nil
	}

	data := make(map[string]interface{})

	for _, pk := range pks {
		data[pk.ColumnName] = pk.Value
	}

	for _, column := range columns {
		data[column.ColumnName] = column.Value
	}

	return data, nil
}

func (s *AliCloudTableStore) getTableName(metadata map[string]string) string {
	name := metadata[tableName]
	if name == "" {
		name = s.metadata.TableName
	}

	return name
}

func contains(arr []string, str string) bool {
	for _, a := range arr {
		if a == str {
			return true
		}
	}

	return false
}
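A usage sketch for the new binding (endpoint and credentials are placeholders; the metadata keys are the tableName/primaryKeys constants defined in the file above):

    // Sketch: create a row through the binding.
    func sketchCreateRow() error {
    	store := NewAliCloudTableStore(logger.NewLogger("sample"))
    	err := store.Init(bindings.Metadata{Properties: map[string]string{
    		"endpoint":     "https://my-instance.cn-hangzhou.ots.aliyuncs.com", // placeholder
    		"accessKeyID":  "<accessKeyID>",
    		"accessKey":    "<accessKey>",
    		"instanceName": "my-instance",
    		"tableName":    "my_table",
    	}})
    	if err != nil {
    		return err
    	}

    	row, _ := json.Marshal(map[string]interface{}{"pk1": "id-1", "column1": "value"})
    	_, err = store.Invoke(&bindings.InvokeRequest{
    		Operation: bindings.CreateOperation,
    		Metadata:  map[string]string{primaryKeys: "pk1"},
    		Data:      row,
    	})
    	return err
    }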
@@ -0,0 +1,178 @@
// ------------------------------------------------------------
// Copyright (c) Microsoft Corporation and Dapr Contributors.
// Licensed under the MIT License.
// ------------------------------------------------------------

package tablestore

import (
	"encoding/json"
	"os"
	"testing"

	"github.com/dapr/components-contrib/bindings"
	"github.com/dapr/kit/logger"
	"github.com/stretchr/testify/assert"
)

func TestTableStoreMetadata(t *testing.T) {
	m := bindings.Metadata{}
	m.Properties = map[string]string{"accessKeyID": "ACCESSKEYID", "accessKey": "ACCESSKEY", "instanceName": "INSTANCENAME", "tableName": "TABLENAME", "endpoint": "ENDPOINT"}
	aliCloudTableStore := AliCloudTableStore{}

	meta, err := aliCloudTableStore.parseMetadata(m)

	assert.Nil(t, err)
	assert.Equal(t, "ACCESSKEYID", meta.AccessKeyID)
	assert.Equal(t, "ACCESSKEY", meta.AccessKey)
	assert.Equal(t, "INSTANCENAME", meta.InstanceName)
	assert.Equal(t, "TABLENAME", meta.TableName)
	assert.Equal(t, "ENDPOINT", meta.Endpoint)
}

func TestDataEncodeAndDecode(t *testing.T) {
	if !isLiveTest() {
		return
	}

	aliCloudTableStore := NewAliCloudTableStore(logger.NewLogger("test"))

	metadata := bindings.Metadata{
		Properties: getTestProperties(),
	}
	aliCloudTableStore.Init(metadata)

	// test create
	putData := map[string]interface{}{
		"pk1":     "data1",
		"column1": "the string value of column1",
		"column2": int64(2),
	}
	data, err := json.Marshal(putData)
	assert.Nil(t, err)
	putRowReq := &bindings.InvokeRequest{
		Operation: bindings.CreateOperation,
		Metadata: map[string]string{
			tableName:   "dapr_test_table2",
			primaryKeys: "pk1",
		},
		Data: data,
	}

	putInvokeResp, err := aliCloudTableStore.Invoke(putRowReq)

	assert.Nil(t, err)
	assert.NotNil(t, putInvokeResp)

	putRowReq.Data, _ = json.Marshal(map[string]interface{}{
		"pk1":     "data2",
		"column1": "the string value of column1",
		"column2": int64(2),
	})

	putInvokeResp, err = aliCloudTableStore.Invoke(putRowReq)

	assert.Nil(t, err)
	assert.NotNil(t, putInvokeResp)

	// test get
	getData, err := json.Marshal(map[string]interface{}{
		"pk1": "data1",
	})
	assert.Nil(t, err)
	getInvokeReq := &bindings.InvokeRequest{
		Operation: bindings.GetOperation,
		Metadata: map[string]string{
			tableName:   "dapr_test_table2",
			primaryKeys: "pk1",
			columnToGet: "column1,column2,column3",
		},
		Data: getData,
	}

	getInvokeResp, err := aliCloudTableStore.Invoke(getInvokeReq)

	assert.Nil(t, err)
	assert.NotNil(t, getInvokeResp)

	respData := make(map[string]interface{})
	err = json.Unmarshal(getInvokeResp.Data, &respData)

	assert.Nil(t, err)

	assert.Equal(t, putData["column1"], respData["column1"])
	assert.Equal(t, putData["column2"], int64(respData["column2"].(float64)))

	// test list
	listData, err := json.Marshal([]map[string]interface{}{
		{
			"pk1": "data1",
		},
		{
			"pk1": "data2",
		},
	})
	assert.Nil(t, err)

	listReq := &bindings.InvokeRequest{
		Operation: bindings.ListOperation,
		Metadata: map[string]string{
			tableName:   "dapr_test_table2",
			primaryKeys: "pk1",
			columnToGet: "column1,column2,column3",
		},
		Data: listData,
	}

	listResp, err := aliCloudTableStore.Invoke(listReq)
	assert.Nil(t, err)
	assert.NotNil(t, listResp)

	listRespData := make([]map[string]interface{}, len(listData))
	err = json.Unmarshal(listResp.Data, &listRespData)

	assert.Nil(t, err)
	assert.Len(t, listRespData, 2)

	assert.Equal(t, listRespData[0]["column1"], putData["column1"])
	assert.Equal(t, listRespData[1]["pk1"], "data2")

	// test delete
	deleteData, err := json.Marshal(map[string]interface{}{
		"pk1": "data1",
	})
	assert.Nil(t, err)

	deleteReq := &bindings.InvokeRequest{
		Operation: bindings.DeleteOperation,
		Metadata: map[string]string{
			tableName:   "dapr_test_table2",
			primaryKeys: "pk1",
		},
		Data: deleteData,
	}

	deleteResp, err := aliCloudTableStore.Invoke(deleteReq)

	assert.Nil(t, err)
	assert.NotNil(t, deleteResp)

	getInvokeResp, err = aliCloudTableStore.Invoke(getInvokeReq)

	assert.Nil(t, err)
	assert.Nil(t, getInvokeResp.Data)
}

func getTestProperties() map[string]string {
	return map[string]string{
		"accessKeyID":  "****",
		"accessKey":    "****",
		"instanceName": "dapr-test",
		"tableName":    "dapr_test_table2",
		"endpoint":     "https://dapr-test.cn-hangzhou.ots.aliyuncs.com",
	}
}

func isLiveTest() bool {
	return os.Getenv("RUN_LIVE_ROCKETMQ_TEST") == "true"
}
@@ -7,9 +7,14 @@ package s3

import (
	"bytes"
	b64 "encoding/base64"
	"encoding/json"
	"fmt"
	"strconv"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
	aws_auth "github.com/dapr/components-contrib/authentication/aws"
	"github.com/dapr/components-contrib/bindings"
@@ -17,11 +22,22 @@ import (
	"github.com/google/uuid"
)

const (
	metadataDecodeBase64 = "decodeBase64"
	metadataEncodeBase64 = "encodeBase64"

	metadataKey = "key"

	maxResults = 1000
)

// AWSS3 is a binding for an AWS S3 storage bucket
type AWSS3 struct {
-	metadata *s3Metadata
-	uploader *s3manager.Uploader
-	logger   logger.Logger
+	metadata   *s3Metadata
+	s3Client   *s3.S3
+	uploader   *s3manager.Uploader
+	downloader *s3manager.Downloader
+	logger     logger.Logger
}

type s3Metadata struct {
@@ -31,6 +47,20 @@ type s3Metadata struct {
	SecretKey    string `json:"secretKey"`
	SessionToken string `json:"sessionToken"`
	Bucket       string `json:"bucket"`
	DecodeBase64 bool   `json:"decodeBase64,string"`
	EncodeBase64 bool   `json:"encodeBase64,string"`
}

type createResponse struct {
	Location  string  `json:"location"`
	VersionID *string `json:"versionID"`
}

type listPayload struct {
	Marker     string `json:"marker"`
	Prefix     string `json:"prefix"`
	MaxResults int32  `json:"maxResults"`
	Delimiter  string `json:"delimiter"`
}

// NewAWSS3 returns a new AWSS3 instance
@@ -44,39 +74,185 @@ func (s *AWSS3) Init(metadata bindings.Metadata) error {
	if err != nil {
		return err
	}
-	uploader, err := s.getClient(m)
+	session, err := s.getSession(m)
	if err != nil {
		return err
	}
	s.metadata = m
-	s.uploader = uploader
+	s.s3Client = s3.New(session)
+	s.downloader = s3manager.NewDownloader(session)
+	s.uploader = s3manager.NewUploader(session)

	return nil
}

-func (s *AWSS3) Operations() []bindings.OperationKind {
-	return []bindings.OperationKind{bindings.CreateOperation}
+func (s *AWSS3) Close() error {
+	return nil
}

-func (s *AWSS3) Invoke(req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
-	key := ""
-	if val, ok := req.Metadata["key"]; ok && val != "" {
+func (s *AWSS3) Operations() []bindings.OperationKind {
+	return []bindings.OperationKind{
+		bindings.CreateOperation,
+		bindings.GetOperation,
+		bindings.DeleteOperation,
+		bindings.ListOperation,
+	}
+}
+
+func (s *AWSS3) create(req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
+	metadata, err := s.metadata.mergeWithRequestMetadata(req)
+	if err != nil {
+		return nil, fmt.Errorf("s3 binding error. error merge metadata : %w", err)
+	}
+	var key string
+	if val, ok := req.Metadata[metadataKey]; ok && val != "" {
		key = val
	} else {
		key = uuid.New().String()
		s.logger.Debugf("key not found. generating key %s", key)
	}

	d, err := strconv.Unquote(string(req.Data))
	if err == nil {
		req.Data = []byte(d)
	}

+	if metadata.DecodeBase64 {
+		decoded, decodeError := b64.StdEncoding.DecodeString(string(req.Data))
+		if decodeError != nil {
+			return nil, fmt.Errorf("s3 binding error. decode : %w", decodeError)
+		}
+		req.Data = decoded
+	}
+
	r := bytes.NewReader(req.Data)
-	_, err := s.uploader.Upload(&s3manager.UploadInput{
-		Bucket: aws.String(s.metadata.Bucket),
+	resultUpload, err := s.uploader.Upload(&s3manager.UploadInput{
+		Bucket: aws.String(metadata.Bucket),
		Key:    aws.String(key),
		Body:   r,
	})
	if err != nil {
		return nil, fmt.Errorf("s3 binding error. Uploading: %w", err)
	}

+	jsonResponse, err := json.Marshal(createResponse{
+		Location:  resultUpload.Location,
+		VersionID: resultUpload.VersionID,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("s3 binding error. Error marshalling create response: %w", err)
+	}
+
+	return &bindings.InvokeResponse{
+		Data: jsonResponse,
+	}, nil
+}
+
+func (s *AWSS3) get(req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
+	metadata, err := s.metadata.mergeWithRequestMetadata(req)
+	if err != nil {
+		return nil, fmt.Errorf("s3 binding error. error merge metadata : %w", err)
+	}
+
+	var key string
+	if val, ok := req.Metadata[metadataKey]; ok && val != "" {
+		key = val
+	} else {
+		return nil, fmt.Errorf("s3 binding error: can't read key value")
+	}
+
+	buff := &aws.WriteAtBuffer{}
+
+	_, err = s.downloader.Download(buff,
+		&s3.GetObjectInput{
+			Bucket: aws.String(s.metadata.Bucket),
+			Key:    aws.String(key),
+		})
+	if err != nil {
+		return nil, fmt.Errorf("s3 binding error: error downloading S3 object: %w", err)
+	}
+
+	var data []byte
+	if metadata.EncodeBase64 {
+		encoded := b64.StdEncoding.EncodeToString(buff.Bytes())
+		data = []byte(encoded)
+	} else {
+		data = buff.Bytes()
+	}
+
+	return &bindings.InvokeResponse{
+		Data:     data,
+		Metadata: nil,
+	}, nil
+}
+
+func (s *AWSS3) delete(req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
+	var key string
+	if val, ok := req.Metadata[metadataKey]; ok && val != "" {
+		key = val
+	} else {
+		return nil, fmt.Errorf("s3 binding error: can't read key value")
+	}
+
+	_, err := s.s3Client.DeleteObject(
+		&s3.DeleteObjectInput{
+			Bucket: aws.String(s.metadata.Bucket),
+			Key:    aws.String(key),
+		})
+
+	return nil, err
+}
+
+func (s *AWSS3) list(req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
+	var payload listPayload
+	err := json.Unmarshal(req.Data, &payload)
+	if err != nil {
+		return nil, err
+	}
+
+	if payload.MaxResults == int32(0) {
+		payload.MaxResults = maxResults
+	}
+
+	input := &s3.ListObjectsInput{
+		Bucket:    aws.String(s.metadata.Bucket),
+		MaxKeys:   aws.Int64(int64(payload.MaxResults)),
+		Marker:    aws.String(payload.Marker),
+		Prefix:    aws.String(payload.Prefix),
+		Delimiter: aws.String(payload.Delimiter),
+	}
+
+	result, err := s.s3Client.ListObjects(input)
+	if err != nil {
+		return nil, fmt.Errorf("s3 binding error. list operation. cannot marshal blobs to json: %w", err)
+	}
+
+	jsonResponse, err := json.Marshal(result)
+	if err != nil {
+		return nil, fmt.Errorf("s3 binding error. list operation. cannot marshal blobs to json: %w", err)
+	}
+
+	return &bindings.InvokeResponse{
+		Data: jsonResponse,
+	}, nil
+}
+
+func (s *AWSS3) Invoke(req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
+	switch req.Operation {
+	case bindings.CreateOperation:
+		return s.create(req)
+	case bindings.GetOperation:
+		return s.get(req)
+	case bindings.DeleteOperation:
+		return s.delete(req)
+	case bindings.ListOperation:
+		return s.list(req)
+	default:
+		return nil, fmt.Errorf("s3 binding error. unsupported operation %s", req.Operation)
+	}
+}
+
func (s *AWSS3) parseMetadata(metadata bindings.Metadata) (*s3Metadata, error) {
	b, err := json.Marshal(metadata.Properties)
	if err != nil {
@@ -92,13 +268,34 @@ func (s *AWSS3) parseMetadata(metadata bindings.Metadata) (*s3Metadata, error) {
	return &m, nil
}

-func (s *AWSS3) getClient(metadata *s3Metadata) (*s3manager.Uploader, error) {
+func (s *AWSS3) getSession(metadata *s3Metadata) (*session.Session, error) {
	sess, err := aws_auth.GetClient(metadata.AccessKey, metadata.SecretKey, metadata.SessionToken, metadata.Region, metadata.Endpoint)
	if err != nil {
		return nil, err
	}

-	uploader := s3manager.NewUploader(sess)
-
-	return uploader, nil
+	return sess, nil
}

// Helper to merge config and request metadata
func (metadata s3Metadata) mergeWithRequestMetadata(req *bindings.InvokeRequest) (s3Metadata, error) {
	merged := metadata

	if val, ok := req.Metadata[metadataDecodeBase64]; ok && val != "" {
		valBool, err := strconv.ParseBool(val)
		if err != nil {
			return merged, err
		}
		merged.DecodeBase64 = valBool
	}

	if val, ok := req.Metadata[metadataEncodeBase64]; ok && val != "" {
		valBool, err := strconv.ParseBool(val)
		if err != nil {
			return merged, err
		}
		merged.EncodeBase64 = valBool
	}

	return merged, nil
}
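A usage sketch for the new list operation; the request payload mirrors the listPayload struct above, and maxResults falls back to the maxResults constant (1000) when omitted. The s3Binding variable is assumed to be an initialized *AWSS3:

    // Sketch: list objects under a prefix via the binding; the bucket comes
    // from the component metadata.
    func sketchListReports(s3Binding *AWSS3) error {
    	payload, err := json.Marshal(map[string]interface{}{
    		"prefix":     "reports/",
    		"delimiter":  "/",
    		"maxResults": 50,
    	})
    	if err != nil {
    		return err
    	}
    	_, err = s3Binding.Invoke(&bindings.InvokeRequest{
    		Operation: bindings.ListOperation,
    		Data:      payload,
    	})
    	return err
    }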
@@ -9,21 +9,136 @@ import (
	"testing"

	"github.com/dapr/components-contrib/bindings"
	"github.com/dapr/kit/logger"
	"github.com/stretchr/testify/assert"
)

func TestParseMetadata(t *testing.T) {
-	m := bindings.Metadata{}
-	m.Properties = map[string]string{
-		"AccessKey": "key", "Region": "region", "SecretKey": "secret", "Bucket": "test", "Endpoint": "endpoint", "SessionToken": "token",
-	}
-	s3 := AWSS3{}
-	meta, err := s3.parseMetadata(m)
-	assert.Nil(t, err)
-	assert.Equal(t, "key", meta.AccessKey)
-	assert.Equal(t, "region", meta.Region)
-	assert.Equal(t, "secret", meta.SecretKey)
-	assert.Equal(t, "test", meta.Bucket)
-	assert.Equal(t, "endpoint", meta.Endpoint)
-	assert.Equal(t, "token", meta.SessionToken)
+	t.Run("Has correct metadata", func(t *testing.T) {
+		m := bindings.Metadata{}
+		m.Properties = map[string]string{
+			"AccessKey": "key", "Region": "region", "SecretKey": "secret", "Bucket": "test", "Endpoint": "endpoint", "SessionToken": "token",
+		}
+		s3 := AWSS3{}
+		meta, err := s3.parseMetadata(m)
+		assert.Nil(t, err)
+		assert.Equal(t, "key", meta.AccessKey)
+		assert.Equal(t, "region", meta.Region)
+		assert.Equal(t, "secret", meta.SecretKey)
+		assert.Equal(t, "test", meta.Bucket)
+		assert.Equal(t, "endpoint", meta.Endpoint)
+		assert.Equal(t, "token", meta.SessionToken)
+	})
}

func TestMergeWithRequestMetadata(t *testing.T) {
	t.Run("Has merged metadata", func(t *testing.T) {
		m := bindings.Metadata{}
		m.Properties = map[string]string{
			"AccessKey": "key", "Region": "region", "SecretKey": "secret", "Bucket": "test", "Endpoint": "endpoint", "SessionToken": "token",
		}
		s3 := AWSS3{}
		meta, err := s3.parseMetadata(m)
		assert.Nil(t, err)
		assert.Equal(t, "key", meta.AccessKey)
		assert.Equal(t, "region", meta.Region)
		assert.Equal(t, "secret", meta.SecretKey)
		assert.Equal(t, "test", meta.Bucket)
		assert.Equal(t, "endpoint", meta.Endpoint)
		assert.Equal(t, "token", meta.SessionToken)

		request := bindings.InvokeRequest{}
		request.Metadata = map[string]string{
			"decodeBase64": "true",
			"encodeBase64": "false",
		}

		mergedMeta, err := meta.mergeWithRequestMetadata(&request)

		assert.Nil(t, err)
		assert.Equal(t, "key", mergedMeta.AccessKey)
		assert.Equal(t, "region", mergedMeta.Region)
		assert.Equal(t, "secret", mergedMeta.SecretKey)
		assert.Equal(t, "test", mergedMeta.Bucket)
		assert.Equal(t, "endpoint", mergedMeta.Endpoint)
		assert.Equal(t, "token", mergedMeta.SessionToken)
		assert.Equal(t, true, mergedMeta.DecodeBase64)
		assert.Equal(t, false, mergedMeta.EncodeBase64)
	})

	t.Run("Has invalid merged metadata decodeBase64", func(t *testing.T) {
		m := bindings.Metadata{}
		m.Properties = map[string]string{
			"AccessKey": "key", "Region": "region", "SecretKey": "secret", "Bucket": "test", "Endpoint": "endpoint", "SessionToken": "token",
		}
		s3 := AWSS3{}
		meta, err := s3.parseMetadata(m)
		assert.Nil(t, err)
		assert.Equal(t, "key", meta.AccessKey)
		assert.Equal(t, "region", meta.Region)
		assert.Equal(t, "secret", meta.SecretKey)
		assert.Equal(t, "test", meta.Bucket)
		assert.Equal(t, "endpoint", meta.Endpoint)
		assert.Equal(t, "token", meta.SessionToken)

		request := bindings.InvokeRequest{}
		request.Metadata = map[string]string{
			"decodeBase64": "hello",
		}

		mergedMeta, err := meta.mergeWithRequestMetadata(&request)

		assert.NotNil(t, err)
		assert.NotNil(t, mergedMeta)
	})

	t.Run("Has invalid merged metadata encodeBase64", func(t *testing.T) {
		m := bindings.Metadata{}
		m.Properties = map[string]string{
			"AccessKey": "key", "Region": "region", "SecretKey": "secret", "Bucket": "test", "Endpoint": "endpoint", "SessionToken": "token",
		}
		s3 := AWSS3{}
		meta, err := s3.parseMetadata(m)
		assert.Nil(t, err)
		assert.Equal(t, "key", meta.AccessKey)
		assert.Equal(t, "region", meta.Region)
		assert.Equal(t, "secret", meta.SecretKey)
		assert.Equal(t, "test", meta.Bucket)
		assert.Equal(t, "endpoint", meta.Endpoint)
		assert.Equal(t, "token", meta.SessionToken)

		request := bindings.InvokeRequest{}
		request.Metadata = map[string]string{
			"encodeBase64": "bye",
		}

		mergedMeta, err := meta.mergeWithRequestMetadata(&request)

		assert.NotNil(t, err)
		assert.NotNil(t, mergedMeta)
	})
}

func TestGetOption(t *testing.T) {
	s3 := NewAWSS3(logger.NewLogger("s3"))
	s3.metadata = &s3Metadata{}

	t.Run("return error if key is missing", func(t *testing.T) {
		r := bindings.InvokeRequest{}
		_, err := s3.get(&r)
		assert.Error(t, err)
	})
}

func TestDeleteOption(t *testing.T) {
	s3 := NewAWSS3(logger.NewLogger("s3"))
	s3.metadata = &s3Metadata{}

	t.Run("return error if key is missing", func(t *testing.T) {
		r := bindings.InvokeRequest{}
		_, err := s3.delete(&r)
		assert.Error(t, err)
	})
}
@ -0,0 +1,193 @@
|
|||
// ------------------------------------------------------------
|
||||
// Copyright (c) Microsoft Corporation and Dapr Contributors.
|
||||
// Licensed under the MIT License.
|
||||
// ------------------------------------------------------------
|
||||
|
||||
package ses
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
aws_auth "github.com/dapr/components-contrib/authentication/aws"
|
||||
|
||||
"github.com/aws/aws-sdk-go/service/ses"
|
||||
"github.com/dapr/components-contrib/bindings"
|
||||
"github.com/dapr/kit/logger"
|
||||
)
|
||||
|
||||
const (
|
||||
// The character encoding for the email.
|
||||
CharSet = "UTF-8"
|
||||
)
|
||||
|
||||
// AWSSES is an AWS SNS binding
|
||||
type AWSSES struct {
|
||||
metadata *sesMetadata
|
||||
logger logger.Logger
|
||||
svc *ses.SES
|
||||
}
|
||||
|
||||
type sesMetadata struct {
|
||||
Region string `json:"region"`
|
||||
AccessKey string `json:"accessKey"`
|
||||
SecretKey string `json:"secretKey"`
|
||||
SessionToken string `json:"sessionToken"`
|
||||
EmailFrom string `json:"emailFrom"`
|
||||
EmailTo string `json:"emailTo"`
|
||||
Subject string `json:"subject"`
|
||||
EmailCc string `json:"emailCc"`
|
||||
EmailBcc string `json:"emailBcc"`
|
||||
}
|
||||
|
||||
// NewAWSSES creates a new AWSSES binding instance
|
||||
func NewAWSSES(logger logger.Logger) *AWSSES {
|
||||
return &AWSSES{logger: logger}
|
||||
}
|
||||
|
||||
// Init does metadata parsing
|
||||
func (a *AWSSES) Init(metadata bindings.Metadata) error {
|
||||
// Parse input metadata
|
||||
meta, err := a.parseMetadata(metadata)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
svc, err := a.getClient(meta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
a.metadata = meta
|
||||
a.svc = svc
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *AWSSES) Operations() []bindings.OperationKind {
|
||||
return []bindings.OperationKind{bindings.CreateOperation}
|
||||
}
|
||||
|
||||
func (a *AWSSES) parseMetadata(meta bindings.Metadata) (*sesMetadata, error) {
	b, err := json.Marshal(meta.Properties)
	if err != nil {
		return nil, err
	}

	var m sesMetadata
	err = json.Unmarshal(b, &m)
	if err != nil {
		return nil, err
	}

	if meta.Properties["region"] == "" || meta.Properties["accessKey"] == "" ||
		meta.Properties["secretKey"] == "" {
		return &m, errors.New("SES binding error: region, accessKey or secretKey fields are required in metadata")
	}

	return &m, nil
}

func (a *AWSSES) Invoke(req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
	metadata := a.metadata.mergeWithRequestMetadata(req)

	if metadata.EmailFrom == "" {
		return nil, fmt.Errorf("SES binding error: emailFrom property not supplied in configuration- or request-metadata")
	}
	if metadata.EmailTo == "" {
		return nil, fmt.Errorf("SES binding error: emailTo property not supplied in configuration- or request-metadata")
	}
	if metadata.Subject == "" {
		return nil, fmt.Errorf("SES binding error: subject property not supplied in configuration- or request-metadata")
	}

	body, err := strconv.Unquote(string(req.Data))
	if err != nil {
		return nil, fmt.Errorf("SES binding error. Can't unquote data field: %w", err)
	}

	// Assemble the email.
	input := &ses.SendEmailInput{
		Destination: &ses.Destination{
			ToAddresses: aws.StringSlice(strings.Split(metadata.EmailTo, ";")),
		},
		Message: &ses.Message{
			Body: &ses.Body{
				Html: &ses.Content{
					Charset: aws.String(CharSet),
					Data:    aws.String(body),
				},
			},
			Subject: &ses.Content{
				Charset: aws.String(CharSet),
				Data:    aws.String(metadata.Subject),
			},
		},
		Source: aws.String(metadata.EmailFrom),
		// TODO: Add configuration set: https://docs.aws.amazon.com/ses/latest/DeveloperGuide/using-configuration-sets.html
		// ConfigurationSetName: aws.String(ConfigurationSet),
	}

	if metadata.EmailCc != "" {
		// Set the Cc addresses on the existing destination so the To addresses are preserved.
		input.Destination.CcAddresses = aws.StringSlice(strings.Split(metadata.EmailCc, ";"))
	}
	if metadata.EmailBcc != "" {
		input.Destination.BccAddresses = aws.StringSlice(strings.Split(metadata.EmailBcc, ";"))
	}

	// Attempt to send the email.
	result, err := a.svc.SendEmail(input)
	if err != nil {
		return nil, fmt.Errorf("SES binding error. Sending email failed: %w", err)
	}

	a.logger.Debug("SES binding: sent email successfully ", result.MessageId)

	return nil, nil
}

// Helper to merge config and request metadata
func (metadata sesMetadata) mergeWithRequestMetadata(req *bindings.InvokeRequest) sesMetadata {
	merged := metadata

	if emailFrom := req.Metadata["emailFrom"]; emailFrom != "" {
		merged.EmailFrom = emailFrom
	}

	if emailTo := req.Metadata["emailTo"]; emailTo != "" {
		merged.EmailTo = emailTo
	}

	if emailCC := req.Metadata["emailCc"]; emailCC != "" {
		merged.EmailCc = emailCC
	}

	if emailBCC := req.Metadata["emailBcc"]; emailBCC != "" {
		merged.EmailBcc = emailBCC
	}

	if subject := req.Metadata["subject"]; subject != "" {
		merged.Subject = subject
	}

	return merged
}

func (a *AWSSES) getClient(metadata *sesMetadata) (*ses.SES, error) {
	sess, err := aws_auth.GetClient(metadata.AccessKey, metadata.SecretKey, metadata.SessionToken, metadata.Region, "")
	if err != nil {
		return nil, fmt.Errorf("SES binding error: error creating AWS session %w", err)
	}

	// Create an SES instance
	svc := ses.New(sess)

	return svc, nil
}
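For orientation, a minimal usage sketch (an editor's illustration, not part of the commit). It assumes the package's NewAWSSES constructor from earlier in this file; all credential values are placeholders. Note that Invoke passes req.Data through strconv.Unquote, so the payload must be a JSON-quoted string:

	package main

	import (
		"strconv"

		"github.com/dapr/components-contrib/bindings"
		ses "github.com/dapr/components-contrib/bindings/aws/ses"
		"github.com/dapr/kit/logger"
	)

	func main() {
		sesBinding := ses.NewAWSSES(logger.NewLogger("ses-example"))
		meta := bindings.Metadata{}
		meta.Properties = map[string]string{
			"region":    "us-east-1",   // placeholder
			"accessKey": "myAccessKey", // placeholder
			"secretKey": "mySecretKey", // placeholder
			"emailFrom": "from@example.com",
			"subject":   "Configured fallback subject",
		}
		if err := sesBinding.Init(meta); err != nil {
			panic(err)
		}

		// Request metadata overrides configured values via mergeWithRequestMetadata;
		// the body is quoted because Invoke calls strconv.Unquote on it.
		req := &bindings.InvokeRequest{
			Data: []byte(strconv.Quote("<h1>Hello</h1>")),
			Metadata: map[string]string{
				"emailTo": "to@example.com",
				"subject": "Per-request subject",
			},
		}
		if _, err := sesBinding.Invoke(req); err != nil {
			panic(err)
		}
	}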
@@ -0,0 +1,155 @@
// ------------------------------------------------------------
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
// ------------------------------------------------------------

package ses

import (
	"testing"

	"github.com/dapr/components-contrib/bindings"
	"github.com/dapr/kit/logger"
	"github.com/stretchr/testify/assert"
)

func TestParseMetadata(t *testing.T) {
	logger := logger.NewLogger("test")

	t.Run("Has correct metadata", func(t *testing.T) {
		m := bindings.Metadata{}
		m.Properties = map[string]string{
			"region":       "myRegionForSES",
			"accessKey":    "myAccessKeyForSES",
			"secretKey":    "mySecretKeyForSES",
			"sessionToken": "mySessionToken",
			"emailFrom":    "from@dapr.io",
			"emailTo":      "to@dapr.io",
			"emailCc":      "cc@dapr.io",
			"emailBcc":     "bcc@dapr.io",
			"subject":      "Test email",
		}
		r := AWSSES{logger: logger}
		smtpMeta, err := r.parseMetadata(m)
		assert.Nil(t, err)
		assert.Equal(t, "myRegionForSES", smtpMeta.Region)
		assert.Equal(t, "myAccessKeyForSES", smtpMeta.AccessKey)
		assert.Equal(t, "mySecretKeyForSES", smtpMeta.SecretKey)
		assert.Equal(t, "mySessionToken", smtpMeta.SessionToken)
		assert.Equal(t, "from@dapr.io", smtpMeta.EmailFrom)
		assert.Equal(t, "to@dapr.io", smtpMeta.EmailTo)
		assert.Equal(t, "cc@dapr.io", smtpMeta.EmailCc)
		assert.Equal(t, "bcc@dapr.io", smtpMeta.EmailBcc)
		assert.Equal(t, "Test email", smtpMeta.Subject)
	})

	t.Run("region is required", func(t *testing.T) {
		m := bindings.Metadata{}
		m.Properties = map[string]string{
			"accessKey": "myAccessKeyForSES",
			"secretKey": "mySecretKeyForSES",
			"emailFrom": "from@dapr.io",
			"emailTo":   "to@dapr.io",
			"emailCc":   "cc@dapr.io",
			"emailBcc":  "bcc@dapr.io",
			"subject":   "Test email",
		}
		r := AWSSES{logger: logger}
		_, err := r.parseMetadata(m)
		assert.Error(t, err)
	})

	t.Run("accessKey is required", func(t *testing.T) {
		m := bindings.Metadata{}
		m.Properties = map[string]string{
			"region":    "myRegionForSES",
			"secretKey": "mySecretKeyForSES",
			"emailFrom": "from@dapr.io",
			"emailTo":   "to@dapr.io",
			"emailCc":   "cc@dapr.io",
			"emailBcc":  "bcc@dapr.io",
			"subject":   "Test email",
		}
		r := AWSSES{logger: logger}
		_, err := r.parseMetadata(m)
		assert.Error(t, err)
	})

	t.Run("secretKey is required", func(t *testing.T) {
		m := bindings.Metadata{}
		m.Properties = map[string]string{
			"region":    "myRegionForSES",
			"accessKey": "myAccessKeyForSES",
			"emailFrom": "from@dapr.io",
			"emailTo":   "to@dapr.io",
			"emailCc":   "cc@dapr.io",
			"emailBcc":  "bcc@dapr.io",
			"subject":   "Test email",
		}
		r := AWSSES{logger: logger}
		_, err := r.parseMetadata(m)
		assert.Error(t, err)
	})
}

func TestMergeWithRequestMetadata(t *testing.T) {
	t.Run("Has merged metadata", func(t *testing.T) {
		sesMeta := sesMetadata{
			Region:    "myRegionForSES",
			AccessKey: "myAccessKeyForSES",
			SecretKey: "mySecretKeyForSES",
			EmailFrom: "from@dapr.io",
			EmailTo:   "to@dapr.io",
			EmailCc:   "cc@dapr.io",
			EmailBcc:  "bcc@dapr.io",
			Subject:   "Test email",
		}

		request := bindings.InvokeRequest{}
		request.Metadata = map[string]string{
			"emailFrom": "req-from@dapr.io",
			"emailTo":   "req-to@dapr.io",
			"emailCc":   "req-cc@dapr.io",
			"emailBcc":  "req-bcc@dapr.io",
			"subject":   "req-Test email",
		}

		mergedMeta := sesMeta.mergeWithRequestMetadata(&request)

		assert.Equal(t, "myRegionForSES", mergedMeta.Region)
		assert.Equal(t, "myAccessKeyForSES", mergedMeta.AccessKey)
		assert.Equal(t, "mySecretKeyForSES", mergedMeta.SecretKey)
		assert.Equal(t, "req-from@dapr.io", mergedMeta.EmailFrom)
		assert.Equal(t, "req-to@dapr.io", mergedMeta.EmailTo)
		assert.Equal(t, "req-cc@dapr.io", mergedMeta.EmailCc)
		assert.Equal(t, "req-bcc@dapr.io", mergedMeta.EmailBcc)
		assert.Equal(t, "req-Test email", mergedMeta.Subject)
	})

	t.Run("Has no merged metadata", func(t *testing.T) {
		sesMeta := sesMetadata{
			Region:    "myRegionForSES",
			AccessKey: "myAccessKeyForSES",
			SecretKey: "mySecretKeyForSES",
			EmailFrom: "from@dapr.io",
			EmailTo:   "to@dapr.io",
			EmailCc:   "cc@dapr.io",
			EmailBcc:  "bcc@dapr.io",
			Subject:   "Test email",
		}

		request := bindings.InvokeRequest{}
		request.Metadata = map[string]string{}

		mergedMeta := sesMeta.mergeWithRequestMetadata(&request)

		assert.Equal(t, "myRegionForSES", mergedMeta.Region)
		assert.Equal(t, "myAccessKeyForSES", mergedMeta.AccessKey)
		assert.Equal(t, "mySecretKeyForSES", mergedMeta.SecretKey)
		assert.Equal(t, "from@dapr.io", mergedMeta.EmailFrom)
		assert.Equal(t, "to@dapr.io", mergedMeta.EmailTo)
		assert.Equal(t, "cc@dapr.io", mergedMeta.EmailCc)
		assert.Equal(t, "bcc@dapr.io", mergedMeta.EmailBcc)
		assert.Equal(t, "Test email", mergedMeta.Subject)
	})
}
@@ -11,6 +11,7 @@ import (
	"strings"

	"github.com/a8m/documentdb"
	"github.com/dapr/components-contrib/authentication/azure"
	"github.com/dapr/components-contrib/bindings"
	"github.com/dapr/kit/logger"
)
@@ -46,11 +47,26 @@ func (c *CosmosDB) Init(metadata bindings.Metadata) error {
	}

	c.partitionKey = m.PartitionKey
	client := documentdb.New(m.URL, &documentdb.Config{
		MasterKey: &documentdb.Key{
			Key: m.MasterKey,
		},
	})

	// Create the client; first, try authenticating with a master key, if present
	var config *documentdb.Config
	if m.MasterKey != "" {
		config = documentdb.NewConfig(&documentdb.Key{
			Key: m.MasterKey,
		})
	} else {
		// Fallback to using Azure AD
		env, errB := azure.NewEnvironmentSettings("cosmosdb", metadata.Properties)
		if errB != nil {
			return errB
		}
		spt, errB := env.GetServicePrincipalToken()
		if errB != nil {
			return errB
		}
		config = documentdb.NewConfigWithServicePrincipal(spt)
	}
	client := documentdb.New(m.URL, config)

	dbs, err := client.QueryDatabases(&documentdb.Query{
		Query: "SELECT * FROM ROOT r WHERE r.id=@id",
@@ -247,7 +247,7 @@ func (a *AzureEventGrid) createSubscription() error {
		return err
	}

	res := result.Future.Response()
	res := result.FutureAPI.Response()

	if res.StatusCode != fasthttp.StatusCreated {
		bodyBytes, err := ioutil.ReadAll(res.Body)
@@ -238,7 +238,7 @@ func (a *AzureEventHubs) Read(handler func(*bindings.ReadResponse) ([]byte, erro
	signal.Notify(exitChan, os.Interrupt, syscall.SIGTERM)
	<-exitChan

	a.hub.Close(context.Background())
	a.Close()

	return nil
}
@@ -324,3 +324,7 @@ func (a *AzureEventHubs) RegisterEventProcessor(handler func(*bindings.ReadRespo

	return nil
}

func (a *AzureEventHubs) Close() error {
	return a.hub.Close(context.Background())
}
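The same Close pattern is added to several more bindings in this commit (Service Bus queues, GCP storage and Pub/Sub, Influx, and Postgres below). A short sketch (an editor's illustration, not part of the commit) of shutting such bindings down together; the binding variables here are hypothetical, and each one satisfies io.Closer through its new Close method (imports io and log assumed):

	// eventHubsBinding and serviceBusBinding are hypothetical, previously
	// initialized binding instances.
	closers := []io.Closer{eventHubsBinding, serviceBusBinding}
	for _, c := range closers {
		if err := c.Close(); err != nil {
			log.Printf("error closing binding: %v", err)
		}
	}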
@@ -177,3 +177,7 @@ func (a *AzureServiceBusQueues) Read(handler func(*bindings.ReadResponse) ([]byt

	return nil
}

func (a *AzureServiceBusQueues) Close() error {
	return a.client.Close(context.Background())
}
@@ -99,3 +99,7 @@ func (g *GCPStorage) Invoke(req *bindings.InvokeRequest) (*bindings.InvokeRespon

	return nil, nil
}

func (g *GCPStorage) Close() error {
	return g.client.Close()
}
@@ -113,3 +113,7 @@ func (g *GCPPubSub) Invoke(req *bindings.InvokeRequest) (*bindings.InvokeRespons

	return nil, err
}

func (g *GCPPubSub) Close() error {
	return g.client.Close()
}
@@ -104,7 +104,13 @@ func (i *Influx) Invoke(req *bindings.InvokeRequest) (*bindings.InvokeResponse,
	if err != nil {
		return nil, errors.New("Influx Error: Cannot write point")
	}
	i.client.Close()

	return nil, nil
}

func (i *Influx) Close() error {
	i.client.Close()
	i.writeAPI = nil

	return nil
}
@@ -36,6 +36,7 @@ type Kafka struct {
	authRequired  bool
	saslUsername  string
	saslPassword  string
	initialOffset int64
	logger        logger.Logger
}

@@ -47,6 +48,7 @@ type kafkaMetadata struct {
	AuthRequired    bool   `json:"authRequired"`
	SaslUsername    string `json:"saslUsername"`
	SaslPassword    string `json:"saslPassword"`
	InitialOffset   int64  `json:"initialOffset"`
	MaxMessageBytes int
}

@@ -99,6 +101,7 @@ func (k *Kafka) Init(metadata bindings.Metadata) error {
	k.publishTopic = meta.PublishTopic
	k.consumerGroup = meta.ConsumerGroup
	k.authRequired = meta.AuthRequired
	k.initialOffset = meta.InitialOffset

	// ignore SASL properties if authRequired is false
	if meta.AuthRequired {
@@ -136,6 +139,12 @@ func (k *Kafka) getKafkaMetadata(metadata bindings.Metadata) (*kafkaMetadata, er
	meta.ConsumerGroup = metadata.Properties["consumerGroup"]
	meta.PublishTopic = metadata.Properties["publishTopic"]

	initialOffset, err := parseInitialOffset(metadata.Properties["initialOffset"])
	if err != nil {
		return nil, err
	}
	meta.InitialOffset = initialOffset

	if val, ok := metadata.Properties["brokers"]; ok && val != "" {
		meta.Brokers = strings.Split(val, ",")
	}
@@ -210,6 +219,7 @@ func (k *Kafka) getSyncProducer(meta *kafkaMetadata) (sarama.SyncProducer, error
func (k *Kafka) Read(handler func(*bindings.ReadResponse) ([]byte, error)) error {
	config := sarama.NewConfig()
	config.Version = sarama.V1_0_0_0
	config.Consumer.Offsets.Initial = k.initialOffset
	// ignore SASL properties if authRequired is false
	if k.authRequired {
		updateAuthInfo(config, k.saslUsername, k.saslPassword)
@@ -283,3 +293,16 @@ func (k *Kafka) Close() error {

	return nil
}

func parseInitialOffset(value string) (initialOffset int64, err error) {
	initialOffset = sarama.OffsetNewest // Default
	if strings.EqualFold(value, "oldest") {
		initialOffset = sarama.OffsetOldest
	} else if strings.EqualFold(value, "newest") {
		initialOffset = sarama.OffsetNewest
	} else if value != "" {
		return 0, fmt.Errorf("kafka error: invalid initialOffset: %s", value)
	}

	return initialOffset, err
}
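As a quick reference, the mapping implemented by parseInitialOffset above (an editor's sketch, not part of the commit):

	offset, _ := parseInitialOffset("oldest") // sarama.OffsetOldest; matching is case-insensitive
	offset, _ = parseInitialOffset("NEWEST")  // sarama.OffsetNewest
	offset, _ = parseInitialOffset("")        // empty value keeps the default, sarama.OffsetNewest
	_, err := parseInitialOffset("bogus")     // error: "kafka error: invalid initialOffset: bogus"
	_, _ = offset, err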
@@ -9,9 +9,11 @@ import (
	"errors"
	"testing"

	"github.com/Shopify/sarama"
	"github.com/dapr/components-contrib/bindings"
	"github.com/dapr/kit/logger"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestParseMetadata(t *testing.T) {
@@ -273,4 +275,17 @@ func TestParseMetadata(t *testing.T) {
		assert.Error(t, errors.New("kafka error: missing SASL Password"), err)
		assert.Nil(t, meta)
	})

	t.Run("correct metadata (initialOffset)", func(t *testing.T) {
		m := bindings.Metadata{}
		m.Properties = map[string]string{"consumerGroup": "a", "publishTopic": "a", "brokers": "a", "topics": "a", "authRequired": "false", "initialOffset": "oldest"}
		k := Kafka{logger: logger}
		meta, err := k.getKafkaMetadata(m)
		require.NoError(t, err)
		assert.Equal(t, sarama.OffsetOldest, meta.InitialOffset)
		m.Properties["initialOffset"] = "newest"
		meta, err = k.getKafkaMetadata(m)
		require.NoError(t, err)
		assert.Equal(t, sarama.OffsetNewest, meta.InitialOffset)
	})
}
@@ -129,6 +129,16 @@ func (p *Postgres) Invoke(req *bindings.InvokeRequest) (resp *bindings.InvokeRes
	return resp, nil
}

// Close closes the PostgreSql instance
func (p *Postgres) Close() error {
	if p.db == nil {
		return nil
	}
	p.db.Close()

	return nil
}

func (p *Postgres) query(sql string) (result []byte, err error) {
	p.logger.Debugf("query: %s", sql)

@@ -112,6 +112,11 @@ func TestPostgresIntegration(t *testing.T) {
		_, err := b.Invoke(req)
		assert.NoError(t, err)
	})

	t.Run("Close", func(t *testing.T) {
		err := b.Close()
		assert.NoError(t, err, "expected no error closing output binding")
	})
}

func assertResponse(t *testing.T, res *bindings.InvokeResponse, err error) {
@@ -10,6 +10,7 @@ import (
	"errors"
	"fmt"
	"strconv"
	"strings"

	"github.com/dapr/components-contrib/bindings"
	"github.com/dapr/kit/logger"
@@ -20,6 +21,7 @@ const (
	defaultPriority = 3
	lowestPriority  = 1
	highestPriority = 5
	mailSeparator   = ";"
)

// Mailer allows sending of emails using the Simple Mail Transfer Protocol
@@ -85,9 +87,14 @@ func (s *Mailer) Invoke(req *bindings.InvokeRequest) (*bindings.InvokeResponse,
	// Compose message
	msg := gomail.NewMessage()
	msg.SetHeader("From", metadata.EmailFrom)
	msg.SetHeader("To", metadata.EmailTo)
	msg.SetHeader("CC", metadata.EmailCC)
	msg.SetHeader("BCC", metadata.EmailBCC)
	msg.SetHeader("To", metadata.parseAddresses(metadata.EmailTo)...)
	if metadata.EmailCC != "" {
		msg.SetHeader("Cc", metadata.parseAddresses(metadata.EmailCC)...)
	}
	if metadata.EmailBCC != "" {
		msg.SetHeader("Bcc", metadata.parseAddresses(metadata.EmailBCC)...)
	}

	msg.SetHeader("Subject", metadata.Subject)
	msg.SetHeader("X-priority", strconv.Itoa(metadata.Priority))
	body, err := strconv.Unquote(string(req.Data))
@@ -117,10 +124,18 @@ func (s *Mailer) parseMetadata(meta bindings.Metadata) (Metadata, error) {
	smtpMeta := Metadata{}

	// required metadata properties
	if meta.Properties["host"] == "" || meta.Properties["port"] == "" ||
		meta.Properties["user"] == "" || meta.Properties["password"] == "" {
		return smtpMeta, errors.New("smtp binding error: host, port, user and password fields are required in metadata")
	}
	if meta.Properties["host"] == "" || meta.Properties["port"] == "" {
		return smtpMeta, errors.New("smtp binding error: host and port fields are required in metadata")
	}

	//nolint
	if (meta.Properties["user"] != "" && meta.Properties["password"] == "") ||
		(meta.Properties["user"] == "" && meta.Properties["password"] != "") {
		return smtpMeta, errors.New("smtp binding error: user and password fields must be supplied together in metadata")
	} else if meta.Properties["user"] == "" && meta.Properties["password"] == "" {
		s.logger.Warn("smtp binding warn: user and password are empty")
	}

	smtpMeta.Host = meta.Properties["host"]
	port, err := strconv.Atoi(meta.Properties["port"])
	if err != nil {
@@ -205,3 +220,7 @@ func (metadata *Metadata) parsePriority(req string) error {

	return nil
}

func (metadata Metadata) parseAddresses(addresses string) []string {
	return strings.Split(addresses, mailSeparator)
}
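A short sketch of the new helper (an editor's illustration, not part of the commit): address lists are split on the mailSeparator constant, and the result can be spread directly into gomail's SetHeader:

	m := Metadata{}
	addrs := m.parseAddresses("to1@dapr.io;to2@dapr.io")
	// addrs == []string{"to1@dapr.io", "to2@dapr.io"}
	// usable as: msg.SetHeader("To", addrs...)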
@@ -95,6 +95,44 @@ func TestParseMetadata(t *testing.T) {
		assert.NotNil(t, smtpMeta)
		assert.NotNil(t, err)
	})
	t.Run("Incorrect metadata (user, no password)", func(t *testing.T) {
		m := bindings.Metadata{}
		m.Properties = map[string]string{
			"host":          "mailserver.dapr.io",
			"port":          "25",
			"user":          "user@dapr.io",
			"skipTLSVerify": "true",
			"emailFrom":     "from@dapr.io",
			"emailTo":       "to@dapr.io",
			"emailCC":       "cc@dapr.io",
			"emailBCC":      "bcc@dapr.io",
			"subject":       "Test email",
			"priority":      "0",
		}
		r := Mailer{logger: logger}
		smtpMeta, err := r.parseMetadata(m)
		assert.NotNil(t, smtpMeta)
		assert.NotNil(t, err)
	})
	t.Run("Incorrect metadata (no user, password)", func(t *testing.T) {
		m := bindings.Metadata{}
		m.Properties = map[string]string{
			"host":          "mailserver.dapr.io",
			"port":          "25",
			"password":      "P@$$w0rd!",
			"skipTLSVerify": "true",
			"emailFrom":     "from@dapr.io",
			"emailTo":       "to@dapr.io",
			"emailCC":       "cc@dapr.io",
			"emailBCC":      "bcc@dapr.io",
			"subject":       "Test email",
			"priority":      "0",
		}
		r := Mailer{logger: logger}
		smtpMeta, err := r.parseMetadata(m)
		assert.NotNil(t, smtpMeta)
		assert.NotNil(t, err)
	})
}

func TestMergeWithRequestMetadata(t *testing.T) {
@@ -29,6 +29,7 @@ git clone https://github.com/dapr/components-contrib.git github.com/dapr/compone
1. Create your component directory under the directory for its component type
2. Copy component files from the reference component to your component directory
3. Add Go unit tests for your component
4. Add [conformance tests](/tests/conformance/README.md) for your component.

| Type | Directory | Reference | Docs |
|------|-----------|--------------------------|------|
go.mod
@@ -7,30 +7,31 @@ require (
	cloud.google.com/go/datastore v1.1.0
	cloud.google.com/go/pubsub v1.5.0
	cloud.google.com/go/storage v1.10.0
	github.com/Azure/azure-amqp-common-go/v3 v3.1.0 // indirect
	github.com/Azure/azure-event-hubs-go/v3 v3.3.10
	github.com/Azure/azure-sdk-for-go v48.2.0+incompatible
	github.com/Azure/azure-service-bus-go v0.10.10
	github.com/Azure/azure-sdk-for-go v57.2.0+incompatible
	github.com/Azure/azure-service-bus-go v0.11.1
	github.com/Azure/azure-storage-blob-go v0.10.0
	github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd
	github.com/Azure/go-amqp v0.13.1
	github.com/Azure/go-autorest/autorest v0.11.12
	github.com/Azure/go-autorest/autorest/adal v0.9.5
	github.com/Azure/go-autorest/autorest/azure/auth v0.4.2
	github.com/Azure/go-amqp v0.13.13
	github.com/Azure/go-autorest/autorest v0.11.21
	github.com/Azure/go-autorest/autorest/adal v0.9.16
	github.com/Azure/go-autorest/autorest/azure/auth v0.5.8
	github.com/DATA-DOG/go-sqlmock v1.5.0
	github.com/Shopify/sarama v1.23.1
	github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46 // indirect
	github.com/a8m/documentdb v1.2.1-0.20190920062420-efdd52fe0905
	github.com/a8m/documentdb v1.3.0
	github.com/aerospike/aerospike-client-go v4.5.0+incompatible
	github.com/agrea/ptr v0.0.0-20180711073057-77a518d99b7b
	github.com/ajg/form v1.5.1 // indirect
	github.com/alibaba/sentinel-golang v1.0.2-0.20210728053800-194d4be01dfe
	github.com/alibaba/sentinel-golang v1.0.3
	github.com/alicebob/miniredis/v2 v2.13.3
	github.com/aliyun/aliyun-oss-go-sdk v2.0.7+incompatible
	github.com/aliyun/aliyun-tablestore-go-sdk v1.6.0
	github.com/andybalholm/brotli v1.0.1 // indirect
	github.com/apache/pulsar-client-go v0.1.0
	github.com/apache/rocketmq-client-go/v2 v2.1.0
	github.com/apache/thrift v0.14.0 // indirect
	github.com/asaskevich/EventBus v0.0.0-20200907212545-49d423059eef
	github.com/aws/aws-sdk-go v1.36.30
	github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f // indirect
	github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b
@@ -43,7 +44,7 @@ require (
	github.com/dancannon/gorethink v4.0.0+incompatible
	github.com/dapr/kit v0.0.2-0.20210614175626-b9074b64d233
	github.com/deepmap/oapi-codegen v1.8.1 // indirect
	github.com/denisenkom/go-mssqldb v0.0.0-20191128021309-1d7a30a10f73
	github.com/denisenkom/go-mssqldb v0.0.0-20210411162248-d9abbec934ba
	github.com/dghubble/go-twitter v0.0.0-20190719072343-39e5462e111f
	github.com/dghubble/oauth1 v0.6.0
	github.com/didip/tollbooth v4.0.2+incompatible
@@ -95,7 +96,7 @@ require (
	github.com/nacos-group/nacos-sdk-go v1.0.8
	github.com/nats-io/nats-server/v2 v2.2.1 // indirect
	github.com/nats-io/nats-streaming-server v0.21.2 // indirect
	github.com/nats-io/nats.go v1.10.1-0.20210330225420-a0b1f60162f8
	github.com/nats-io/nats.go v1.12.0
	github.com/nats-io/stan.go v0.8.3
	github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
	github.com/nxadm/tail v1.4.8 // indirect
@@ -122,7 +123,7 @@ require (
	github.com/yudai/gojsondiff v1.0.0 // indirect
	github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 // indirect
	github.com/yuin/gopher-lua v0.0.0-20200603152657-dc2b0ca8b37e // indirect
	go.mongodb.org/mongo-driver v1.1.2
	go.mongodb.org/mongo-driver v1.5.1
	go.opencensus.io v0.22.5 // indirect
	go.uber.org/atomic v1.8.0 // indirect
	go.uber.org/multierr v1.7.0 // indirect
go.sum
@@ -39,8 +39,8 @@ cloud.google.com/go/storage v1.10.0 h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-amqp-common-go/v3 v3.0.1/go.mod h1:PBIGdzcO1teYoufTKMcGibdKaYZv4avS+O6LNIp8bq0=
github.com/Azure/azure-amqp-common-go/v3 v3.1.0 h1:1N4YSkWYWffOpQHromYdOucBSQXhNRKzqtgICy6To8Q=
github.com/Azure/azure-amqp-common-go/v3 v3.1.0/go.mod h1:PBIGdzcO1teYoufTKMcGibdKaYZv4avS+O6LNIp8bq0=
github.com/Azure/azure-amqp-common-go/v3 v3.2.0 h1:BK/3P4TW4z2HLD6G5tMlHRvptOxxi4s9ee5r8sdHBUs=
github.com/Azure/azure-amqp-common-go/v3 v3.2.0/go.mod h1:zN7QL/vfCsq3XQxQaTkg4ScO786CA2rQnZ1LXX7QryE=
github.com/Azure/azure-event-hubs-go/v3 v3.3.10 h1:YJDY8hHs1NTMs0VqKyLIvlDSgR7um2L1CTUtsgEEPNs=
github.com/Azure/azure-event-hubs-go/v3 v3.3.10/go.mod h1:sszMsQpFy8Au2s2NColbnJY8lRVm1koW0XxBJ3rN5TY=
github.com/Azure/azure-pipeline-go v0.1.8/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg=
@@ -48,18 +48,20 @@ github.com/Azure/azure-pipeline-go v0.1.9/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9a
github.com/Azure/azure-pipeline-go v0.2.2 h1:6oiIS9yaG6XCCzhgAgKFfIWyo4LLCiDhZot6ltoThhY=
github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc=
github.com/Azure/azure-sdk-for-go v37.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go v48.2.0+incompatible h1:+t2P1j1r5N6lYgPiiz7ZbEVZFkWjVe9WhHbMm0gg8hw=
github.com/Azure/azure-sdk-for-go v48.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-service-bus-go v0.10.10 h1:PgwL3RAaPgxY4Efe/iqNiZ/qrfibJNli3E6z5ue2f5w=
github.com/Azure/azure-service-bus-go v0.10.10/go.mod h1:o5z/3lDG1iT/T/G7vgIwIqVDTx9Qa2wndf5OdzSzpF8=
github.com/Azure/azure-sdk-for-go v51.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go v57.2.0+incompatible h1:zoJapafogLazoyp0x9aQENzNNqxvU6pnGtb2P8/i+HI=
github.com/Azure/azure-sdk-for-go v57.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-service-bus-go v0.11.1 h1:LH1gMbv8tciAH9VPTgqGKPIvc7TrpO3j/15OkL8wHU8=
github.com/Azure/azure-service-bus-go v0.11.1/go.mod h1:9ta+jToyzWVnE1I4KbceZ/+ps+tos8Qz/v7MBgFE2Z8=
github.com/Azure/azure-storage-blob-go v0.6.0/go.mod h1:oGfmITT1V6x//CswqY2gtAHND+xIP64/qL7a5QJix0Y=
github.com/Azure/azure-storage-blob-go v0.10.0 h1:evCwGreYo3XLeBV4vSxLbLiYb6e0SzsJiXQVRGsRXxs=
github.com/Azure/azure-storage-blob-go v0.10.0/go.mod h1:ep1edmW+kNQx4UfWM9heESNmQdijykocJ0YOxmMX8SE=
github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd h1:b3wyxBl3vvr15tUAziPBPK354y+LSdfPCpex5oBttHo=
github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd/go.mod h1:K6am8mT+5iFXgingS9LUc7TmbsW6XBw3nxaRyaMyWc8=
github.com/Azure/go-amqp v0.13.0/go.mod h1:qj+o8xPCz9tMSbQ83Vp8boHahuRDl5mkNHyt1xlxUTs=
github.com/Azure/go-amqp v0.13.1 h1:dXnEJ89Hf7wMkcBbLqvocZlM4a3uiX9uCxJIvU77+Oo=
github.com/Azure/go-amqp v0.13.1/go.mod h1:qj+o8xPCz9tMSbQ83Vp8boHahuRDl5mkNHyt1xlxUTs=
github.com/Azure/go-amqp v0.13.13 h1:OBPwCO50EzniOyZR0M4VbGJYDxceIy3SFOnKVMJktdY=
github.com/Azure/go-amqp v0.13.13/go.mod h1:D5ZrjQqB1dyp1A+G73xeL/kNn7D5qHJIIsNNps7YNmk=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
@@ -67,21 +69,27 @@ github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+B
github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0=
github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
github.com/Azure/go-autorest/autorest v0.11.3/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
github.com/Azure/go-autorest/autorest v0.11.7/go.mod h1:V6p3pKZx1KKkJubbxnDWrzNhEIfOy/pTGasLqzHIPHs=
github.com/Azure/go-autorest/autorest v0.11.12 h1:gI8ytXbxMfI+IVbI9mP2JGCTXIuhHLgRlvQ9X4PsnHE=
github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw=
github.com/Azure/go-autorest/autorest v0.11.17/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw=
github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
github.com/Azure/go-autorest/autorest v0.11.21 h1:w77zY/9RnUAWcIQyDC0Fc89mCvwftR8F+zsR/OH6enk=
github.com/Azure/go-autorest/autorest v0.11.21/go.mod h1:Do/yuMSW/13ayUkcVREpsMHGG+MvV81uzSCFgYPj4tM=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
github.com/Azure/go-autorest/autorest/adal v0.9.4/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE=
github.com/Azure/go-autorest/autorest/adal v0.9.5 h1:Y3bBUV4rTuxenJJs41HU3qmqsb+auo+a3Lz+PlJPpL0=
github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
github.com/Azure/go-autorest/autorest/azure/auth v0.4.2 h1:iM6UAvjR97ZIeR93qTcwpKNMpV+/FTWjwEbuPD495Tk=
github.com/Azure/go-autorest/autorest/adal v0.9.11/go.mod h1:nBKAnTomx8gDtl+3ZCJv2v0KACFHWTB2drffI1B68Pk=
github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
github.com/Azure/go-autorest/autorest/adal v0.9.14/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
github.com/Azure/go-autorest/autorest/adal v0.9.16 h1:P8An8Z9rH1ldbOLdFpxYorgOt2sywL9V24dAwWHPuGc=
github.com/Azure/go-autorest/autorest/adal v0.9.16/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A=
github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM=
github.com/Azure/go-autorest/autorest/azure/cli v0.3.1 h1:LXl088ZQlP0SBppGFsRZonW6hSvwgL5gRByMbvUbx8U=
github.com/Azure/go-autorest/autorest/azure/auth v0.5.8 h1:TzPg6B6fTZ0G1zBf3T54aI7p3cAT6u//TOXGPmFMOXg=
github.com/Azure/go-autorest/autorest/azure/auth v0.5.8/go.mod h1:kxyKZTSfKh8OVFWPAgOgQ/frrJgeYQJPyR5fLFmXko4=
github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw=
github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 h1:dMOmEJfkLKW/7JsokJqkyoYSgmR08hi9KrhjZb+JALY=
github.com/Azure/go-autorest/autorest/azure/cli v0.4.2/go.mod h1:7qkJkT+j6b+hIpzMOwPChJhTqS8VbsqqgULzMNRugoM=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
@@ -96,11 +104,12 @@ github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsI
github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk=
github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE=
github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI=
github.com/Azure/go-autorest/autorest/validation v0.3.0 h1:3I9AAI63HfcLtphd9g39ruUwRI+Ca+z/f36KHPFRUss=
github.com/Azure/go-autorest/autorest/validation v0.3.0/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E=
github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac=
github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE=
github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg=
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
@@ -131,8 +140,8 @@ github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrU
github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46 h1:5sXbqlSomvdjlRbWyNqkPsJ3Fg+tQZCbgeX1VGljbQY=
github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
github.com/a8m/documentdb v1.2.1-0.20190920062420-efdd52fe0905 h1:lrOYmNobGcyWEjvMIMJERJx1Y4ttPFobY7RHAD+6e10=
github.com/a8m/documentdb v1.2.1-0.20190920062420-efdd52fe0905/go.mod h1:4Z0mpi7fkyqjxUdGiNMO3vagyiUoiwLncaIX6AsW5z0=
github.com/a8m/documentdb v1.3.0 h1:xzZQ6Ts02QesHeQdRr6doF7xfXYSsq9SUIlCqfJjbv4=
github.com/a8m/documentdb v1.3.0/go.mod h1:4Z0mpi7fkyqjxUdGiNMO3vagyiUoiwLncaIX6AsW5z0=
github.com/aerospike/aerospike-client-go v4.5.0+incompatible h1:6ALev/Ge4jW5avSLoqgvPYTh+FLeeDD9xDhzoMCNgOo=
github.com/aerospike/aerospike-client-go v4.5.0+incompatible/go.mod h1:zj8LBEnWBDOVEIJt8LvaRvDG5ARAoa5dBeHaB472NRc=
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
@@ -145,8 +154,8 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/alibaba/sentinel-golang v1.0.2-0.20210728053800-194d4be01dfe h1:Mcvbdbmprmyb/CxBbDLkrN4cXEl5NB0ZpzRRe0VVlf0=
github.com/alibaba/sentinel-golang v1.0.2-0.20210728053800-194d4be01dfe/go.mod h1:Lag5rIYyJiPOylK8Kku2P+a23gdKMMqzQS7wTnjWEpk=
github.com/alibaba/sentinel-golang v1.0.3 h1:x/04ZV3ONFsLaNYC/tOEEaZZQIJjhxDSxwZGxiWOQhY=
github.com/alibaba/sentinel-golang v1.0.3/go.mod h1:Lag5rIYyJiPOylK8Kku2P+a23gdKMMqzQS7wTnjWEpk=
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk=
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc=
github.com/alicebob/miniredis/v2 v2.13.3 h1:kohgdtN58KW/r9ZDVmMJE3MrfbumwsDQStd0LPAGmmw=
@@ -155,6 +164,8 @@ github.com/aliyun/alibaba-cloud-sdk-go v1.61.18 h1:zOVTBdCKFd9JbCKz9/nt+FovbjPFm
github.com/aliyun/alibaba-cloud-sdk-go v1.61.18/go.mod h1:v8ESoHo4SyHmuB4b1tJqDHxfTGEciD+yhvOU/5s1Rfk=
github.com/aliyun/aliyun-oss-go-sdk v2.0.7+incompatible h1:HXvOJsZw8JT/ldxjX74Aq4H2IY4ojV/mXMDPWFitpv8=
github.com/aliyun/aliyun-oss-go-sdk v2.0.7+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
github.com/aliyun/aliyun-tablestore-go-sdk v1.6.0 h1:Vug1AcQD1bOW1AMrr+61oTCP/NWhGDYzN2FuMXT78yQ=
github.com/aliyun/aliyun-tablestore-go-sdk v1.6.0/go.mod h1:jixoiNNRR/4ziq0yub1fTlxmDcQwlpkaujpaWIATQWM=
github.com/aliyunmq/mq-http-go-sdk v1.0.3 h1:/uhH7DUoaw9XTtsPgDp7zdPUyG5FBKj2GmJJph9z+6o=
github.com/aliyunmq/mq-http-go-sdk v1.0.3/go.mod h1:JYfRMQoPexERvnNNBcal0ZQ2TVQ5ialDiW9ScjaadEM=
github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
@@ -176,12 +187,15 @@ github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 h1:EFSB7Zo9Eg91v7
github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
github.com/asaskevich/EventBus v0.0.0-20200907212545-49d423059eef h1:2JGTg6JapxP9/R33ZaagQtAM4EkkSYnIAlOG5EI8gkM=
github.com/asaskevich/EventBus v0.0.0-20200907212545-49d423059eef/go.mod h1:JS7hed4L1fj0hXcyEejnW57/7LCetXggd+vwrRnYeII=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496 h1:zV3ejI06GQ59hwDQAvmK1qxOQGB3WuVTRoY0okPTAv0=
github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg=
github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
github.com/aws/aws-sdk-go v1.19.38/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
github.com/aws/aws-sdk-go v1.36.30 h1:hAwyfe7eZa7sM+S5mIJZFiNFwJMia9Whz6CYblioLoU=
github.com/aws/aws-sdk-go v1.36.30/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
@@ -271,8 +285,8 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/deepmap/oapi-codegen v1.3.6/go.mod h1:aBozjEveG+33xPiP55Iw/XbVkhtZHEGLq3nxlX0+hfU=
github.com/deepmap/oapi-codegen v1.8.1 h1:gSKgzu1DvWfRctnr0UVwieWkg1LEecP0C2htZyBwDTA=
github.com/deepmap/oapi-codegen v1.8.1/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw=
github.com/denisenkom/go-mssqldb v0.0.0-20191128021309-1d7a30a10f73 h1:OGNva6WhsKst5OZf7eZOklDztV3hwtTHovdrLHV+MsA=
github.com/denisenkom/go-mssqldb v0.0.0-20191128021309-1d7a30a10f73/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
github.com/denisenkom/go-mssqldb v0.0.0-20210411162248-d9abbec934ba h1:HuzamveGKQH9cN1TrsZgEoG0sHvTa5j3LKquWaHR3sY=
github.com/denisenkom/go-mssqldb v0.0.0-20210411162248-d9abbec934ba/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
github.com/devigned/tab v0.1.1 h1:3mD6Kb1mUOYeLpJvTVSDwSg5ZsfSxfvxGRTxRsJsITA=
github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY=
github.com/dghubble/go-twitter v0.0.0-20190719072343-39e5462e111f h1:M2wB039zeS1/LZtN/3A7tWyfctiOBL4ty5PURBmDdWU=
@@ -287,8 +301,9 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cu
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/didip/tollbooth v4.0.2+incompatible h1:fVSa33JzSz0hoh2NxpwZtksAzAgd7zjmGO20HCZtF4M=
github.com/didip/tollbooth v4.0.2+incompatible/go.mod h1:A9b0665CE6l1KmzpDws2++elm/CsuWBMa5Jv4WY0PEY=
github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4=
github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U=
github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c=
github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko=
github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
@@ -334,7 +349,6 @@ github.com/fatih/color v1.10.0 h1:s36xzo75JdqLaaWoiEHk767eHiwo0598uUxyfiPkDsg=
github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
@@ -402,6 +416,30 @@ github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gG
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY=
github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg=
github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs=
github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk=
github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28=
github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo=
github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk=
github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw=
github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360=
github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg=
github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE=
github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8=
github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0=
@@ -429,6 +467,8 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69
github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A=
github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c=
github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
github.com/golang-jwt/jwt/v4 v4.0.0 h1:RAqyYixv1p7uEnocuy8P1nru5wprCh/MH2BIlW5z5/o=
github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY=
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
@@ -676,6 +716,8 @@ github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
github.com/kataras/go-errors v0.0.3 h1:RQSGEb5AHjsGbwhNW8mFC7a9JrgoCLHC8CBQ4keXJYU=
github.com/kataras/go-errors v0.0.3/go.mod h1:K3ncz8UzwI3bpuksXt5tQLmrRlgxfv+52ARvAu1+I+o=
github.com/kataras/go-serializer v0.0.4 h1:isugggrY3DSac67duzQ/tn31mGAUtYqNpE2ob6Xt/SY=
@@ -687,6 +729,7 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.9.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
@@ -730,6 +773,8 @@ github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czP
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE=
github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU=
github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ=
@@ -786,6 +831,7 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8=
github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/moul/http2curl v1.0.0 h1:dRMWoAtb+ePxMlLkrCbAqh4TlPHXvoGUSQ323/9Zahs=
github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ=
@@ -829,8 +875,9 @@ github.com/nats-io/nats.go v1.10.1-0.20201021145452-94be476ad6e0/go.mod h1:VU2zE
github.com/nats-io/nats.go v1.10.1-0.20210127212649-5b4924938a9a/go.mod h1:Sa3kLIonafChP5IF0b55i9uvGR10I3hPETFbi4+9kOI=
github.com/nats-io/nats.go v1.10.1-0.20210211000709-75ded9c77585/go.mod h1:uBWnCKg9luW1g7hgzPxUjHFRI40EuTSX7RCzgnc74Jk=
github.com/nats-io/nats.go v1.10.1-0.20210228004050-ed743748acac/go.mod h1:hxFvLNbNmT6UppX5B5Tr/r3g+XSwGjJzFn6mxPNJEHc=
github.com/nats-io/nats.go v1.10.1-0.20210330225420-a0b1f60162f8 h1:z/0dTBxMgMfWOtmpyHrbIDKx2duzrxkUeQYJMUnRPj4=
github.com/nats-io/nats.go v1.10.1-0.20210330225420-a0b1f60162f8/go.mod h1:Zq9IEHy7zurF0kFbU5aLIknnFI7guh8ijHk+2v+Vf5g=
github.com/nats-io/nats.go v1.12.0 h1:n0oZzK2aIZDMKuEiMKJ9qkCUgVY5vTAAksSXtLlz5Xc=
github.com/nats-io/nats.go v1.12.0/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w=
github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nkeys v0.1.4/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s=
@@ -891,6 +938,7 @@ github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaR
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/peterh/liner v0.0.0-20170211195444-bf27d3ba8e1d/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc=
@@ -956,6 +1004,8 @@ github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqn
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
@@ -987,6 +1037,7 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5I
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
@@ -1049,6 +1100,7 @@ github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0
github.com/tidwall/match v1.0.3 h1:FQUVvBImDutD8wJLN6c5eMzWtjgONK9MwIBCOrUJKeE=
github.com/tidwall/match v1.0.3/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
github.com/tidwall/pretty v0.0.0-20190325153808-1166b9ac2b65/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tidwall/pretty v1.1.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
@@ -1077,9 +1129,13 @@ github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
github.com/vmware/vmware-go-kcl v0.0.0-20191104173950-b6c74c3fe74e h1:KeXc49gLugrPowKxekYZBZ34FEQW5+R6lP8B56B02mo=
github.com/vmware/vmware-go-kcl v0.0.0-20191104173950-b6c74c3fe74e/go.mod h1:JFn5wAwfmRZgv/VScA9aUc51zOVL5395yPKGxPi3eNo=
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk=
github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
github.com/xdg-go/scram v1.0.2 h1:akYIkZ28e6A96dkWNJQu3nmCzH3YfwMPQExUYDaRv7w=
github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
github.com/xdg-go/stringprep v1.0.2 h1:6iq84/ryjjeRmMJwxutI51F2GIPlP5BfTvXHeYjyhBc=
github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0=
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
@@ -1092,6 +1148,8 @@ github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0 h1:6fRhSjgLCkTD3JnJx
github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI=
github.com/yashtewari/glob-intersection v0.0.0-20180916065949-5c77d914dd0b h1:vVRagRXf67ESqAb72hG2C/ZwI8NtJF2u2V76EsuOHGY=
github.com/yashtewari/glob-intersection v0.0.0-20180916065949-5c77d914dd0b/go.mod h1:HptNXiXVDcJjXe9SqMd0v2FsL9f8dz4GnXgltU6q/co=
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d h1:splanxYIlg+5LfHAM6xpdFEAYOk8iySO56hMFq6uLyA=
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
github.com/yudai/gojsondiff v1.0.0 h1:27cbfqXLVEJ1o8I6v3y9lg8Ydm53EKqHXAOMxEGlCOA=
github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg=
github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 h1:BHyfKlQyqbsFN5p3IfnEUduWvb9is428/nNb5L3U01M=
@@ -1111,8 +1169,8 @@ go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
go.mongodb.org/mongo-driver v1.1.2 h1:jxcFYjlkl8xaERsgLo+RNquI0epW6zuy/ZRQs6jnrFA=
go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.mongodb.org/mongo-driver v1.5.1 h1:9nOVLGDfOaZ9R0tBumx/BcuqkbFpyTCU2r/Po7A2azI=
go.mongodb.org/mongo-driver v1.5.1/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw=
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
@@ -1164,6 +1222,7 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -1172,6 +1231,7 @@ golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@@ -1181,6 +1241,7 @@ golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e h1:gsTQYXdTw2Gq7RBsWvlQ91b+aEQ6bXFUngBGuR8sPpI=
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -1279,6 +1340,7 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1303,11 +1365,13 @@ golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190528012530-adf421d2caf4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
@ -1392,11 +1456,15 @@ golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3
|
|||
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190424220101-1e8e1cfdf96b/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
|
|
|
@ -38,7 +38,8 @@ type sqsQueueInfo struct {
type snsSqsMetadata struct {
    // name of the queue for this application. This is provided by the runtime as "consumerID"
    sqsQueueName string

    // name of the dead letter queue for this application
    sqsDeadLettersQueueName string
    // aws endpoint for the component to use.
    Endpoint string
    // access key to use for accessing sqs/sns
@ -54,6 +55,9 @@ type snsSqsMetadata struct {
    messageVisibilityTimeout int64
    // number of times to resend a message after processing of that message fails before removing that message from the queue. Default: 10
    messageRetryLimit int64
    // if sqsDeadLettersQueueName is set to a value, then the messageReceiveLimit defines the number of times a message is received
    // before it is moved to the dead-letters queue. This value must be smaller than messageRetryLimit
    messageReceiveLimit int64
    // amount of time to await receipt of a message before making another request. Default: 1
    messageWaitTimeSeconds int64
    // maximum number of messages to receive from the queue at a time. Default: 10, Maximum: 10
@ -65,6 +69,7 @@ const (
    awsSnsTopicNameKey = "dapr-topic-name"
)

// NewSnsSqs - constructor for a new snssqs dapr component
func NewSnsSqs(l logger.Logger) pubsub.PubSub {
    return &snsSqs{
        logger: l,
@ -86,7 +91,7 @@ func getAliasedProperty(aliases []string, metadata pubsub.Metadata) (string, boo
func parseInt64(input string, propertyName string) (int64, error) {
    number, err := strconv.Atoi(input)
    if err != nil {
        return -1, fmt.Errorf("parsing %s failed with: %v", propertyName, err)
        return -1, fmt.Errorf("parsing %s failed with: %w", propertyName, err)
    }

    return int64(number), nil
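The change from %v to %w in parseInt64 is more than cosmetic: %w wraps the underlying error so callers can still match it programmatically. A minimal, self-contained sketch of the difference (the surrounding names are illustrative; strconv.ErrSyntax is the sentinel strconv.Atoi reports, via *strconv.NumError, for malformed input):

package main

import (
    "errors"
    "fmt"
    "strconv"
)

func main() {
    // %w preserves the error chain, so errors.Is can see the cause.
    wrapped := fmt.Errorf("parsing %s failed with: %w", "messageRetryLimit", strconv.ErrSyntax)
    fmt.Println(errors.Is(wrapped, strconv.ErrSyntax)) // true

    // %v flattens the cause into plain text, breaking errors.Is/errors.As.
    flat := fmt.Errorf("parsing %s failed with: %v", "messageRetryLimit", strconv.ErrSyntax)
    fmt.Println(errors.Is(flat, strconv.ErrSyntax)) // false
}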
@ -175,6 +180,24 @@ func (s *snsSqs) getSnsSqsMetatdata(metadata pubsub.Metadata) (*snsSqsMetadata,
        md.messageRetryLimit = retryLimit
    }

    if val, ok := getAliasedProperty([]string{"sqsDeadLettersQueueName"}, metadata); ok {
        md.sqsDeadLettersQueueName = val
    }

    if val, ok := getAliasedProperty([]string{"messageReceiveLimit"}, metadata); ok {
        messageReceiveLimit, err := parseInt64(val, "messageReceiveLimit")
        if err != nil {
            return nil, err
        }
        // assign: use provided configuration
        md.messageReceiveLimit = messageReceiveLimit
    }

    // XOR check: setting only one of messageReceiveLimit and sqsDeadLettersQueueName is invalid
    if (md.messageReceiveLimit > 0 || len(md.sqsDeadLettersQueueName) > 0) && !(md.messageReceiveLimit > 0 && len(md.sqsDeadLettersQueueName) > 0) {
        return nil, errors.New("to use SQS dead letters queue, messageReceiveLimit and sqsDeadLettersQueueName must both be set to a value")
    }

    if val, ok := props["messageWaitTimeSeconds"]; !ok {
        md.messageWaitTimeSeconds = 1
    } else {
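The guard above enforces an all-or-nothing rule for the dead-letter settings. A quick sketch of a configuration the check rejects (only the dead-letter properties matter here; the other values are placeholders copied from the tests later in this diff):

ps := snsSqs{logger: logger.NewLogger("snssqs sketch")}
md, err := ps.getSnsSqsMetatdata(pubsub.Metadata{Properties: map[string]string{
    "consumerID":              "consumer",
    "sqsDeadLettersQueueName": "my-dlq", // set...
    // "messageReceiveLimit" omitted    // ...but its partner is not
}})
// err is non-nil and md is nil: set both properties, or neither.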
@ -377,7 +400,7 @@ func (s *snsSqs) acknowledgeMessage(queueURL string, receiptHandle *string) erro
    return err
}

func (s *snsSqs) handleMessage(message *sqs.Message, queueInfo *sqsQueueInfo, handler pubsub.Handler) error {
func (s *snsSqs) handleMessage(message *sqs.Message, queueInfo, deadLettersQueueInfo *sqsQueueInfo, handler pubsub.Handler) error {
    // if this message has been received > x times, delete from queue, it's borked
    recvCount, ok := message.Attributes[sqs.MessageSystemAttributeNameApproximateReceiveCount]

@ -391,23 +414,28 @@ func (s *snsSqs) handleMessage(message *sqs.Message, queueInfo *sqsQueueInfo, ha
        return fmt.Errorf("error parsing ApproximateReceiveCount from message: %v", message)
    }

    // if we are over the allowable retry limit, delete the message from the queue
    // TODO dead letter queue
    if recvCountInt >= s.metadata.messageRetryLimit {
    // if we are over the allowable retry limit, and there is no dead-letters queue, delete the message from the queue.
    if deadLettersQueueInfo == nil && recvCountInt >= s.metadata.messageRetryLimit {
        if innerErr := s.acknowledgeMessage(queueInfo.url, message.ReceiptHandle); innerErr != nil {
            return fmt.Errorf("error acknowledging message after receiving the message too many times: %v", innerErr)
            return fmt.Errorf("error acknowledging message after receiving the message too many times: %w", innerErr)
        }

        return fmt.Errorf(
            "message received greater than %v times, deleting this message without further processing", s.metadata.messageRetryLimit)
    }
    // ... else, there is no need to actively do something if we reached the limit defined in messageReceiveLimit as the message had
    // already been moved to the dead-letters queue by SQS
    if deadLettersQueueInfo != nil && recvCountInt >= s.metadata.messageReceiveLimit {
        s.logger.Warnf(
            "message received greater than %v times, moving this message without further processing to dead-letters queue: %v", s.metadata.messageReceiveLimit, s.metadata.sqsDeadLettersQueueName)
    }

    // otherwise try to handle the message
    var messageBody snsMessage
    err = json.Unmarshal([]byte(*(message.Body)), &messageBody)

    if err != nil {
        return fmt.Errorf("error unmarshalling message: %v", err)
        return fmt.Errorf("error unmarshalling message: %w", err)
    }

    topic := parseTopicArn(messageBody.TopicArn)
@ -418,14 +446,14 @@ func (s *snsSqs) handleMessage(message *sqs.Message, queueInfo *sqsQueueInfo, ha
    })

    if err != nil {
        return fmt.Errorf("error handling message: %v", err)
        return fmt.Errorf("error handling message: %w", err)
    }

    // otherwise, there was no error, acknowledge the message
    return s.acknowledgeMessage(queueInfo.url, message.ReceiptHandle)
}

func (s *snsSqs) consumeSubscription(queueInfo *sqsQueueInfo, handler pubsub.Handler) {
func (s *snsSqs) consumeSubscription(queueInfo, deadLettersQueueInfo *sqsQueueInfo, handler pubsub.Handler) {
    go func() {
        for {
            messageResponse, err := s.sqsClient.ReceiveMessage(&sqs.ReceiveMessageInput{
@ -454,7 +482,7 @@ func (s *snsSqs) consumeSubscription(queueInfo *sqsQueueInfo, handler pubsub.Han
            s.logger.Debugf("%v message(s) received", len(messageResponse.Messages))

            for _, m := range messageResponse.Messages {
                if err := s.handleMessage(m, queueInfo, handler); err != nil {
                if err := s.handleMessage(m, queueInfo, deadLettersQueueInfo, handler); err != nil {
                    s.logger.Error(err)
                }
            }
@ -462,6 +490,41 @@ func (s *snsSqs) consumeSubscription(queueInfo *sqsQueueInfo, handler pubsub.Han
    }()
}

func (s *snsSqs) createDeadLettersQueue() (*sqsQueueInfo, error) {
    var deadLettersQueueInfo *sqsQueueInfo
    deadLettersQueueInfo, err := s.getOrCreateQueue(s.metadata.sqsDeadLettersQueueName)
    if err != nil {
        s.logger.Errorf("error retrieving SQS dead-letter queue: %v", err)

        return nil, err
    }

    return deadLettersQueueInfo, nil
}

func (s *snsSqs) createQueueAttributesWithDeadLetters(queueInfo, deadLettersQueueInfo *sqsQueueInfo) (*sqs.SetQueueAttributesInput, error) {
    policy := map[string]string{
        "deadLetterTargetArn": deadLettersQueueInfo.arn,
        "maxReceiveCount":     strconv.FormatInt(s.metadata.messageReceiveLimit, 10),
    }

    b, err := json.Marshal(policy)
    if err != nil {
        s.logger.Errorf("error marshalling dead-letters queue policy: %v", err)

        return nil, err
    }

    sqsSetQueueAttributesInput := &sqs.SetQueueAttributesInput{
        QueueUrl: &queueInfo.url,
        Attributes: map[string]*string{
            sqs.QueueAttributeNameRedrivePolicy: aws.String(string(b)),
        },
    }

    return sqsSetQueueAttributesInput, nil
}
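For reference, the marshalled policy above is exactly the JSON document SQS expects in its RedrivePolicy queue attribute. With messageReceiveLimit set to 6 and an illustrative queue ARN, the attribute value would look like this (a sketch; encoding/json emits map keys in sorted order):

policy := map[string]string{
    "deadLetterTargetArn": "arn:aws:sqs:us-east-1:000000000000:my-dlq", // illustrative ARN
    "maxReceiveCount":     "6",
}
b, _ := json.Marshal(policy)
fmt.Println(string(b))
// {"deadLetterTargetArn":"arn:aws:sqs:us-east-1:000000000000:my-dlq","maxReceiveCount":"6"}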

func (s *snsSqs) Subscribe(req pubsub.SubscribeRequest, handler pubsub.Handler) error {
    // subscribers declare a topic ARN
    // and declare a SQS queue to use
@ -475,13 +538,41 @@ func (s *snsSqs) Subscribe(req pubsub.SubscribeRequest, handler pubsub.Handler)
    }

    // this is the ID of the application, it is supplied via runtime as "consumerID"
    queueInfo, err := s.getOrCreateQueue(s.metadata.sqsQueueName)
    var queueInfo *sqsQueueInfo
    queueInfo, err = s.getOrCreateQueue(s.metadata.sqsQueueName)
    if err != nil {
        s.logger.Errorf("error retrieving SQS queue: %v", err)

        return err
    }

    var deadLettersQueueInfo *sqsQueueInfo
    if len(s.metadata.sqsDeadLettersQueueName) > 0 {
        var derr error
        deadLettersQueueInfo, derr = s.createDeadLettersQueue()
        if derr != nil {
            s.logger.Errorf("error creating dead-letter queue: %v", derr)

            return derr
        }

        var sqsSetQueueAttributesInput *sqs.SetQueueAttributesInput
        sqsSetQueueAttributesInput, derr = s.createQueueAttributesWithDeadLetters(queueInfo, deadLettersQueueInfo)
        if derr != nil {
            s.logger.Errorf("error creating queue attributes for dead-letter queue: %v", derr)

            return derr
        }
        _, derr = s.sqsClient.SetQueueAttributes(sqsSetQueueAttributesInput)
        if derr != nil {
            s.logger.Errorf("error updating queue attributes with dead-letter queue: %v", derr)

            return derr
        }
    }

    // apply the dead letters queue attributes to the current queue

    // subscription creation is idempotent. Subscriptions are unique by topic/queue
    subscribeOutput, err := s.snsClient.Subscribe(&sns.SubscribeInput{
        Attributes: nil,
@ -499,7 +590,7 @@ func (s *snsSqs) Subscribe(req pubsub.SubscribeRequest, handler pubsub.Handler)
    s.subscriptions = append(s.subscriptions, subscribeOutput.SubscriptionArn)
    s.logger.Debugf("Subscribed to topic %s: %v", req.Topic, subscribeOutput)

    s.consumeSubscription(queueInfo, handler)
    s.consumeSubscription(queueInfo, deadLettersQueueInfo, handler)

    return nil
}
@ -8,7 +8,13 @@ import (
    "github.com/stretchr/testify/require"
)

type testUnitFixture struct {
    metadata pubsub.Metadata
    name     string
}

func Test_parseTopicArn(t *testing.T) {
    t.Parallel()
    // no further guarantees are made about this function
    r := require.New(t)
    r.Equal("qqnoob", parseTopicArn("arn:aws:sqs:us-east-1:000000000000:qqnoob"))
@ -16,6 +22,7 @@ func Test_parseTopicArn(t *testing.T) {

// Verify that all metadata ends up in the correct spot
func Test_getSnsSqsMetatdata_AllConfiguration(t *testing.T) {
    t.Parallel()
    r := require.New(t)
    l := logger.NewLogger("SnsSqs unit test")
    l.SetOutputLevel(logger.DebugLevel)
@ -30,10 +37,12 @@ func Test_getSnsSqsMetatdata_AllConfiguration(t *testing.T) {
        "secretKey":                "s",
        "sessionToken":             "t",
        "region":                   "r",
        "sqsDeadLettersQueueName":  "q",
        "messageVisibilityTimeout": "2",
        "messageRetryLimit":        "3",
        "messageWaitTimeSeconds":   "4",
        "messageMaxNumber":         "5",
        "messageReceiveLimit":      "6",
    }})

    r.NoError(err)
@ -44,13 +53,16 @@ func Test_getSnsSqsMetatdata_AllConfiguration(t *testing.T) {
    r.Equal("s", md.SecretKey)
    r.Equal("t", md.SessionToken)
    r.Equal("r", md.Region)
    r.Equal("q", md.sqsDeadLettersQueueName)
    r.Equal(int64(2), md.messageVisibilityTimeout)
    r.Equal(int64(3), md.messageRetryLimit)
    r.Equal(int64(4), md.messageWaitTimeSeconds)
    r.Equal(int64(5), md.messageMaxNumber)
    r.Equal(int64(6), md.messageReceiveLimit)
}

func Test_getSnsSqsMetatdata_defaults(t *testing.T) {
    t.Parallel()
    r := require.New(t)
    l := logger.NewLogger("SnsSqs unit test")
    l.SetOutputLevel(logger.DebugLevel)
@ -80,6 +92,7 @@ func Test_getSnsSqsMetatdata_defaults(t *testing.T) {
}

func Test_getSnsSqsMetatdata_legacyaliases(t *testing.T) {
    t.Parallel()
    r := require.New(t)
    l := logger.NewLogger("SnsSqs unit test")
    l.SetOutputLevel(logger.DebugLevel)
@ -107,117 +120,122 @@ func Test_getSnsSqsMetatdata_legacyaliases(t *testing.T) {
    r.Equal(int64(10), md.messageMaxNumber)
}

func Test_getSnsSqsMetatdata_invalidMessageVisibility(t *testing.T) {
func testMetadataParsingShouldFail(t *testing.T, metadata pubsub.Metadata, l logger.Logger) {
    t.Parallel()
    r := require.New(t)
    l := logger.NewLogger("SnsSqs unit test")
    l.SetOutputLevel(logger.DebugLevel)

    ps := snsSqs{
        logger: l,
    }

    md, err := ps.getSnsSqsMetatdata(pubsub.Metadata{Properties: map[string]string{
        "consumerID":               "consumer",
        "Endpoint":                 "endpoint",
        "AccessKey":                "acctId",
        "SecretKey":                "secret",
        "awsToken":                 "token",
        "Region":                   "region",
        "messageVisibilityTimeout": "-100",
    }})
    md, err := ps.getSnsSqsMetatdata(metadata)

    r.Error(err)
    r.Nil(md)
}

func Test_getSnsSqsMetatdata_invalidMessageRetryLimit(t *testing.T) {
    r := require.New(t)
    l := logger.NewLogger("SnsSqs unit test")
    l.SetOutputLevel(logger.DebugLevel)
    ps := snsSqs{
        logger: l,
func Test_getSnsSqsMetatdata_invalidMetadataSetup(t *testing.T) {
    t.Parallel()

    fixtures := []testUnitFixture{
        {
            metadata: pubsub.Metadata{Properties: map[string]string{
                "consumerID":          "consumer",
                "Endpoint":            "endpoint",
                "AccessKey":           "acctId",
                "SecretKey":           "secret",
                "awsToken":            "token",
                "Region":              "region",
                "messageReceiveLimit": "100",
            }},
            name: "deadletters receive limit without deadletters queue name",
        },
        {
            metadata: pubsub.Metadata{Properties: map[string]string{
                "consumerID":              "consumer",
                "Endpoint":                "endpoint",
                "AccessKey":               "acctId",
                "SecretKey":               "secret",
                "awsToken":                "token",
                "Region":                  "region",
                "sqsDeadLettersQueueName": "my-queue",
            }},
            name: "deadletters message queue without deadletters receive limit",
        },
        {
            metadata: pubsub.Metadata{Properties: map[string]string{
                "consumerID":       "consumer",
                "Endpoint":         "endpoint",
                "AccessKey":        "acctId",
                "SecretKey":        "secret",
                "awsToken":         "token",
                "Region":           "region",
                "messageMaxNumber": "-100",
            }},
            name: "illegal message max number (negative, too low)",
        },
        {
            metadata: pubsub.Metadata{Properties: map[string]string{
                "consumerID":       "consumer",
                "Endpoint":         "endpoint",
                "AccessKey":        "acctId",
                "SecretKey":        "secret",
                "awsToken":         "token",
                "Region":           "region",
                "messageMaxNumber": "100",
            }},
            name: "illegal message max number (too high)",
        },
        {
            metadata: pubsub.Metadata{Properties: map[string]string{
                "consumerID":             "consumer",
                "Endpoint":               "endpoint",
                "AccessKey":              "acctId",
                "SecretKey":              "secret",
                "awsToken":               "token",
                "Region":                 "region",
                "messageWaitTimeSeconds": "0",
            }},
            name: "invalid wait time seconds (too low)",
        },
        {
            metadata: pubsub.Metadata{Properties: map[string]string{
                "consumerID":               "consumer",
                "Endpoint":                 "endpoint",
                "AccessKey":                "acctId",
                "SecretKey":                "secret",
                "awsToken":                 "token",
                "Region":                   "region",
                "messageVisibilityTimeout": "-100",
            }},
            name: "invalid message visibility",
        },
        {
            metadata: pubsub.Metadata{Properties: map[string]string{
                "consumerID":        "consumer",
                "Endpoint":          "endpoint",
                "AccessKey":         "acctId",
                "SecretKey":         "secret",
                "awsToken":          "token",
                "Region":            "region",
                "messageRetryLimit": "-100",
            }},
            name: "invalid message retry limit",
        },
    }

    md, err := ps.getSnsSqsMetatdata(pubsub.Metadata{Properties: map[string]string{
        "consumerID":        "consumer",
        "Endpoint":          "endpoint",
        "AccessKey":         "acctId",
        "SecretKey":         "secret",
        "awsToken":          "token",
        "Region":            "region",
        "messageRetryLimit": "-100",
    }})

    r.Error(err)
    r.Nil(md)
}

func Test_getSnsSqsMetatdata_invalidWaitTimeSecondsTooLow(t *testing.T) {
    r := require.New(t)
    l := logger.NewLogger("SnsSqs unit test")
    l.SetOutputLevel(logger.DebugLevel)
    ps := snsSqs{
        logger: l,

    for _, tc := range fixtures {
        t.Run(tc.name, func(t *testing.T) {
            testMetadataParsingShouldFail(t, tc.metadata, l)
        })
    }

    md, err := ps.getSnsSqsMetatdata(pubsub.Metadata{Properties: map[string]string{
        "consumerID":             "consumer",
        "Endpoint":               "endpoint",
        "AccessKey":              "acctId",
        "SecretKey":              "secret",
        "awsToken":               "token",
        "Region":                 "region",
        "messageWaitTimeSeconds": "0",
    }})

    r.Error(err)
    r.Nil(md)
}

func Test_getSnsSqsMetatdata_invalidMessageMaxNumberTooHigh(t *testing.T) {
    r := require.New(t)
    l := logger.NewLogger("SnsSqs unit test")
    l.SetOutputLevel(logger.DebugLevel)
    ps := snsSqs{
        logger: l,
    }

    md, err := ps.getSnsSqsMetatdata(pubsub.Metadata{Properties: map[string]string{
        "consumerID":       "consumer",
        "Endpoint":         "endpoint",
        "AccessKey":        "acctId",
        "SecretKey":        "secret",
        "awsToken":         "token",
        "Region":           "region",
        "messageMaxNumber": "100",
    }})

    r.Error(err)
    r.Nil(md)
}

func Test_getSnsSqsMetatdata_invalidMessageMaxNumberTooLow(t *testing.T) {
    r := require.New(t)
    l := logger.NewLogger("SnsSqs unit test")
    l.SetOutputLevel(logger.DebugLevel)
    ps := snsSqs{
        logger: l,
    }

    md, err := ps.getSnsSqsMetatdata(pubsub.Metadata{Properties: map[string]string{
        "consumerID":       "consumer",
        "Endpoint":         "endpoint",
        "AccessKey":        "acctId",
        "SecretKey":        "secret",
        "awsToken":         "token",
        "Region":           "region",
        "messageMaxNumber": "-100",
    }})

    r.Error(err)
    r.Nil(md)
}

func Test_parseInt64(t *testing.T) {
    t.Parallel()
    r := require.New(t)
    number, err := parseInt64("applesauce", "propertyName")
    r.EqualError(err, "parsing propertyName failed with: strconv.Atoi: parsing \"applesauce\": invalid syntax")
@ -235,6 +253,7 @@ func Test_parseInt64(t *testing.T) {
}

func Test_replaceNameToAWSSanitizedName(t *testing.T) {
    t.Parallel()
    r := require.New(t)

    s := `Some_invalid-name // for an AWS resource &*()*&&^Some invalid name // for an AWS resource &*()*&&^Some invalid
|
|||
// GCPPubSubMetaData pubsub metadata
|
||||
type metadata struct {
|
||||
consumerID string
|
||||
DisableEntityManagement bool
|
||||
Type string
|
||||
IdentityProjectID string
|
||||
ProjectID string
|
||||
|
@ -15,4 +14,6 @@ type metadata struct {
|
|||
TokenURI string
|
||||
AuthProviderCertURL string
|
||||
ClientCertURL string
|
||||
DisableEntityManagement bool
|
||||
EnableMessageOrdering bool
|
||||
}
|
||||
|
|
|
@ -29,6 +29,7 @@ const (
|
|||
metadataClientX509CertURLKey = "clientX509CertUrl"
|
||||
metadataPrivateKeyKey = "privateKey"
|
||||
metadataDisableEntityManagementKey = "disableEntityManagement"
|
||||
metadataEnableMessageOrderingKey = "enableMessageOrdering"
|
||||
)
|
||||
|
||||
// GCPPubSub type
|
||||
|
@ -123,6 +124,12 @@ func createMetadata(pubSubMetadata pubsub.Metadata) (*metadata, error) {
|
|||
}
|
||||
}
|
||||
|
||||
if val, found := pubSubMetadata.Properties[metadataEnableMessageOrderingKey]; found && val != "" {
|
||||
if boolVal, err := strconv.ParseBool(val); err == nil {
|
||||
result.EnableMessageOrdering = boolVal
|
||||
}
|
||||
}
|
||||
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
|
@ -277,7 +284,7 @@ func (g *GCPPubSub) ensureSubscription(subscription string, topic string) error
|
|||
exists, subErr := entity.Exists(context.Background())
|
||||
if !exists {
|
||||
_, subErr = g.client.CreateSubscription(context.Background(), managedSubscription,
|
||||
gcppubsub.SubscriptionConfig{Topic: g.getTopic(topic)})
|
||||
gcppubsub.SubscriptionConfig{Topic: g.getTopic(topic), EnableMessageOrdering: g.metadata.EnableMessageOrdering})
|
||||
}
|
||||
|
||||
return subErr
|
||||
|
|
|
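Enabling ordering on the subscription is only half of the contract: GCP Pub/Sub orders delivery per ordering key, so the publisher must set one too. A minimal sketch using the cloud.google.com/go/pubsub client directly (topic name and key are illustrative; this publisher-side step is outside the scope of this diff):

func publishOrdered(ctx context.Context, client *gcppubsub.Client) error {
    topic := client.Topic("demo-topic") // illustrative topic name
    topic.EnableMessageOrdering = true  // must also be enabled on the publisher
    res := topic.Publish(ctx, &gcppubsub.Message{
        Data:        []byte("event-1"),
        OrderingKey: "customer-42", // messages sharing a key arrive in publish order
    })
    _, err := res.Get(ctx) // wait for the server acknowledgement
    return err
}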
@ -22,6 +22,7 @@ func TestInit(t *testing.T) {
            "identityProjectId":     "project1",
            "tokenUri":              "https://token",
            "type":                  "serviceaccount",
            "enableMessageOrdering": "true",
        }
        b, err := createMetadata(m)
        assert.Nil(t, err)
@ -36,6 +37,7 @@ func TestInit(t *testing.T) {
        assert.Equal(t, "project1", b.IdentityProjectID)
        assert.Equal(t, "https://token", b.TokenURI)
        assert.Equal(t, "serviceaccount", b.Type)
        assert.Equal(t, true, b.EnableMessageOrdering)
    })

    t.Run("metadata is correct with implicit creds", func(t *testing.T) {
@ -0,0 +1,56 @@
package inmemory

import (
    "context"

    "github.com/asaskevich/EventBus"
    "github.com/dapr/components-contrib/pubsub"
    "github.com/dapr/kit/logger"
)

type bus struct {
    bus EventBus.Bus
    ctx context.Context
    log logger.Logger
}

func New(logger logger.Logger) pubsub.PubSub {
    return &bus{
        log: logger,
    }
}

func (a *bus) Close() error {
    return nil
}

func (a *bus) Features() []pubsub.Feature {
    return nil
}

func (a *bus) Init(metadata pubsub.Metadata) error {
    a.bus = EventBus.New()
    a.ctx = context.Background()

    return nil
}

func (a *bus) Publish(req *pubsub.PublishRequest) error {
    a.bus.Publish(req.Topic, a.ctx, req.Data)

    return nil
}

func (a *bus) Subscribe(req pubsub.SubscribeRequest, handler pubsub.Handler) error {
    return a.bus.Subscribe(req.Topic, func(ctx context.Context, data []byte) {
        for i := 0; i < 10; i++ {
            if err := handler(ctx, &pubsub.NewMessage{Data: data, Topic: req.Topic, Metadata: req.Metadata}); err != nil {
                a.log.Error(err)

                continue
            }

            return
        }
    })
}
@ -0,0 +1,71 @@
package inmemory

import (
    "context"
    "errors"
    "testing"

    "github.com/dapr/components-contrib/pubsub"
    "github.com/dapr/kit/logger"
    "github.com/stretchr/testify/assert"
)

func TestNewInMemoryBus(t *testing.T) {
    bus := New(logger.NewLogger("test"))
    bus.Init(pubsub.Metadata{})

    ch := make(chan []byte)
    bus.Subscribe(pubsub.SubscribeRequest{Topic: "demo"}, func(ctx context.Context, msg *pubsub.NewMessage) error {
        return publish(ch, msg)
    })

    bus.Publish(&pubsub.PublishRequest{Data: []byte("ABCD"), Topic: "demo"})
    assert.Equal(t, "ABCD", string(<-ch))
}

func TestMultipleSubscribers(t *testing.T) {
    bus := New(logger.NewLogger("test"))
    bus.Init(pubsub.Metadata{})

    ch1 := make(chan []byte)
    ch2 := make(chan []byte)
    bus.Subscribe(pubsub.SubscribeRequest{Topic: "demo"}, func(ctx context.Context, msg *pubsub.NewMessage) error {
        return publish(ch1, msg)
    })

    bus.Subscribe(pubsub.SubscribeRequest{Topic: "demo"}, func(ctx context.Context, msg *pubsub.NewMessage) error {
        return publish(ch2, msg)
    })

    bus.Publish(&pubsub.PublishRequest{Data: []byte("ABCD"), Topic: "demo"})

    assert.Equal(t, "ABCD", string(<-ch1))
    assert.Equal(t, "ABCD", string(<-ch2))
}

func TestRetry(t *testing.T) {
    bus := New(logger.NewLogger("test"))
    bus.Init(pubsub.Metadata{})

    ch := make(chan []byte)
    i := -1

    bus.Subscribe(pubsub.SubscribeRequest{Topic: "demo"}, func(ctx context.Context, msg *pubsub.NewMessage) error {
        i++
        if i < 5 {
            return errors.New("if at first you don't succeed")
        }

        return publish(ch, msg)
    })

    bus.Publish(&pubsub.PublishRequest{Data: []byte("ABCD"), Topic: "demo"})
    assert.Equal(t, "ABCD", string(<-ch))
    assert.Equal(t, 5, i)
}

func publish(ch chan []byte, msg *pubsub.NewMessage) error {
    go func() { ch <- msg.Data }()

    return nil
}
@ -0,0 +1,154 @@
package jetstream

import (
    "context"
    "errors"
    "time"

    "github.com/dapr/components-contrib/pubsub"
    "github.com/dapr/kit/logger"
    "github.com/dapr/kit/retry"
    "github.com/nats-io/nats.go"
)

type jetstreamPubSub struct {
    nc   *nats.Conn
    jsc  nats.JetStreamContext
    l    logger.Logger
    meta metadata

    ctx           context.Context
    ctxCancel     context.CancelFunc
    backOffConfig retry.Config
}

func NewJetStream(logger logger.Logger) pubsub.PubSub {
    return &jetstreamPubSub{l: logger}
}

func (js *jetstreamPubSub) Init(metadata pubsub.Metadata) error {
    var err error
    js.meta, err = parseMetadata(metadata)
    if err != nil {
        return err
    }

    var opts []nats.Option
    opts = append(opts, nats.Name(js.meta.name))

    js.nc, err = nats.Connect(js.meta.natsURL, opts...)
    if err != nil {
        return err
    }
    js.l.Debugf("Connected to nats at %s", js.meta.natsURL)

    js.jsc, err = js.nc.JetStream()
    if err != nil {
        return err
    }

    js.ctx, js.ctxCancel = context.WithCancel(context.Background())

    // Default retry configuration is used if no backOff properties are set.
    if err := retry.DecodeConfigWithPrefix(
        &js.backOffConfig,
        metadata.Properties,
        "backOff"); err != nil {
        return err
    }

    js.l.Debug("JetStream initialization complete")

    return nil
}

func (js *jetstreamPubSub) Features() []pubsub.Feature {
    return nil
}

func (js *jetstreamPubSub) Publish(req *pubsub.PublishRequest) error {
    js.l.Debugf("Publishing topic %v with data: %v", req.Topic, req.Data)
    _, err := js.jsc.Publish(req.Topic, req.Data)

    return err
}

func (js *jetstreamPubSub) Subscribe(req pubsub.SubscribeRequest, handler pubsub.Handler) error {
    var opts []nats.SubOpt

    if v := js.meta.durableName; v != "" {
        opts = append(opts, nats.Durable(v))
    }

    if v := js.meta.startTime; !v.IsZero() {
        opts = append(opts, nats.StartTime(v))
    } else if v := js.meta.startSequence; v > 0 {
        opts = append(opts, nats.StartSequence(v))
    } else if js.meta.deliverAll {
        opts = append(opts, nats.DeliverAll())
    } else {
        opts = append(opts, nats.DeliverLast())
    }

    if js.meta.flowControl {
        opts = append(opts, nats.EnableFlowControl())
    }

    natsHandler := func(m *nats.Msg) {
        jsm, err := m.Metadata()
        if err != nil {
            // If we get an error, then we don't have a valid JetStream
            // message.
            js.l.Error(err)

            return
        }

        operation := func() error {
            js.l.Debugf("Processing JetStream message %s/%d", m.Subject,
                jsm.Sequence)
            opErr := handler(js.ctx, &pubsub.NewMessage{
                Topic: m.Subject,
                Data:  m.Data,
            })
            if opErr != nil {
                return opErr
            }

            return m.Ack()
        }
        notify := func(nerr error, d time.Duration) {
            js.l.Errorf("Error processing JetStream message: %s/%d. Retrying...",
                m.Subject, jsm.Sequence)
        }
        recovered := func() {
            js.l.Infof("Successfully processed JetStream message after it previously failed: %s/%d",
                m.Subject, jsm.Sequence)
        }
        backOff := js.backOffConfig.NewBackOffWithContext(js.ctx)

        err = retry.NotifyRecover(operation, backOff, notify, recovered)
        if err != nil && !errors.Is(err, context.Canceled) {
            js.l.Errorf("Error processing message and retries are exhausted: %s/%d.",
                m.Subject, jsm.Sequence)
        }
    }

    var err error
    if queue := js.meta.queueGroupName; queue != "" {
        js.l.Debugf("nats: subscribed to subject %s with queue group %s",
            req.Topic, js.meta.queueGroupName)
        _, err = js.jsc.QueueSubscribe(req.Topic, queue, natsHandler, opts...)
    } else {
        js.l.Debugf("nats: subscribed to subject %s", req.Topic)
        _, err = js.jsc.Subscribe(req.Topic, natsHandler, opts...)
    }

    return err
}

func (js *jetstreamPubSub) Close() error {
    js.ctxCancel()

    return js.nc.Drain()
}
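Init above decodes optional retry settings from the component metadata under the backOff prefix, falling back to defaults when none are present. A sketch of how a component might configure it; the concrete key names (prefix plus field name) are an assumption about dapr/kit's retry package, not something this diff pins down:

md := pubsub.Metadata{Properties: map[string]string{
    "natsURL": "nats://localhost:4222",
    // Assumed dapr/kit retry keys: "backOff" prefix + field name.
    "backOffPolicy":     "exponential",
    "backOffMaxRetries": "3",
}}

var cfg retry.Config
if err := retry.DecodeConfigWithPrefix(&cfg, md.Properties, "backOff"); err != nil {
    // invalid retry settings in metadata
}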
@ -0,0 +1,56 @@
package jetstream

import (
    "fmt"
    "strconv"
    "time"

    "github.com/dapr/components-contrib/pubsub"
)

type metadata struct {
    natsURL string

    name           string
    durableName    string
    queueGroupName string
    startSequence  uint64
    startTime      time.Time
    deliverAll     bool
    flowControl    bool
}

func parseMetadata(psm pubsub.Metadata) (metadata, error) {
    var m metadata

    if v, ok := psm.Properties["natsURL"]; ok && v != "" {
        m.natsURL = v
    } else {
        return metadata{}, fmt.Errorf("missing nats URL")
    }

    if m.name = psm.Properties["name"]; m.name == "" {
        m.name = "dapr.io - pubsub.jetstream"
    }

    m.durableName = psm.Properties["durableName"]
    m.queueGroupName = psm.Properties["queueGroupName"]

    if v, err := strconv.ParseUint(psm.Properties["startSequence"], 10, 64); err == nil {
        m.startSequence = v
    }

    if v, err := strconv.ParseInt(psm.Properties["startTime"], 10, 64); err == nil {
        m.startTime = time.Unix(v, 0)
    }

    if v, err := strconv.ParseBool(psm.Properties["deliverAll"]); err == nil {
        m.deliverAll = v
    }

    if v, err := strconv.ParseBool(psm.Properties["flowControl"]); err == nil {
        m.flowControl = v
    }

    return m, nil
}
@ -0,0 +1,45 @@
package jetstream

import (
    "reflect"
    "testing"
    "time"

    "github.com/dapr/components-contrib/pubsub"
)

func TestParseMetadata(t *testing.T) {
    psm := pubsub.Metadata{
        Properties: map[string]string{
            "natsURL":        "nats://localhost:4222",
            "name":           "myName",
            "durableName":    "myDurable",
            "queueGroupName": "myQueue",
            "startSequence":  "1",
            "startTime":      "1629328511",
            "deliverAll":     "true",
            "flowControl":    "true",
        },
    }

    ts := time.Unix(1629328511, 0)

    want := metadata{
        natsURL:        "nats://localhost:4222",
        name:           "myName",
        durableName:    "myDurable",
        queueGroupName: "myQueue",
        startSequence:  1,
        startTime:      ts,
        deliverAll:     true,
        flowControl:    true,
    }

    got, err := parseMetadata(psm)
    if err != nil {
        t.Fatal(err)
    }
    if !reflect.DeepEqual(got, want) {
        t.Fatalf("unexpected metadata: got=%v, want=%v", got, want)
    }
}
@ -36,6 +36,7 @@ type Kafka struct {
    authRequired  bool
    saslUsername  string
    saslPassword  string
    initialOffset int64
    cg            sarama.ConsumerGroup
    topics        map[string]bool
    cancel        context.CancelFunc
@ -52,6 +53,7 @@ type kafkaMetadata struct {
    AuthRequired    bool   `json:"authRequired"`
    SaslUsername    string `json:"saslUsername"`
    SaslPassword    string `json:"saslPassword"`
    InitialOffset   int64  `json:"initialOffset"`
    MaxMessageBytes int    `json:"maxMessageBytes"`
}

@ -120,9 +122,11 @@ func (k *Kafka) Init(metadata pubsub.Metadata) error {
    k.brokers = meta.Brokers
    k.consumerGroup = meta.ConsumerGroup
    k.authRequired = meta.AuthRequired
    k.initialOffset = meta.InitialOffset

    config := sarama.NewConfig()
    config.Version = sarama.V2_0_0_0
    config.Consumer.Offsets.Initial = k.initialOffset

    if meta.ClientID != "" {
        config.ClientID = meta.ClientID
@ -295,6 +299,12 @@ func (k *Kafka) getKafkaMetadata(metadata pubsub.Metadata) (*kafkaMetadata, erro
        k.logger.Debugf("Using %s as ClientID", meta.ClientID)
    }

    initialOffset, err := parseInitialOffset(metadata.Properties["initialOffset"])
    if err != nil {
        return nil, err
    }
    meta.InitialOffset = initialOffset

    if val, ok := metadata.Properties["brokers"]; ok && val != "" {
        meta.Brokers = strings.Split(val, ",")
    } else {
@ -394,3 +404,16 @@ type asBase64String []byte
func (s asBase64String) String() string {
    return base64.StdEncoding.EncodeToString(s)
}

func parseInitialOffset(value string) (initialOffset int64, err error) {
    initialOffset = sarama.OffsetNewest // Default
    if strings.EqualFold(value, "oldest") {
        initialOffset = sarama.OffsetOldest
    } else if strings.EqualFold(value, "newest") {
        initialOffset = sarama.OffsetNewest
    } else if value != "" {
        return 0, fmt.Errorf("kafka error: invalid initialOffset: %s", value)
    }

    return initialOffset, err
}
@ -8,9 +8,11 @@ package kafka
import (
    "testing"

    "github.com/Shopify/sarama"
    "github.com/dapr/components-contrib/pubsub"
    "github.com/dapr/kit/logger"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func getKafkaPubsub() *Kafka {
@ -97,3 +99,16 @@ func TestInvalidAuthRequiredFlag(t *testing.T) {

    assert.Equal(t, "kafka error: invalid value for 'authRequired' attribute", err.Error())
}

func TestInitialOffset(t *testing.T) {
    m := pubsub.Metadata{}
    m.Properties = map[string]string{"consumerGroup": "a", "brokers": "a", "authRequired": "false", "initialOffset": "oldest"}
    k := getKafkaPubsub()
    meta, err := k.getKafkaMetadata(m)
    require.NoError(t, err)
    assert.Equal(t, sarama.OffsetOldest, meta.InitialOffset)
    m.Properties["initialOffset"] = "newest"
    meta, err = k.getKafkaMetadata(m)
    require.NoError(t, err)
    assert.Equal(t, sarama.OffsetNewest, meta.InitialOffset)
}
@ -45,6 +45,30 @@ func NewAzureKeyvaultSecretStore(logger logger.Logger) secretstores.SecretStore

// Init creates an Azure Key Vault client
func (k *keyvaultSecretStore) Init(metadata secretstores.Metadata) error {
    // Fix for maintaining backwards compatibility with a change introduced in 1.3 that allowed specifying an Azure environment by setting a FQDN for vault name
    // This should be considered deprecated and users should rely on the "azureEnvironment" metadata instead, but it's maintained here for backwards-compatibility
    if vaultName, ok := metadata.Properties[componentVaultName]; ok {
        keyVaultSuffixToEnvironment := map[string]string{
            ".vault.azure.net":         "AZUREPUBLICCLOUD",
            ".vault.azure.cn":          "AZURECHINACLOUD",
            ".vault.usgovcloudapi.net": "AZUREUSGOVERNMENTCLOUD",
            ".vault.microsoftazure.de": "AZUREGERMANCLOUD",
        }
        for suffix, environment := range keyVaultSuffixToEnvironment {
            if strings.HasSuffix(vaultName, suffix) {
                metadata.Properties["azureEnvironment"] = environment
                vaultName = strings.TrimSuffix(vaultName, suffix)
                if strings.HasPrefix(vaultName, "https://") {
                    vaultName = strings.TrimPrefix(vaultName, "https://")
                }
                metadata.Properties[componentVaultName] = vaultName

                break
            }
        }
    }

    // Initialization code
    settings, err := azauth.NewEnvironmentSettings("keyvault", metadata.Properties)
    if err != nil {
        return err
@ -0,0 +1,83 @@
// ------------------------------------------------------------
// Copyright (c) Microsoft Corporation and Dapr Contributors.
// Licensed under the MIT License.
// ------------------------------------------------------------
package keyvault

import (
    "testing"

    "github.com/dapr/components-contrib/secretstores"
    "github.com/dapr/kit/logger"
    "github.com/stretchr/testify/assert"
)

func TestInit(t *testing.T) {
    m := secretstores.Metadata{}
    s := NewAzureKeyvaultSecretStore(logger.NewLogger("test"))
    t.Run("Init with valid metadata", func(t *testing.T) {
        m.Properties = map[string]string{
            "vaultName":         "foo",
            "azureTenantId":     "00000000-0000-0000-0000-000000000000",
            "azureClientId":     "00000000-0000-0000-0000-000000000000",
            "azureClientSecret": "passw0rd",
        }
        err := s.Init(m)
        assert.Nil(t, err)
        kv, ok := s.(*keyvaultSecretStore)
        assert.True(t, ok)
        assert.Equal(t, kv.vaultName, "foo")
        assert.Equal(t, kv.vaultDNSSuffix, "vault.azure.net")
        assert.NotNil(t, kv.vaultClient)
        assert.NotNil(t, kv.vaultClient.Authorizer)
    })
    t.Run("Init with valid metadata and Azure environment", func(t *testing.T) {
        m.Properties = map[string]string{
            "vaultName":         "foo",
            "azureTenantId":     "00000000-0000-0000-0000-000000000000",
            "azureClientId":     "00000000-0000-0000-0000-000000000000",
            "azureClientSecret": "passw0rd",
            "azureEnvironment":  "AZURECHINACLOUD",
        }
        err := s.Init(m)
        assert.Nil(t, err)
        kv, ok := s.(*keyvaultSecretStore)
        assert.True(t, ok)
        assert.Equal(t, kv.vaultName, "foo")
        assert.Equal(t, kv.vaultDNSSuffix, "vault.azure.cn")
        assert.NotNil(t, kv.vaultClient)
        assert.NotNil(t, kv.vaultClient.Authorizer)
    })
    t.Run("Init with Azure environment as part of vaultName FQDN (1) - legacy", func(t *testing.T) {
        m.Properties = map[string]string{
            "vaultName":         "foo.vault.azure.cn",
            "azureTenantId":     "00000000-0000-0000-0000-000000000000",
            "azureClientId":     "00000000-0000-0000-0000-000000000000",
            "azureClientSecret": "passw0rd",
        }
        err := s.Init(m)
        assert.Nil(t, err)
        kv, ok := s.(*keyvaultSecretStore)
        assert.True(t, ok)
        assert.Equal(t, kv.vaultName, "foo")
        assert.Equal(t, kv.vaultDNSSuffix, "vault.azure.cn")
        assert.NotNil(t, kv.vaultClient)
        assert.NotNil(t, kv.vaultClient.Authorizer)
    })
    t.Run("Init with Azure environment as part of vaultName FQDN (2) - legacy", func(t *testing.T) {
        m.Properties = map[string]string{
            "vaultName":         "https://foo.vault.usgovcloudapi.net",
            "azureTenantId":     "00000000-0000-0000-0000-000000000000",
            "azureClientId":     "00000000-0000-0000-0000-000000000000",
            "azureClientSecret": "passw0rd",
        }
        err := s.Init(m)
        assert.Nil(t, err)
        kv, ok := s.(*keyvaultSecretStore)
        assert.True(t, ok)
        assert.Equal(t, kv.vaultName, "foo")
        assert.Equal(t, kv.vaultDNSSuffix, "vault.usgovcloudapi.net")
        assert.NotNil(t, kv.vaultClient)
        assert.NotNil(t, kv.vaultClient.Authorizer)
    })
}
@ -11,16 +11,19 @@ import (
    "fmt"
    "io/ioutil"
    "os"
    "reflect"
    "strconv"
    "strings"

    "github.com/dapr/components-contrib/secretstores"
    "github.com/dapr/kit/config"
    "github.com/dapr/kit/logger"
)

type localSecretStoreMetaData struct {
    SecretsFile     string `json:"secretsFile"`
    NestedSeparator string `json:"nestedSeparator"`
    SecretsFile     string `mapstructure:"secretsFile"`
    NestedSeparator string `mapstructure:"nestedSeparator"`
    MultiValued     bool   `mapstructure:"multiValued"`
}

type localSecretStore struct {
@ -28,7 +31,7 @@ type localSecretStore struct {
    nestedSeparator string
    currenContext   []string
    currentPath     string
    secrets         map[string]string
    secrets         map[string]interface{}
    readLocalFileFn func(secretsFile string) (map[string]interface{}, error)
    logger          logger.Logger
}
@ -57,14 +60,28 @@ func (j *localSecretStore) Init(metadata secretstores.Metadata) error {
        j.readLocalFileFn = j.readLocalFile
    }

    j.secrets = map[string]string{}

    jsonConfig, err := j.readLocalFileFn(meta.SecretsFile)
    if err != nil {
        return err
    }

    j.visitJSONObject(jsonConfig)
    if meta.MultiValued {
        allSecrets := map[string]interface{}{}
        for k, v := range jsonConfig {
            switch v := v.(type) {
            case string:
                allSecrets[k] = v
            case map[string]interface{}:
                j.secrets = make(map[string]interface{})
                j.visitJSONObject(v)
                allSecrets[k] = j.secrets
            }
        }
        j.secrets = allSecrets
    } else {
        j.secrets = map[string]interface{}{}
        j.visitJSONObject(jsonConfig)
    }

    return nil
}
@ -76,10 +93,25 @@ func (j *localSecretStore) GetSecret(req secretstores.GetSecretRequest) (secrets
        return secretstores.GetSecretResponse{}, fmt.Errorf("secret %s not found", req.Name)
    }

    var data map[string]string
    switch v := secretValue.(type) {
    case string:
        data = map[string]string{
            req.Name: v,
        }
    case map[string]interface{}:
        data = make(map[string]string, len(v))
        for key, value := range v {
            data[key] = fmt.Sprint(value)
        }
    case map[string]string:
        data = v
    default:
        return secretstores.GetSecretResponse{}, fmt.Errorf("unexpected type %q for secret value", reflect.TypeOf(v))
    }

    return secretstores.GetSecretResponse{
        Data: map[string]string{
            req.Name: secretValue,
        },
        Data: data,
    }, nil
}

@ -88,7 +120,22 @@ func (j *localSecretStore) BulkGetSecret(req secretstores.BulkGetSecretRequest)
    r := map[string]map[string]string{}

    for k, v := range j.secrets {
        r[k] = map[string]string{k: v}
        switch v := v.(type) {
        case string:
            r[k] = map[string]string{
                k: v,
            }
        case map[string]interface{}:
            data := make(map[string]string, len(v))
            for key, value := range v {
                data[key] = fmt.Sprint(value)
            }
            r[k] = data
        case map[string]string:
            r[k] = v
        default:
            return secretstores.BulkGetSecretResponse{}, fmt.Errorf("unexpected type %q for secret value", reflect.TypeOf(v))
        }
    }

    return secretstores.BulkGetSecretResponse{
@ -169,16 +216,12 @@ func (j *localSecretStore) combine(values []string) string {
}

func (j *localSecretStore) getLocalSecretStoreMetadata(spec secretstores.Metadata) (*localSecretStoreMetaData, error) {
    b, err := json.Marshal(spec.Properties)
    var meta localSecretStoreMetaData
    err := config.Decode(spec.Properties, &meta)
    if err != nil {
        return nil, err
    }

    var meta localSecretStoreMetaData
    err = json.Unmarshal(b, &meta)
    if err != nil {
        return nil, err
    }
    if meta.SecretsFile == "" {
        return nil, fmt.Errorf("missing local secrets file in metadata")
    }
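The metadata struct now carries mapstructure tags and is decoded with dapr/kit's config.Decode instead of a JSON round-trip. Assuming config.Decode performs a weakly typed mapstructure decode (which its use above implies), string metadata values can populate non-string fields such as the new MultiValued bool. A minimal sketch:

props := map[string]string{
    "secretsFile": "/tmp/secrets.json", // illustrative path
    "multiValued": "true",              // string in, bool out
}

var meta localSecretStoreMetaData
if err := config.Decode(props, &meta); err != nil {
    // handle decode failure
}
// meta.SecretsFile == "/tmp/secrets.json", meta.MultiValued == true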
@ -5,12 +5,14 @@
package file

import (
    "encoding/json"
    "fmt"
    "testing"

    "github.com/dapr/components-contrib/secretstores"
    "github.com/dapr/kit/logger"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

const secretValue = "secret"
@ -25,8 +27,8 @@ func TestInit(t *testing.T) {
    }
    t.Run("Init with valid metadata", func(t *testing.T) {
        m.Properties = map[string]string{
            "SecretsFile":     "a",
            "NestedSeparator": "a",
            "secretsFile":     "a",
            "nestedSeparator": "a",
        }
        err := s.Init(m)
        assert.Nil(t, err)
@ -34,7 +36,7 @@ func TestInit(t *testing.T) {

    t.Run("Init with missing metadata", func(t *testing.T) {
        m.Properties = map[string]string{
            "Dummy": "a",
            "dummy": "a",
        }
        err := s.Init(m)
        assert.NotNil(t, err)
@ -57,8 +59,8 @@ func TestSeparator(t *testing.T) {
    }
    t.Run("Init with custom separator", func(t *testing.T) {
        m.Properties = map[string]string{
            "SecretsFile":     "a",
            "NestedSeparator": ".",
            "secretsFile":     "a",
            "nestedSeparator": ".",
        }
        err := s.Init(m)
        assert.Nil(t, err)
@ -74,7 +76,7 @@ func TestSeparator(t *testing.T) {

    t.Run("Init with default separator", func(t *testing.T) {
        m.Properties = map[string]string{
            "SecretsFile": "a",
            "secretsFile": "a",
        }
        err := s.Init(m)
        assert.Nil(t, err)
@ -92,8 +94,8 @@ func TestSeparator(t *testing.T) {
func TestGetSecret(t *testing.T) {
    m := secretstores.Metadata{}
    m.Properties = map[string]string{
        "SecretsFile":     "a",
        "NestedSeparator": "a",
        "secretsFile":     "a",
        "nestedSeparator": "a",
    }
    s := localSecretStore{
        logger: logger.NewLogger("test"),
@ -130,8 +132,8 @@ func TestGetSecret(t *testing.T) {
func TestBulkGetSecret(t *testing.T) {
    m := secretstores.Metadata{}
    m.Properties = map[string]string{
        "SecretsFile":     "a",
        "NestedSeparator": "a",
        "secretsFile":     "a",
        "nestedSeparator": "a",
    }
    s := localSecretStore{
        logger: logger.NewLogger("test"),
@ -151,3 +153,60 @@ func TestBulkGetSecret(t *testing.T) {
        assert.Equal(t, "secret", output.Data["secret"]["secret"])
    })
}

func TestMultiValuedSecrets(t *testing.T) {
    m := secretstores.Metadata{}
    m.Properties = map[string]string{
        "secretsFile": "a",
        "multiValued": "true",
    }
    s := localSecretStore{
        logger: logger.NewLogger("test"),
        readLocalFileFn: func(secretsFile string) (map[string]interface{}, error) {
            //nolint:gosec
            secretsJSON := `
            {
                "parent": {
                    "child1": "12345",
                    "child2": {
                        "child3": "67890",
                        "child4": "00000"
                    }
                }
            }
            `
            var secrets map[string]interface{}
            err := json.Unmarshal([]byte(secretsJSON), &secrets)

            return secrets, err
        },
    }
    err := s.Init(m)
    require.NoError(t, err)

    t.Run("successfully retrieve a single multi-valued secret", func(t *testing.T) {
        req := secretstores.GetSecretRequest{
            Name: "parent",
        }
        resp, err := s.GetSecret(req)
        require.NoError(t, err)
        assert.Equal(t, map[string]string{
            "child1":        "12345",
            "child2:child3": "67890",
            "child2:child4": "00000",
        }, resp.Data)
    })

    t.Run("successfully retrieve multi-valued secrets", func(t *testing.T) {
        req := secretstores.BulkGetSecretRequest{}
        resp, err := s.BulkGetSecret(req)
        require.NoError(t, err)
        assert.Equal(t, map[string]map[string]string{
            "parent": {
                "child1":        "12345",
                "child2:child3": "67890",
                "child2:child4": "00000",
            },
        }, resp.Data)
    })
}
@ -91,7 +91,7 @@ func (r *StateStore) Init(metadata state.Metadata) error {
    if ok && customEndpoint != "" {
        URL, parseErr := url.Parse(fmt.Sprintf("%s/%s/%s", customEndpoint, meta.accountName, meta.containerName))
        if parseErr != nil {
            return err
            return parseErr
        }
        containerURL = azblob.NewContainerURL(*URL, p)
    } else {
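The one-line fix above addresses a classic Go pitfall: checking one error variable but returning another. Reduced to its essence (names are illustrative):

u, parseErr := url.Parse(raw)
if parseErr != nil {
    // Before: return err — 'err' is an unrelated (often nil) outer variable,
    // so the caller would see success despite the parse failure.
    return parseErr // After: return the error that was actually checked.
}
_ = u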
@ -15,8 +15,10 @@ import (

    "github.com/a8m/documentdb"
    "github.com/agrea/ptr"
    "github.com/google/uuid"
    jsoniter "github.com/json-iterator/go"

    "github.com/dapr/components-contrib/authentication/azure"
    "github.com/dapr/components-contrib/contenttype"
    "github.com/dapr/components-contrib/state"
    "github.com/dapr/kit/logger"
@ -99,9 +101,6 @@ func (c *StateStore) Init(meta state.Metadata) error {
    if m.URL == "" {
        return errors.New("url is required")
    }
    if m.MasterKey == "" {
        return errors.New("masterKey is required")
    }
    if m.Database == "" {
        return errors.New("database is required")
    }
@ -112,11 +111,25 @@ func (c *StateStore) Init(meta state.Metadata) error {
        return errors.New("contentType is required")
    }

    client := documentdb.New(m.URL, &documentdb.Config{
        MasterKey: &documentdb.Key{
    // Create the client; first, try authenticating with a master key, if present
    var config *documentdb.Config
    if m.MasterKey != "" {
        config = documentdb.NewConfig(&documentdb.Key{
            Key: m.MasterKey,
        },
    })
        })
    } else {
        // Fallback to using Azure AD
        env, errB := azure.NewEnvironmentSettings("cosmosdb", meta.Properties)
        if errB != nil {
            return errB
        }
        spt, errB := env.GetServicePrincipalToken()
        if errB != nil {
            return errB
        }
        config = documentdb.NewConfigWithServicePrincipal(spt)
    }
    client := documentdb.New(m.URL, config)

    dbs, err := client.QueryDatabases(&documentdb.Query{
        Query: "SELECT * FROM ROOT r WHERE r.id=@id",
@ -241,10 +254,10 @@ func (c *StateStore) Set(req *state.SetRequest) error {
	options := []documentdb.CallOption{documentdb.PartitionKey(partitionKey)}

	var etag string
	if req.ETag != nil {
		etag = *req.ETag
	}
	if req.ETag != nil {
		options = append(options, documentdb.IfMatch((*req.ETag)))
	}
	if req.Options.Concurrency == state.FirstWrite && (req.ETag == nil || *req.ETag == "") {
		etag := uuid.NewString()
		options = append(options, documentdb.IfMatch((etag)))
	}
	if req.Options.Consistency == state.Strong {

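A note on the new FirstWrite branch: when first-write concurrency is requested and the caller supplied no ETag, the store attaches a freshly generated UUID as the IfMatch condition. No stored document can carry that ETag, so the upsert can only succeed by creating the document, which yields first-write-wins semantics. A hedged usage sketch (the key, value, and store wiring are illustrative, not from this diff):

```go
package example

import (
	"log"

	"github.com/dapr/components-contrib/state"
)

// firstWriteOnce sketches the expected behavior: the first Set for a key
// succeeds, and later first-write Sets fail because the random IfMatch
// UUID can never match an existing document's ETag.
func firstWriteOnce(store state.Store) {
	req := &state.SetRequest{
		Key:   "orders||order-1",
		Value: map[string]string{"status": "new"},
		Options: state.SetStateOption{
			Concurrency: state.FirstWrite,
		},
	}
	if err := store.Set(req); err != nil {
		log.Println("first-write rejected:", err)
	}
}
```
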
|
@ -258,7 +271,7 @@ func (c *StateStore) Set(req *state.SetRequest) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = c.client.UpsertDocument(c.collection.Self, doc, options...)
|
||||
_, err = c.client.UpsertDocument(c.collection.Self, &doc, options...)
|
||||
|
||||
if err != nil {
|
||||
if req.ETag != nil {
|
||||
|
@ -295,11 +308,7 @@ func (c *StateStore) Delete(req *state.DeleteRequest) error {
|
|||
}
|
||||
|
||||
if req.ETag != nil {
|
||||
var etag string
|
||||
if req.ETag != nil {
|
||||
etag = *req.ETag
|
||||
}
|
||||
options = append(options, documentdb.IfMatch((etag)))
|
||||
options = append(options, documentdb.IfMatch((*req.ETag)))
|
||||
}
|
||||
if req.Options.Consistency == state.Strong {
|
||||
options = append(options, documentdb.ConsistencyLevel(documentdb.Strong))
|
||||
|
|
|
@ -242,7 +242,7 @@ func (m *MySQL) ensureStateTable(stateTableName string) error {
	// in on inserts and updates and is used for Optimistic Concurrency
	createTable := fmt.Sprintf(`CREATE TABLE %s (
			id varchar(255) NOT NULL PRIMARY KEY,
			value json NOT NULL,
			value text NOT NULL,
			insertDate TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
			updateDate TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
			eTag varchar(36) NOT NULL

@ -312,7 +312,20 @@ func (m *MySQL) deleteValue(req *state.DeleteRequest) error {
		m.tableName), req.Key, *req.ETag)
	}

	return m.returnNDBResults(result, err, 1)
	if err != nil {
		return err
	}

	rows, err := result.RowsAffected()
	if err != nil {
		return err
	}

	if rows != 1 && req.ETag != nil && *req.ETag != "" {
		return state.NewETagError(state.ETagMismatch, nil)
	}

	return nil
}

// BulkDelete removes multiple entries from the store

@ -397,9 +410,35 @@ func (m *MySQL) setValue(req *state.SetRequest) error {
		m.tableName), value, eTag, req.Key, *req.ETag)
	}

	// Have to pass 2 because if the insert has a conflict MySQL returns that
	// two rows affected
	return m.returnNDBResults(result, err, 2)
	if err != nil {
		if req.ETag != nil && *req.ETag != "" {
			return state.NewETagError(state.ETagMismatch, err)
		}

		return err
	}

	rows, err := result.RowsAffected()
	if err != nil {
		return err
	}

	if rows == 0 {
		err = fmt.Errorf(`rows affected error: no rows match given key '%s' and eTag '%s'`, req.Key, *req.ETag)
		err = state.NewETagError(state.ETagMismatch, err)
		m.logger.Error(err)

		return err
	}

	if rows > 2 {
		err = fmt.Errorf(`rows affected error: more than 2 row affected, expected 2, actual %d`, rows)
		m.logger.Error(err)

		return err
	}

	return nil
}

// BulkSet adds/updates multiple entities on store

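The deleted "pass 2" comment encodes a MySQL quirk that the new inline checks preserve: for INSERT ... ON DUPLICATE KEY UPDATE, the server reports 1 affected row for a fresh insert and 2 when the conflict branch updated an existing row, hence rows > 2 being the error threshold. A small sketch of that driver-level behavior (DSN, table, and values are illustrative):

```go
package main

import (
	"database/sql"
	"fmt"

	_ "github.com/go-sql-driver/mysql" // registers the "mysql" driver
)

// upsertRows performs an upsert and reports the affected-row count:
// 1 means the row was inserted, 2 means an existing row was updated.
func upsertRows(db *sql.DB) (int64, error) {
	res, err := db.Exec(
		`INSERT INTO state (id, value) VALUES (?, ?)
		 ON DUPLICATE KEY UPDATE value = VALUES(value)`,
		"key-1", "v2",
	)
	if err != nil {
		return 0, err
	}
	return res.RowsAffected()
}

func main() {
	db, err := sql.Open("mysql", "dapr:example@tcp(localhost:3306)/dapr_state")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	n, err := upsertRows(db)
	fmt.Println(n, err) // expect 1 on first run, 2 on the next
}
```
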
@ -498,41 +537,3 @@ func (m *MySQL) executeMulti(sets []state.SetRequest, deletes []state.DeleteRequ

	return err
}

// Verifies that the sql.Result affected no more than n number of rows and no
// errors exist. If zero rows were affected something is wrong and an error
// is returned.
func (m *MySQL) returnNDBResults(result sql.Result, err error, n int64) error {
	if err != nil {
		m.logger.Debug(err)

		return err
	}

	rowsAffected, resultErr := result.RowsAffected()

	if resultErr != nil {
		m.logger.Error(resultErr)

		return resultErr
	}

	if rowsAffected == 0 {
		noRowsErr := errors.New(
			`rows affected error: no rows match given key and eTag`)
		m.logger.Error(noRowsErr)

		return noRowsErr
	}

	if rowsAffected > n {
		tooManyRowsErr := fmt.Errorf(
			`rows affected error: more than %d row affected, expected %d, actual %d`,
			n, n, rowsAffected)
		m.logger.Error(tooManyRowsErr)

		return tooManyRowsErr
	}

	return nil
}

@ -6,6 +6,7 @@ package mysql

import (
	"database/sql"
	"errors"
	"fmt"
	"testing"

@ -274,6 +275,78 @@ func TestSetHandlesUpdate(t *testing.T) {
	assert.Nil(t, err)
}

func TestSetHandlesErr(t *testing.T) {
	// Arrange
	m, _ := mockDatabase(t)
	defer m.mySQL.Close()

	t.Run("error occurs when update with tag", func(t *testing.T) {
		m.mock1.ExpectExec("UPDATE state").WillReturnError(errors.New("error"))

		eTag := "946af561"
		request := createSetRequest()
		request.ETag = &eTag

		// Act
		err := m.mySQL.setValue(&request)

		// Assert
		assert.NotNil(t, err)
		assert.IsType(t, &state.ETagError{}, err)
		assert.Equal(t, err.(*state.ETagError).Kind(), state.ETagMismatch)
	})

	t.Run("error occurs when insert", func(t *testing.T) {
		m.mock1.ExpectExec("INSERT INTO state").WillReturnError(errors.New("error"))
		request := createSetRequest()

		// Act
		err := m.mySQL.setValue(&request)

		// Assert
		assert.NotNil(t, err)
		assert.Equal(t, "error", err.Error())
	})

	t.Run("insert on conflict", func(t *testing.T) {
		m.mock1.ExpectExec("INSERT INTO state").WillReturnResult(sqlmock.NewResult(1, 2))
		request := createSetRequest()

		// Act
		err := m.mySQL.setValue(&request)

		// Assert
		assert.Nil(t, err)
	})

	t.Run("too many rows error", func(t *testing.T) {
		m.mock1.ExpectExec("INSERT INTO state").WillReturnResult(sqlmock.NewResult(1, 3))
		request := createSetRequest()

		// Act
		err := m.mySQL.setValue(&request)

		// Assert
		assert.NotNil(t, err)
	})

	t.Run("no rows affected error", func(t *testing.T) {
		m.mock1.ExpectExec("UPDATE state").WillReturnResult(sqlmock.NewResult(1, 0))

		eTag := "illegal etag"
		request := createSetRequest()
		request.ETag = &eTag

		// Act
		err := m.mySQL.setValue(&request)

		// Assert
		assert.NotNil(t, err)
		assert.IsType(t, &state.ETagError{}, err)
		assert.Equal(t, err.(*state.ETagError).Kind(), state.ETagMismatch)
	})
}

// Verifies that MySQL passes through to myDBAccess
func TestMySQLDeleteHandlesNoKey(t *testing.T) {
	// Arrange

@ -296,8 +369,7 @@ func TestDeleteWithETag(t *testing.T) {

	m.mock1.ExpectExec("DELETE FROM").WillReturnResult(sqlmock.NewResult(0, 1))

	eTag := "946af56e"
	eTag := "946af562"
	request := createDeleteRequest()
	request.ETag = &eTag

@ -308,59 +380,39 @@ func TestDeleteWithETag(t *testing.T) {
	assert.Nil(t, err)
}

func TestReturnNDBResultsRowsAffectedReturnsError(t *testing.T) {
	// Arrange
	m, _ := mockDatabase(t)
	defer m.mySQL.Close()

	request := &fakeSQLRequest{
		rowsAffected: 3,
		lastInsertID: 0,
		err:          fmt.Errorf("RowAffectedError"),
	}

	// Act
	err := m.mySQL.returnNDBResults(request, nil, 2)

	// Assert
	assert.NotNil(t, err)
	assert.Equal(t, "RowAffectedError", err.Error())
}

func TestReturnNDBResultsNoRows(t *testing.T) {
	// Arrange
	m, _ := mockDatabase(t)
	defer m.mySQL.Close()

	request := &fakeSQLRequest{
		rowsAffected: 0,
		lastInsertID: 0,
	}

	// Act
	err := m.mySQL.returnNDBResults(request, nil, 2)

	// Assert
	assert.NotNil(t, err)
	assert.Equal(t, "rows affected error: no rows match given key and eTag", err.Error())
}

func TestReturnNDBResultsTooManyRows(t *testing.T) {
	// Arrange
	m, _ := mockDatabase(t)
	defer m.mySQL.Close()

	request := &fakeSQLRequest{
		rowsAffected: 3,
		lastInsertID: 0,
	}

	// Act
	err := m.mySQL.returnNDBResults(request, nil, 2)

	// Assert
	assert.NotNil(t, err)
	assert.Equal(t, "rows affected error: more than 2 row affected, expected 2, actual 3", err.Error())
}

func TestDeleteWithErr(t *testing.T) {
	// Arrange
	m, _ := mockDatabase(t)
	defer m.mySQL.Close()

	t.Run("error occurs when delete", func(t *testing.T) {
		m.mock1.ExpectExec("DELETE FROM").WillReturnError(errors.New("error"))

		request := createDeleteRequest()

		// Act
		err := m.mySQL.deleteValue(&request)

		// Assert
		assert.NotNil(t, err)
		assert.Equal(t, "error", err.Error())
	})

	t.Run("etag mismatch", func(t *testing.T) {
		m.mock1.ExpectExec("DELETE FROM").WillReturnResult(sqlmock.NewResult(0, 0))

		eTag := "946af563"
		request := createDeleteRequest()
		request.ETag = &eTag

		// Act
		err := m.mySQL.deleteValue(&request)

		// Assert
		assert.NotNil(t, err)
		assert.IsType(t, &state.ETagError{}, err)
		assert.Equal(t, err.(*state.ETagError).Kind(), state.ETagMismatch)
	})
}

func TestGetHandlesNoRows(t *testing.T) {

@ -740,20 +792,6 @@ func TestInvalidMultiDeleteRequest(t *testing.T) {
	assert.NotNil(t, err)
}

type fakeSQLRequest struct {
	lastInsertID int64
	rowsAffected int64
	err          error
}

func (f *fakeSQLRequest) LastInsertId() (int64, error) {
	return f.lastInsertID, f.err
}

func (f *fakeSQLRequest) RowsAffected() (int64, error) {
	return f.rowsAffected, f.err
}

func createSetRequest() state.SetRequest {
	return state.SetRequest{
		Key: randomKey(),

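These tests lean on a mockDatabase helper that is not shown in this diff. A hypothetical sketch of the sqlmock wiring they assume (the repo's real helper may differ): each ExpectExec scripts the next statement and chooses the driver-reported result, e.g. sqlmock.NewResult(1, 2) to simulate an insert that resolved as an update.

```go
package mysql

import (
	"testing"

	"github.com/DATA-DOG/go-sqlmock"
)

// TestExecExpectationSketch shows the scripting pattern used above:
// the mock matches the SQL by regexp and returns whatever result the
// test wants the store to observe.
func TestExecExpectationSketch(t *testing.T) {
	db, mock, err := sqlmock.New()
	if err != nil {
		t.Fatalf("sqlmock.New: %v", err)
	}
	defer db.Close()

	// Next INSERT reports lastInsertID=1, rowsAffected=2 (upsert-on-conflict).
	mock.ExpectExec("INSERT INTO state").WillReturnResult(sqlmock.NewResult(1, 2))

	if _, err := db.Exec("INSERT INTO state (id, value) VALUES (?, ?)", "k", "v"); err != nil {
		t.Fatal(err)
	}
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("unmet expectations: %v", err)
	}
}
```
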
@ -126,8 +126,7 @@ func (m *migration) executeMigrations() (migrationResult, error) {
}

func runCommand(tsql string, db *sql.DB) error {
	_, err := db.Exec(tsql)
	if err != nil {
	if _, err := db.Exec(tsql); err != nil {
		return err
	}

@ -272,35 +271,85 @@ func (m *migration) createStoredProcedureIfNotExists(db *sql.DB, name string, es
/* #nosec */
func (m *migration) ensureUpsertStoredProcedureExists(db *sql.DB, mr migrationResult) error {
	tsql := fmt.Sprintf(`
		CREATE PROCEDURE %s (
			@Key %s,
			@Data NVARCHAR(MAX),
			@RowVersion BINARY(8))
		AS
			IF (@RowVersion IS NOT NULL)
			BEGIN
				UPDATE [%s]
				SET [Data]=@Data, UpdateDate=GETDATE()
				WHERE [Key]=@Key AND RowVersion = @RowVersion

				RETURN
			END

			BEGIN TRY
				INSERT INTO [%s] ([Key], [Data]) VALUES (@Key, @Data);
			END TRY

			BEGIN CATCH
				IF ERROR_NUMBER() IN (2601, 2627)
					UPDATE [%s]
					SET [Data]=@Data, UpdateDate=GETDATE()
					WHERE [Key]=@Key AND RowVersion = ISNULL(@RowVersion, RowVersion)
			END CATCH`,
		CREATE PROCEDURE %s (
			@Key %s,
			@Data NVARCHAR(MAX),
			@RowVersion BINARY(8),
			@FirstWrite BIT)
		AS
			IF (@FirstWrite=1)
			BEGIN
				IF (@RowVersion IS NOT NULL)
				BEGIN
					BEGIN TRANSACTION;
					IF NOT EXISTS (SELECT * FROM [%s] WHERE [KEY]=@KEY AND RowVersion = @RowVersion)
					BEGIN
						THROW 2601, ''FIRST-WRITE: COMPETING RECORD ALREADY WRITTEN.'', 1
					END
					BEGIN
						UPDATE [%s]
						SET [Data]=@Data, UpdateDate=GETDATE()
						WHERE [Key]=@Key AND RowVersion = @RowVersion
					END
					COMMIT;
				END
				ELSE
				BEGIN
					BEGIN TRANSACTION;
					IF EXISTS (SELECT * FROM [%s] WHERE [KEY]=@KEY)
					BEGIN
						THROW 2601, ''FIRST-WRITE: COMPETING RECORD ALREADY WRITTEN.'', 1
					END
					BEGIN
						BEGIN TRY
							INSERT INTO [%s] ([Key], [Data]) VALUES (@Key, @Data);
						END TRY

						BEGIN CATCH
							IF ERROR_NUMBER() IN (2601, 2627)
								UPDATE [%s]
								SET [Data]=@Data, UpdateDate=GETDATE()
								WHERE [Key]=@Key AND RowVersion = ISNULL(@RowVersion, RowVersion)
						END CATCH
					END
					COMMIT;
				END
			END
			ELSE
			BEGIN
				IF (@RowVersion IS NOT NULL)
				BEGIN
					UPDATE [%s]
					SET [Data]=@Data, UpdateDate=GETDATE()
					WHERE [Key]=@Key AND RowVersion = @RowVersion
					RETURN
				END
				ELSE
				BEGIN
					BEGIN TRY
						INSERT INTO [%s] ([Key], [Data]) VALUES (@Key, @Data);
					END TRY

					BEGIN CATCH
						IF ERROR_NUMBER() IN (2601, 2627)
							UPDATE [%s]
							SET [Data]=@Data, UpdateDate=GETDATE()
							WHERE [Key]=@Key AND RowVersion = ISNULL(@RowVersion, RowVersion)
					END CATCH
				END
			END
		`,
		mr.upsertProcFullName,
		mr.pkColumnType,
		m.store.tableName,
		m.store.tableName,
		m.store.tableName)
		m.store.tableName,
		m.store.tableName,
		m.store.tableName,
		m.store.tableName,
		m.store.tableName,
		m.store.tableName,
	)

	return m.createStoredProcedureIfNotExists(db, mr.upsertProcName, tsql)
}

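Read as a decision table, the regenerated procedure covers four cases. With @FirstWrite=1 and a row version, the transaction throws 2601 unless a row with the same key and that exact RowVersion still exists, then updates it; with @FirstWrite=1 and no version, any existing row triggers the throw, so only a true first insert succeeds. Without first-write, a supplied version takes the plain optimistic-concurrency UPDATE path, and no version falls through to INSERT, with the 2601/2627 duplicate-key CATCH downgrading to an UPDATE. The doubled single quotes around the THROW message are presumably required because the body is spliced into a larger T-SQL string literal by createStoredProcedureIfNotExists.
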
@ -440,23 +440,23 @@ func (s *SQLServer) Delete(req *state.DeleteRequest) error {
		res, err = s.db.Exec(s.deleteWithoutETagCommand, sql.Named(keyColumnName, req.Key))
	}

	// err represents errors thrown by the stored procedure or the database itself
	if err != nil {
		if req.ETag != nil {
			return state.NewETagError(state.ETagMismatch, err)
		}

		return err
	}

	// if the row with matching key (and ETag if specified) is not found, then the stored procedure returns 0 rows affected
	rows, err := res.RowsAffected()
	if err != nil {
		return err
	}

	if rows != 1 {
		return fmt.Errorf("items was not updated")
	}
	// When an ETag is specified, a row must have been deleted or else we return an ETag mismatch error
	if rows != 1 && req.ETag != nil && *req.ETag != "" {
		return state.NewETagError(state.ETagMismatch, nil)
	}

	// successful deletion, or noop if no ETag specified
	return nil
}

@ -578,15 +578,22 @@ func (s *SQLServer) executeSet(db dbExecutor, req *state.SetRequest) error {
		return err
	}
	etag := sql.Named(rowVersionColumnName, nil)
	if req.ETag != nil {
	if req.ETag != nil && *req.ETag != "" {
		var b []byte
		b, err = hex.DecodeString(*req.ETag)
		if err != nil {
			return state.NewETagError(state.ETagInvalid, err)
		}
		etag.Value = b
		etag = sql.Named(rowVersionColumnName, b)
	}
	res, err := db.Exec(s.upsertCommand, sql.Named(keyColumnName, req.Key), sql.Named("Data", string(bytes)), etag)

	var res sql.Result
	if req.Options.Concurrency == state.FirstWrite {
		res, err = db.Exec(s.upsertCommand, sql.Named(keyColumnName, req.Key), sql.Named("Data", string(bytes)), etag, sql.Named("FirstWrite", 1))
	} else {
		res, err = db.Exec(s.upsertCommand, sql.Named(keyColumnName, req.Key), sql.Named("Data", string(bytes)), etag, sql.Named("FirstWrite", 0))
	}

	if err != nil {
		if req.ETag != nil && *req.ETag != "" {
			return state.NewETagError(state.ETagMismatch, err)

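For orientation, the ETag that executeSet decodes is SQL Server's rowversion: 8 opaque bytes surfaced to callers as a hex string, which is why a malformed value maps to ETagInvalid rather than a mismatch. A small self-contained illustration (the sample bytes are made up):

```go
package main

import (
	"encoding/hex"
	"fmt"
)

func main() {
	rowVersion := []byte{0, 0, 0, 0, 0, 0, 0x07, 0xD1} // BINARY(8) from the DB
	etag := hex.EncodeToString(rowVersion)             // what callers see as the ETag
	fmt.Println(etag)                                  // 00000000000007d1

	b, err := hex.DecodeString(etag) // what executeSet binds back as @RowVersion
	if err != nil {
		panic(err) // a malformed caller ETag surfaces as ETagInvalid upstream
	}
	fmt.Println(len(b)) // 8
}
```
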
@ -8,13 +8,13 @@ spec:
  version: v1
  metadata:
  - name: connectionString
    value: ${{AzureEventHubsConnectionString}}
    value: ${{AzureEventHubsBindingsConnectionString}}
  - name: consumerGroup
    value: ${{AzureEventHubsConsumerGroup}}
    value: ${{AzureEventHubsBindingsConsumerGroup}}
  # Reuse the blob storage account from the storage bindings conformance test
  - name: storageAccountName
    value: ${{AzureBlobStorageAccount}}
  - name: storageAccountKey
    value: ${{AzureBlobStorageAccessKey}}
  - name: storageContainerName
    value: ${{AzureBlobStorageContainer}}
    value: ${{AzureEventHubsBindingsContainer}}

@ -0,0 +1,17 @@
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: influx
  namespace: default
spec:
  type: bindings.influx
  version: v1
  metadata:
  - name: url # Required
    value: http://localhost:8086
  - name: token # Required
    value: ${{ INFLUX_TOKEN }}
  - name: org # Required
    value: dapr-conf-test
  - name: bucket # Required
    value: dapr-conf-test-bucket

@ -16,4 +16,6 @@ spec:
  - name: publishTopic # Output binding topic
    value: binding-topic
  - name: authRequired
    value: "false"
  - name: initialOffset
    value: oldest

@ -35,6 +35,10 @@ components:
    config:
      url: "localhost:22222"
      method: "POST"
  - component: influx
    operations: ["create", "operations"]
    config:
      outputData: '{ "measurement": "TestMeasurement", "tags": "unit=temperature", "values": "avg=23.5" }'
  - component: mqtt
    profile: mosquitto
    operations: ["create", "operations", "read"]

@ -8,13 +8,13 @@ spec:
  version: v1
  metadata:
  - name: connectionString
    value: ${{AzureEventHubsConnectionString}}
    value: ${{AzureEventHubsPubsubConnectionString}}
  - name: consumerID
    value: ${{AzureEventHubsConsumerGroup}}
    value: ${{AzureEventHubsPubsubConsumerGroup}}
  # Reuse the blob storage account from the storage bindings conformance test
  - name: storageAccountName
    value: ${{AzureBlobStorageAccount}}
  - name: storageAccountKey
    value: ${{AzureBlobStorageAccessKey}}
  - name: storageContainerName
    value: ${{AzureBlobStorageContainer}}
    value: ${{AzureEventHubsPubsubContainer}}

@ -0,0 +1,8 @@
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: pubsub
  namespace: default
spec:
  type: pubsub.in-memory
  version: v1

@ -0,0 +1,14 @@
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: pubsub
spec:
  type: pubsub.jetstream
  version: v1
  metadata:
  - name: natsURL
    value: "nats://localhost:4222"
  - name: name
    value: config-test
  - name: flowControl
    value: true

@ -11,4 +11,6 @@ spec:
  - name: consumerGroup
    value: pubsubgroup1
  - name: authRequired
    value: "false"
  - name: initialOffset
    value: oldest

@ -1,6 +1,6 @@
# Supported operation: publish, subscribe
# Config map:
## pubsubName : name of the pubsub
## testTopicName: name of the test topic to use
## publish: A map of strings that will be part of the publish metadata in the Publish call
## subscribe: A map of strings that will be part of the subscribe metadata in the Subscribe call

@ -25,6 +25,8 @@ components:
      checkInOrderProcessing: false
  - component: natsstreaming
    allOperations: true
  - component: jetstream
    allOperations: true
  - component: kafka
    allOperations: true
  - component: pulsar

@ -44,3 +46,5 @@ components:
    allOperations: true
    config:
      checkInOrderProcessing: false
  - component: in-memory
    allOperations: true

@ -0,0 +1,9 @@
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: statestore
spec:
  type: state.mysql
  metadata:
  - name: connectionString
    value: "dapr:example@tcp(localhost:3306)/?allowNativePasswords=true"

@ -0,0 +1,11 @@
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: statestore
spec:
  type: state.sqlserver
  metadata:
  - name: connectionString
    value: "server=localhost;user id=sa;password=Pass@Word1;port=1433;"
  - name: tableName
    value: mytable

@ -7,3 +7,8 @@ components:
    allOperations: true
  - component: cosmosdb
    allOperations: true
  - component: sqlserver
    allOperations: true
  - component: mysql
    allOperations: false
    operations: [ "set", "get", "delete", "bulkset", "bulkdelete", "transaction", "etag" ]

@ -81,3 +81,42 @@
# COMPONENT_NAME is the component name from the tests.yml file, e.g. azure.servicebus, redis, mongodb etc.
go test -v -tags=conftests -count=1 ./tests/conformance -run="${TEST_NAME}/${COMPONENT_NAME}"
```

### Debug conformance tests

To run all conformance tests:

```bash
dlv test --build-flags '-v -tags=conftests' ./tests/conformance
```

To run a specific conformance test:

```bash
dlv test --build-flags '-v -tags=conftests' ./tests/conformance -- -test.run "TestStateConformance/redis"
```

If you want to combine VS Code and dlv for debugging, so you can set breakpoints in the IDE, create a debug launch configuration as follows:

```json
{
    "version": "0.2.0",
    "configurations": [
        {
            "name": "Launch test function",
            "type": "go",
            "request": "launch",
            "mode": "test",
            "program": "${workspaceFolder}/tests/conformance",
            "buildFlags": "-v -tags=conftests",
            "env": {
                "SOMETHING_REQUIRED_BY_THE_TEST": "<somevalue>"
            },
            "args": [
                "-test.run",
                "TestStateConformance/redis"
            ]
        }
    ]
}
```

@ -6,6 +6,9 @@
package bindings

import (
	"context"
	"errors"
	"io"
	"strconv"
	"strings"
	"testing"

@ -22,6 +25,9 @@ import (
const (
	defaultTimeoutDuration = 60 * time.Second
	defaultWaitDuration    = time.Second

	// Use CloudEvent as default data because it is required by Azure's EventGrid.
	defaultOutputData = "[{\"eventType\":\"test\",\"eventTime\": \"2018-01-25T22:12:19.4556811Z\",\"subject\":\"dapr-conf-tests\",\"id\":\"A234-1234-1234\",\"data\":\"root/>\"}]"
)

// nolint:gochecknoglobals

@ -38,6 +44,7 @@ type TestConfig struct {
	URL                string            `mapstructure:"url"`
	InputMetadata      map[string]string `mapstructure:"input"`
	OutputMetadata     map[string]string `mapstructure:"output"`
	OutputData         string            `mapstructure:"outputData"`
	ReadBindingTimeout time.Duration     `mapstructure:"readBindingTimeout"`
	ReadBindingWait    time.Duration     `mapstructure:"readBindingWait"`
}

@ -53,6 +60,7 @@ func NewTestConfig(name string, allOperations bool, operations []string, configM
		},
		InputMetadata:      make(map[string]string),
		OutputMetadata:     make(map[string]string),
		OutputData:         defaultOutputData,
		ReadBindingTimeout: defaultTimeoutDuration,
		ReadBindingWait:    defaultWaitDuration,
	}

@ -87,11 +95,8 @@ func startHTTPServer(url string) {
func (tc *TestConfig) createInvokeRequest() bindings.InvokeRequest {
	// There is a possibility that the metadata map might be modified by the Invoke function(eg: azure blobstorage).
	// So we are making a copy of the config metadata map and setting the Metadata field before each request
	// Use CloudEvent as data because it is required by Azure's EventGrid.
	cloudEvent := "[{\"eventType\":\"test\",\"eventTime\": \"2018-01-25T22:12:19.4556811Z\",\"subject\":\"dapr-conf-tests\",\"id\":\"A234-1234-1234\",\"data\":\"root/>\"}]"

	return bindings.InvokeRequest{
		Data:     []byte(cloudEvent),
		Data:     []byte(tc.OutputData),
		Metadata: tc.CopyMap(tc.OutputMetadata),
	}
}

@ -155,7 +160,7 @@ func ConformanceTests(t *testing.T, props map[string]string, inputBinding bindin

			return nil, nil
		})
		assert.NoError(t, err, "input binding read returned an error")
		assert.True(t, err == nil || errors.Is(err, context.Canceled), "expected Read canceled on Close")
	}()
})
// Special case for message brokers that are also bindings

@ -230,4 +235,21 @@ func ConformanceTests(t *testing.T, props map[string]string, inputBinding bindin
		}
	})
}

t.Run("close", func(t *testing.T) {
	// Check for an input-binding specific operation before close
	if config.HasOperation("read") {
		if closer, ok := inputBinding.(io.Closer); ok {
			err := closer.Close()
			assert.NoError(t, err, "expected no error closing input binding")
		}
	}
	// Check for an output-binding specific operation before close
	if config.HasOperation("operations") {
		if closer, ok := outputBinding.(io.Closer); ok {
			err := closer.Close()
			assert.NoError(t, err, "expected no error closing output binding")
		}
	}
})
}

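The new close subtest, together with the relaxed Read assertion (err == nil || errors.Is(err, context.Canceled)), pins down the contract that Close must unblock a pending Read. A hedged sketch of a binding satisfying that contract; all type and method names here are illustrative, not the repo's actual interfaces:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

type demoBinding struct {
	ctx    context.Context
	cancel context.CancelFunc
}

func newDemoBinding() *demoBinding {
	ctx, cancel := context.WithCancel(context.Background())
	return &demoBinding{ctx: ctx, cancel: cancel}
}

// Read blocks until a message arrives or the binding is closed.
func (b *demoBinding) Read() error {
	select {
	case <-b.ctx.Done():
		return b.ctx.Err() // context.Canceled after Close
	case <-time.After(10 * time.Second):
		return nil // pretend a message arrived
	}
}

// Close satisfies io.Closer and unblocks any pending Read.
func (b *demoBinding) Close() error {
	b.cancel()
	return nil
}

func main() {
	b := newDemoBinding()
	go func() {
		time.Sleep(100 * time.Millisecond)
		b.Close()
	}()
	fmt.Println(b.Read()) // context canceled
}
```
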
@ -32,12 +32,15 @@ import (
	b_azure_servicebusqueues "github.com/dapr/components-contrib/bindings/azure/servicebusqueues"
	b_azure_storagequeues "github.com/dapr/components-contrib/bindings/azure/storagequeues"
	b_http "github.com/dapr/components-contrib/bindings/http"
	b_influx "github.com/dapr/components-contrib/bindings/influx"
	b_kafka "github.com/dapr/components-contrib/bindings/kafka"
	b_mqtt "github.com/dapr/components-contrib/bindings/mqtt"
	b_redis "github.com/dapr/components-contrib/bindings/redis"
	p_eventhubs "github.com/dapr/components-contrib/pubsub/azure/eventhubs"
	p_servicebus "github.com/dapr/components-contrib/pubsub/azure/servicebus"
	p_hazelcast "github.com/dapr/components-contrib/pubsub/hazelcast"
	p_inmemory "github.com/dapr/components-contrib/pubsub/in-memory"
	p_jetstream "github.com/dapr/components-contrib/pubsub/jetstream"
	p_kafka "github.com/dapr/components-contrib/pubsub/kafka"
	p_mqtt "github.com/dapr/components-contrib/pubsub/mqtt"
	p_natsstreaming "github.com/dapr/components-contrib/pubsub/natsstreaming"

@ -50,7 +53,9 @@ import (
	ss_local_file "github.com/dapr/components-contrib/secretstores/local/file"
	s_cosmosdb "github.com/dapr/components-contrib/state/azure/cosmosdb"
	s_mongodb "github.com/dapr/components-contrib/state/mongodb"
	s_mysql "github.com/dapr/components-contrib/state/mysql"
	s_redis "github.com/dapr/components-contrib/state/redis"
	s_sqlserver "github.com/dapr/components-contrib/state/sqlserver"
	conf_bindings "github.com/dapr/components-contrib/tests/conformance/bindings"
	conf_pubsub "github.com/dapr/components-contrib/tests/conformance/pubsub"
	conf_secret "github.com/dapr/components-contrib/tests/conformance/secretstores"

@ -319,6 +324,8 @@ func loadPubSub(tc TestComponent) pubsub.PubSub {
		pubsub = p_servicebus.NewAzureServiceBus(testLogger)
	case "natsstreaming":
		pubsub = p_natsstreaming.NewNATSStreamingPubSub(testLogger)
	case "jetstream":
		pubsub = p_jetstream.NewJetStream(testLogger)
	case kafka:
		pubsub = p_kafka.NewKafka(testLogger)
	case "pulsar":

@ -329,6 +336,9 @@ func loadPubSub(tc TestComponent) pubsub.PubSub {
		pubsub = p_hazelcast.NewHazelcastPubSub(testLogger)
	case "rabbitmq":
		pubsub = p_rabbitmq.NewRabbitMQ(testLogger)
	case "in-memory":
		pubsub = p_inmemory.New(testLogger)

	default:
		return nil
	}

@ -363,6 +373,10 @@ func loadStateStore(tc TestComponent) state.Store {
		store = s_cosmosdb.NewCosmosDBStateStore(testLogger)
	case "mongodb":
		store = s_mongodb.NewMongoDB(testLogger)
	case "sqlserver":
		store = s_sqlserver.NewSQLServerStateStore(testLogger)
	case "mysql":
		store = s_mysql.NewMySQLStateStore(testLogger)
	default:
		return nil
	}

@ -390,6 +404,8 @@ func loadOutputBindings(tc TestComponent) bindings.OutputBinding {
		binding = b_kafka.NewKafka(testLogger)
	case "http":
		binding = b_http.NewHTTP(testLogger)
	case "influx":
		binding = b_influx.NewInflux(testLogger)
	case mqtt:
		binding = b_mqtt.NewMQTT(testLogger)
	default:

@ -235,6 +235,7 @@ func ConformanceTests(t *testing.T, props map[string]string, statestore state.St
	t.Run("delete", func(t *testing.T) {
		for _, scenario := range scenarios {
			if !scenario.bulkOnly && scenario.toBeDeleted {
				// this also deletes two keys that were not inserted in the set operation
				t.Logf("Deleting %s", scenario.key)
				err := statestore.Delete(&state.DeleteRequest{
					Key: scenario.key,