Merge branch 'master' into content-type
commit 1acb76ee1c
@@ -3,6 +3,6 @@
 # Licensed under the MIT License.
 # ------------------------------------------------------------

-FROM daprio/dapr-dev:0.1.3
+FROM daprio/dapr-dev:0.1.4

 VOLUME [ "/go/src/github.com/dapr/dapr" ]
conf-test-azure-iothub.bicep (new file)

@@ -0,0 +1,52 @@
+// ------------------------------------------------------------
+// Copyright (c) Microsoft Corporation and Dapr Contributors.
+// Licensed under the MIT License.
+// ------------------------------------------------------------
+
+param iotHubName string
+param rgLocation string = resourceGroup().location
+param confTestTags object = {}
+
+var iotHubBindingsConsumerGroupName = '${iotHubName}/events/bindings-cg'
+var iotHubPubsubConsumerGroupName = '${iotHubName}/events/pubsub-cg'
+
+resource iotHub 'Microsoft.Devices/IotHubs@2021-03-31' = {
+  name: iotHubName
+  location: rgLocation
+  tags: confTestTags
+  sku: {
+    capacity: 1
+    name: 'S1'
+  }
+  properties: {
+    eventHubEndpoints: {
+      events: {
+        retentionTimeInDays: 1
+        partitionCount: 2
+      }
+    }
+  }
+}
+
+resource iotHubBindingsConsumerGroup 'Microsoft.Devices/IotHubs/eventHubEndpoints/ConsumerGroups@2021-03-31' = {
+  name: iotHubBindingsConsumerGroupName
+  properties: {
+    name: 'bindings-cg'
+  }
+  dependsOn: [
+    iotHub
+  ]
+}
+
+resource iotHubPubsubConsumerGroup 'Microsoft.Devices/IotHubs/eventHubEndpoints/ConsumerGroups@2021-03-31' = {
+  name: iotHubPubsubConsumerGroupName
+  properties: {
+    name: 'pubsub-cg'
+  }
+  dependsOn: [
+    iotHub
+  ]
+}
+
+output iotHubBindingsConsumerGroupName string = iotHubBindingsConsumerGroup.name
+output iotHubPubsubConsumerGroupName string = iotHubPubsubConsumerGroup.name
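Note that the two outputs return the full child-resource names of the form '<hub>/events/<cg>'. As a rough sketch of exercising this template on its own, outside the conf-test-azure.bicep deployment below (the resource group and parameter values here are hypothetical):

# Hypothetical standalone deployment of the IoT Hub template; the real setup
# script deploys it through conf-test-azure.bicep instead.
az group create --name my-conf-test-rg --location WestUS2
az deployment group create \
    --resource-group my-conf-test-rg \
    --template-file conf-test-azure-iothub.bicep \
    --parameters iotHubName=myprefix-conf-test-iothub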
conf-test-azure-sqlserver.bicep (new file)

@@ -0,0 +1,25 @@
+// ------------------------------------------------------------
+// Copyright (c) Microsoft Corporation and Dapr Contributors.
+// Licensed under the MIT License.
+// ------------------------------------------------------------
+
+param sqlServerName string
+param rgLocation string = resourceGroup().location
+param confTestTags object = {}
+param sqlServerAdminPassword string
+
+var sqlServerAdminName = '${sqlServerName}-admin'
+
+resource sqlServer 'Microsoft.Sql/servers@2021-02-01-preview' = {
+  name: sqlServerName
+  location: rgLocation
+  tags: confTestTags
+  properties: {
+    administratorLogin: sqlServerAdminName
+    administratorLoginPassword: sqlServerAdminPassword
+    minimalTlsVersion: '1.2'
+    publicNetworkAccess: 'Enabled'
+  }
+}
+
+output sqlServerAdminName string = sqlServer.properties.administratorLogin
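The admin password is deliberately left as a parameter; the main template below enforces a 16-character minimum on it. A minimal sketch of generating a compliant value and deploying this file standalone (resource group name hypothetical; the setup script uses the same openssl approach later):

# openssl rand -base64 32 yields a 44-character string, well over the
# @minLength(16) constraint declared in conf-test-azure.bicep.
SQL_SERVER_ADMIN_PASSWORD=$(openssl rand -base64 32)
az deployment group create \
    --resource-group my-conf-test-rg \
    --template-file conf-test-azure-sqlserver.bicep \
    --parameters sqlServerName=myprefix-conf-test-sql \
        sqlServerAdminPassword="${SQL_SERVER_ADMIN_PASSWORD}"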
conf-test-azure.bicep

@@ -33,12 +33,18 @@ param sdkAuthSpId string
 @description('Provide the objectId of the Service Principal using cert auth with get and list access to all assets in Azure Key Vault.')
 param certAuthSpId string

+@minLength(16)
+@description('Provide the SQL server admin password of at least 16 characters.')
+param sqlServerAdminPassword string
+
 var confTestRgName = '${toLower(namePrefix)}-conf-test-rg'
 var cosmosDbName = '${toLower(namePrefix)}-conf-test-db'
 var eventGridTopicName = '${toLower(namePrefix)}-conf-test-eventgrid-topic'
 var eventHubsNamespaceName = '${toLower(namePrefix)}-conf-test-eventhubs'
+var iotHubName = '${toLower(namePrefix)}-conf-test-iothub'
 var keyVaultName = '${toLower(namePrefix)}-conf-test-kv'
 var serviceBusName = '${toLower(namePrefix)}-conf-test-servicebus'
+var sqlServerName = '${toLower(namePrefix)}-conf-test-sql'
 var storageName = '${toLower(namePrefix)}ctstorage'

 resource confTestRg 'Microsoft.Resources/resourceGroups@2021-04-01' = {
@@ -74,6 +80,15 @@ module eventHubsNamespace 'conf-test-azure-eventHubs.bicep' = {
   }
 }

+module iotHub 'conf-test-azure-iothub.bicep' = {
+  name: iotHubName
+  scope: resourceGroup(confTestRg.name)
+  params: {
+    confTestTags: confTestTags
+    iotHubName: iotHubName
+  }
+}
+
 module keyVault 'conf-test-azure-keyVault.bicep' = {
   name: keyVaultName
   scope: resourceGroup(confTestRg.name)
@@ -95,6 +110,16 @@ module serviceBus 'conf-test-azure-servicebus.bicep' = {
   }
 }

+module sqlServer 'conf-test-azure-sqlserver.bicep' = {
+  name: sqlServerName
+  scope: resourceGroup(confTestRg.name)
+  params: {
+    confTestTags: confTestTags
+    sqlServerName: sqlServerName
+    sqlServerAdminPassword: sqlServerAdminPassword
+  }
+}
+
 module storage 'conf-test-azure-storage.bicep' = {
   name: storageName
   scope: resourceGroup(confTestRg.name)
@@ -116,6 +141,11 @@ output eventHubBindingsConsumerGroupName string = eventHubsNamespace.outputs.eve
 output eventHubPubsubName string = eventHubsNamespace.outputs.eventHubPubsubName
 output eventHubPubsubPolicyName string = eventHubsNamespace.outputs.eventHubPubsubPolicyName
 output eventHubPubsubConsumerGroupName string = eventHubsNamespace.outputs.eventHubPubsubConsumerGroupName
+output iotHubName string = iotHub.name
+output iotHubBindingsConsumerGroupName string = iotHub.outputs.iotHubBindingsConsumerGroupName
+output iotHubPubsubConsumerGroupName string = iotHub.outputs.iotHubPubsubConsumerGroupName
 output keyVaultName string = keyVault.name
 output serviceBusName string = serviceBus.name
+output sqlServerName string = sqlServer.name
+output sqlServerAdminName string = sqlServer.outputs.sqlServerAdminName
 output storageName string = storage.name
.github/infrastructure/conformance/azure/setup-azure-conf-test.sh (252 changed lines; vendored; Normal file → Executable file)
@@ -40,6 +40,9 @@ do
             PREFIX="${opt#*=}"
             ;;

+        --credentials=*)
+            CREDENTIALS_PATH="${opt#*=}"
+            ;;
         *)
             echo "$0: Unknown option: $opt"
             ERR=1
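Each option value is pulled out with the ${opt#*=} expansion, which deletes the shortest prefix matching '*=' and leaves the text after the first '='. A quick illustration:

# ${opt#*=} strips everything up to and including the first '=' in $opt.
opt="--credentials=/tmp/azure-creds.json"
echo "${opt#*=}"    # prints: /tmp/azure-creds.json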
@@ -66,7 +69,9 @@ OVERVIEW:
 Sets up Azure resources needed for conformance tests and populates the secrets
 needed for the conformance.yml GitHub workflow to run. Also generates a .rc file
 that can be used to set environment variables for the conformance test to be run
-on the local device.
+on the local device. The script aims to be idempotent for the same inputs and
+can be rerun on failure, except that any auto-generated Service Principal
+credentials will be rotated on rerun.

 PREREQUISITES:
 This script requires that the Azure CLI is installed, and the user is already
@@ -77,13 +82,20 @@ will be deployed. For example:
 $ az account set -s "My Test Subscription"

 USAGE:
-    $ ./setup-azure-conf-test.sh --user=<Azure user UPN> [--location="..."] \
-        [--prefix="..."] [--outpath="..."] [--ngrok-token="..."]
+    $ ./setup-azure-conf-test.sh --user=<Azure user UPN> [--credentials="..."] \
+        [--location="..."] [--prefix="..."] [--outpath="..."] [--ngrok-token="..."]

 OPTIONS:
     -h, --help          Print this help message.
     --user              The UPN for the Azure user in the current subscription who
                         will own all created resources, e.g. "myalias@contoso.com".
+    --credentials       Optional. The path to a file containing Azure credentials of
+                        the Service Principal (SP) that will be running conformance
+                        tests. The file should be the JSON output when the SP was
+                        created with the Azure CLI 'az ad sp create-for-rbac'
+                        command. If not specified, a new SP will be created, and its
+                        credentials saved in the AZURE_CREDENTIALS file under the
+                        --outpath.
     --location          Optional. The location for the Azure deployment. Defaults to
                         "WestUS2" if not specified.
     --prefix            Optional. 3-15 character string to prefix all created
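For instance (values below are hypothetical), a first run creates everything, and a rerun can reuse the previously generated test-runner SP so that its credentials are not rotated:

# First run: creates a new test-runner Service Principal and saves its
# credentials to <outpath>/AZURE_CREDENTIALS.
./setup-azure-conf-test.sh --user=myalias@contoso.com --prefix=myprefix \
    --outpath=./conf-test-out

# Rerun: reuse the SP credentials from the first run instead of rotating them.
./setup-azure-conf-test.sh --user=myalias@contoso.com --prefix=myprefix \
    --outpath=./conf-test-out --credentials=./conf-test-out/AZURE_CREDENTIALS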
@@ -121,6 +133,9 @@ fi
 if [[ -z ${NGROK_TOKEN} ]]; then
     echo "WARN: --ngrok-token is not specified, will not set AzureEventGridNgrokToken used by GitHub workflow for bindings.azure.eventgrid conformance test."
 fi
+if [[ -z ${CREDENTIALS_PATH} ]]; then
+    echo "INFO: --credentials is not specified, will generate a new service principal and credentials ..."
+fi

 echo
 echo "Starting setup-azure-conf-test with the following parameters:"
@@ -129,6 +144,7 @@ echo "PREFIX=${PREFIX}"
 echo "DEPLOY_LOCATION=${DEPLOY_LOCATION}"
 echo "OUTPUT_PATH=${OUTPUT_PATH}"
 echo "NGROK_TOKEN=${NGROK_TOKEN}"
+echo "CREDENTIALS_PATH=${CREDENTIALS_PATH}"

 ##==============================================================================
 ##
@@ -158,42 +174,72 @@ EVENT_HUBS_PUBSUB_CONNECTION_STRING_VAR_NAME="AzureEventHubsPubsubConnectionStri
 EVENT_HUBS_PUBSUB_CONSUMER_GROUP_VAR_NAME="AzureEventHubsPubsubConsumerGroup"
 EVENT_HUBS_PUBSUB_CONTAINER_VAR_NAME="AzureEventHubsPubsubContainer"

+IOT_HUB_NAME_VAR_NAME="AzureIotHubName"
+IOT_HUB_EVENT_HUB_CONNECTION_STRING_VAR_NAME="AzureIotHubEventHubConnectionString"
+IOT_HUB_BINDINGS_CONSUMER_GROUP_VAR_NAME="AzureIotHubBindingsConsumerGroup"
+IOT_HUB_PUBSUB_CONSUMER_GROUP_VAR_NAME="AzureIotHubPubsubConsumerGroup"
+
 KEYVAULT_CERT_NAME="AzureKeyVaultSecretStoreCert"
 KEYVAULT_CLIENT_ID_VAR_NAME="AzureKeyVaultSecretStoreClientId"
+KEYVAULT_SERVICE_PRINCIPAL_CLIENT_SECRET_VAR_NAME="AzureKeyVaultSecretStoreServicePrincipalClientSecret"
+KEYVAULT_SERVICE_PRINCIPAL_CLIENT_ID_VAR_NAME="AzureKeyVaultSecretStoreServicePrincipalClientId"
 KEYVAULT_TENANT_ID_VAR_NAME="AzureKeyVaultSecretStoreTenantId"
 KEYVAULT_NAME_VAR_NAME="AzureKeyVaultName"

+RESOURCE_GROUP_NAME_VAR_NAME="AzureResourceGroupName"
+
 SERVICE_BUS_CONNECTION_STRING_VAR_NAME="AzureServiceBusConnectionString"

+SQL_SERVER_NAME_VAR_NAME="AzureSqlServerName"
+SQL_SERVER_CONNECTION_STRING_VAR_NAME="AzureSqlServerConnectionString"
+
 STORAGE_ACCESS_KEY_VAR_NAME="AzureBlobStorageAccessKey"
 STORAGE_ACCOUNT_VAR_NAME="AzureBlobStorageAccount"
 STORAGE_CONTAINER_VAR_NAME="AzureBlobStorageContainer"
 STORAGE_QUEUE_VAR_NAME="AzureBlobStorageQueue"

 # Derived variables
-ADMIN_ID="$(az ad user list --upn "${ADMIN_UPN}" --query "[].objectId" | grep \" | sed -E 's/[[:space:]]|\"//g')"
-SUB_ID="$(az account show --query "id" | sed -E 's/[[:space:]]|\"//g')"
-TENANT_ID="$(az account show --query "tenantId" | sed -E 's/[[:space:]]|\"//g')"
+ADMIN_ID="$(az ad user list --upn "${ADMIN_UPN}" --query "[].objectId" --output tsv)"
+SUB_ID="$(az account show --query "id" --output tsv)"
+TENANT_ID="$(az account show --query "tenantId" --output tsv)"
 DEPLOY_NAME="${PREFIX}-azure-conf-test"

 # Setup output path
 mkdir -p "${OUTPUT_PATH}"

+# Configure Azure CLI to install azure-iot and other extensions without prompts
+az config set extension.use_dynamic_install=yes_without_prompt
+
 # Create Service Principals for use with the conformance tests
 CERT_AUTH_SP_NAME="${PREFIX}-akv-conf-test-sp"
 az ad sp create-for-rbac --name "${CERT_AUTH_SP_NAME}" --skip-assignment --years 1
-CERT_AUTH_SP_ID="$(az ad sp list --display-name "${CERT_AUTH_SP_NAME}" --query "[].objectId" | grep \" | sed -E 's/[[:space:]]|\"//g')"
+CERT_AUTH_SP_ID="$(az ad sp list --display-name "${CERT_AUTH_SP_NAME}" --query "[].objectId" --output tsv)"
 echo "Created Service Principal for cert auth: ${CERT_AUTH_SP_NAME}"

-SDK_AUTH_SP_NAME="${PREFIX}-conf-test-runner-sp"
-SDK_AUTH_SP_INFO="$(az ad sp create-for-rbac --name "${SDK_AUTH_SP_NAME}" --sdk-auth --skip-assignment --years 1)"
-echo "${SDK_AUTH_SP_INFO}"
-echo "Created Service Principal for SDK Auth: ${SDK_AUTH_SP_NAME}"
+if [[ -n ${CREDENTIALS_PATH} ]]; then
+    SDK_AUTH_SP_INFO="$(cat ${CREDENTIALS_PATH})"
+    SDK_AUTH_SP_APPID="$(echo "${SDK_AUTH_SP_INFO}" | grep 'clientId' | sed -E 's/(.*clientId\"\: \")|\".*//g')"
+    SDK_AUTH_SP_CLIENT_SECRET="$(echo "${SDK_AUTH_SP_INFO}" | grep 'clientSecret' | sed -E 's/(.*clientSecret\"\: \")|\".*//g')"
+    if [[ -z ${SDK_AUTH_SP_APPID} || -z ${SDK_AUTH_SP_CLIENT_SECRET} ]]; then
+        echo "Invalid credentials JSON file. Contents should match output of 'az ad sp create-for-rbac' command."
+        exit 1
+    fi
+    SDK_AUTH_SP_NAME="$(az ad sp show --id "${SDK_AUTH_SP_APPID}" --query "appDisplayName" --output tsv)"
+    SDK_AUTH_SP_ID="$(az ad sp show --id "${SDK_AUTH_SP_APPID}" --query "objectId" --output tsv)"
+    echo "Using Service Principal from ${CREDENTIALS_PATH} for SDK Auth: ${SDK_AUTH_SP_NAME}"
+else
+    SDK_AUTH_SP_NAME="${PREFIX}-conf-test-runner-sp"
+    SDK_AUTH_SP_INFO="$(az ad sp create-for-rbac --name "${SDK_AUTH_SP_NAME}" --sdk-auth --skip-assignment --years 1)"
+    SDK_AUTH_SP_CLIENT_SECRET="$(echo "${SDK_AUTH_SP_INFO}" | grep 'clientSecret' | sed -E 's/(.*clientSecret\"\: \")|\".*//g')"
+    SDK_AUTH_SP_ID="$(az ad sp list --display-name "${SDK_AUTH_SP_NAME}" --query "[].objectId" --output tsv)"
+    echo "${SDK_AUTH_SP_INFO}"
+    echo "Created Service Principal for SDK Auth: ${SDK_AUTH_SP_NAME}"
+    AZURE_CREDENTIALS_FILENAME="${OUTPUT_PATH}/AZURE_CREDENTIALS"
+    echo "${SDK_AUTH_SP_INFO}" > "${AZURE_CREDENTIALS_FILENAME}"
+fi

-AZURE_CREDENTIALS_FILENAME="${OUTPUT_PATH}/AZURE_CREDENTIALS"
-echo "${SDK_AUTH_SP_INFO}" > "${AZURE_CREDENTIALS_FILENAME}"
-SDK_AUTH_SP_CLIENT_SECRET="$(echo "${SDK_AUTH_SP_INFO}" | grep 'clientSecret' | sed -E 's/(.*clientSecret\"\: \")|\",//g')"
-SDK_AUTH_SP_ID="$(az ad sp list --display-name "${SDK_AUTH_SP_NAME}" --query "[].objectId" | grep \" | sed -E 's/[[:space:]]|\"//g')"
+# Generate new password for SQL Server admin
+SQL_SERVER_ADMIN_PASSWORD=$(openssl rand -base64 32)

 # Build the bicep template and deploy to Azure
 az bicep install
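A recurring change in this hunk replaces ad-hoc grep/sed parsing of the CLI's default JSON output with --output tsv, which prints bare values with no quotes or brackets. A quick illustration (the GUID below is made up):

# Default JSON output is quoted and needs post-processing:
az account show --query "tenantId"
# "72f988bf-0000-0000-0000-000000000000"

# TSV output is already a bare string, safe to capture directly:
TENANT_ID="$(az account show --query "tenantId" --output tsv)"
echo "${TENANT_ID}"
# 72f988bf-0000-0000-0000-000000000000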
@@ -202,39 +248,87 @@ echo "Building conf-test-azure.bicep to ${ARM_TEMPLATE_FILE} ..."
 az bicep build --file conf-test-azure.bicep --outfile "${ARM_TEMPLATE_FILE}"

 echo "Creating azure deployment ${DEPLOY_NAME} in ${DEPLOY_LOCATION} and resource prefix ${PREFIX}-* ..."
-az deployment sub create --name "${DEPLOY_NAME}" --location "${DEPLOY_LOCATION}" --template-file "${ARM_TEMPLATE_FILE}" -p namePrefix="${PREFIX}" -p adminId="${ADMIN_ID}" -p certAuthSpId="${CERT_AUTH_SP_ID}" -p sdkAuthSpId="${SDK_AUTH_SP_ID}" -p rgLocation="${DEPLOY_LOCATION}"
+az deployment sub create --name "${DEPLOY_NAME}" --location "${DEPLOY_LOCATION}" --template-file "${ARM_TEMPLATE_FILE}" -p namePrefix="${PREFIX}" -p adminId="${ADMIN_ID}" -p certAuthSpId="${CERT_AUTH_SP_ID}" -p sdkAuthSpId="${SDK_AUTH_SP_ID}" -p rgLocation="${DEPLOY_LOCATION}" -p sqlServerAdminPassword="${SQL_SERVER_ADMIN_PASSWORD}"

 echo "Sleeping for 5s to allow created ARM deployment info to propagate to query endpoints ..."
 sleep 5s

 # Query the deployed resource names from the bicep deployment outputs
-RESOURCE_GROUP_NAME="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.confTestRgName.value" | sed -E 's/[[:space:]]|\"//g')"
+echo "Querying deployed resource names ..."
+RESOURCE_GROUP_NAME="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.confTestRgName.value" --output tsv)"
 echo "INFO: RESOURCE_GROUP_NAME=${RESOURCE_GROUP_NAME}"
-SERVICE_BUS_NAME="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.serviceBusName.value" | sed -E 's/[[:space:]]|\"//g')"
+SERVICE_BUS_NAME="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.serviceBusName.value" --output tsv)"
 echo "INFO: SERVICE_BUS_NAME=${SERVICE_BUS_NAME}"
-KEYVAULT_NAME="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.keyVaultName.value" | sed -E 's/[[:space:]]|\"//g')"
+KEYVAULT_NAME="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.keyVaultName.value" --output tsv)"
 echo "INFO: KEYVAULT_NAME=${KEYVAULT_NAME}"
-STORAGE_NAME="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.storageName.value" | sed -E 's/[[:space:]]|\"//g')"
+STORAGE_NAME="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.storageName.value" --output tsv)"
 echo "INFO: STORAGE_NAME=${STORAGE_NAME}"
-COSMOS_DB_NAME="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.cosmosDbName.value" | sed -E 's/[[:space:]]|\"//g')"
+COSMOS_DB_NAME="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.cosmosDbName.value" --output tsv)"
 echo "INFO: COSMOS_DB_NAME=${COSMOS_DB_NAME}"
-COSMOS_DB_SQL_NAME="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.cosmosDbSqlName.value" | sed -E 's/[[:space:]]|\"//g')"
+COSMOS_DB_SQL_NAME="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.cosmosDbSqlName.value" --output tsv)"
 echo "INFO: COSMOS_DB_SQL_NAME=${COSMOS_DB_SQL_NAME}"
-COSMOS_DB_CONTAINER_NAME="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.cosmosDbSqlContainerName.value" | sed -E 's/[[:space:]]|\"//g')"
+COSMOS_DB_CONTAINER_NAME="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.cosmosDbSqlContainerName.value" --output tsv)"
 echo "INFO: COSMOS_DB_CONTAINER_NAME=${COSMOS_DB_CONTAINER_NAME}"
-EVENT_GRID_TOPIC_NAME="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.eventGridTopicName.value" | sed -E 's/[[:space:]]|\"//g')"
+EVENT_GRID_TOPIC_NAME="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.eventGridTopicName.value" --output tsv)"
 echo "INFO: EVENT_GRID_TOPIC_NAME=${EVENT_GRID_TOPIC_NAME}"
-EVENT_HUBS_NAMESPACE="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.eventHubsNamespace.value" | sed -E 's/[[:space:]]|\"//g')"
+EVENT_HUBS_NAMESPACE="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.eventHubsNamespace.value" --output tsv)"
 echo "INFO: EVENT_HUBS_NAMESPACE=${EVENT_HUBS_NAMESPACE}"
-EVENT_HUB_BINDINGS_NAME="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.eventHubBindingsName.value" | sed -E 's/[[:space:]]|\"//g')"
+EVENT_HUB_BINDINGS_NAME="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.eventHubBindingsName.value" --output tsv)"
 echo "INFO: EVENT_HUB_BINDINGS_NAME=${EVENT_HUB_BINDINGS_NAME}"
-EVENT_HUB_BINDINGS_POLICY_NAME="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.eventHubBindingsPolicyName.value" | sed -E 's/[[:space:]]|\"//g')"
+EVENT_HUB_BINDINGS_POLICY_NAME="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.eventHubBindingsPolicyName.value" --output tsv)"
 echo "INFO: EVENT_HUB_BINDINGS_POLICY_NAME=${EVENT_HUB_BINDINGS_POLICY_NAME}"
-EVENT_HUBS_BINDINGS_CONSUMER_GROUP_NAME="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.eventHubBindingsConsumerGroupName.value" | sed -E 's/[[:space:]]|\"//g')"
+EVENT_HUBS_BINDINGS_CONSUMER_GROUP_NAME="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.eventHubBindingsConsumerGroupName.value" --output tsv)"
 echo "INFO: EVENT_HUBS_BINDINGS_CONSUMER_GROUP_NAME=${EVENT_HUBS_BINDINGS_CONSUMER_GROUP_NAME}"
-EVENT_HUB_PUBSUB_NAME="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.eventHubPubsubName.value" | sed -E 's/[[:space:]]|\"//g')"
+EVENT_HUB_PUBSUB_NAME="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.eventHubPubsubName.value" --output tsv)"
 echo "INFO: EVENT_HUB_PUBSUB_NAME=${EVENT_HUB_PUBSUB_NAME}"
-EVENT_HUB_PUBSUB_POLICY_NAME="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.eventHubPubsubPolicyName.value" | sed -E 's/[[:space:]]|\"//g')"
+EVENT_HUB_PUBSUB_POLICY_NAME="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.eventHubPubsubPolicyName.value" --output tsv)"
 echo "INFO: EVENT_HUB_PUBSUB_POLICY_NAME=${EVENT_HUB_PUBSUB_POLICY_NAME}"
-EVENT_HUBS_PUBSUB_CONSUMER_GROUP_NAME="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.eventHubPubsubConsumerGroupName.value" | sed -E 's/[[:space:]]|\"//g')"
+EVENT_HUBS_PUBSUB_CONSUMER_GROUP_NAME="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.eventHubPubsubConsumerGroupName.value" --output tsv)"
 echo "INFO: EVENT_HUBS_PUBSUB_CONSUMER_GROUP_NAME=${EVENT_HUBS_PUBSUB_CONSUMER_GROUP_NAME}"
+IOT_HUB_NAME="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.iotHubName.value" --output tsv)"
+echo "INFO: IOT_HUB_NAME=${IOT_HUB_NAME}"
+IOT_HUB_BINDINGS_CONSUMER_GROUP_FULLNAME="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.iotHubBindingsConsumerGroupName.value" --output tsv)"
+echo "INFO: IOT_HUB_BINDINGS_CONSUMER_GROUP_FULLNAME=${IOT_HUB_BINDINGS_CONSUMER_GROUP_FULLNAME}"
+IOT_HUB_PUBSUB_CONSUMER_GROUP_FULLNAME="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.iotHubPubsubConsumerGroupName.value" --output tsv)"
+echo "INFO: IOT_HUB_PUBSUB_CONSUMER_GROUP_FULLNAME=${IOT_HUB_PUBSUB_CONSUMER_GROUP_FULLNAME}"
+SQL_SERVER_NAME="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.sqlServerName.value" --output tsv)"
+echo "INFO: SQL_SERVER_NAME=${SQL_SERVER_NAME}"
+SQL_SERVER_ADMIN_NAME="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.sqlServerAdminName.value" --output tsv)"
+echo "INFO: SQL_SERVER_ADMIN_NAME=${SQL_SERVER_ADMIN_NAME}"

 # Give the service principal used by the SDK write access to the entire resource group
 MSYS_NO_PATHCONV=1 az role assignment create --assignee "${SDK_AUTH_SP_ID}" --role "Contributor" --scope "/subscriptions/${SUB_ID}/resourceGroups/${RESOURCE_GROUP_NAME}"

+# Create Identity if it doesn't exist
+# We use the standard name "azure-managed-identity" for the identity so we can easily query for it later using the CLI
+if az identity show -g ${RESOURCE_GROUP_NAME} -n azure-managed-identity --query id -otsv; then
+    echo "Reusing Identity azure-managed-identity"
+    MANAGED_IDENTITY_SP="$(az identity show -g ${RESOURCE_GROUP_NAME} -n azure-managed-identity --query principalId -otsv)"
+else
+    echo "Creating Identity azure-managed-identity"
+    MANAGED_IDENTITY_SP="$(az identity create -g ${RESOURCE_GROUP_NAME} -n azure-managed-identity --location ${DEPLOY_LOCATION} --query principalId -otsv)"
+    # This identity can later be injected into services for managed identity authentication
+fi
+
+MANAGED_IDENTITY_ID="$(az identity show -g ${RESOURCE_GROUP_NAME} -n azure-managed-identity --query id -otsv)"
+echo "Created Identity ${MANAGED_IDENTITY_ID}"
+
+# Example to inject the identity into a supported Azure service (may be necessary in integration tests):
+# az container create -g ${RESOURCE_GROUP_NAME} -n testcontainer --image golang:latest --command-line "tail -f /dev/null" --assign-identity $MANAGED_IDENTITY_ID
+
+echo "Granting identity azure-managed-identity permissions to access the Key Vault ${KEYVAULT_NAME}"
+az keyvault set-policy --name "${KEYVAULT_NAME}" -g "${RESOURCE_GROUP_NAME}" --secret-permissions get list --object-id "${MANAGED_IDENTITY_SP}"
+# Other tests verifying managed identity will want to grant permission like so:
+# MSYS_NO_PATHCONV=1 az role assignment create --assignee-object-id "${MANAGED_IDENTITY_SP}" --assignee-principal-type ServicePrincipal --role "Azure Service Bus Data Owner" --scope "/subscriptions/${SUB_ID}/resourceGroups/${RESOURCE_GROUP_NAME}/providers/Microsoft.ServiceBus/namespaces/${SERVICE_BUS_NAME}"
+
+# Creating service principal for service principal authentication with KeyVault
+AKV_SPAUTH_SP_NAME="${PREFIX}-akv-spauth-conf-test-sp"
+echo "Creating service principal ${AKV_SPAUTH_SP_NAME} for use with KeyVault ${KEYVAULT_NAME}"
+{ read AKV_SPAUTH_SP_CLIENT_ID ; read AKV_SPAUTH_SP_CLIENT_SECRET ; } < <(az ad sp create-for-rbac --name ${AKV_SPAUTH_SP_NAME} --skip-assignment --years 1 --query "[appId,password]" -otsv)
+
+# Give the service principal read access to the KeyVault Secrets
+AKV_SPAUTH_SP_OBJECTID="$(az ad sp show --id ${AKV_SPAUTH_SP_CLIENT_ID} --query objectId -otsv)"
+az keyvault set-policy --name "${KEYVAULT_NAME}" -g "${RESOURCE_GROUP_NAME}" --secret-permissions get list --object-id "${AKV_SPAUTH_SP_OBJECTID}"
+
 # Update service principal credentials and roles for created resources
 echo "Creating ${CERT_AUTH_SP_NAME} certificate ..."
@@ -246,6 +340,16 @@ EVENT_GRID_SCOPE="/subscriptions/${SUB_ID}/resourceGroups/${RESOURCE_GROUP_NAME}
 echo "Assigning \"EventGrid EventSubscription Contributor\" role to ${SDK_AUTH_SP_NAME} in scope \"${EVENT_GRID_SCOPE}\"..."
 MSYS_NO_PATHCONV=1 az role assignment create --assignee "${SDK_AUTH_SP_ID}" --role "EventGrid EventSubscription Contributor" --scope "${EVENT_GRID_SCOPE}"

+# Add Contributor role to the SDK auth Service Principal so it can add devices to the Azure IoT Hub for tests.
+IOT_HUB_SCOPE="/subscriptions/${SUB_ID}/resourceGroups/${RESOURCE_GROUP_NAME}/providers/Microsoft.Devices/IotHubs/${IOT_HUB_NAME}"
+echo "Assigning \"Contributor\" role to ${SDK_AUTH_SP_NAME} in scope \"${IOT_HUB_SCOPE}\"..."
+MSYS_NO_PATHCONV=1 az role assignment create --assignee "${SDK_AUTH_SP_ID}" --role "Contributor" --scope "${IOT_HUB_SCOPE}"
+
+# Add SQL Server Contributor role to the SDK auth Service Principal so it can update firewall rules to run sqlserver state conformance tests.
+SQL_SERVER_SCOPE="/subscriptions/${SUB_ID}/resourceGroups/${RESOURCE_GROUP_NAME}/providers/Microsoft.Sql/servers/${SQL_SERVER_NAME}"
+echo "Assigning \"Contributor\" role to ${SDK_AUTH_SP_NAME} in scope \"${SQL_SERVER_SCOPE}\"..."
+MSYS_NO_PATHCONV=1 az role assignment create --assignee "${SDK_AUTH_SP_ID}" --role "Contributor" --scope "${SQL_SERVER_SCOPE}"
+
 ##==============================================================================
 ##
 ## Create output files for environment config and teardown of conformance tests
@@ -270,10 +374,17 @@ echo "Purging key vault ${KEYVAULT_NAME} ..."
 az keyvault purge --name "${KEYVAULT_NAME}"
 echo "Deleting service principal ${CERT_AUTH_SP_NAME} ..."
 az ad sp delete --id "${CERT_AUTH_SP_ID}"
-echo "Deleting service principal ${SDK_AUTH_SP_NAME} ..."
-az ad sp delete --id "${SDK_AUTH_SP_ID}"
-echo "INFO: ${PREFIX}-teardown-conf-test completed."
+echo "Deleting service principal ${AKV_SPAUTH_SP_NAME} ..."
+az ad sp delete --id "${AKV_SPAUTH_SP_OBJECTID}"
 EOF

+# Only remove the test runner Service Principal if it was not pre-existing
+if [[ -z ${CREDENTIALS_PATH} ]]; then
+    echo "echo \"Deleting service principal ${SDK_AUTH_SP_NAME} ...\"" >> "${TEARDOWN_SCRIPT_NAME}"
+    echo "az ad sp delete --id \"${SDK_AUTH_SP_ID}\"" >> "${TEARDOWN_SCRIPT_NAME}"
+fi
+echo "echo \"INFO: ${PREFIX}-teardown-conf-test completed.\"" >> "${TEARDOWN_SCRIPT_NAME}"
+
 chmod +x "${TEARDOWN_SCRIPT_NAME}"
 echo "INFO: Created ${TEARDOWN_SCRIPT_NAME}."
@@ -318,14 +429,21 @@ echo export ${KEYVAULT_CERT_NAME}=\"${KEYVAULT_CERT_FILE}\" >> "${ENV_CONFIG_FIL
 echo export ${KEYVAULT_NAME_VAR_NAME}=\"${KEYVAULT_NAME}\" >> "${ENV_CONFIG_FILENAME}"
 az keyvault secret set --name "${KEYVAULT_NAME_VAR_NAME}" --vault-name "${KEYVAULT_NAME}" --value "${KEYVAULT_NAME}"

-KEYVAULT_TENANT_ID="$(az ad sp list --display-name "${CERT_AUTH_SP_NAME}" --query "[].appOwnerTenantId" | grep \" | sed -E 's/[[:space:]]|\"//g')"
+KEYVAULT_TENANT_ID="$(az ad sp list --display-name "${CERT_AUTH_SP_NAME}" --query "[].appOwnerTenantId" --output tsv)"
 echo export ${KEYVAULT_TENANT_ID_VAR_NAME}=\"${KEYVAULT_TENANT_ID}\" >> "${ENV_CONFIG_FILENAME}"
 az keyvault secret set --name "${KEYVAULT_TENANT_ID_VAR_NAME}" --vault-name "${KEYVAULT_NAME}" --value "${KEYVAULT_TENANT_ID}"

-KEYVAULT_CLIENT_ID="$(az ad sp list --display-name "${CERT_AUTH_SP_NAME}" --query "[].appId" | grep \" | sed -E 's/[[:space:]]|\"//g')"
+KEYVAULT_CLIENT_ID="$(az ad sp list --display-name "${CERT_AUTH_SP_NAME}" --query "[].appId" --output tsv)"
 echo export ${KEYVAULT_CLIENT_ID_VAR_NAME}=\"${KEYVAULT_CLIENT_ID}\" >> "${ENV_CONFIG_FILENAME}"
 az keyvault secret set --name "${KEYVAULT_CLIENT_ID_VAR_NAME}" --vault-name "${KEYVAULT_NAME}" --value "${KEYVAULT_CLIENT_ID}"

+KEYVAULT_SERVICE_PRINCIPAL_CLIENT_ID=${AKV_SPAUTH_SP_CLIENT_ID}
+echo export ${KEYVAULT_SERVICE_PRINCIPAL_CLIENT_ID_VAR_NAME}=\"${KEYVAULT_SERVICE_PRINCIPAL_CLIENT_ID}\" >> "${ENV_CONFIG_FILENAME}"
+az keyvault secret set --name "${KEYVAULT_SERVICE_PRINCIPAL_CLIENT_ID_VAR_NAME}" --vault-name "${KEYVAULT_NAME}" --value "${KEYVAULT_SERVICE_PRINCIPAL_CLIENT_ID}"
+
+KEYVAULT_SERVICE_PRINCIPAL_CLIENT_SECRET=${AKV_SPAUTH_SP_CLIENT_SECRET}
+echo export ${KEYVAULT_SERVICE_PRINCIPAL_CLIENT_SECRET_VAR_NAME}=\"${KEYVAULT_SERVICE_PRINCIPAL_CLIENT_SECRET}\" >> "${ENV_CONFIG_FILENAME}"
+az keyvault secret set --name "${KEYVAULT_SERVICE_PRINCIPAL_CLIENT_SECRET_VAR_NAME}" --vault-name "${KEYVAULT_NAME}" --value "${KEYVAULT_SERVICE_PRINCIPAL_CLIENT_SECRET}"
 # ------------------------------------
 # Populate Blob Storage test settings
 # ------------------------------------
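Each setting is exported to the local .rc file and mirrored into the Key Vault as a secret of the same name. A stored value can be read back with the CLI, for example (vault name hypothetical):

# Read a conformance-test setting back out of the Key Vault.
az keyvault secret show --vault-name myprefix-conf-test-kv \
    --name AzureKeyVaultSecretStoreClientId --query value --output tsv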
@@ -342,7 +460,7 @@ STORAGE_QUEUE_NAME="${PREFIX}-conf-test-queue"
 echo export ${STORAGE_QUEUE_VAR_NAME}=\"${STORAGE_QUEUE_NAME}\" >> "${ENV_CONFIG_FILENAME}"
 az keyvault secret set --name "${STORAGE_QUEUE_VAR_NAME}" --vault-name "${KEYVAULT_NAME}" --value "${STORAGE_QUEUE_NAME}"

-STORAGE_ACCESS_KEY="$(az storage account keys list --account-name "${STORAGE_NAME}" --query "[?keyName=='key1'].value" | grep \" | sed -E 's/[[:space:]]|\"//g')"
+STORAGE_ACCESS_KEY="$(az storage account keys list --account-name "${STORAGE_NAME}" --query "[?keyName=='key1'].value" --output tsv)"
 echo export ${STORAGE_ACCESS_KEY_VAR_NAME}=\"${STORAGE_ACCESS_KEY}\" >> "${ENV_CONFIG_FILENAME}"
 az keyvault secret set --name "${STORAGE_ACCESS_KEY_VAR_NAME}" --vault-name "${KEYVAULT_NAME}" --value "${STORAGE_ACCESS_KEY}"
@@ -358,11 +476,11 @@ az keyvault secret set --name "${COSMOS_DB_VAR_NAME}" --vault-name "${KEYVAULT_N
 echo export ${COSMOS_DB_COLLECTION_VAR_NAME}=\"${COSMOS_DB_CONTAINER_NAME}\" >> "${ENV_CONFIG_FILENAME}"
 az keyvault secret set --name "${COSMOS_DB_COLLECTION_VAR_NAME}" --vault-name "${KEYVAULT_NAME}" --value "${COSMOS_DB_CONTAINER_NAME}"

-COSMOS_DB_URL="$(az cosmosdb list --query "[?name=='${COSMOS_DB_NAME}'].documentEndpoint" | grep \" | sed -E 's/[[:space:]]|\"//g')"
+COSMOS_DB_URL="$(az cosmosdb list --query "[?name=='${COSMOS_DB_NAME}'].documentEndpoint" --output tsv)"
 echo export ${COSMOS_DB_URL_VAR_NAME}=\"${COSMOS_DB_URL}\" >> "${ENV_CONFIG_FILENAME}"
 az keyvault secret set --name "${COSMOS_DB_URL_VAR_NAME}" --vault-name "${KEYVAULT_NAME}" --value "${COSMOS_DB_URL}"

-COSMOS_DB_MASTER_KEY="$(az cosmosdb keys list --name "${COSMOS_DB_NAME}" --resource-group "${RESOURCE_GROUP_NAME}" --query "primaryMasterKey" | sed -E 's/[[:space:]]|\"//g')"
+COSMOS_DB_MASTER_KEY="$(az cosmosdb keys list --name "${COSMOS_DB_NAME}" --resource-group "${RESOURCE_GROUP_NAME}" --query "primaryMasterKey" --output tsv)"
 echo export ${COSMOS_DB_MASTER_KEY_VAR_NAME}=\"${COSMOS_DB_MASTER_KEY}\" >> "${ENV_CONFIG_FILENAME}"
 az keyvault secret set --name "${COSMOS_DB_MASTER_KEY_VAR_NAME}" --vault-name "${KEYVAULT_NAME}" --value "${COSMOS_DB_MASTER_KEY}"
@@ -371,11 +489,11 @@ az keyvault secret set --name "${COSMOS_DB_MASTER_KEY_VAR_NAME}" --vault-name "$
 # ----------------------------------
 echo "Configuring Event Grid test settings ..."

-EVENT_GRID_ACCESS_KEY="$(az eventgrid topic key list --name "${EVENT_GRID_TOPIC_NAME}" --resource-group "${RESOURCE_GROUP_NAME}" --query "key1" | sed -E 's/[[:space:]]|\"//g')"
+EVENT_GRID_ACCESS_KEY="$(az eventgrid topic key list --name "${EVENT_GRID_TOPIC_NAME}" --resource-group "${RESOURCE_GROUP_NAME}" --query "key1" --output tsv)"
 echo export ${EVENT_GRID_ACCESS_KEY_VAR_NAME}=\"${EVENT_GRID_ACCESS_KEY}\" >> "${ENV_CONFIG_FILENAME}"
 az keyvault secret set --name "${EVENT_GRID_ACCESS_KEY_VAR_NAME}" --vault-name "${KEYVAULT_NAME}" --value "${EVENT_GRID_ACCESS_KEY}"

-SDK_AUTH_SP_APP_ID="$(az ad sp list --display-name "${SDK_AUTH_SP_NAME}" --query "[].appId" | grep \" | sed -E 's/[[:space:]]|\"//g')"
+SDK_AUTH_SP_APP_ID="$(az ad sp list --display-name "${SDK_AUTH_SP_NAME}" --query "[].appId" --output tsv)"
 echo export ${EVENT_GRID_CLIENT_ID_VAR_NAME}=\"${SDK_AUTH_SP_APP_ID}\" >> "${ENV_CONFIG_FILENAME}"
 az keyvault secret set --name "${EVENT_GRID_CLIENT_ID_VAR_NAME}" --vault-name "${KEYVAULT_NAME}" --value "${SDK_AUTH_SP_APP_ID}"
@@ -398,7 +516,7 @@ az keyvault secret set --name "${EVENT_GRID_SUB_ID_VAR_NAME}" --vault-name "${KE
 echo export ${EVENT_GRID_TENANT_ID_VAR_NAME}=\"${TENANT_ID}\" >> "${ENV_CONFIG_FILENAME}"
 az keyvault secret set --name "${EVENT_GRID_TENANT_ID_VAR_NAME}" --vault-name "${KEYVAULT_NAME}" --value "${TENANT_ID}"

-EVENT_GRID_TOPIC_ENDPOINT="$(az eventgrid topic list --query "[?name=='${EVENT_GRID_TOPIC_NAME}'].endpoint" | grep \" | sed -E 's/[[:space:]]|\"//g')"
+EVENT_GRID_TOPIC_ENDPOINT="$(az eventgrid topic list --query "[?name=='${EVENT_GRID_TOPIC_NAME}'].endpoint" --output tsv)"
 echo export ${EVENT_GRID_TOPIC_ENDPOINT_VAR_NAME}=\"${EVENT_GRID_TOPIC_ENDPOINT}\" >> "${ENV_CONFIG_FILENAME}"
 az keyvault secret set --name "${EVENT_GRID_TOPIC_ENDPOINT_VAR_NAME}" --vault-name "${KEYVAULT_NAME}" --value "${EVENT_GRID_TOPIC_ENDPOINT}"
@@ -406,16 +524,33 @@ az keyvault secret set --name "${EVENT_GRID_TOPIC_ENDPOINT_VAR_NAME}" --vault-na
 # Populate Service Bus test settings
 # -----------------------------------
 echo "Configuring Service Bus test settings ..."
-SERVICE_BUS_CONNECTION_STRING="$(az servicebus namespace authorization-rule keys list --name RootManageSharedAccessKey --namespace-name "${SERVICE_BUS_NAME}" --resource-group "${RESOURCE_GROUP_NAME}" --query "primaryConnectionString" | sed -E 's/[[:space:]]|\"//g')"
+SERVICE_BUS_CONNECTION_STRING="$(az servicebus namespace authorization-rule keys list --name RootManageSharedAccessKey --namespace-name "${SERVICE_BUS_NAME}" --resource-group "${RESOURCE_GROUP_NAME}" --query "primaryConnectionString" --output tsv)"
 echo export ${SERVICE_BUS_CONNECTION_STRING_VAR_NAME}=\"${SERVICE_BUS_CONNECTION_STRING}\" >> "${ENV_CONFIG_FILENAME}"
 az keyvault secret set --name "${SERVICE_BUS_CONNECTION_STRING_VAR_NAME}" --vault-name "${KEYVAULT_NAME}" --value "${SERVICE_BUS_CONNECTION_STRING}"

+# ----------------------------------
+# Populate SQL Server test settings
+# ----------------------------------
+echo "Configuring SQL Server test settings ..."
+
+# Not specific to SQL server, but this is currently only consumed by setting SQL server firewall rules
+echo export ${RESOURCE_GROUP_NAME_VAR_NAME}=\"${RESOURCE_GROUP_NAME}\" >> "${ENV_CONFIG_FILENAME}"
+az keyvault secret set --name "${RESOURCE_GROUP_NAME_VAR_NAME}" --vault-name "${KEYVAULT_NAME}" --value "${RESOURCE_GROUP_NAME}"
+
+echo export ${SQL_SERVER_NAME_VAR_NAME}=\"${SQL_SERVER_NAME}\" >> "${ENV_CONFIG_FILENAME}"
+az keyvault secret set --name "${SQL_SERVER_NAME_VAR_NAME}" --vault-name "${KEYVAULT_NAME}" --value "${SQL_SERVER_NAME}"
+
+# Note that `az sql db show-connection-string` does not currently support a `go` --client type, so we construct our own here.
+SQL_SERVER_CONNECTION_STRING="Server=${SQL_SERVER_NAME}.database.windows.net;port=1433;User ID=${SQL_SERVER_ADMIN_NAME};Password=${SQL_SERVER_ADMIN_PASSWORD};Encrypt=true;"
+echo export ${SQL_SERVER_CONNECTION_STRING_VAR_NAME}=\"${SQL_SERVER_CONNECTION_STRING}\" >> "${ENV_CONFIG_FILENAME}"
+az keyvault secret set --name "${SQL_SERVER_CONNECTION_STRING_VAR_NAME}" --vault-name "${KEYVAULT_NAME}" --value "${SQL_SERVER_CONNECTION_STRING}"
+
 # ----------------------------------
 # Populate Event Hubs test settings
 # ----------------------------------
 echo "Configuring Event Hub test settings ..."

-EVENT_HUBS_BINDINGS_CONNECTION_STRING="$(az eventhubs eventhub authorization-rule keys list --name "${EVENT_HUB_BINDINGS_POLICY_NAME}" --namespace-name "${EVENT_HUBS_NAMESPACE}" --eventhub-name "${EVENT_HUB_BINDINGS_NAME}" --resource-group "${RESOURCE_GROUP_NAME}" --query "primaryConnectionString" | sed -E 's/[[:space:]]|\"//g')"
+EVENT_HUBS_BINDINGS_CONNECTION_STRING="$(az eventhubs eventhub authorization-rule keys list --name "${EVENT_HUB_BINDINGS_POLICY_NAME}" --namespace-name "${EVENT_HUBS_NAMESPACE}" --eventhub-name "${EVENT_HUB_BINDINGS_NAME}" --resource-group "${RESOURCE_GROUP_NAME}" --query "primaryConnectionString" --output tsv)"
 echo export ${EVENT_HUBS_BINDINGS_CONNECTION_STRING_VAR_NAME}=\"${EVENT_HUBS_BINDINGS_CONNECTION_STRING}\" >> "${ENV_CONFIG_FILENAME}"
 az keyvault secret set --name "${EVENT_HUBS_BINDINGS_CONNECTION_STRING_VAR_NAME}" --vault-name "${KEYVAULT_NAME}" --value "${EVENT_HUBS_BINDINGS_CONNECTION_STRING}"
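With placeholder values substituted, the constructed go-mssqldb-style connection string has this shape (all values below are made up for illustration):

# Server=myprefix-conf-test-sql.database.windows.net;port=1433;User ID=myprefix-conf-test-sql-admin;Password=<generated>;Encrypt=true;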
@@ -437,7 +572,32 @@ EVENT_HUBS_PUBSUB_CONTAINER_NAME="${PREFIX}-eventhubs-pubsub-container"
 echo export ${EVENT_HUBS_PUBSUB_CONTAINER_VAR_NAME}=\"${EVENT_HUBS_PUBSUB_CONTAINER_NAME}\" >> "${ENV_CONFIG_FILENAME}"
 az keyvault secret set --name "${EVENT_HUBS_PUBSUB_CONTAINER_VAR_NAME}" --vault-name "${KEYVAULT_NAME}" --value "${EVENT_HUBS_PUBSUB_CONTAINER_NAME}"

+# ----------------------------------
+# Populate IoT Hub test settings
+# ----------------------------------
+echo "Configuring IoT Hub test settings ..."
+
+echo export ${IOT_HUB_NAME_VAR_NAME}=\"${IOT_HUB_NAME}\" >> "${ENV_CONFIG_FILENAME}"
+az keyvault secret set --name "${IOT_HUB_NAME_VAR_NAME}" --vault-name "${KEYVAULT_NAME}" --value "${IOT_HUB_NAME}"
+
+IOT_HUB_EVENT_HUB_CONNECTION_STRING="$(az iot hub connection-string show -n ${IOT_HUB_NAME} --default-eventhub --policy-name service --query connectionString --output tsv)"
+echo export ${IOT_HUB_EVENT_HUB_CONNECTION_STRING_VAR_NAME}=\"${IOT_HUB_EVENT_HUB_CONNECTION_STRING}\" >> "${ENV_CONFIG_FILENAME}"
+az keyvault secret set --name "${IOT_HUB_EVENT_HUB_CONNECTION_STRING_VAR_NAME}" --vault-name "${KEYVAULT_NAME}" --value "${IOT_HUB_EVENT_HUB_CONNECTION_STRING}"
+
+IOT_HUB_BINDINGS_CONSUMER_GROUP_NAME="$(basename ${IOT_HUB_BINDINGS_CONSUMER_GROUP_FULLNAME})"
+echo export ${IOT_HUB_BINDINGS_CONSUMER_GROUP_VAR_NAME}=\"${IOT_HUB_BINDINGS_CONSUMER_GROUP_NAME}\" >> "${ENV_CONFIG_FILENAME}"
+az keyvault secret set --name "${IOT_HUB_BINDINGS_CONSUMER_GROUP_VAR_NAME}" --vault-name "${KEYVAULT_NAME}" --value "${IOT_HUB_BINDINGS_CONSUMER_GROUP_NAME}"
+
+IOT_HUB_PUBSUB_CONSUMER_GROUP_NAME="$(basename ${IOT_HUB_PUBSUB_CONSUMER_GROUP_FULLNAME})"
+echo export ${IOT_HUB_PUBSUB_CONSUMER_GROUP_VAR_NAME}=\"${IOT_HUB_PUBSUB_CONSUMER_GROUP_NAME}\" >> "${ENV_CONFIG_FILENAME}"
+az keyvault secret set --name "${IOT_HUB_PUBSUB_CONSUMER_GROUP_VAR_NAME}" --vault-name "${KEYVAULT_NAME}" --value "${IOT_HUB_PUBSUB_CONSUMER_GROUP_NAME}"
+
 # ---------------------------
 # Display completion message
 # ---------------------------
 echo "INFO: setup-azure-conf-test completed."
 echo "INFO: Remember to \`source ${ENV_CONFIG_FILENAME}\` before running local conformance tests."
-echo "INFO: ${AZURE_CREDENTIALS_FILENAME} contains the repository secret to set to run the GitHub conformance test workflow."
+if [[ -z ${CREDENTIALS_PATH} ]]; then
+    echo "INFO: ${AZURE_CREDENTIALS_FILENAME} contains the repository secret to set to run the GitHub conformance test workflow."
+fi
 echo "INFO: To teardown the conformance test resources, run ${TEARDOWN_SCRIPT_NAME}."
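The bicep module returns each consumer group as a child-resource name of the form '<hub>/events/<cg>', so the script uses basename to keep only the final path segment. For example:

# basename strips everything up to the last '/', leaving the consumer group name.
IOT_HUB_BINDINGS_CONSUMER_GROUP_FULLNAME="myprefix-conf-test-iothub/events/bindings-cg"
basename "${IOT_HUB_BINDINGS_CONSUMER_GROUP_FULLNAME}"    # prints: bindings-cg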
conformance.yml

@@ -6,6 +6,8 @@
 name: Components Conformance Tests

 on:
+  repository_dispatch:
+    types: [conformance-test]
   workflow_dispatch:
   schedule:
     - cron: '*/30 * * * *'
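With the repository_dispatch trigger in place, the workflow can also be started programmatically. A sketch using the GitHub CLI (repository slug is a placeholder; the bot workflow further below fires this same event type when a maintainer comments /ok-to-test on a pull request):

# Fire a repository_dispatch event of the type the workflow listens for.
gh api repos/<owner>/<repo>/dispatches -f event_type=conformance-test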
@@ -58,6 +60,7 @@ jobs:
       - pubsub.mqtt-vernemq
       - pubsub.hazelcast
       - pubsub.rabbitmq
+      - pubsub.in-memory
       - secretstores.kubernetes
       - secretstores.localenv
       - secretstores.localfile
@@ -90,21 +93,31 @@ jobs:
           #
           # Only list the secrets you need for the component.
           CRON_COMPONENTS=$(yq -I0 --tojson eval - << EOF
-          - component: state.cosmosdb
+          - component: state.azure.cosmosdb
            required-secrets: AzureCosmosDBMasterKey,AzureCosmosDBUrl,AzureCosmosDB,AzureCosmosDBCollection
+          - component: state.azure.sql
+            required-secrets: AzureResourceGroupName, AzureSqlServerName, AzureSqlServerConnectionString
+          - component: state.azure.tablestorage
+            required-secrets: AzureBlobStorageAccessKey,AzureBlobStorageAccount
           - component: pubsub.azure.eventhubs
             required-secrets: AzureEventHubsPubsubConnectionString,AzureEventHubsPubsubConsumerGroup,AzureBlobStorageAccessKey,AzureBlobStorageAccount,AzureEventHubsPubsubContainer
           - component: pubsub.azure.servicebus
             required-secrets: AzureServiceBusConnectionString
           - component: bindings.azure.blobstorage
             required-secrets: AzureBlobStorageAccessKey,AzureBlobStorageAccount,AzureBlobStorageContainer
           - component: bindings.azure.eventgrid
             required-secrets: AzureEventGridNgrokToken,AzureEventGridAccessKey,AzureEventGridTopicEndpoint,AzureEventGridScope,AzureEventGridClientSecret,AzureEventGridClientId,AzureEventGridTenantId,AzureEventGridSubscriptionId
           - component: bindings.azure.eventhubs
             required-secrets: AzureEventHubsBindingsConnectionString,AzureEventHubsBindingsConsumerGroup,AzureBlobStorageAccessKey,AzureBlobStorageAccount,AzureEventHubsBindingsContainer
           - component: bindings.azure.servicebusqueues
             required-secrets: AzureServiceBusConnectionString
           - component: bindings.azure.storagequeues
             required-secrets: AzureBlobStorageAccessKey,AzureBlobStorageAccount,AzureBlobStorageQueue
-          - component: secretstores.azure.keyvault
+          - component: secretstores.azure.keyvault.certificate
             required-secrets: AzureKeyVaultName,AzureKeyVaultSecretStoreTenantId,AzureKeyVaultSecretStoreClientId
             required-certs: AzureKeyVaultSecretStoreCert
+          - component: secretstores.azure.keyvault.serviceprincipal
+            required-secrets: AzureKeyVaultName,AzureKeyVaultSecretStoreTenantId,AzureKeyVaultSecretStoreServicePrincipalClientId,AzureKeyVaultSecretStoreServicePrincipalClientSecret
           EOF
           )
           echo "::set-output name=cron-components::$CRON_COMPONENTS"
@@ -127,6 +140,21 @@ jobs:
         component: ${{ fromJson(needs.generate-matrix.outputs.pr-components) }}
         include: ${{ fromJson(needs.generate-matrix.outputs.cron-components) }}
     steps:
+    - name: Parse repository_dispatch payload
+      if: github.event_name == 'repository_dispatch'
+      uses: actions/github-script@v1
+      with:
+        github-token: ${{secrets.DAPR_BOT_TOKEN}}
+        script: |
+          const testPayload = context.payload.client_payload;
+          if (testPayload && testPayload.command == "ok-to-test") {
+            var fs = require('fs');
+            // Set environment variables
+            fs.appendFileSync(process.env.GITHUB_ENV,
+              `CHECKOUT_REPO=${testPayload.pull_head_repo}\n`+
+              `CHECKOUT_REF=${testPayload.pull_head_ref}`);
+          }
+
     - name: Check out code onto GOPATH
       uses: actions/checkout@v2
       with:
@@ -262,6 +290,18 @@ jobs:
         go mod download
         go install gotest.tools/gotestsum@latest

+    - name: Configure Azure SQL Firewall
+      run: |
+        set +e
+        TEST_OUTPUT="$(go test -v -tags=conftests -count=1 -timeout=1m ./tests/conformance -run=TestStateConformance/azure.sql)"
+        echo "Trial run result:\n\"$TEST_OUTPUT\""
+        PUBLIC_IP=$(echo "$TEST_OUTPUT" | grep -Po "Client with IP address '\K[^']*")
+        if [[ -n ${PUBLIC_IP} ]]; then
+          echo "Setting Azure SQL firewall-rule AllowTestRunnerIP to allow $PUBLIC_IP..."
+          az sql server firewall-rule create --resource-group ${{ env.AzureResourceGroupName }} --server ${{ env.AzureSqlServerName }} -n "AllowTestRunnerIP" --start-ip-address "$PUBLIC_IP" --end-ip-address "$PUBLIC_IP"
+        fi
+      if: contains(matrix.component, 'azure.sql')
+
     - name: Run tests
       continue-on-error: true
      run: |
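The step discovers the runner's public IP by letting one trial test fail against the closed firewall and extracting the address from the server's error message. A sketch of the extraction on a made-up error line:

# grep -Po with \K prints only the text after "IP address '", up to the
# closing quote. The sample error message below is illustrative.
TEST_OUTPUT="Login failed. Client with IP address '203.0.113.7' is not allowed to access the server."
echo "$TEST_OUTPUT" | grep -Po "Client with IP address '\K[^']*"    # prints: 203.0.113.7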
@@ -300,6 +340,13 @@ jobs:
       continue-on-error: true
       run: pkill ngrok; cat /tmp/ngrok.log

+    - name: Cleanup Azure SQL Firewall and test DB instance
+      if: contains(matrix.component, 'azure.sql')
+      continue-on-error: true
+      run: |
+        az sql server firewall-rule delete --resource-group ${{ env.AzureResourceGroupName }} --server ${{ env.AzureSqlServerName }} -n "AllowTestRunnerIP"
+        az sql db delete --resource-group ${{ env.AzureResourceGroupName }} --server ${{ env.AzureSqlServerName }} -n dapr --yes
+
     # Download the required certificates into files, and set env var pointing to their names
     - name: Clean up certs
       if: matrix.required-certs != ''
@@ -18,6 +18,33 @@ jobs:
           const isFromPulls = !!payload.issue.pull_request;
           const commentBody = payload.comment.body;

+          if (isFromPulls && commentBody) {
+            if (commentBody.indexOf("/ok-to-test") == 0) {
+              // Get pull request
+              const pull = await github.pulls.get({
+                owner: issue.owner,
+                repo: issue.repo,
+                pull_number: issue.number
+              });
+              if (pull && pull.data) {
+                // Get commit id and repo from pull head
+                const testPayload = {
+                  pull_head_ref: pull.data.head.sha,
+                  pull_head_repo: pull.data.head.repo.full_name,
+                  command: "ok-to-test",
+                  issue: issue,
+                };
+                // Fire repository_dispatch event to trigger conformance test
+                await github.repos.createDispatchEvent({
+                  owner: issue.owner,
+                  repo: issue.repo,
+                  event_type: "conformance-test",
+                  client_payload: testPayload,
+                });
+              }
+            }
+          }
+
           if (!isFromPulls && commentBody && commentBody.indexOf("/assign") == 0) {
             if (!issue.assignees || issue.assignees.length === 0) {
               await github.issues.addAssignees({
@@ -29,4 +56,4 @@ jobs:
             }

             return;
           }
         }
.golangci.yml (479 changed lines)
@ -1,231 +1,248 @@
|
|||
# options for analysis running
|
||||
run:
|
||||
# default concurrency is a available CPU number
|
||||
concurrency: 4
|
||||
|
||||
# timeout for analysis, e.g. 30s, 5m, default is 1m
|
||||
deadline: 5m
|
||||
|
||||
# exit code when at least one issue was found, default is 1
|
||||
issues-exit-code: 1
|
||||
|
||||
# include test files or not, default is true
|
||||
tests: true
|
||||
|
||||
# list of build tags, all linters use it. Default is empty list.
|
||||
#build-tags:
|
||||
# - mytag
|
||||
|
||||
# which dirs to skip: they won't be analyzed;
|
||||
# can use regexp here: generated.*, regexp is applied on full path;
|
||||
# default value is empty list, but next dirs are always skipped independently
|
||||
# from this option's value:
|
||||
# vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
|
||||
skip-dirs:
|
||||
- ^vendor$
|
||||
|
||||
# which files to skip: they will be analyzed, but issues from them
|
||||
# won't be reported. Default value is empty list, but there is
|
||||
# no need to include all autogenerated files, we confidently recognize
|
||||
# autogenerated files. If it's not please let us know.
|
||||
skip-files:
|
||||
# - ".*\\.my\\.go$"
|
||||
# - lib/bad.go
|
||||
|
||||
# output configuration options
|
||||
output:
|
||||
# colored-line-number|line-number|json|tab|checkstyle, default is "colored-line-number"
|
||||
format: tab
|
||||
|
||||
# print lines of code with issue, default is true
|
||||
print-issued-lines: true
|
||||
|
||||
# print linter name in the end of issue text, default is true
|
||||
print-linter-name: true
|
||||
|
||||
|
||||
# all available settings of specific linters
|
||||
linters-settings:
|
||||
errcheck:
|
||||
# report about not checking of errors in type assetions: `a := b.(MyStruct)`;
|
||||
# default is false: such cases aren't reported by default.
|
||||
check-type-assertions: false
|
||||
|
||||
# report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`;
|
||||
# default is false: such cases aren't reported by default.
|
||||
check-blank: false
|
||||
|
||||
# [deprecated] comma-separated list of pairs of the form pkg:regex
|
||||
# the regex is used to ignore names within pkg. (default "fmt:.*").
|
||||
# see https://github.com/kisielk/errcheck#the-deprecated-method for details
|
||||
ignore: fmt:.*,io/ioutil:^Read.*
|
||||
|
||||
# path to a file containing a list of functions to exclude from checking
|
||||
# see https://github.com/kisielk/errcheck#excluding-functions for details
|
||||
exclude:
|
||||
|
||||
funlen:
|
||||
lines: 60
|
||||
statements: 40
|
||||
|
||||
govet:
|
||||
# report about shadowed variables
|
||||
check-shadowing: true
|
||||
|
||||
# settings per analyzer
|
||||
settings:
|
||||
printf: # analyzer name, run `go tool vet help` to see all analyzers
|
||||
funcs: # run `go tool vet help printf` to see available settings for `printf` analyzer
|
||||
- (github.com/golangci/golangci-lint/pkg/logutils.Log).Infof
|
||||
- (github.com/golangci/golangci-lint/pkg/logutils.Log).Warnf
|
||||
- (github.com/golangci/golangci-lint/pkg/logutils.Log).Errorf
|
||||
- (github.com/golangci/golangci-lint/pkg/logutils.Log).Fatalf
|
||||
|
||||
# enable or disable analyzers by name
|
||||
enable:
|
||||
- atomicalign
|
||||
enable-all: false
|
||||
disable:
|
||||
- shadow
|
||||
disable-all: false
|
||||
  golint:
    # minimal confidence for issues, default is 0.8
    min-confidence: 0.8
  gofmt:
    # simplify code: gofmt with `-s` option, true by default
    simplify: true
  goimports:
    # put imports beginning with prefix after 3rd-party packages;
    # it's a comma-separated list of prefixes
    local-prefixes:
  gocyclo:
    # minimal code complexity to report, 30 by default (but we recommend 10-20)
    min-complexity: 10
  gocognit:
    # minimal code complexity to report, 30 by default (but we recommend 10-20)
    min-complexity: 10
  maligned:
    # print struct with more effective memory layout or not, false by default
    suggest-new: true
  dupl:
    # tokens count to trigger issue, 150 by default
    threshold: 100
  goconst:
    # minimal length of string constant, 3 by default
    min-len: 3
    # minimal occurrences count to trigger, 3 by default
    min-occurrences: 3
  depguard:
    list-type: blacklist
    include-go-root: false
    packages-with-error-messages:
      # specify an error message to output when a blacklisted package is used
      github.com/sirupsen/logrus: "logging is allowed only by logutils.Log"
  misspell:
    # Correct spellings using locale preferences for US or UK.
    # Default is to use a neutral variety of English.
    # Setting locale to US will correct the British spelling of 'colour' to 'color'.
    locale: default
    ignore-words:
      - someword
  lll:
    # max line length, lines longer will be reported. Default is 120.
    # '\t' is counted as 1 character by default, and can be changed with the tab-width option
    line-length: 120
    # tab width in spaces. Defaults to 1.
    tab-width: 1
  unused:
    # treat code as a program (not a library) and report unused exported identifiers; default is false.
    # XXX: if you enable this setting, unused will report a lot of false positives in text editors:
    # if it's called for a subdir of a project it can't find func usages. All text editor integrations
    # with golangci-lint call it on a directory with the changed file.
    check-exported: false
  unparam:
    # Inspect exported functions, default is false. Set to true if no external program/library imports your code.
    # XXX: if you enable this setting, unparam will report a lot of false positives in text editors:
    # if it's called for a subdir of a project it can't find external interfaces. All text editor integrations
    # with golangci-lint call it on a directory with the changed file.
    check-exported: false
  nakedret:
    # make an issue if func has more lines of code than this setting and it has naked returns; default is 30
    max-func-lines: 30
  prealloc:
    # XXX: we don't recommend using this linter before doing performance profiling.
    # For most programs usage of prealloc will be a premature optimization.

    # Report preallocation suggestions only on simple loops that have no returns/breaks/continues/gotos in them.
    # True by default.
    simple: true
    range-loops: true # Report preallocation suggestions on range loops, true by default
    for-loops: false # Report preallocation suggestions on for loops, false by default
  gocritic:
    # Which checks should be enabled; can't be combined with 'disabled-checks';
    # See https://go-critic.github.io/overview#checks-overview
    # To check which checks are enabled run `GL_DEBUG=gocritic golangci-lint run`
    # By default the list of stable checks is used.
    enabled-checks:

    # Which checks should be disabled; can't be combined with 'enabled-checks'; default is empty
    disabled-checks:
      - regexpMust
      - rangeValCopy
      - hugeParam
      - ifElseChain
      - singleCaseSwitch

    # Enable multiple checks by tags, run `GL_DEBUG=gocritic golangci-lint run` to see all tags and checks.
    # Empty list by default. See https://github.com/go-critic/go-critic#usage -> section "Tags".
    enabled-tags:
      - performance

    settings: # settings passed to gocritic
      captLocal: # must be a valid enabled check name
        paramsOnly: true
  godox:
    # report any comments starting with keywords; this is useful for TODO or FIXME comments that
    # might be left in the code accidentally and should be resolved before merging
    keywords: # default keywords are TODO, BUG, and FIXME; these can be overwritten by this setting
      - NOTE
      - OPTIMIZE # marks code that should be optimized before merging
      - HACK # marks hack-arounds that should be removed before merging
  dogsled:
    # checks assignments with too many blank identifiers; default is 2
    max-blank-identifiers: 2

  whitespace:
    multi-if: false # Enforces newlines (or comments) after every multi-line if statement
    multi-func: false # Enforces newlines (or comments) after every multi-line function signature

  wsl:
    # If true, append is only allowed to be cuddled if the appended value
    # matches variables, fields or types on the line above. Default is true.
    strict-append: true
    # Allow calls and assignments to be cuddled as long as the lines have any
    # matching variables, fields or types. Default is true.
    allow-assign-and-call: true
    # Allow multiline assignments to be cuddled. Default is true.
    allow-multiline-assign: true
    # Allow case blocks to end with a whitespace.
    allow-case-traling-whitespace: true
    # Allow declarations (var) to be cuddled.
    allow-cuddle-declarations: false

linters:
  fast: false
  enable-all: true
  disable:
    # TODO Enforce the below linters later
    - dupl
    - errcheck
    - funlen
    - gocyclo
    - gocognit
    - lll
    - scopelint
    - unparam
    - wsl
    - gomnd
    - godot
    - testpackage
    - goerr113
    - gci
# options for analysis running
run:
  # default concurrency is the available CPU number
  concurrency: 4

  # timeout for analysis, e.g. 30s, 5m, default is 1m
  deadline: 5m

  # exit code when at least one issue was found, default is 1
  issues-exit-code: 1

  # include test files or not, default is true
  tests: true

  # list of build tags, all linters use it. Default is empty list.
  #build-tags:
  #  - mytag

  # which dirs to skip: they won't be analyzed;
  # can use regexp here: generated.*, regexp is applied on full path;
  # default value is empty list, but next dirs are always skipped independently
  # of this option's value:
  # vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
  skip-dirs:
    - ^vendor$

  # which files to skip: they will be analyzed, but issues from them
  # won't be reported. Default value is empty list, but there is
  # no need to include all autogenerated files; we confidently recognize
  # autogenerated files. If it's not the case, please let us know.
  skip-files:
  #  - ".*\\.my\\.go$"
  #  - lib/bad.go

# output configuration options
output:
  # colored-line-number|line-number|json|tab|checkstyle, default is "colored-line-number"
  format: tab

  # print lines of code with issue, default is true
  print-issued-lines: true

  # print linter name at the end of issue text, default is true
  print-linter-name: true


# all available settings of specific linters
linters-settings:
  errcheck:
    # report about not checking errors in type assertions: `a := b.(MyStruct)`;
    # default is false: such cases aren't reported by default.
    check-type-assertions: false

    # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`;
    # default is false: such cases aren't reported by default.
    check-blank: false

    # [deprecated] comma-separated list of pairs of the form pkg:regex;
    # the regex is used to ignore names within pkg (default "fmt:.*").
    # see https://github.com/kisielk/errcheck#the-deprecated-method for details
    ignore: fmt:.*,io/ioutil:^Read.*

    # path to a file containing a list of functions to exclude from checking
    # see https://github.com/kisielk/errcheck#excluding-functions for details
    exclude:

  funlen:
    lines: 60
    statements: 40

  govet:
    # report about shadowed variables
    check-shadowing: true

    # settings per analyzer
    settings:
      printf: # analyzer name, run `go tool vet help` to see all analyzers
        funcs: # run `go tool vet help printf` to see available settings for the `printf` analyzer
          - (github.com/golangci/golangci-lint/pkg/logutils.Log).Infof
          - (github.com/golangci/golangci-lint/pkg/logutils.Log).Warnf
          - (github.com/golangci/golangci-lint/pkg/logutils.Log).Errorf
          - (github.com/golangci/golangci-lint/pkg/logutils.Log).Fatalf

    # enable or disable analyzers by name
    enable:
      - atomicalign
    enable-all: false
    disable:
      - shadow
    disable-all: false
  golint:
    # minimal confidence for issues, default is 0.8
    min-confidence: 0.8
  gofmt:
    # simplify code: gofmt with `-s` option, true by default
    simplify: true
  goimports:
    # put imports beginning with prefix after 3rd-party packages;
    # it's a comma-separated list of prefixes
    local-prefixes: github.com/dapr/
  gocyclo:
    # minimal code complexity to report, 30 by default (but we recommend 10-20)
    min-complexity: 10
  gocognit:
    # minimal code complexity to report, 30 by default (but we recommend 10-20)
    min-complexity: 10
  maligned:
    # print struct with more effective memory layout or not, false by default
    suggest-new: true
  dupl:
    # tokens count to trigger issue, 150 by default
    threshold: 100
  goconst:
    # minimal length of string constant, 3 by default
    min-len: 3
    # minimal occurrences count to trigger, 3 by default
    min-occurrences: 5
  depguard:
    list-type: blacklist
    include-go-root: false
    packages:
      - github.com/Sirupsen/logrus
    packages-with-error-messages:
      # specify an error message to output when a blacklisted package is used
      github.com/Sirupsen/logrus: "must use github.com/dapr/kit/logger"
  misspell:
    # Correct spellings using locale preferences for US or UK.
    # Default is to use a neutral variety of English.
    # Setting locale to US will correct the British spelling of 'colour' to 'color'.
    locale: default
    ignore-words:
      - someword
  lll:
    # max line length, lines longer will be reported. Default is 120.
    # '\t' is counted as 1 character by default, and can be changed with the tab-width option
    line-length: 120
    # tab width in spaces. Defaults to 1.
    tab-width: 1
  unused:
    # treat code as a program (not a library) and report unused exported identifiers; default is false.
    # XXX: if you enable this setting, unused will report a lot of false positives in text editors:
    # if it's called for a subdir of a project it can't find func usages. All text editor integrations
    # with golangci-lint call it on a directory with the changed file.
    check-exported: false
  unparam:
    # Inspect exported functions, default is false. Set to true if no external program/library imports your code.
    # XXX: if you enable this setting, unparam will report a lot of false positives in text editors:
    # if it's called for a subdir of a project it can't find external interfaces. All text editor integrations
    # with golangci-lint call it on a directory with the changed file.
    check-exported: false
  nakedret:
    # make an issue if func has more lines of code than this setting and it has naked returns; default is 30
    max-func-lines: 30
  prealloc:
    # XXX: we don't recommend using this linter before doing performance profiling.
    # For most programs usage of prealloc will be a premature optimization.

    # Report preallocation suggestions only on simple loops that have no returns/breaks/continues/gotos in them.
    # True by default.
    simple: true
    range-loops: true # Report preallocation suggestions on range loops, true by default
    for-loops: false # Report preallocation suggestions on for loops, false by default
  gocritic:
    # Which checks should be enabled; can't be combined with 'disabled-checks';
    # See https://go-critic.github.io/overview#checks-overview
    # To check which checks are enabled run `GL_DEBUG=gocritic golangci-lint run`
    # By default the list of stable checks is used.
    enabled-checks:

    # Which checks should be disabled; can't be combined with 'enabled-checks'; default is empty
    disabled-checks:
      - regexpMust
      - rangeValCopy
      - hugeParam
      - ifElseChain
      - singleCaseSwitch
      - exitAfterDefer

    # Enable multiple checks by tags, run `GL_DEBUG=gocritic golangci-lint run` to see all tags and checks.
    # Empty list by default. See https://github.com/go-critic/go-critic#usage -> section "Tags".
    enabled-tags:
      - performance

    settings: # settings passed to gocritic
      captLocal: # must be a valid enabled check name
        paramsOnly: true
  godox:
    # report any comments starting with keywords; this is useful for TODO or FIXME comments that
    # might be left in the code accidentally and should be resolved before merging
    keywords: # default keywords are TODO, BUG, and FIXME; these can be overwritten by this setting
      - NOTE
      - OPTIMIZE # marks code that should be optimized before merging
      - HACK # marks hack-arounds that should be removed before merging
  godot:
    exclude:
      - 'nosec'
      - '\}'
    capital: false
    scope: all
  dogsled:
    # checks assignments with too many blank identifiers; default is 2
    max-blank-identifiers: 2

  whitespace:
    multi-if: false # Enforces newlines (or comments) after every multi-line if statement
    multi-func: false # Enforces newlines (or comments) after every multi-line function signature

  wsl:
    # If true, append is only allowed to be cuddled if the appended value
    # matches variables, fields or types on the line above. Default is true.
    strict-append: true
    # Allow calls and assignments to be cuddled as long as the lines have any
    # matching variables, fields or types. Default is true.
    allow-assign-and-call: true
    # Allow multiline assignments to be cuddled. Default is true.
    allow-multiline-assign: true
    # Allow case blocks to end with a whitespace.
    allow-case-traling-whitespace: true
    # Allow declarations (var) to be cuddled.
    allow-cuddle-declarations: false

linters:
  fast: false
  enable-all: true
  disable:
    # TODO Enforce the below linters later
    - dupl
    - errcheck
    - funlen
    - gochecknoglobals
    - gochecknoinits
    - gocyclo
    - gocognit
    - godox
    - interfacer
    - lll
    - maligned
    - scopelint
    - unparam
    - wsl
    - gomnd
    - testpackage
    - goerr113
    - nestif
    - nlreturn
    - exhaustive
    - noctx
    - gci
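
Two settings in the new configuration drive most of the hunks that follow: goimports with local-prefixes: github.com/dapr/ (standard library first, third-party modules second, github.com/dapr/ packages last) and the newly enabled godot (comments must end with a period). A minimal sketch of a file that satisfies both; the package and helper are illustrative, not from this repository:

// Package example illustrates the import grouping and comment style
// enforced by the configuration above.
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/dapr/kit/logger"
)

// newTestLogger returns a named logger for tests.
// Note the trailing period, which godot now requires.
func newTestLogger(t *testing.T) logger.Logger {
	l := logger.NewLogger("example.test")
	assert.NotNil(t, l)

	return l
}
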
@@ -72,7 +72,7 @@ func (s EnvironmentSettings) GetAzureEnvironment() (*azure.Environment, error) {
// GetAuthorizer creates an Authorizer retrieved from, in order:
// 1. Client credentials
// 2. Client certificate
// 3. MSI
// 3. MSI.
func (s EnvironmentSettings) GetAuthorizer() (autorest.Authorizer, error) {
	spt, err := s.GetServicePrincipalToken()
	if err != nil {

@@ -85,7 +85,7 @@ func (s EnvironmentSettings) GetAuthorizer() (autorest.Authorizer, error) {
// GetServicePrincipalToken returns a Service Principal Token retrieved from, in order:
// 1. Client credentials
// 2. Client certificate
// 3. MSI
// 3. MSI.
func (s EnvironmentSettings) GetServicePrincipalToken() (*adal.ServicePrincipalToken, error) {
	// 1. Client credentials
	if c, e := s.GetClientCredentials(); e == nil {

@@ -154,7 +154,7 @@ func (s EnvironmentSettings) GetMSI() MSIConfig {
	return config
}

// CredentialsConfig provides the options to get a bearer authorizer from client credentials
// CredentialsConfig provides the options to get a bearer authorizer from client credentials.
type CredentialsConfig struct {
	*auth.ClientCredentialsConfig
}
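
For context, a sketch of how a caller typically consumes this fallback chain; configureClient is hypothetical, while EnvironmentSettings.GetAuthorizer and the autorest.Client Authorizer field come from the code above and go-autorest:

// configureClient wires the first available credential (client secret,
// then client certificate, then MSI) into a go-autorest client.
func configureClient(client *autorest.Client, settings azure.EnvironmentSettings) error {
	authorizer, err := settings.GetAuthorizer()
	if err != nil {
		return err
	}
	client.Authorizer = authorizer

	return nil
}
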
@@ -18,7 +18,7 @@ const (
fakeTenantID = "14bec2db-7f9a-4f3d-97ca-2d384ac83389"
fakeClientID = "04bec2db-7f9a-4f3d-97ca-3d384ac83389"

// Base64 encoded test pfx cert - Expire date: 09/19/2119
// Base64 encoded test pfx cert - Expire date: 09/19/2119.
testCert = "MIIKTAIBAzCCCgwGCSqGSIb3DQEHAaCCCf0Eggn5MIIJ9TCCBhYGCSqGSIb3DQEHAaCCBgcEggYDMIIF/zCCBfsGCyqGSIb3DQEMCgECoIIE/jCCBPowHAYKKoZIhvcNAQwBAzAOBAifAbe5KAL7IwICB9AEggTYZ3dAdDNqi5GoGJ/VfZhh8dxIIERUaC/SO5vKFhDfNu9VCQKF7Azr3eJ4cjzQmicfLd6FxJpB6d+8fbQuCcYPpTAdqf5zmLtZWMDWW8YZE0pV7b6sDZSw/NbT2zFhsx2uife6NnLK//Pj+GeALUDPfhVfqfLCfWZlCHxlbOipVZv9U4+TCVO2vyrGUq2XesT78cT+LhbHYkcrxTCsXNLWAvSJ9zXOIVA5HNS3Qv8pQJSSbqYVBbLk6FEbt5B3pk0xoA1hhM7dlCoGvPJ/ajvN3wAcEB5kmjJ4q59s2HeXloa7aAhXTFEkL2rZH+acgr1AO/DwcGXUqzJ2ooGYBfoqmgaXjydzyVLzYNccBGbzBR4Q0crMW6zDBXDlwvnLxmqZ7p05Ix9ZqISQyTm/DboNwQk1erOJd0fe6Brg1Dw4td6Uh/AXfM8m+XCGJFn79ZMCtd4rP8w9l008m8xe7rczSkMW0aRJVr0j3fFheene83jOHEB0q3KMKsVTkPWehnTGPj4TrsL+WwrmJpqrSloXMyaqvS9hvqAfPal0JI9taz6R5HFONaO6oi/ajpX3tYSX0rafQPKHmJpFLtJHYPopFYgP4akq8wKOCjq1IDg3ZW59G9nh8Vcw3IrAnr+C9iMgzPUvCHCinQK24cmbn5px6S0U0ARhY90KrSMFRyjvxNpZzc+A/AAaQ/wwuLVy1GyuZ2sRFyVSCTRMC6ZfXAUs+OijDO/B++BCdmqm5p5/aZpQYf1cb681AaDc/5XTHtCC3setYfpviMe1grvp4jaPVrjnG85pVenZJ0d+Xo7BnD38Ec5RsKpvtXIieiRIbnGqzTzxj/OU/cdglrKy8MLo6IJigXA6N3x14o4e3akq7cvLPRQZqlWyLqjlGnJdZKJlemFlOnDSluzwGBwwKF+PpXuRVSDhi/ARN3g8L+wVAQQMEylWJfK7sNDun41rimE8wGFjqlfZNVg/pCBKvw3p90pCkxVUEZBRrP1vaGzrIvOsMU/rrJqQU7Imv9y6nUrvHdcoRFUdbgWVWZus6VwTrgwRkfnPiLZo0r5Vh4kComH0+Tc4kgwbnnuQQWzn8J9Ur4Nu0MkknC/1jDwulq2XOIBPclmEPg9CSSwfKonyaRxz+3GoPy0kGdHwsOcXIq5qBIyiYAtM1g1cQLtOT16OCjapus+GIOLnItP2OAhO70dsTMUlsQSNEH+KxUxFb1pFuQGXnStmgZtHYI4LvC/d820tY0m0I6SgfabnoQpIXa6iInIt970awwyUP1P/6m9ie5bCRDWCj4R0bNiNQBjq9tHfO4xeGK+fUTyeU4OEBgiyisNVhijf6GlfPHKWwkInAN0WbS3UHHACjkP0jmRb70b/3VbWon/+K5S6bk2ohIDsbPPVolTvfMehRwKatqQTbTXlnDIHJQzk9SfHHWJzkrQXEIbXgGxHSHm5CmNetR/MYGlivjtGRVxOLr7Y1tK0GGEDMs9nhiSvlwWjAEuwIN+72T6Kx7hPRld1BvaTYLRYXfjnedo7D2AoR+8tGLWjU31rHJVua/JILjGC84ARCjk5LOFHOXUjOP1jJomh8ebjlVijNWP0gLUC14AE8UJsJ1Xi6xiNOTeMpeOIJl2kX81uvnNbQ0j4WajfXlox5eV+0iJ1yNfw5jGB6TATBgkqhkiG9w0BCRUxBgQEAQAAADBXBgkqhkiG9w0BCRQxSh5IADgAZABlADYANgA5AGEAYQAtADUAZgAyAGMALQA0ADIANgBmAC0AYQA3ADAANwAtADIANgBmADkAOAAwADAANAAwAGEAYQAwMHkGCSsGAQQBgjcRATFsHmoATQBpAGMAcgBvAHMAbwBmAHQAIABFAG4AaABhAG4AYwBlAGQAIABSAFMAQQAgAGEAbgBkACAAQQBFAFMAIABDAHIAeQBwAHQAbwBnAHIAYQBwAGgAaQBjACAAUAByAG8AdgBpAGQAZQByMIID1wYJKoZIhvcNAQcGoIIDyDCCA8QCAQAwggO9BgkqhkiG9w0BBwEwHAYKKoZIhvcNAQwBBjAOBAiT1ngppOJy/gICB9CAggOQt9iTz9CmP/3+EBQv3WM80jLHHyrkJM5nIckr+4fmcl3frhbZZajSf1eigjOaqWpz1cAu9KtSAb0Fa35AKr7r9du5SXwBxyYS6XzXsWekSrdvh3Dui0abXo/yh+lIfI/61sJLv5Gc7/DbJrwlHHOD1DR/ohmncAiSjGUYaO9/Y9xUV3cbzjZypqKkkbahaWVMC8+D9zUSkH64RUuLvSi5X5QKFsICNouBL1j/C2s3VZoyR9F0ajRCEMFnQsMfJ/1fP2iW/wwFIARBjphj1SaEaP3XkxQadslR0cwhf6Ujj/tXyd1zV5oI8rJ54r8eN5Vu8NxEX3kl+A7gCc9ACEC0klZ18mQUjb6eDpUSFM63/wx7ISDKaD7gyWCul1JwlUmYzvrRw8sAwjVEyXzc+n0oIOlk0lE6vk3mybkfcOxafRkdr0zVnd5L+XtV/V38sd3ExNojQgUDNy905PNTHdeVnvHt6E8XGNgGX7a/tB1r7Un3soL5Vjcuf/HMdyR57CF2lxFSrdZ1bNnw7Z1GJbQZHago2AovNw+BbBJfey0iuIRP+dgkIfle0nzl3E7T9jU0r2+GEQfN7YYjRL19XFX4n8kNpiTDDRxdNj/yKQDfC7f8prZY/yP8bJLaFBd+uoH+D4QKmWk7plwXTOLiNno9cOTrLYT48HCEghtBbnTgZglOg8eDZd35MR5KcCNWxVy/enEj3/BEtkH7qnJsxlFMu1WwAQzaVYK1u1sGCD8NGH2wtiJi0O5q+YsQItv7ia2x9lSL1JPagtRhxnIZbC5HaIx87bSrVY9XTrWlj9X0H+YSdbUrszRse+LLJkw6h8wXqBvrBKsxnPrfJyQWs3zqehk0FPF1pi+spoJzp7//nmZ5a7knRXYkxV++TiuX+RQSNR/cFxezEwR+2WUAJaJfPpSf06dp5M/gJNVJQGMNiLHCMc9w6CPLUFQA1FG5YdK8nFrSo0iclX7wAHWpCjkqHj7PgOT+Ia5qiOb2dN2GBWPh5N94PO15BLlS/9UUvGxvmWqmG3lpr3hP5B6OZdQl8lxBGc8KTq4GdoJrQ+Jmfej3LQa33mV5VZwJqdbH9iEHvUH2VYC8ru7r5drXBqP5IlZrkdIL5uzzaoHsnWtu0OKgjwRwXaAF24zM0GVXbueGXLXH3vwBwoO4GnDfJ0wN0qFEJBRexRdPP9JKjPfVmwbi89sx1zJMId3nCmetq5yGMDcwHzAHBgUrDgMCGgQUmQChLB4WJjopytxl4LNQ9NuCbPkEFO+tI0n+7a6hwK9hqzq7tghkXp08"
)

@@ -5,7 +5,7 @@

package azure

// MetadataKeys : Keys for all metadata properties
// MetadataKeys : Keys for all metadata properties.
var MetadataKeys = map[string][]string{ // nolint: gochecknoglobals
	// clientId, clientSecret, tenantId are supported for backwards-compatibility as they're used by some components, but should be considered deprecated

@@ -29,5 +29,5 @@ var MetadataKeys = map[string][]string{ // nolint: gochecknoglobals
	"AzureEnvironment": {"azureEnvironment"},
}

// Default Azure environment
// Default Azure environment.
const DefaultAzureEnvironment = "AZUREPUBLICCLOUD"
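
A short sketch of how a component can resolve a property through this alias table; the helper itself is hypothetical:

// getMetadataValue returns the first property whose name matches one of
// the aliases registered for key in MetadataKeys.
func getMetadataValue(props map[string]string, key string) (string, bool) {
	for _, alias := range MetadataKeys[key] {
		if v, ok := props[alias]; ok {
			return v, true
		}
	}

	return "", false
}
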
@@ -27,7 +27,7 @@ func init() {
	}
}

// GetKubeClient returns a kubernetes client
// GetKubeClient returns a kubernetes client.
func GetKubeClient() (*kubernetes.Clientset, error) {
	flag.Parse()
	conf, err := rest.InClusterConfig()
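
Usage is straightforward; a minimal sketch, assuming an in-cluster deployment (the server-version probe is illustrative only):

// checkCluster verifies connectivity with the client returned above.
func checkCluster() error {
	clientset, err := GetKubeClient()
	if err != nil {
		return err
	}

	version, err := clientset.Discovery().ServerVersion()
	if err != nil {
		return err
	}
	fmt.Printf("connected to Kubernetes %s\n", version.GitVersion)

	return nil
}
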
@@ -72,7 +72,7 @@ func NewDingTalkWebhook(l logger.Logger) *DingTalkWebhook {
	}
}

// Init performs metadata parsing
// Init performs metadata parsing.
func (t *DingTalkWebhook) Init(metadata bindings.Metadata) error {
	var err error
	if err = t.settings.Decode(metadata.Properties); err != nil {

@@ -85,7 +85,7 @@ func (t *DingTalkWebhook) Init(metadata bindings.Metadata) error {
	return nil
}

// Read triggers the outgoing webhook, not yet production ready
// Read triggers the outgoing webhook, not yet production ready.
func (t *DingTalkWebhook) Read(handler func(*bindings.ReadResponse) ([]byte, error)) error {
	t.logger.Debugf("dingtalk webhook: start read input binding")

@@ -100,7 +100,7 @@ func (t *DingTalkWebhook) Read(handler func(*bindings.ReadResponse) ([]byte, err
	return nil
}

// Operations returns list of operations supported by dingtalk webhook binding
// Operations returns list of operations supported by dingtalk webhook binding.
func (t *DingTalkWebhook) Operations() []bindings.OperationKind {
	return []bindings.OperationKind{bindings.CreateOperation, bindings.GetOperation}
}
@@ -13,12 +13,13 @@ import (
	"strings"
	"time"

	"github.com/dapr/components-contrib/bindings"
	"github.com/dapr/kit/logger"
	"github.com/nacos-group/nacos-sdk-go/clients"
	"github.com/nacos-group/nacos-sdk-go/clients/config_client"
	"github.com/nacos-group/nacos-sdk-go/common/constant"
	"github.com/nacos-group/nacos-sdk-go/vo"

	"github.com/dapr/components-contrib/bindings"
	"github.com/dapr/kit/logger"
)

const (

@@ -29,13 +30,13 @@ const (
	metadataConfigOnchange = "config-onchange"
)

// Config type
// Config type.
type configParam struct {
	dataID string
	group  string
}

// Nacos allows reading/writing to a Nacos server
// Nacos allows reading/writing to a Nacos server.
type Nacos struct {
	settings Settings
	config   configParam

@@ -46,12 +47,12 @@ type Nacos struct {
	readHandler func(response *bindings.ReadResponse) ([]byte, error)
}

// NewNacos returns a new Nacos instance
// NewNacos returns a new Nacos instance.
func NewNacos(logger logger.Logger) *Nacos {
	return &Nacos{logger: logger} //nolint:exhaustivestruct
}

// Init implements InputBinding/OutputBinding's Init method
// Init implements InputBinding/OutputBinding's Init method.
func (n *Nacos) Init(metadata bindings.Metadata) error {
	n.settings = Settings{
		Timeout: defaultTimeout,

@@ -125,7 +126,7 @@ func (n *Nacos) createConfigClient() error {
	return nil
}

// Read implements InputBinding's Read method
// Read implements InputBinding's Read method.
func (n *Nacos) Read(handler func(*bindings.ReadResponse) ([]byte, error)) error {
	n.readHandler = handler

@@ -143,7 +144,7 @@ func (n *Nacos) Close() error {
	return nil
}

// Invoke implements OutputBinding's Invoke method
// Invoke implements OutputBinding's Invoke method.
func (n *Nacos) Invoke(req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
	switch req.Operation {
	case bindings.CreateOperation:

@@ -157,7 +158,7 @@ func (n *Nacos) Invoke(req *bindings.InvokeRequest) (*bindings.InvokeResponse, e
	}
}

// Operations implements OutputBinding's Operations method
// Operations implements OutputBinding's Operations method.
func (n *Nacos) Operations() []bindings.OperationKind {
	return []bindings.OperationKind{bindings.CreateOperation, bindings.GetOperation}
}
@@ -13,9 +13,10 @@ import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/dapr/components-contrib/bindings"
	"github.com/dapr/kit/logger"
	"github.com/stretchr/testify/require"
)

func TestInputBindingRead(t *testing.T) { //nolint:paralleltest
@@ -10,12 +10,13 @@ import (
	"encoding/json"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
	"github.com/google/uuid"

	"github.com/dapr/components-contrib/bindings"
	"github.com/dapr/kit/logger"
	"github.com/google/uuid"
)

// AliCloudOSS is a binding for an AliCloud OSS storage bucket
// AliCloudOSS is a binding for an AliCloud OSS storage bucket.
type AliCloudOSS struct {
	metadata *ossMetadata
	client   *oss.Client

@@ -29,12 +30,12 @@ type ossMetadata struct {
	Bucket string `json:"bucket"`
}

// NewAliCloudOSS returns a new instance
// NewAliCloudOSS returns a new instance.
func NewAliCloudOSS(logger logger.Logger) *AliCloudOSS {
	return &AliCloudOSS{logger: logger}
}

// Init does metadata parsing and connection creation
// Init does metadata parsing and connection creation.
func (s *AliCloudOSS) Init(metadata bindings.Metadata) error {
	m, err := s.parseMetadata(metadata)
	if err != nil {
@@ -8,8 +8,9 @@ package oss
import (
	"testing"

	"github.com/dapr/components-contrib/bindings"
	"github.com/stretchr/testify/assert"

	"github.com/dapr/components-contrib/bindings"
)

func TestParseMetadata(t *testing.T) {
@@ -18,6 +18,7 @@ import (
	mqc "github.com/apache/rocketmq-client-go/v2/consumer"
	"github.com/apache/rocketmq-client-go/v2/primitive"
	mqw "github.com/cinience/go_rocketmq"

	"github.com/dapr/components-contrib/bindings"
	"github.com/dapr/kit/logger"
	"github.com/dapr/kit/retry"

@@ -42,7 +43,7 @@ func NewAliCloudRocketMQ(l logger.Logger) *AliCloudRocketMQ {
	}
}

// Init performs metadata parsing
// Init performs metadata parsing.
func (a *AliCloudRocketMQ) Init(metadata bindings.Metadata) error {
	var err error
	if err = a.settings.Decode(metadata.Properties); err != nil {

@@ -68,7 +69,7 @@ func (a *AliCloudRocketMQ) Init(metadata bindings.Metadata) error {
	return nil
}

// Read triggers the rocketmq subscription
// Read triggers the rocketmq subscription.
func (a *AliCloudRocketMQ) Read(handler func(*bindings.ReadResponse) ([]byte, error)) error {
	a.logger.Debugf("binding rocketmq: start read input binding")

@@ -183,7 +184,7 @@ func (a *AliCloudRocketMQ) setupPublisher() (mqw.Producer, error) {
	return nil, errors.New("binding-rocketmq error: cannot found rocketmq producer")
}

// Operations returns list of operations supported by rocketmq binding
// Operations returns list of operations supported by rocketmq binding.
func (a *AliCloudRocketMQ) Operations() []bindings.OperationKind {
	return []bindings.OperationKind{bindings.CreateOperation}
}
@@ -10,10 +10,11 @@ import (
	"strings"

	rocketmq "github.com/cinience/go_rocketmq"

	"github.com/dapr/kit/config"
)

// rocketmq
// rocketmq.
const (
	metadataRocketmqTopic = "rocketmq-topic"
	metadataRocketmqTag   = "rocketmq-tag"
@@ -10,9 +10,10 @@ import (
	"os"
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/dapr/components-contrib/bindings"
	"github.com/dapr/kit/logger"
	"github.com/stretchr/testify/assert"
)

func TestTableStoreMetadata(t *testing.T) {
@@ -15,9 +15,10 @@ import (
	"net/http"
	"sync"

	jsoniter "github.com/json-iterator/go"

	"github.com/dapr/components-contrib/bindings"
	"github.com/dapr/kit/logger"
	jsoniter "github.com/json-iterator/go"
)

const (
@@ -12,10 +12,11 @@ import (
	"strings"
	"testing"

	"github.com/dapr/components-contrib/bindings"
	"github.com/dapr/kit/logger"
	jsoniter "github.com/json-iterator/go"
	"github.com/stretchr/testify/assert"

	"github.com/dapr/components-contrib/bindings"
	"github.com/dapr/kit/logger"
)

const (
@@ -9,8 +9,9 @@ import (
	"sync"
	"time"

	"github.com/dapr/kit/logger"
	"github.com/golang-jwt/jwt"

	"github.com/dapr/kit/logger"
)

// The "issued at" timestamp in the JWT must be within one hour from the
@@ -11,12 +11,13 @@ import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/dynamodb"
	"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"

	aws_auth "github.com/dapr/components-contrib/authentication/aws"
	"github.com/dapr/components-contrib/bindings"
	"github.com/dapr/kit/logger"
)

// DynamoDB allows performing stateful operations on AWS DynamoDB
// DynamoDB allows performing stateful operations on AWS DynamoDB.
type DynamoDB struct {
	client *dynamodb.DynamoDB
	table  string

@@ -32,12 +33,12 @@ type dynamoDBMetadata struct {
	Table string `json:"table"`
}

// NewDynamoDB returns a new DynamoDB instance
// NewDynamoDB returns a new DynamoDB instance.
func NewDynamoDB(logger logger.Logger) *DynamoDB {
	return &DynamoDB{logger: logger}
}

// Init performs connection parsing for DynamoDB
// Init performs connection parsing for DynamoDB.
func (d *DynamoDB) Init(metadata bindings.Metadata) error {
	meta, err := d.getDynamoDBMetadata(metadata)
	if err != nil {
@@ -8,8 +8,9 @@ package dynamodb
import (
	"testing"

	"github.com/dapr/components-contrib/bindings"
	"github.com/stretchr/testify/assert"

	"github.com/dapr/components-contrib/bindings"
)

func TestParseMetadata(t *testing.T) {
@@ -19,16 +19,17 @@ import (
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/service/kinesis"
	aws_auth "github.com/dapr/components-contrib/authentication/aws"
	"github.com/dapr/components-contrib/bindings"
	"github.com/dapr/kit/logger"
	"github.com/google/uuid"
	"github.com/vmware/vmware-go-kcl/clientlibrary/config"
	"github.com/vmware/vmware-go-kcl/clientlibrary/interfaces"
	"github.com/vmware/vmware-go-kcl/clientlibrary/worker"

	aws_auth "github.com/dapr/components-contrib/authentication/aws"
	"github.com/dapr/components-contrib/bindings"
	"github.com/dapr/kit/logger"
)

// AWSKinesis allows receiving and sending data to/from AWS Kinesis stream
// AWSKinesis allows receiving and sending data to/from AWS Kinesis stream.
type AWSKinesis struct {
	client   *kinesis.Kinesis
	metadata *kinesisMetadata

@@ -55,16 +56,16 @@ type kinesisMetadata struct {
type kinesisConsumerMode string

const (
	// ExtendedFanout - dedicated throughput through data stream api
	// ExtendedFanout - dedicated throughput through data stream api.
	ExtendedFanout kinesisConsumerMode = "extended"

	// SharedThroughput - shared throughput using checkpoint and monitoring
	// SharedThroughput - shared throughput using checkpoint and monitoring.
	SharedThroughput kinesisConsumerMode = "shared"

	partitionKeyName = "partitionKey"
)

// recordProcessorFactory
// recordProcessorFactory.
type recordProcessorFactory struct {
	logger  logger.Logger
	handler func(*bindings.ReadResponse) ([]byte, error)

@@ -75,12 +76,12 @@ type recordProcessor struct {
	handler func(*bindings.ReadResponse) ([]byte, error)
}

// NewAWSKinesis returns a new AWS Kinesis instance
// NewAWSKinesis returns a new AWS Kinesis instance.
func NewAWSKinesis(logger logger.Logger) *AWSKinesis {
	return &AWSKinesis{logger: logger}
}

// Init does metadata parsing and connection creation
// Init does metadata parsing and connection creation.
func (a *AWSKinesis) Init(metadata bindings.Metadata) error {
	m, err := a.parseMetadata(metadata)
	if err != nil {

@@ -169,7 +170,7 @@ func (a *AWSKinesis) Read(handler func(*bindings.ReadResponse) ([]byte, error))
	return nil
}

// Subscribe to all shards
// Subscribe to all shards.
func (a *AWSKinesis) Subscribe(ctx context.Context, streamDesc kinesis.StreamDescription, handler func(*bindings.ReadResponse) ([]byte, error)) error {
	consumerARN, err := a.ensureConsumer(streamDesc.StreamARN)
	if err != nil {
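
The two consumer modes above are chosen through binding metadata; a hypothetical helper showing the intended validation (the real parsing lives elsewhere in this file):

// resolveConsumerMode validates a raw metadata value against the two
// supported kinesis consumer modes.
func resolveConsumerMode(raw string) (kinesisConsumerMode, error) {
	switch mode := kinesisConsumerMode(raw); mode {
	case ExtendedFanout, SharedThroughput:
		return mode, nil
	default:
		return "", fmt.Errorf("invalid kinesis consumer mode: %s", raw)
	}
}
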
@@ -8,8 +8,9 @@ package kinesis
import (
	"testing"

	"github.com/dapr/components-contrib/bindings"
	"github.com/stretchr/testify/assert"

	"github.com/dapr/components-contrib/bindings"
)

func TestParseMetadata(t *testing.T) {
@@ -16,10 +16,11 @@ import (
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
	"github.com/google/uuid"

	aws_auth "github.com/dapr/components-contrib/authentication/aws"
	"github.com/dapr/components-contrib/bindings"
	"github.com/dapr/kit/logger"
	"github.com/google/uuid"
)

const (

@@ -31,7 +32,7 @@ const (
	maxResults = 1000
)

// AWSS3 is a binding for an AWS S3 storage bucket
// AWSS3 is a binding for an AWS S3 storage bucket.
type AWSS3 struct {
	metadata *s3Metadata
	s3Client *s3.S3

@@ -63,12 +64,12 @@ type listPayload struct {
	Delimiter string `json:"delimiter"`
}

// NewAWSS3 returns a new AWSS3 instance
// NewAWSS3 returns a new AWSS3 instance.
func NewAWSS3(logger logger.Logger) *AWSS3 {
	return &AWSS3{logger: logger}
}

// Init does metadata parsing and connection creation
// Init does metadata parsing and connection creation.
func (s *AWSS3) Init(metadata bindings.Metadata) error {
	m, err := s.parseMetadata(metadata)
	if err != nil {

@@ -277,7 +278,7 @@ func (s *AWSS3) getSession(metadata *s3Metadata) (*session.Session, error) {
	return sess, nil
}

// Helper to merge config and request metadata
// Helper to merge config and request metadata.
func (metadata s3Metadata) mergeWithRequestMetadata(req *bindings.InvokeRequest) (s3Metadata, error) {
	merged := metadata
@@ -8,9 +8,10 @@ package s3
import (
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/dapr/components-contrib/bindings"
	"github.com/dapr/kit/logger"
	"github.com/stretchr/testify/assert"
)

func TestParseMetadata(t *testing.T) {
@@ -13,9 +13,11 @@ import (
	"strings"

	"github.com/aws/aws-sdk-go/aws"

	aws_auth "github.com/dapr/components-contrib/authentication/aws"

	"github.com/aws/aws-sdk-go/service/ses"

	"github.com/dapr/components-contrib/bindings"
	"github.com/dapr/kit/logger"
)

@@ -25,7 +27,7 @@ const (
	CharSet = "UTF-8"
)

// AWSSES is an AWS SNS binding
// AWSSES is an AWS SNS binding.
type AWSSES struct {
	metadata *sesMetadata
	logger   logger.Logger

@@ -44,12 +46,12 @@ type sesMetadata struct {
	EmailBcc string `json:"emailBcc"`
}

// NewAWSSES creates a new AWSSES binding instance
// NewAWSSES creates a new AWSSES binding instance.
func NewAWSSES(logger logger.Logger) *AWSSES {
	return &AWSSES{logger: logger}
}

// Init does metadata parsing
// Init does metadata parsing.
func (a *AWSSES) Init(metadata bindings.Metadata) error {
	// Parse input metadata
	meta, err := a.parseMetadata(metadata)

@@ -153,7 +155,7 @@ func (a *AWSSES) Invoke(req *bindings.InvokeRequest) (*bindings.InvokeResponse,
	return nil, nil
}

// Helper to merge config and request metadata
// Helper to merge config and request metadata.
func (metadata sesMetadata) mergeWithRequestMetadata(req *bindings.InvokeRequest) sesMetadata {
	merged := metadata
@@ -8,9 +8,10 @@ package ses
import (
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/dapr/components-contrib/bindings"
	"github.com/dapr/kit/logger"
	"github.com/stretchr/testify/assert"
)

func TestParseMetadata(t *testing.T) {
@@ -10,12 +10,13 @@ import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/sns"

	aws_auth "github.com/dapr/components-contrib/authentication/aws"
	"github.com/dapr/components-contrib/bindings"
	"github.com/dapr/kit/logger"
)

// AWSSNS is an AWS SNS binding
// AWSSNS is an AWS SNS binding.
type AWSSNS struct {
	client   *sns.SNS
	topicARN string

@@ -37,12 +38,12 @@ type dataPayload struct {
	Subject interface{} `json:"subject"`
}

// NewAWSSNS creates a new AWSSNS binding instance
// NewAWSSNS creates a new AWSSNS binding instance.
func NewAWSSNS(logger logger.Logger) *AWSSNS {
	return &AWSSNS{logger: logger}
}

// Init does metadata parsing
// Init does metadata parsing.
func (a *AWSSNS) Init(metadata bindings.Metadata) error {
	m, err := a.parseMetadata(metadata)
	if err != nil {
@@ -8,8 +8,9 @@ package sns
import (
	"testing"

	"github.com/dapr/components-contrib/bindings"
	"github.com/stretchr/testify/assert"

	"github.com/dapr/components-contrib/bindings"
)

func TestParseMetadata(t *testing.T) {
@@ -11,12 +11,13 @@ import (

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sqs"

	aws_auth "github.com/dapr/components-contrib/authentication/aws"
	"github.com/dapr/components-contrib/bindings"
	"github.com/dapr/kit/logger"
)

// AWSSQS allows receiving and sending data to/from AWS SQS
// AWSSQS allows receiving and sending data to/from AWS SQS.
type AWSSQS struct {
	Client   *sqs.SQS
	QueueURL *string

@@ -33,12 +34,12 @@ type sqsMetadata struct {
	SessionToken string `json:"sessionToken"`
}

// NewAWSSQS returns a new AWS SQS instance
// NewAWSSQS returns a new AWS SQS instance.
func NewAWSSQS(logger logger.Logger) *AWSSQS {
	return &AWSSQS{logger: logger}
}

// Init does metadata parsing and connection creation
// Init does metadata parsing and connection creation.
func (a *AWSSQS) Init(metadata bindings.Metadata) error {
	m, err := a.parseSQSMetadata(metadata)
	if err != nil {
@@ -8,8 +8,9 @@ package sqs
import (
	"testing"

	"github.com/dapr/components-contrib/bindings"
	"github.com/stretchr/testify/assert"

	"github.com/dapr/components-contrib/bindings"
)

func TestParseMetadata(t *testing.T) {
@@ -16,22 +16,23 @@ import (
	"strconv"

	"github.com/Azure/azure-storage-blob-go/azblob"
	"github.com/google/uuid"

	"github.com/dapr/components-contrib/bindings"
	"github.com/dapr/kit/logger"
	"github.com/google/uuid"
)

const (
	// Used to reference the blob relative to the container
	// Used to reference the blob relative to the container.
	metadataKeyBlobName = "blobName"
	// A string value that identifies the portion of the list to be returned with the next list operation.
	// The operation returns a marker value within the response body if the list returned was not complete. The marker
	// value may then be used in a subsequent call to request the next set of list items.
	// See: https://docs.microsoft.com/en-us/rest/api/storageservices/list-blobs#uri-parameters
	metadataKeyMarker = "marker"
	// The number of blobs that will be returned in a list operation
	// The number of blobs that will be returned in a list operation.
	metadataKeyNumber = "number"
	// Defines if the user defined metadata should be returned in the get operation
	// Defines if the user defined metadata should be returned in the get operation.
	metadataKeyIncludeMetadata = "includeMetadata"
	// Defines the delete snapshots option for the delete operation.
	// See: https://docs.microsoft.com/en-us/rest/api/storageservices/delete-blob#request-headers

@@ -45,7 +46,7 @@ const (
	metadataKeyContentDisposition = "contentDisposition"
	meatdataKeyCacheControl       = "cacheControl"
	// Specifies the maximum number of HTTP GET requests that will be made while reading from a RetryReader. A value
	// of zero means that no additional HTTP GET requests will be made
	// of zero means that no additional HTTP GET requests will be made.
	defaultGetBlobRetryCount = 10
	// Specifies the maximum number of blobs to return, including all BlobPrefix elements. If the request does not
	// specify maxresults the server will return up to 5,000 items.

@@ -65,7 +66,7 @@ const (

var ErrMissingBlobName = errors.New("blobName is a required attribute")

// AzureBlobStorage allows saving blobs to an Azure Blob Storage account
// AzureBlobStorage allows saving blobs to an Azure Blob Storage account.
type AzureBlobStorage struct {
	metadata     *blobStorageMetadata
	containerURL azblob.ContainerURL

@@ -101,12 +102,12 @@ type listPayload struct {
	Include listInclude `json:"include"`
}

// NewAzureBlobStorage returns a new Azure Blob Storage instance
// NewAzureBlobStorage returns a new Azure Blob Storage instance.
func NewAzureBlobStorage(logger logger.Logger) *AzureBlobStorage {
	return &AzureBlobStorage{logger: logger}
}

// Init performs metadata parsing
// Init performs metadata parsing.
func (a *AzureBlobStorage) Init(metadata bindings.Metadata) error {
	m, err := a.parseMetadata(metadata)
	if err != nil {

@@ -118,7 +119,12 @@ func (a *AzureBlobStorage) Init(metadata bindings.Metadata) error {
	if err != nil {
		return fmt.Errorf("invalid credentials with error: %w", err)
	}
	p := azblob.NewPipeline(credential, azblob.PipelineOptions{})

	userAgent := "dapr-" + logger.DaprVersion
	options := azblob.PipelineOptions{
		Telemetry: azblob.TelemetryOptions{Value: userAgent},
	}
	p := azblob.NewPipeline(credential, options)

	containerName := a.metadata.Container
	URL, _ := url.Parse(
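
The same user-agent convention can be applied wherever an azblob pipeline is built; a sketch mirroring the change above (newPipeline is hypothetical and assumes the azure-pipeline-go pipeline package):

// newPipeline builds an azblob pipeline that reports the Dapr user agent
// in its telemetry options.
func newPipeline(credential azblob.Credential) pipeline.Pipeline {
	userAgent := "dapr-" + logger.DaprVersion

	return azblob.NewPipeline(credential, azblob.PipelineOptions{
		Telemetry: azblob.TelemetryOptions{Value: userAgent},
	})
}
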
@@ -9,9 +9,10 @@ import (
	"testing"

	"github.com/Azure/azure-storage-blob-go/azblob"
	"github.com/stretchr/testify/assert"

	"github.com/dapr/components-contrib/bindings"
	"github.com/dapr/kit/logger"
	"github.com/stretchr/testify/assert"
)

func TestParseMetadata(t *testing.T) {
@@ -11,12 +11,14 @@ import (
	"strings"

	"github.com/a8m/documentdb"

	"github.com/dapr/components-contrib/authentication/azure"

	"github.com/dapr/components-contrib/bindings"
	"github.com/dapr/kit/logger"
)

// CosmosDB allows performing state operations on collections
// CosmosDB allows performing state operations on collections.
type CosmosDB struct {
	client     *documentdb.DocumentDB
	collection *documentdb.Collection

@@ -34,12 +36,12 @@ type cosmosDBCredentials struct {
	PartitionKey string `json:"partitionKey"`
}

// NewCosmosDB returns a new CosmosDB instance
// NewCosmosDB returns a new CosmosDB instance.
func NewCosmosDB(logger logger.Logger) *CosmosDB {
	return &CosmosDB{logger: logger}
}

// Init performs CosmosDB connection parsing and connecting
// Init performs CosmosDB connection parsing and connecting.
func (c *CosmosDB) Init(metadata bindings.Metadata) error {
	m, err := c.parseMetadata(metadata)
	if err != nil {

@@ -66,6 +68,10 @@ func (c *CosmosDB) Init(metadata bindings.Metadata) error {
		}
		config = documentdb.NewConfigWithServicePrincipal(spt)
	}
	// disable the identification hydrator (which autogenerates IDs if missing from the request)
	// so we aren't forced to use a struct by the upstream SDK
	// this allows us to provide the most flexibility in the request document sent to this binding
	config.IdentificationHydrator = nil
	client := documentdb.New(m.URL, config)

	dbs, err := client.QueryDatabases(&documentdb.Query{

@@ -120,23 +126,28 @@ func (c *CosmosDB) Operations() []bindings.OperationKind {
}

func (c *CosmosDB) Invoke(req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
	var obj interface{}
	err := json.Unmarshal(req.Data, &obj)
	if err != nil {
		return nil, err
	}
	switch req.Operation {
	case bindings.CreateOperation:
		var obj interface{}
		err := json.Unmarshal(req.Data, &obj)
		if err != nil {
			return nil, err
		}

		val, err := c.getPartitionKeyValue(c.partitionKey, obj)
		if err != nil {
			return nil, err
		}
		val, err := c.getPartitionKeyValue(c.partitionKey, obj)
		if err != nil {
			return nil, err
		}

		_, err = c.client.CreateDocument(c.collection.Self, obj, documentdb.PartitionKey(val))
		if err != nil {
			return nil, err
		}
		_, err = c.client.CreateDocument(c.collection.Self, obj, documentdb.PartitionKey(val))
		if err != nil {
			return nil, err
		}

		return nil, nil
		return nil, nil
	default:
		return nil, fmt.Errorf("operation kind %s not supported", req.Operation)
	}
}

func (c *CosmosDB) getPartitionKeyValue(key string, obj interface{}) (interface{}, error) {
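
With the identification hydrator disabled, the create operation accepts arbitrary JSON documents; a hypothetical invocation of the refactored method above:

// createDocument is a sketch only; cosmosDB is an initialized *CosmosDB.
func createDocument(cosmosDB *CosmosDB) error {
	req := &bindings.InvokeRequest{
		Operation: bindings.CreateOperation,
		Data:      []byte(`{"id": "order-1", "state": "submitted"}`),
	}
	// Any other operation kind returns "operation kind ... not supported".
	if _, err := cosmosDB.Invoke(req); err != nil {
		return err
	}

	return nil
}
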
@@ -9,9 +9,10 @@ import (
	"encoding/json"
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/dapr/components-contrib/bindings"
	"github.com/dapr/kit/logger"
	"github.com/stretchr/testify/assert"
)

func TestParseMetadata(t *testing.T) {
@@ -11,18 +11,19 @@ import (
	"fmt"
	"time"

	gremcos "github.com/supplyon/gremcos"

	"github.com/dapr/components-contrib/bindings"
	"github.com/dapr/kit/logger"
	gremcos "github.com/supplyon/gremcos"
)

const (
	queryOperation bindings.OperationKind = "query"

	// keys from request's Data
	// keys from request's Data.
	commandGremlinKey = "gremlin"

	// keys from response's Data
	// keys from response's Data.
	respGremlinKey   = "gremlin"
	respOpKey        = "operation"
	respStartTimeKey = "start-time"

@@ -30,7 +31,7 @@ const (
	respDurationKey = "duration"
)

// CosmosGraphDB allows performing state operations on collections
// CosmosGraphDB allows performing state operations on collections.
type CosmosGraphDB struct {
	metadata *cosmosGraphDBCredentials
	client   *gremcos.Cosmos

@@ -43,12 +44,12 @@ type cosmosGraphDBCredentials struct {
	Username string `json:"username"`
}

// NewCosmosGraphDB returns a new CosmosGraphDB instance
// NewCosmosGraphDB returns a new CosmosGraphDB instance.
func NewCosmosGraphDB(logger logger.Logger) *CosmosGraphDB {
	return &CosmosGraphDB{logger: logger}
}

// Init performs CosmosDB connection parsing and connecting
// Init performs CosmosDB connection parsing and connecting.
func (c *CosmosGraphDB) Init(metadata bindings.Metadata) error {
	c.logger.Debug("Initializing Cosmos Graph DB binding")

@@ -8,9 +8,10 @@ package cosmosgraphdb
import (
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/dapr/components-contrib/bindings"
	"github.com/dapr/kit/logger"
	"github.com/stretchr/testify/assert"
)

func TestParseMetadata(t *testing.T) {
@@ -15,15 +15,17 @@ import (

	"github.com/Azure/azure-sdk-for-go/services/preview/eventgrid/mgmt/2020-04-01-preview/eventgrid"
	"github.com/Azure/go-autorest/autorest/azure/auth"
	"github.com/valyala/fasthttp"

	"github.com/dapr/components-contrib/bindings"
	"github.com/dapr/kit/logger"
	"github.com/valyala/fasthttp"
)

// AzureEventGrid allows sending/receiving Azure Event Grid events
// AzureEventGrid allows sending/receiving Azure Event Grid events.
type AzureEventGrid struct {
	metadata *azureEventGridMetadata
	logger   logger.Logger
	metadata  *azureEventGridMetadata
	logger    logger.Logger
	userAgent string
}

type azureEventGridMetadata struct {

@@ -47,13 +49,14 @@ type azureEventGridMetadata struct {
	TopicEndpoint string `json:"topicEndpoint"`
}

// NewAzureEventGrid returns a new Azure Event Grid instance
// NewAzureEventGrid returns a new Azure Event Grid instance.
func NewAzureEventGrid(logger logger.Logger) *AzureEventGrid {
	return &AzureEventGrid{logger: logger}
}

// Init performs metadata init
// Init performs metadata init.
func (a *AzureEventGrid) Init(metadata bindings.Metadata) error {
	a.userAgent = "dapr-" + logger.DaprVersion
	m, err := a.parseMetadata(metadata)
	if err != nil {
		return err

@@ -125,6 +128,7 @@ func (a *AzureEventGrid) Invoke(req *bindings.InvokeRequest) (*bindings.InvokeRe
	request.Header.SetMethod(fasthttp.MethodPost)
	request.Header.Set("Content-Type", "application/cloudevents+json")
	request.Header.Set("aeg-sas-key", a.metadata.AccessKey)
	request.Header.Set("User-Agent", a.userAgent)
	request.SetRequestURI(a.metadata.TopicEndpoint)
	request.SetBody(req.Data)

@@ -221,6 +225,7 @@ func (a *AzureEventGrid) createSubscription() error {
	clientCredentialsConfig := auth.NewClientCredentialsConfig(a.metadata.ClientID, a.metadata.ClientSecret, a.metadata.TenantID)

	subscriptionClient := eventgrid.NewEventSubscriptionsClient(a.metadata.SubscriptionID)
	subscriptionClient.AddToUserAgent(a.userAgent)
	authorizer, err := clientCredentialsConfig.Authorizer()
	if err != nil {
		return err
@@ -8,8 +8,9 @@ package eventgrid
import (
	"testing"

	"github.com/dapr/components-contrib/bindings"
	"github.com/stretchr/testify/assert"

	"github.com/dapr/components-contrib/bindings"
)

func TestParseMetadata(t *testing.T) {
@@ -20,32 +20,33 @@ import (
	"github.com/Azure/azure-event-hubs-go/v3/storage"
	"github.com/Azure/azure-storage-blob-go/azblob"
	"github.com/Azure/go-autorest/autorest/azure"

	"github.com/dapr/components-contrib/bindings"
	"github.com/dapr/kit/logger"
)

const (
	// metadata
	// metadata.
	connectionString = "connectionString"

	// required by subscriber
	// required by subscriber.
	consumerGroup        = "consumerGroup"
	storageAccountName   = "storageAccountName"
	storageAccountKey    = "storageAccountKey"
	storageContainerName = "storageContainerName"

	// optional
	// optional.
	partitionKeyName = "partitionKey"
	partitionIDName  = "partitionID"

	// errors
	// errors.
	missingConnectionStringErrorMsg     = "error: connectionString is a required attribute"
	missingStorageAccountNameErrorMsg   = "error: storageAccountName is a required attribute"
	missingStorageAccountKeyErrorMsg    = "error: storageAccountKey is a required attribute"
	missingStorageContainerNameErrorMsg = "error: storageContainerName is a required attribute"
	missingConsumerGroupErrorMsg        = "error: consumerGroup is a required attribute"

	// Event Hubs SystemProperties names for metadata passthrough
	// Event Hubs SystemProperties names for metadata passthrough.
	sysPropSequenceNumber = "x-opt-sequence-number"
	sysPropEnqueuedTime   = "x-opt-enqueued-time"
	sysPropOffset         = "x-opt-offset"

@@ -98,7 +99,7 @@ func readHandler(e *eventhub.Event, handler func(*bindings.ReadResponse) ([]byte
	return err
}

// AzureEventHubs allows sending/receiving Azure Event Hubs events
// AzureEventHubs allows sending/receiving Azure Event Hubs events.
type AzureEventHubs struct {
	hub      *eventhub.Hub
	metadata *azureEventHubsMetadata

@@ -120,24 +121,29 @@ func (m azureEventHubsMetadata) partitioned() bool {
	return m.partitionID != ""
}

// NewAzureEventHubs returns a new Azure Event hubs instance
// NewAzureEventHubs returns a new Azure Event hubs instance.
func NewAzureEventHubs(logger logger.Logger) *AzureEventHubs {
	return &AzureEventHubs{logger: logger}
}

// Init performs metadata init
// Init performs metadata init.
func (a *AzureEventHubs) Init(metadata bindings.Metadata) error {
	m, err := parseMetadata(metadata)
	if err != nil {
		return err
	}
	userAgent := "dapr-" + logger.DaprVersion
	a.metadata = m
	hub, err := eventhub.NewHubFromConnectionString(a.metadata.connectionString)
	hub, err := eventhub.NewHubFromConnectionString(a.metadata.connectionString,
		eventhub.HubWithUserAgent(userAgent),
	)

	// Create partitioned sender if the partitionID is configured
	if a.metadata.partitioned() {
		hub, err = eventhub.NewHubFromConnectionString(a.metadata.connectionString,
			eventhub.HubWithPartitionedSender(a.metadata.partitionID))
			eventhub.HubWithPartitionedSender(a.metadata.partitionID),
			eventhub.HubWithUserAgent(userAgent),
		)
	}

	if err != nil {

@@ -197,7 +203,7 @@ func (a *AzureEventHubs) Operations() []bindings.OperationKind {
	return []bindings.OperationKind{bindings.CreateOperation}
}

// Write posts an event hubs message
// Write posts an event hubs message.
func (a *AzureEventHubs) Invoke(req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
	event := &eventhub.Event{
		Data: req.Data,

@@ -221,7 +227,7 @@ func (a *AzureEventHubs) Invoke(req *bindings.InvokeRequest) (*bindings.InvokeRe
	return nil, nil
}

// Read gets messages from eventhubs in a non-blocking fashion
// Read gets messages from eventhubs in a non-blocking fashion.
func (a *AzureEventHubs) Read(handler func(*bindings.ReadResponse) ([]byte, error)) error {
	if !a.metadata.partitioned() {
		if err := a.RegisterEventProcessor(handler); err != nil {

@@ -243,7 +249,7 @@ func (a *AzureEventHubs) Read(handler func(*bindings.ReadResponse) ([]byte, erro
	return nil
}

// RegisterPartitionedEventProcessor - receive eventhub messages by partitionID
// RegisterPartitionedEventProcessor - receive eventhub messages by partitionID.
func (a *AzureEventHubs) RegisterPartitionedEventProcessor(handler func(*bindings.ReadResponse) ([]byte, error)) error {
	ctx := context.Background()

@@ -292,7 +298,7 @@ func contains(arr []string, str string) bool {
}

// RegisterEventProcessor - receive eventhub messages by eventprocessor
// host by balancing partitions
// host by balancing partitions.
func (a *AzureEventHubs) RegisterEventProcessor(handler func(*bindings.ReadResponse) ([]byte, error)) error {
	cred, err := azblob.NewSharedKeyCredential(a.metadata.storageAccountName, a.metadata.storageAccountKey)
	if err != nil {
@ -0,0 +1,107 @@
//go:build integration_test
// +build integration_test

// ------------------------------------------------------------
// Copyright (c) Microsoft Corporation and Dapr Contributors.
// Licensed under the MIT License.
// ------------------------------------------------------------
package eventhubs

import (
    "fmt"
    "os"
    "os/exec"
    "testing"
    "time"

    "github.com/stretchr/testify/assert"

    "github.com/dapr/components-contrib/bindings"
    "github.com/dapr/kit/logger"
)

const (
    // Note: Reuse the environment variable names from the conformance tests where possible to support reuse of setup workflows

    // iotHubConnectionStringEnvKey defines the key containing the integration test connection string
    // For the default EventHub endpoint for an Azure IoT Hub, it will resemble:
    // Endpoint=sb://<iotHubGeneratedNamespace>.servicebus.windows.net/;SharedAccessKeyName=service;SharedAccessKey=<key>;EntityPath=<iotHubGeneratedPath>
    iotHubConnectionStringEnvKey = "AzureIotHubEventHubConnectionString"
    iotHubConsumerGroupEnvKey    = "AzureIotHubBindingsConsumerGroup"
    iotHubNameEnvKey             = "AzureIotHubName"
    storageAccountNameEnvKey     = "AzureBlobStorageAccount"
    storageAccountKeyEnvKey      = "AzureBlobStorageAccessKey"
    azureCredentialsEnvKey       = "AZURE_CREDENTIALS"

    testStorageContainerName = "iothub-bindings-integration-test"
)

func createIotHubBindingsMetadata() bindings.Metadata {
    metadata := bindings.Metadata{
        Properties: map[string]string{
            connectionString:     os.Getenv(iotHubConnectionStringEnvKey),
            consumerGroup:        os.Getenv(iotHubConsumerGroupEnvKey),
            storageAccountName:   os.Getenv(storageAccountNameEnvKey),
            storageAccountKey:    os.Getenv(storageAccountKeyEnvKey),
            storageContainerName: testStorageContainerName,
        },
    }

    return metadata
}

func testReadIotHubEvents(t *testing.T) {
    logger := logger.NewLogger("bindings.azure.eventhubs.integration.test")
    eh := NewAzureEventHubs(logger)
    err := eh.Init(createIotHubBindingsMetadata())
    assert.Nil(t, err)

    // Invoke az CLI via bash script to send test IoT device events
    // Requires the AZURE_CREDENTIALS environment variable to be already set (output of `az ad sp create-for-rbac`)
    cmd := exec.Command("/bin/bash", "../../../tests/scripts/send-iot-device-events.sh")
    cmd.Env = append(os.Environ(), fmt.Sprintf("IOT_HUB_NAME=%s", os.Getenv(iotHubNameEnvKey)))
    out, err := cmd.CombinedOutput()
    assert.Nil(t, err, "Error in send-iot-device-events.sh:\n%s", out)

    // Setup Read binding to capture readResponses in a closure so that test asserts can be
    // performed on the main thread, including the case where the handler is never invoked.
    var readResponses []bindings.ReadResponse
    handler := func(data *bindings.ReadResponse) ([]byte, error) {
        readResponses = append(readResponses, *data)

        return nil, nil
    }

    go eh.Read(handler)

    // Note: azure-event-hubs-go SDK defaultLeasePersistenceInterval is 5s
    // Sleep long enough so that the azure event hubs SDK has time to persist updated checkpoints
    // before the test process exits.
    time.Sleep(10 * time.Second)

    assert.Greater(t, len(readResponses), 0, "Failed to receive any IotHub events")
    logger.Infof("Received %d messages", len(readResponses))
    for _, r := range readResponses {
        assert.Contains(t, string(r.Data), "Integration test message")

        // Verify expected IoT Hub device event metadata exists
        // TODO: add device messages that can populate the sysPropPartitionKey and sysPropIotHubConnectionModuleID metadata
        assert.Contains(t, r.Metadata, sysPropSequenceNumber, "IoT device event missing: %s", sysPropSequenceNumber)
        assert.Contains(t, r.Metadata, sysPropEnqueuedTime, "IoT device event missing: %s", sysPropEnqueuedTime)
        assert.Contains(t, r.Metadata, sysPropOffset, "IoT device event missing: %s", sysPropOffset)
        assert.Contains(t, r.Metadata, sysPropIotHubDeviceConnectionID, "IoT device event missing: %s", sysPropIotHubDeviceConnectionID)
        assert.Contains(t, r.Metadata, sysPropIotHubAuthGenerationID, "IoT device event missing: %s", sysPropIotHubAuthGenerationID)
        assert.Contains(t, r.Metadata, sysPropIotHubConnectionAuthMethod, "IoT device event missing: %s", sysPropIotHubConnectionAuthMethod)
        assert.Contains(t, r.Metadata, sysPropIotHubEnqueuedTime, "IoT device event missing: %s", sysPropIotHubEnqueuedTime)
    }

    eh.Close()
}

func TestIntegrationCases(t *testing.T) {
    connectionString := os.Getenv(iotHubConnectionStringEnvKey)
    if connectionString == "" {
        t.Skipf("EventHubs bindings integration to IoT Hub tests skipped. To enable them, define the endpoint connection string using environment variable '%s')", iotHubConnectionStringEnvKey)
    }

    t.Run("Read IoT Hub events", testReadIotHubEvents)
}
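The test above gates itself on an environment variable rather than the build tag alone, so CI without cloud credentials skips cleanly instead of failing. The same guard pattern is reusable for any cloud-backed test; the key name below is hypothetical, not from the repo.

    // Sketch of the env-gated integration test pattern used above.
    package example

    import (
        "os"
        "testing"
    )

    const connStringEnvKey = "MY_SERVICE_CONNECTION_STRING" // hypothetical key

    func TestIntegration(t *testing.T) {
        if os.Getenv(connStringEnvKey) == "" {
            // Skip, don't fail: pipelines without credentials stay green.
            t.Skipf("integration test skipped; set '%s' to enable", connStringEnvKey)
        }
        // ... exercise the real service here ...
    }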
@ -8,8 +8,9 @@ package eventhubs
import (
    "testing"

    "github.com/dapr/components-contrib/bindings"
    "github.com/stretchr/testify/assert"

    "github.com/dapr/components-contrib/bindings"
)

func TestParseMetadata(t *testing.T) {
@ -8,9 +8,11 @@ package servicebusqueues
import (
    "context"
    "encoding/json"
    "strings"
    "time"

    servicebus "github.com/Azure/azure-service-bus-go"

    "github.com/dapr/components-contrib/bindings"
    contrib_metadata "github.com/dapr/components-contrib/metadata"
    "github.com/dapr/kit/logger"

@ -25,7 +27,7 @@ const (
    AzureServiceBusDefaultMessageTimeToLive = time.Hour * 24 * 14
)

// AzureServiceBusQueues is an input/output binding reading from and sending events to Azure Service Bus queues
// AzureServiceBusQueues is an input/output binding reading from and sending events to Azure Service Bus queues.
type AzureServiceBusQueues struct {
    metadata *serviceBusQueuesMetadata
    client   *servicebus.Queue

@ -39,20 +41,22 @@ type serviceBusQueuesMetadata struct {
    ttl time.Duration
}

// NewAzureServiceBusQueues returns a new AzureServiceBusQueues instance
// NewAzureServiceBusQueues returns a new AzureServiceBusQueues instance.
func NewAzureServiceBusQueues(logger logger.Logger) *AzureServiceBusQueues {
    return &AzureServiceBusQueues{logger: logger}
}

// Init parses connection properties and creates a new Service Bus Queue client
// Init parses connection properties and creates a new Service Bus Queue client.
func (a *AzureServiceBusQueues) Init(metadata bindings.Metadata) error {
    meta, err := a.parseMetadata(metadata)
    if err != nil {
        return err
    }
    userAgent := "dapr-" + logger.DaprVersion
    a.metadata = meta

    ns, err := servicebus.NewNamespace(servicebus.NamespaceWithConnectionString(a.metadata.ConnectionString))
    ns, err := servicebus.NewNamespace(servicebus.NamespaceWithConnectionString(a.metadata.ConnectionString),
        servicebus.NamespaceWithUserAgent(userAgent))
    if err != nil {
        return err
    }

@ -127,6 +131,9 @@ func (a *AzureServiceBusQueues) parseMetadata(metadata bindings.Metadata) (*serv

    m.ttl = ttl

    // Queue names are case-insensitive and are forced to lowercase. This mimics the Azure portal's behavior.
    m.QueueName = strings.ToLower(m.QueueName)

    return &m, nil
}
@ -1,3 +1,4 @@
//go:build integration_test
// +build integration_test

// ------------------------------------------------------------

@ -25,6 +26,7 @@ import (
const (
    // Environment variable containing the connection string to Azure Service Bus
    testServiceBusEnvKey = "DAPR_TEST_AZURE_SERVICEBUS"
    ttlInSeconds         = 5
)

func getTestServiceBusConnectionString() string {

@ -60,12 +62,12 @@ func getMessageWithRetries(queue *servicebus.Queue, maxDuration time.Duration) (

func TestQueueWithTTL(t *testing.T) {
    serviceBusConnectionString := getTestServiceBusConnectionString()
    assert.NotEmpty(serviceBusConnectionString, fmt.Sprintf("Azure ServiceBus connection string must set in environment variable '%s'", testServiceBusEnvKey))
    assert.NotEmpty(t, serviceBusConnectionString, fmt.Sprintf("Azure ServiceBus connection string must be set in environment variable '%s'", testServiceBusEnvKey))

    queueName := uuid.New().String()
    a := NewAzureServiceBusQueues(logger.NewLogger("test"))
    m := bindings.Metadata{}
    m.Properties = map[string]string{"connectionString": serviceBusConnectionString, "queueName": queueName, metadata.TTLMetadataKey: "1"}
    m.Properties = map[string]string{"connectionString": serviceBusConnectionString, "queueName": queueName, metadata.TTLMetadataKey: fmt.Sprintf("%d", ttlInSeconds)}
    err := a.Init(m)
    assert.Nil(t, err)

@ -80,16 +82,15 @@ func TestQueueWithTTL(t *testing.T) {

    queueEntity, err := qmr.Get(context.Background(), queueName)
    assert.Nil(t, err)
    assert.Equal(t, "PT1S", *queueEntity.DefaultMessageTimeToLive)
    assert.Equal(t, fmt.Sprintf("PT%dS", ttlInSeconds), *queueEntity.DefaultMessageTimeToLive)

    // Assert that if waited too long, we won't see any message
    const tooLateMsgContent = "too_late_msg"
    err = a.Write(&bindings.InvokeRequest{Data: []byte(tooLateMsgContent)})
    _, err = a.Invoke(&bindings.InvokeRequest{Data: []byte(tooLateMsgContent)})
    assert.Nil(t, err)

    time.Sleep(time.Second * 2)
    time.Sleep(time.Second * (ttlInSeconds + 2))

    const ttlInSeconds = 1
    const maxGetDuration = ttlInSeconds * time.Second

    _, ok, err := getMessageWithRetries(queue, maxGetDuration)

@ -98,7 +99,7 @@ func TestQueueWithTTL(t *testing.T) {

    // Getting before it is expired, should return it
    const testMsgContent = "test_msg"
    err = a.Write(&bindings.InvokeRequest{Data: []byte(testMsgContent)})
    _, err = a.Invoke(&bindings.InvokeRequest{Data: []byte(testMsgContent)})
    assert.Nil(t, err)

    msg, ok, err := getMessageWithRetries(queue, maxGetDuration)

@ -107,12 +108,12 @@ func TestQueueWithTTL(t *testing.T) {
    msgBody := string(msg.Data)
    assert.Equal(t, testMsgContent, msgBody)
    assert.NotNil(t, msg.TTL)
    assert.Equal(t, time.Second, *msg.TTL)
    assert.Equal(t, ttlInSeconds*time.Second, *msg.TTL)
}

func TestPublishingWithTTL(t *testing.T) {
    serviceBusConnectionString := getTestServiceBusConnectionString()
    assert.NotEmpty(serviceBusConnectionString, fmt.Sprintf("Azure ServiceBus connection string must set in environment variable '%s'", testServiceBusEnvKey))
    assert.NotEmpty(t, serviceBusConnectionString, fmt.Sprintf("Azure ServiceBus connection string must be set in environment variable '%s'", testServiceBusEnvKey))

    queueName := uuid.New().String()
    queueBinding1 := NewAzureServiceBusQueues(logger.NewLogger("test"))

@ -140,15 +141,14 @@ func TestPublishingWithTTL(t *testing.T) {
    writeRequest := bindings.InvokeRequest{
        Data: []byte(tooLateMsgContent),
        Metadata: map[string]string{
            metadata.TTLMetadataKey: "1",
            metadata.TTLMetadataKey: fmt.Sprintf("%d", ttlInSeconds),
        },
    }
    err = queueBinding1.Write(&writeRequest)
    _, err = queueBinding1.Invoke(&writeRequest)
    assert.Nil(t, err)

    time.Sleep(time.Second * 5)
    time.Sleep(time.Second * (ttlInSeconds + 2))

    const ttlInSeconds = 1
    const maxGetDuration = ttlInSeconds * time.Second

    _, ok, err := getMessageWithRetries(queue, maxGetDuration)

@ -164,10 +164,10 @@ func TestPublishingWithTTL(t *testing.T) {
    writeRequest = bindings.InvokeRequest{
        Data: []byte(testMsgContent),
        Metadata: map[string]string{
            metadata.TTLMetadataKey: "1",
            metadata.TTLMetadataKey: fmt.Sprintf("%d", ttlInSeconds),
        },
    }
    err = queueBinding2.Write(&writeRequest)
    _, err = queueBinding2.Invoke(&writeRequest)
    assert.Nil(t, err)

    msg, ok, err := getMessageWithRetries(queue, maxGetDuration)

@ -177,5 +177,5 @@ func TestPublishingWithTTL(t *testing.T) {
    assert.Equal(t, testMsgContent, msgBody)
    assert.NotNil(t, msg.TTL)

    assert.Equal(t, time.Second, *msg.TTL)
    assert.Equal(t, ttlInSeconds*time.Second, *msg.TTL)
}
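The assertion on DefaultMessageTimeToLive above relies on Service Bus reporting TTLs as ISO 8601 durations, so a TTL of n whole seconds round-trips as "PT<n>S". A small sketch of that mapping, illustrative only:

    // Sketch: a Dapr-style TTL in whole seconds and its ISO 8601 form.
    package example

    import (
        "fmt"
        "time"
    )

    func ttlForms(seconds int) (time.Duration, string) {
        d := time.Duration(seconds) * time.Second
        iso := fmt.Sprintf("PT%dS", seconds) // e.g. 5 -> "PT5S"

        return d, iso
    }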
@ -9,10 +9,11 @@ import (
    "testing"
    "time"

    "github.com/stretchr/testify/assert"

    "github.com/dapr/components-contrib/bindings"
    "github.com/dapr/components-contrib/metadata"
    "github.com/dapr/kit/logger"
    "github.com/stretchr/testify/assert"
)

func TestParseMetadata(t *testing.T) {
@ -14,10 +14,11 @@ import (
    "strings"
    "time"

    "github.com/dapr/components-contrib/bindings"
    "github.com/dapr/kit/logger"
    "github.com/golang-jwt/jwt"
    "github.com/pkg/errors"

    "github.com/dapr/components-contrib/bindings"
    "github.com/dapr/kit/logger"
)

const (

@ -29,7 +30,7 @@ const (
    userKey = "user"
)

// NewSignalR creates a new pub/sub based on Azure SignalR
// NewSignalR creates a new pub/sub based on Azure SignalR.
func NewSignalR(logger logger.Logger) *SignalR {
    return &SignalR{
        tokens: make(map[string]signalrCachedToken),

@ -43,20 +44,23 @@ type signalrCachedToken struct {
    expiration time.Time
}

// SignalR is an output binding for Azure SignalR
// SignalR is an output binding for Azure SignalR.
type SignalR struct {
    endpoint   string
    accessKey  string
    version    string
    hub        string
    userAgent  string
    tokens     map[string]signalrCachedToken
    httpClient *http.Client

    logger logger.Logger
}

// Init is responsible for initializing the SignalR output based on the metadata
// Init is responsible for initializing the SignalR output based on the metadata.
func (s *SignalR) Init(metadata bindings.Metadata) error {
    s.userAgent = "dapr-" + logger.DaprVersion

    connectionString, ok := metadata.Properties[connectionStringKey]
    if !ok || connectionString == "" {
        return fmt.Errorf("missing connection string")

@ -127,6 +131,7 @@ func (s *SignalR) sendMessageToSignalR(url string, token string, data []byte) er

    httpReq.Header.Set("Authorization", "Bearer "+token)
    httpReq.Header.Set("Content-Type", "application/json")
    httpReq.Header.Set("User-Agent", s.userAgent)

    resp, err := s.httpClient.Do(httpReq)
    if err != nil {
@ -9,9 +9,10 @@ import (
    "sync/atomic"
    "testing"

    "github.com/stretchr/testify/assert"

    "github.com/dapr/components-contrib/bindings"
    "github.com/dapr/kit/logger"
    "github.com/stretchr/testify/assert"
)

func TestConfigurationValid(t *testing.T) {
@ -13,12 +13,13 @@ import (
    "net/url"
    "os"
    "os/signal"
    "strings"
    "strconv"
    "sync"
    "syscall"
    "time"

    "github.com/Azure/azure-storage-queue-go/azqueue"

    "github.com/dapr/components-contrib/bindings"
    contrib_metadata "github.com/dapr/components-contrib/metadata"
    "github.com/dapr/kit/logger"

@ -32,14 +33,14 @@ type consumer struct {
    callback func(*bindings.ReadResponse) ([]byte, error)
}

// QueueHelper enables injection for testing
// QueueHelper enables injection for testing.
type QueueHelper interface {
    Init(accountName string, accountKey string, queueName string, decodeBase64 bool) error
    Write(data []byte, ttl *time.Duration) error
    Read(ctx context.Context, consumer *consumer) error
}

// AzureQueueHelper concrete impl of queue helper
// AzureQueueHelper concrete impl of queue helper.
type AzureQueueHelper struct {
    credential *azqueue.SharedKeyCredential
    queueURL   azqueue.QueueURL

@ -48,7 +49,7 @@ type AzureQueueHelper struct {
    decodeBase64 bool
}

// Init sets up this helper
// Init sets up this helper.
func (d *AzureQueueHelper) Init(accountName string, accountKey string, queueName string, decodeBase64 bool) error {
    credential, err := azqueue.NewSharedKeyCredential(accountName, accountKey)
    if err != nil {

@ -57,7 +58,13 @@ func (d *AzureQueueHelper) Init(accountName string, accountKey string, queueName
    d.credential = credential
    d.decodeBase64 = decodeBase64
    u, _ := url.Parse(fmt.Sprintf(d.reqURI, accountName, queueName))
    d.queueURL = azqueue.NewQueueURL(*u, azqueue.NewPipeline(credential, azqueue.PipelineOptions{}))
    userAgent := "dapr-" + logger.DaprVersion
    pipelineOptions := azqueue.PipelineOptions{
        Telemetry: azqueue.TelemetryOptions{
            Value: userAgent,
        },
    }
    d.queueURL = azqueue.NewQueueURL(*u, azqueue.NewPipeline(credential, pipelineOptions))
    ctx := context.TODO()
    _, err = d.queueURL.Create(ctx, azqueue.Metadata{})
    if err != nil {

@ -70,13 +77,17 @@ func (d *AzureQueueHelper) Init(accountName string, accountKey string, queueName
func (d *AzureQueueHelper) Write(data []byte, ttl *time.Duration) error {
    ctx := context.TODO()
    messagesURL := d.queueURL.NewMessagesURL()
    s := string(data)

    s, err := strconv.Unquote(string(data))
    if err != nil {
        s = string(data)
    }

    if ttl == nil {
        ttlToUse := defaultTTL
        ttl = &ttlToUse
    }
    _, err := messagesURL.Enqueue(ctx, s, time.Second*0, *ttl)
    _, err = messagesURL.Enqueue(ctx, s, time.Second*0, *ttl)

    return err
}

@ -98,7 +109,7 @@ func (d *AzureQueueHelper) Read(ctx context.Context, consumer *consumer) error {
    var data []byte

    if d.decodeBase64 {
        decoded, decodeError := base64.StdEncoding.DecodeString(strings.Trim(mt, "\""))
        decoded, decodeError := base64.StdEncoding.DecodeString(mt)
        if decodeError != nil {
            return decodeError
        }

@ -124,7 +135,7 @@ func (d *AzureQueueHelper) Read(ctx context.Context, consumer *consumer) error {
    return nil
}

// NewAzureQueueHelper creates new helper
// NewAzureQueueHelper creates new helper.
func NewAzureQueueHelper(logger logger.Logger) QueueHelper {
    return &AzureQueueHelper{
        reqURI: "https://%s.queue.core.windows.net/%s",

@ -132,7 +143,7 @@ func NewAzureQueueHelper(logger logger.Logger) QueueHelper {
    }
}

// AzureStorageQueues is an input/output binding reading from and sending events to Azure Storage queues
// AzureStorageQueues is an input/output binding reading from and sending events to Azure Storage queues.
type AzureStorageQueues struct {
    metadata *storageQueuesMetadata
    helper   QueueHelper

@ -148,12 +159,12 @@ type storageQueuesMetadata struct {
    ttl *time.Duration
}

// NewAzureStorageQueues returns a new AzureStorageQueues instance
// NewAzureStorageQueues returns a new AzureStorageQueues instance.
func NewAzureStorageQueues(logger logger.Logger) *AzureStorageQueues {
    return &AzureStorageQueues{helper: NewAzureQueueHelper(logger), logger: logger}
}

// Init parses connection properties and creates a new Storage Queue client
// Init parses connection properties and creates a new Storage Queue client.
func (a *AzureStorageQueues) Init(metadata bindings.Metadata) error {
    meta, err := a.parseMetadata(metadata)
    if err != nil {
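The telemetry option added in Init above is how azure-storage-queue-go lets callers stamp a user-agent value onto every request the pipeline sends. A condensed sketch of the same wiring, assuming valid account credentials; the "dapr-example" string is a placeholder:

    // Sketch: build a QueueURL whose pipeline reports a custom user agent.
    package example

    import (
        "fmt"
        "net/url"

        "github.com/Azure/azure-storage-queue-go/azqueue"
    )

    func newQueueURL(account, key, queue string) (azqueue.QueueURL, error) {
        cred, err := azqueue.NewSharedKeyCredential(account, key)
        if err != nil {
            return azqueue.QueueURL{}, err
        }
        u, err := url.Parse(fmt.Sprintf("https://%s.queue.core.windows.net/%s", account, queue))
        if err != nil {
            return azqueue.QueueURL{}, err
        }
        pipeline := azqueue.NewPipeline(cred, azqueue.PipelineOptions{
            // Telemetry.Value is prepended to the SDK's default user-agent.
            Telemetry: azqueue.TelemetryOptions{Value: "dapr-example"},
        })

        return azqueue.NewQueueURL(*u, pipeline), nil
    }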
@ -12,11 +12,12 @@ import (
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/mock"

    "github.com/dapr/components-contrib/bindings"
    "github.com/dapr/components-contrib/metadata"
    "github.com/dapr/kit/logger"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/mock"
)

type MockHelper struct {

@ -196,6 +197,7 @@ func TestReadQueueDecode(t *testing.T) {
}

// Uncomment this function to test reading from local queue
//nolint:godot
/* func TestReadLocalQueue(t *testing.T) {
    a := AzureStorageQueues{helper: &AzureQueueHelper{reqURI: "http://127.0.0.1:10001/%s/%s"}}
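The Write change in the helper above accepts both raw and JSON-quoted payloads by trying strconv.Unquote first and falling back to the raw bytes. The behavior in isolation:

    // Sketch: strconv.Unquote succeeds only on quoted string literals;
    // anything else falls back to the raw input.
    package example

    import "strconv"

    func normalize(data []byte) string {
        s, err := strconv.Unquote(string(data))
        if err != nil {
            s = string(data) // not a quoted literal; use as-is
        }

        return s
    }

    // normalize([]byte(`"hello"`)) == "hello"
    // normalize([]byte(`hello`))   == "hello"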
@ -9,13 +9,14 @@ import (
    "fmt"
    "time"

    "github.com/dapr/components-contrib/bindings"
    "github.com/dapr/kit/logger"
    "github.com/pkg/errors"
    "github.com/robfig/cron/v3"

    "github.com/dapr/components-contrib/bindings"
    "github.com/dapr/kit/logger"
)

// Binding represents Cron input binding
// Binding represents Cron input binding.
type Binding struct {
    logger   logger.Logger
    schedule string

@ -25,7 +26,7 @@ type Binding struct {

var _ = bindings.InputBinding(&Binding{})

// NewCron returns a new Cron event input binding
// NewCron returns a new Cron event input binding.
func NewCron(logger logger.Logger) *Binding {
    return &Binding{
        logger: logger,

@ -54,7 +55,7 @@ func (b *Binding) Init(metadata bindings.Metadata) error {
    return nil
}

// Read triggers the Cron scheduler
// Read triggers the Cron scheduler.
func (b *Binding) Read(handler func(*bindings.ReadResponse) ([]byte, error)) error {
    c := cron.New(cron.WithParser(b.parser))
    id, err := c.AddFunc(b.schedule, func() {

@ -78,7 +79,7 @@ func (b *Binding) Read(handler func(*bindings.ReadResponse) ([]byte, error)) err
    return nil
}

// Invoke exposes way to stop previously started cron
// Invoke exposes a way to stop a previously started cron.
func (b *Binding) Invoke(req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
    b.logger.Debugf("operation: %v", req.Operation)
    if req.Operation != bindings.DeleteOperation {

@ -95,7 +96,7 @@ func (b *Binding) Invoke(req *bindings.InvokeRequest) (*bindings.InvokeResponse,
    }, nil
}

// Operations method returns the supported operations by this binding
// Operations method returns the supported operations by this binding.
func (b *Binding) Operations() []bindings.OperationKind {
    return []bindings.OperationKind{
        bindings.DeleteOperation,
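A compressed sketch of how the robfig/cron pieces above fit together: Read schedules the handler, and the Delete operation ultimately stops the scheduler. The schedule and handler below are illustrative, not the binding's own.

    // Sketch: schedule a function with robfig/cron v3, then stop it.
    package main

    import (
        "fmt"
        "time"

        "github.com/robfig/cron/v3"
    )

    func main() {
        c := cron.New() // default parser; the binding configures its own
        id, err := c.AddFunc("@every 1s", func() { fmt.Println("tick") })
        if err != nil {
            panic(err)
        }
        c.Start()
        time.Sleep(3 * time.Second) // let a few ticks fire
        c.Remove(id)                // cancel just this entry...
        c.Stop()                    // ...or stop the whole scheduler
    }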
@ -9,9 +9,10 @@ import (
    "os"
    "testing"

    "github.com/stretchr/testify/assert"

    "github.com/dapr/components-contrib/bindings"
    "github.com/dapr/kit/logger"
)

func getTestMetadata(schedule string) bindings.Metadata {

@ -32,7 +33,7 @@ func getNewCron() *Binding {
    return NewCron(l)
}

// go test -v -timeout 15s -count=1 ./bindings/cron/
// go test -v -timeout 15s -count=1 ./bindings/cron/.
func TestCronInitSuccess(t *testing.T) {
    c := getNewCron()
    err := c.Init(getTestMetadata("@every 1h"))

@ -52,7 +53,7 @@ func TestCronInitFailure(t *testing.T) {
}

// TestLongRead
// go test -v -count=1 -timeout 15s -run TestLongRead ./bindings/cron/
// go test -v -count=1 -timeout 15s -run TestLongRead ./bindings/cron/.
func TestCronReadWithDeleteInvoke(t *testing.T) {
    c := getNewCron()
    schedule := "@every 1s"
@ -7,18 +7,36 @@ package bucket

import (
    "context"
    b64 "encoding/base64"
    "encoding/json"
    "fmt"
    "io/ioutil"
    "net/url"
    "strconv"

    "cloud.google.com/go/storage"
    "github.com/google/uuid"
    "google.golang.org/api/iterator"
    "google.golang.org/api/option"

    "github.com/dapr/components-contrib/bindings"
    "github.com/dapr/kit/logger"
    "github.com/google/uuid"
    "google.golang.org/api/option"
)

// GCPStorage allows saving data to GCP bucket storage
const (
    objectURLBase        = "https://storage.googleapis.com/%s/%s"
    metadataDecodeBase64 = "decodeBase64"
    metadataEncodeBase64 = "encodeBase64"

    metadataKey = "key"
    maxResults  = 1000

    metadataKeyBC = "name"
)

// GCPStorage allows saving data to GCP bucket storage.
type GCPStorage struct {
    metadata gcpMetadata
    metadata *gcpMetadata
    client   *storage.Client
    logger   logger.Logger
}

@ -35,25 +53,32 @@ type gcpMetadata struct {
    TokenURI            string `json:"token_uri"`
    AuthProviderCertURL string `json:"auth_provider_x509_cert_url"`
    ClientCertURL       string `json:"client_x509_cert_url"`
    DecodeBase64        bool   `json:"decodeBase64,string"`
    EncodeBase64        bool   `json:"encodeBase64,string"`
}

// NewGCPStorage returns a new GCP storage instance
type listPayload struct {
    Prefix     string `json:"prefix"`
    MaxResults int32  `json:"maxResults"`
    Delimiter  string `json:"delimiter"`
}

type createResponse struct {
    ObjectURL string `json:"objectURL"`
}

// NewGCPStorage returns a new GCP storage instance.
func NewGCPStorage(logger logger.Logger) *GCPStorage {
    return &GCPStorage{logger: logger}
}

// Init performs connection parsing
// Init performs connection parsing.
func (g *GCPStorage) Init(metadata bindings.Metadata) error {
    b, err := g.parseMetadata(metadata)
    m, b, err := g.parseMetadata(metadata)
    if err != nil {
        return err
    }

    var gm gcpMetadata
    err = json.Unmarshal(b, &gm)
    if err != nil {
        return err
    }
    clientOptions := option.WithCredentialsJSON(b)
    ctx := context.Background()
    client, err := storage.NewClient(ctx, clientOptions)

@ -61,45 +86,225 @@ func (g *GCPStorage) Init(metadata bindings.Metadata) error {
        return err
    }

    g.metadata = gm
    g.metadata = m
    g.client = client

    return nil
}

func (g *GCPStorage) parseMetadata(metadata bindings.Metadata) ([]byte, error) {
func (g *GCPStorage) parseMetadata(metadata bindings.Metadata) (*gcpMetadata, []byte, error) {
    b, err := json.Marshal(metadata.Properties)
    if err != nil {
        return nil, nil, err
    }

    var m gcpMetadata
    err = json.Unmarshal(b, &m)
    if err != nil {
        return nil, nil, err
    }

    return &m, b, nil
}

    return b, nil
}

func (g *GCPStorage) Operations() []bindings.OperationKind {
    return []bindings.OperationKind{bindings.CreateOperation}
}

func (g *GCPStorage) Invoke(req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
    var name string
    if val, ok := req.Metadata["name"]; ok && val != "" {
        name = val
    } else {
        id, err := uuid.NewRandom()
        if err != nil {
            return nil, err
        }
        name = id.String()
    }
    h := g.client.Bucket(g.metadata.Bucket).Object(name).NewWriter(context.Background())
    defer h.Close()
    if _, err := h.Write(req.Data); err != nil {
        return nil, err
    }

    return nil, nil
}

func (g *GCPStorage) Operations() []bindings.OperationKind {
    return []bindings.OperationKind{
        bindings.CreateOperation,
        bindings.GetOperation,
        bindings.DeleteOperation,
        bindings.ListOperation,
    }
}

func (g *GCPStorage) Invoke(req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
    req.Metadata = g.handleBackwardCompatibilityForMetadata(req.Metadata)

    switch req.Operation {
    case bindings.CreateOperation:
        return g.create(req)
    case bindings.GetOperation:
        return g.get(req)
    case bindings.DeleteOperation:
        return g.delete(req)
    case bindings.ListOperation:
        return g.list(req)
    default:
        return nil, fmt.Errorf("unsupported operation %s", req.Operation)
    }
}

func (g *GCPStorage) create(req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
    var err error
    metadata, err := g.metadata.mergeWithRequestMetadata(req)
    if err != nil {
        return nil, fmt.Errorf("gcp bucket binding error. error merging metadata: %w", err)
    }

    var name string
    if val, ok := req.Metadata[metadataKey]; ok && val != "" {
        name = val
    } else {
        name = uuid.New().String()
        g.logger.Debugf("key not found. generating name %s", name)
    }

    d, err := strconv.Unquote(string(req.Data))
    if err == nil {
        req.Data = []byte(d)
    }

    if metadata.DecodeBase64 {
        decoded, decodeError := b64.StdEncoding.DecodeString(string(req.Data))
        if decodeError != nil {
            return nil, fmt.Errorf("gcp bucket binding error. decode : %w", decodeError)
        }
        req.Data = decoded
    }

    h := g.client.Bucket(g.metadata.Bucket).Object(name).NewWriter(context.Background())
    defer h.Close()
    if _, err = h.Write(req.Data); err != nil {
        return nil, fmt.Errorf("gcp bucket binding error. Uploading: %w", err)
    }

    objectURL, err := url.Parse(fmt.Sprintf(objectURLBase, g.metadata.Bucket, name))
    if err != nil {
        return nil, fmt.Errorf("gcp bucket binding error. error building url response: %w", err)
    }

    resp := createResponse{
        ObjectURL: objectURL.String(),
    }

    b, err := json.Marshal(resp)
    if err != nil {
        return nil, fmt.Errorf("gcp binding error. error marshalling create response: %w", err)
    }

    return &bindings.InvokeResponse{
        Data: b,
    }, nil
}

func (g *GCPStorage) get(req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
    metadata, err := g.metadata.mergeWithRequestMetadata(req)
    if err != nil {
        return nil, fmt.Errorf("gcp bucket binding error. error merging metadata: %w", err)
    }

    var key string
    if val, ok := req.Metadata[metadataKey]; ok && val != "" {
        key = val
    } else {
        return nil, fmt.Errorf("gcp bucket binding error: can't read key value")
    }

    rc, err := g.client.Bucket(g.metadata.Bucket).Object(key).NewReader(context.Background())
    if err != nil {
        return nil, fmt.Errorf("gcp bucket binding error: error downloading bucket object: %w", err)
    }
    defer rc.Close()

    data, err := ioutil.ReadAll(rc)
    if err != nil {
        return nil, fmt.Errorf("gcp bucket binding error: ioutil.ReadAll: %v", err)
    }

    if metadata.EncodeBase64 {
        encoded := b64.StdEncoding.EncodeToString(data)
        data = []byte(encoded)
    }

    return &bindings.InvokeResponse{
        Data:     data,
        Metadata: nil,
    }, nil
}

func (g *GCPStorage) delete(req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
    var key string
    if val, ok := req.Metadata[metadataKey]; ok && val != "" {
        key = val
    } else {
        return nil, fmt.Errorf("gcp bucket binding error: can't read key value")
    }

    object := g.client.Bucket(g.metadata.Bucket).Object(key)

    err := object.Delete(context.Background())

    return nil, err
}

func (g *GCPStorage) list(req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
    var payload listPayload
    err := json.Unmarshal(req.Data, &payload)
    if err != nil {
        return nil, err
    }

    if payload.MaxResults == int32(0) {
        payload.MaxResults = maxResults
    }

    input := &storage.Query{
        Prefix:    payload.Prefix,
        Delimiter: payload.Delimiter,
    }

    var result []storage.ObjectAttrs
    it := g.client.Bucket(g.metadata.Bucket).Objects(context.Background(), input)
    for {
        attrs, errIt := it.Next()
        if errIt == iterator.Done || len(result) == int(payload.MaxResults) {
            break
        }
        result = append(result, *attrs)
    }

    jsonResponse, err := json.Marshal(result)
    if err != nil {
        return nil, fmt.Errorf("gcp bucket binding error. list operation. cannot marshal blobs to json: %w", err)
    }

    return &bindings.InvokeResponse{
        Data: jsonResponse,
    }, nil
}

func (g *GCPStorage) Close() error {
    return g.client.Close()
}

// Helper to merge config and request metadata.
func (metadata gcpMetadata) mergeWithRequestMetadata(req *bindings.InvokeRequest) (gcpMetadata, error) {
    merged := metadata

    if val, ok := req.Metadata[metadataDecodeBase64]; ok && val != "" {
        valBool, err := strconv.ParseBool(val)
        if err != nil {
            return merged, err
        }
        merged.DecodeBase64 = valBool
    }

    if val, ok := req.Metadata[metadataEncodeBase64]; ok && val != "" {
        valBool, err := strconv.ParseBool(val)
        if err != nil {
            return merged, err
        }
        merged.EncodeBase64 = valBool
    }

    return merged, nil
}

// Add backward compatibility. 'key' replaces 'name'.
func (g *GCPStorage) handleBackwardCompatibilityForMetadata(metadata map[string]string) map[string]string {
    if val, ok := metadata[metadataKeyBC]; ok && val != "" {
        metadata[metadataKey] = val
        delete(metadata, metadataKeyBC)
    }

    return metadata
}
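The list operation above is driven entirely by the request body. A sketch of invoking it through the binding interface; the bucket prefix and counts are placeholders:

    // Sketch: a ListOperation request against the GCP bucket binding.
    package example

    import (
        "encoding/json"

        "github.com/dapr/components-contrib/bindings"
    )

    func newListRequest() (*bindings.InvokeRequest, error) {
        body, err := json.Marshal(map[string]interface{}{
            "prefix":     "logs/", // only objects under this prefix
            "maxResults": 100,     // defaults to 1000 when omitted
            "delimiter":  "/",     // collapse "subdirectories"
        })
        if err != nil {
            return nil, err
        }

        return &bindings.InvokeRequest{
            Operation: bindings.ListOperation,
            Data:      body,
        }, nil
    }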
@ -6,37 +6,224 @@
package bucket

import (
    "encoding/json"
    "testing"

    "github.com/stretchr/testify/assert"

    "github.com/dapr/components-contrib/bindings"
    "github.com/dapr/kit/logger"
)

func TestInit(t *testing.T) {
    m := bindings.Metadata{}
    m.Properties = map[string]string{
        "auth_provider_x509_cert_url": "a", "auth_uri": "a", "Bucket": "a", "client_x509_cert_url": "a", "client_email": "a", "client_id": "a", "private_key": "a",
        "private_key_id": "a", "project_id": "a", "token_uri": "a", "type": "a",
    }
    gs := GCPStorage{logger: logger.NewLogger("test")}
    b, err := gs.parseMetadata(m)
    assert.Nil(t, err)

    var gm gcpMetadata
    err = json.Unmarshal(b, &gm)
    assert.Nil(t, err)

    assert.Equal(t, "a", gm.AuthProviderCertURL)
    assert.Equal(t, "a", gm.AuthURI)
    assert.Equal(t, "a", gm.Bucket)
    assert.Equal(t, "a", gm.ClientCertURL)
    assert.Equal(t, "a", gm.ClientEmail)
    assert.Equal(t, "a", gm.ClientID)
    assert.Equal(t, "a", gm.PrivateKey)
    assert.Equal(t, "a", gm.PrivateKeyID)
    assert.Equal(t, "a", gm.ProjectID)
    assert.Equal(t, "a", gm.TokenURI)
    assert.Equal(t, "a", gm.Type)
}

func TestParseMetadata(t *testing.T) {
    t.Run("Has correct metadata", func(t *testing.T) {
        m := bindings.Metadata{}
        m.Properties = map[string]string{
            "auth_provider_x509_cert_url": "my_auth_provider_x509",
            "auth_uri":                    "my_auth_uri",
            "Bucket":                      "my_bucket",
            "client_x509_cert_url":        "my_client_x509",
            "client_email":                "my_email@mail.dapr",
            "client_id":                   "my_client_id",
            "private_key":                 "my_private_key",
            "private_key_id":              "my_private_key_id",
            "project_id":                  "my_project_id",
            "token_uri":                   "my_token_uri",
            "type":                        "my_type",
        }
        gs := GCPStorage{logger: logger.NewLogger("test")}
        meta, _, err := gs.parseMetadata(m)
        assert.Nil(t, err)

        assert.Equal(t, "my_auth_provider_x509", meta.AuthProviderCertURL)
        assert.Equal(t, "my_auth_uri", meta.AuthURI)
        assert.Equal(t, "my_bucket", meta.Bucket)
        assert.Equal(t, "my_client_x509", meta.ClientCertURL)
        assert.Equal(t, "my_email@mail.dapr", meta.ClientEmail)
        assert.Equal(t, "my_client_id", meta.ClientID)
        assert.Equal(t, "my_private_key", meta.PrivateKey)
        assert.Equal(t, "my_private_key_id", meta.PrivateKeyID)
        assert.Equal(t, "my_project_id", meta.ProjectID)
        assert.Equal(t, "my_token_uri", meta.TokenURI)
        assert.Equal(t, "my_type", meta.Type)
    })

    t.Run("check backward compatibility", func(t *testing.T) {
        gs := GCPStorage{logger: logger.NewLogger("test")}

        request := bindings.InvokeRequest{}
        request.Operation = bindings.CreateOperation
        request.Metadata = map[string]string{
            "name": "my_file.txt",
        }
        result := gs.handleBackwardCompatibilityForMetadata(request.Metadata)
        assert.NotEmpty(t, result["key"])
    })
}

func TestMergeWithRequestMetadata(t *testing.T) {
    t.Run("Has merged metadata", func(t *testing.T) {
        m := bindings.Metadata{}
        m.Properties = map[string]string{
            "auth_provider_x509_cert_url": "my_auth_provider_x509",
            "auth_uri":                    "my_auth_uri",
            "Bucket":                      "my_bucket",
            "client_x509_cert_url":        "my_client_x509",
            "client_email":                "my_email@mail.dapr",
            "client_id":                   "my_client_id",
            "private_key":                 "my_private_key",
            "private_key_id":              "my_private_key_id",
            "project_id":                  "my_project_id",
            "token_uri":                   "my_token_uri",
            "type":                        "my_type",
            "decodeBase64":                "false",
        }
        gs := GCPStorage{logger: logger.NewLogger("test")}
        meta, _, err := gs.parseMetadata(m)
        assert.Nil(t, err)

        assert.Equal(t, "my_auth_provider_x509", meta.AuthProviderCertURL)
        assert.Equal(t, "my_auth_uri", meta.AuthURI)
        assert.Equal(t, "my_bucket", meta.Bucket)
        assert.Equal(t, "my_client_x509", meta.ClientCertURL)
        assert.Equal(t, "my_email@mail.dapr", meta.ClientEmail)
        assert.Equal(t, "my_client_id", meta.ClientID)
        assert.Equal(t, "my_private_key", meta.PrivateKey)
        assert.Equal(t, "my_private_key_id", meta.PrivateKeyID)
        assert.Equal(t, "my_project_id", meta.ProjectID)
        assert.Equal(t, "my_token_uri", meta.TokenURI)
        assert.Equal(t, "my_type", meta.Type)
        assert.Equal(t, false, meta.DecodeBase64)

        request := bindings.InvokeRequest{}
        request.Metadata = map[string]string{
            "decodeBase64": "true",
        }

        mergedMeta, err := meta.mergeWithRequestMetadata(&request)

        assert.Nil(t, err)

        assert.Equal(t, "my_auth_provider_x509", mergedMeta.AuthProviderCertURL)
        assert.Equal(t, "my_auth_uri", mergedMeta.AuthURI)
        assert.Equal(t, "my_bucket", mergedMeta.Bucket)
        assert.Equal(t, "my_client_x509", mergedMeta.ClientCertURL)
        assert.Equal(t, "my_email@mail.dapr", mergedMeta.ClientEmail)
        assert.Equal(t, "my_client_id", mergedMeta.ClientID)
        assert.Equal(t, "my_private_key", mergedMeta.PrivateKey)
        assert.Equal(t, "my_private_key_id", mergedMeta.PrivateKeyID)
        assert.Equal(t, "my_project_id", mergedMeta.ProjectID)
        assert.Equal(t, "my_token_uri", mergedMeta.TokenURI)
        assert.Equal(t, "my_type", mergedMeta.Type)
        assert.Equal(t, true, mergedMeta.DecodeBase64)
    })

    t.Run("Has invalid merged metadata decodeBase64", func(t *testing.T) {
        m := bindings.Metadata{}
        m.Properties = map[string]string{
            "auth_provider_x509_cert_url": "my_auth_provider_x509",
            "auth_uri":                    "my_auth_uri",
            "Bucket":                      "my_bucket",
            "client_x509_cert_url":        "my_client_x509",
            "client_email":                "my_email@mail.dapr",
            "client_id":                   "my_client_id",
            "private_key":                 "my_private_key",
            "private_key_id":              "my_private_key_id",
            "project_id":                  "my_project_id",
            "token_uri":                   "my_token_uri",
            "type":                        "my_type",
            "decodeBase64":                "false",
        }
        gs := GCPStorage{logger: logger.NewLogger("test")}
        meta, _, err := gs.parseMetadata(m)
        assert.Nil(t, err)

        assert.Equal(t, "my_auth_provider_x509", meta.AuthProviderCertURL)
        assert.Equal(t, "my_auth_uri", meta.AuthURI)
        assert.Equal(t, "my_bucket", meta.Bucket)
        assert.Equal(t, "my_client_x509", meta.ClientCertURL)
        assert.Equal(t, "my_email@mail.dapr", meta.ClientEmail)
        assert.Equal(t, "my_client_id", meta.ClientID)
        assert.Equal(t, "my_private_key", meta.PrivateKey)
        assert.Equal(t, "my_private_key_id", meta.PrivateKeyID)
        assert.Equal(t, "my_project_id", meta.ProjectID)
        assert.Equal(t, "my_token_uri", meta.TokenURI)
        assert.Equal(t, "my_type", meta.Type)
        assert.Equal(t, false, meta.DecodeBase64)

        request := bindings.InvokeRequest{}
        request.Metadata = map[string]string{
            "decodeBase64": "hello",
        }

        mergedMeta, err := meta.mergeWithRequestMetadata(&request)

        assert.NotNil(t, err)
        assert.NotNil(t, mergedMeta)
    })
    t.Run("Has invalid merged metadata encodeBase64", func(t *testing.T) {
        m := bindings.Metadata{}
        m.Properties = map[string]string{
            "auth_provider_x509_cert_url": "my_auth_provider_x509",
            "auth_uri":                    "my_auth_uri",
            "Bucket":                      "my_bucket",
            "client_x509_cert_url":        "my_client_x509",
            "client_email":                "my_email@mail.dapr",
            "client_id":                   "my_client_id",
            "private_key":                 "my_private_key",
            "private_key_id":              "my_private_key_id",
            "project_id":                  "my_project_id",
            "token_uri":                   "my_token_uri",
            "type":                        "my_type",
            "decodeBase64":                "false",
            "encodeBase64":                "true",
        }
        gs := GCPStorage{logger: logger.NewLogger("test")}
        meta, _, err := gs.parseMetadata(m)
        assert.Nil(t, err)

        assert.Equal(t, "my_auth_provider_x509", meta.AuthProviderCertURL)
        assert.Equal(t, "my_auth_uri", meta.AuthURI)
        assert.Equal(t, "my_bucket", meta.Bucket)
        assert.Equal(t, "my_client_x509", meta.ClientCertURL)
        assert.Equal(t, "my_email@mail.dapr", meta.ClientEmail)
        assert.Equal(t, "my_client_id", meta.ClientID)
        assert.Equal(t, "my_private_key", meta.PrivateKey)
        assert.Equal(t, "my_private_key_id", meta.PrivateKeyID)
        assert.Equal(t, "my_project_id", meta.ProjectID)
        assert.Equal(t, "my_token_uri", meta.TokenURI)
        assert.Equal(t, "my_type", meta.Type)
        assert.Equal(t, false, meta.DecodeBase64)
        assert.Equal(t, true, meta.EncodeBase64)

        request := bindings.InvokeRequest{}
        request.Metadata = map[string]string{
            "encodeBase64": "hello",
        }

        mergedMeta, err := meta.mergeWithRequestMetadata(&request)

        assert.NotNil(t, err)
        assert.NotNil(t, mergedMeta)
    })
}

func TestGetOption(t *testing.T) {
    gs := GCPStorage{logger: logger.NewLogger("test")}
    gs.metadata = &gcpMetadata{}
    t.Run("return error if key is missing", func(t *testing.T) {
        r := bindings.InvokeRequest{}
        _, err := gs.get(&r)
        assert.Error(t, err)
    })
}

func TestDeleteOption(t *testing.T) {
    gs := GCPStorage{logger: logger.NewLogger("test")}
    gs.metadata = &gcpMetadata{}

    t.Run("return error if key is missing", func(t *testing.T) {
        r := bindings.InvokeRequest{}
        _, err := gs.delete(&r)
        assert.Error(t, err)
    })
}
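The backward-compatibility test above pins down the alias: callers that still send the legacy "name" metadata key get it rewritten to "key" before the operation dispatches. A sketch of what such a legacy caller looks like; the file name is a placeholder:

    // Sketch: legacy 'name' metadata is accepted and mapped to 'key'.
    package example

    import "github.com/dapr/components-contrib/bindings"

    func legacyCreateRequest(data []byte) *bindings.InvokeRequest {
        return &bindings.InvokeRequest{
            Operation: bindings.CreateOperation,
            Data:      data,
            // Old clients used "name"; the binding rewrites it to "key".
            Metadata: map[string]string{"name": "my_file.txt"},
        }
    }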
@ -11,9 +11,10 @@ import (
    "fmt"

    "cloud.google.com/go/pubsub"
    "google.golang.org/api/option"

    "github.com/dapr/components-contrib/bindings"
    "github.com/dapr/kit/logger"
    "google.golang.org/api/option"
)

const (

@ -22,7 +23,7 @@ const (
    topic = "topic"
)

// GCPPubSub is an input/output binding for GCP Pub Sub
// GCPPubSub is an input/output binding for GCP Pub Sub.
type GCPPubSub struct {
    client   *pubsub.Client
    metadata *pubSubMetadata

@ -44,12 +45,12 @@ type pubSubMetadata struct {
    ClientCertURL string `json:"client_x509_cert_url"`
}

// NewGCPPubSub returns a new GCPPubSub instance
// NewGCPPubSub returns a new GCPPubSub instance.
func NewGCPPubSub(logger logger.Logger) *GCPPubSub {
    return &GCPPubSub{logger: logger}
}

// Init parses metadata and creates a new Pub Sub client
// Init parses metadata and creates a new Pub Sub client.
func (g *GCPPubSub) Init(metadata bindings.Metadata) error {
    b, err := g.parseMetadata(metadata)
    if err != nil {
@ -9,9 +9,10 @@ import (
    "encoding/json"
    "testing"

    "github.com/stretchr/testify/assert"

    "github.com/dapr/components-contrib/bindings"
    "github.com/dapr/kit/logger"
    "github.com/stretchr/testify/assert"
)

func TestInit(t *testing.T) {
|
@ -13,20 +13,21 @@ import (
    "strings"
    "time"

    graphql "github.com/machinebox/graphql"

    "github.com/dapr/components-contrib/bindings"
    "github.com/dapr/kit/logger"
    graphql "github.com/machinebox/graphql"
)

const (
    // configurations to connect to GraphQL
    // configurations to connect to GraphQL.
    connectionEndPointKey = "endpoint"

    // keys from request's metadata
    // keys from request's metadata.
    commandQuery    = "query"
    commandMutation = "mutation"

    // keys from response's metadata
    // keys from response's metadata.
    respOpKey        = "operation"
    respStartTimeKey = "start-time"
    respEndTimeKey   = "end-time"

@ -36,7 +37,7 @@ const (
    MutationOperation bindings.OperationKind = "mutation"
)

// GraphQL represents GraphQL output bindings
// GraphQL represents GraphQL output bindings.
type GraphQL struct {
    client *graphql.Client
    header map[string]string

@ -45,12 +46,12 @@ type GraphQL struct {

var _ = bindings.OutputBinding(&GraphQL{})

// NewGraphQL returns a new GraphQL binding instance
// NewGraphQL returns a new GraphQL binding instance.
func NewGraphQL(logger logger.Logger) *GraphQL {
    return &GraphQL{logger: logger}
}

// Init initializes the GraphQL binding
// Init initializes the GraphQL binding.
func (gql *GraphQL) Init(metadata bindings.Metadata) error {
    gql.logger.Debug("GraphQL Error: Initializing GraphQL binding")

@ -74,7 +75,7 @@ func (gql *GraphQL) Init(metadata bindings.Metadata) error {
    return nil
}

// Operations returns list of operations supported by GraphQL binding
// Operations returns the list of operations supported by the GraphQL binding.
func (gql *GraphQL) Operations() []bindings.OperationKind {
    return []bindings.OperationKind{
        QueryOperation,

@ -82,7 +83,7 @@ func (gql *GraphQL) Operations() []bindings.OperationKind {
    }
}

// Invoke handles all invoke operations
// Invoke handles all invoke operations.
func (gql *GraphQL) Invoke(req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
    if req == nil {
        return nil, fmt.Errorf("GraphQL Error: Invoke request required")
|
@ -36,12 +36,12 @@ type httpMetadata struct {
    URL string `mapstructure:"url"`
}

// NewHTTP returns a new HTTPSource
// NewHTTP returns a new HTTPSource.
func NewHTTP(logger logger.Logger) *HTTPSource {
    return &HTTPSource{logger: logger}
}

// Init performs metadata parsing
// Init performs metadata parsing.
func (h *HTTPSource) Init(metadata bindings.Metadata) error {
    if err := mapstructure.Decode(metadata.Properties, &h.metadata); err != nil {
        return err
|
@ -12,10 +12,11 @@ import (
    "strings"
    "testing"

    "github.com/dapr/kit/logger"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"

    "github.com/dapr/kit/logger"

    "github.com/dapr/components-contrib/bindings"
    binding_http "github.com/dapr/components-contrib/bindings/http"
)
|
@ -11,13 +11,14 @@ import (
    "errors"
    "fmt"

    "github.com/dapr/components-contrib/bindings"
    "github.com/dapr/kit/logger"
    influxdb2 "github.com/influxdata/influxdb-client-go"
    "github.com/influxdata/influxdb-client-go/api"

    "github.com/dapr/components-contrib/bindings"
    "github.com/dapr/kit/logger"
)

// Influx allows writing to InfluxDB
// Influx allows writing to InfluxDB.
type Influx struct {
    metadata *influxMetadata
    client   influxdb2.Client

@ -32,12 +33,12 @@ type influxMetadata struct {
    Bucket string `json:"bucket"`
}

// NewInflux returns a new kafka binding instance
// NewInflux returns a new Influx binding instance.
func NewInflux(logger logger.Logger) *Influx {
    return &Influx{logger: logger}
}

// Init does metadata parsing and connection establishment
// Init does metadata parsing and connection establishment.
func (i *Influx) Init(metadata bindings.Metadata) error {
    influxMeta, err := i.getInfluxMetadata(metadata)
    if err != nil {

@ -68,7 +69,7 @@ func (i *Influx) Init(metadata bindings.Metadata) error {
    return nil
}

// GetInfluxMetadata returns new Influx metadata
// GetInfluxMetadata returns new Influx metadata.
func (i *Influx) getInfluxMetadata(metadata bindings.Metadata) (*influxMetadata, error) {
    b, err := json.Marshal(metadata.Properties)
    if err != nil {

@ -84,12 +85,12 @@ func (i *Influx) getInfluxMetadata(metadata bindings.Metadata) (*influxMetadata,
    return &iMetadata, nil
}

// Operations returns supported operations
// Operations returns supported operations.
func (i *Influx) Operations() []bindings.OperationKind {
    return []bindings.OperationKind{bindings.CreateOperation}
}

// Invoke called on supported operations
// Invoke is called on supported operations.
func (i *Influx) Invoke(req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
    var jsonPoint map[string]interface{}
    err := json.Unmarshal(req.Data, &jsonPoint)
|
@ -8,9 +8,10 @@ package influx
import (
    "testing"

    "github.com/stretchr/testify/assert"

    "github.com/dapr/components-contrib/bindings"
    "github.com/dapr/kit/logger"
    "github.com/stretchr/testify/assert"
)

func TestParseMetadata(t *testing.T) {
|
@ -5,7 +5,7 @@

package bindings

// InputBinding is the interface to define a binding that triggers on incoming events
// InputBinding is the interface to define a binding that triggers on incoming events.
type InputBinding interface {
    // Init passes connection and properties metadata to the binding implementation
    Init(metadata Metadata) error
|
@ -18,6 +18,7 @@ import (
    "syscall"

    "github.com/Shopify/sarama"

    "github.com/dapr/components-contrib/bindings"
    "github.com/dapr/kit/logger"
)

@ -26,7 +27,7 @@ const (
    key = "partitionKey"
)

// Kafka allows reading/writing to a Kafka consumer group
// Kafka allows reading/writing to a Kafka consumer group.
type Kafka struct {
    producer sarama.SyncProducer
    topics   []string

@ -78,12 +79,12 @@ func (consumer *consumer) Setup(sarama.ConsumerGroupSession) error {
    return nil
}

// NewKafka returns a new kafka binding instance
// NewKafka returns a new kafka binding instance.
func NewKafka(logger logger.Logger) *Kafka {
    return &Kafka{logger: logger}
}

// Init does metadata parsing and connection establishment
// Init does metadata parsing and connection establishment.
func (k *Kafka) Init(metadata bindings.Metadata) error {
    meta, err := k.getKafkaMetadata(metadata)
    if err != nil {

@ -133,7 +134,7 @@ func (k *Kafka) Invoke(req *bindings.InvokeRequest) (*bindings.InvokeResponse, e
    return nil, nil
}

// GetKafkaMetadata returns new Kafka metadata
// GetKafkaMetadata returns new Kafka metadata.
func (k *Kafka) getKafkaMetadata(metadata bindings.Metadata) (*kafkaMetadata, error) {
    meta := kafkaMetadata{}
    meta.ConsumerGroup = metadata.Properties["consumerGroup"]
|
@ -9,11 +9,13 @@ import (
    "errors"
    "testing"

    "github.com/Shopify/sarama"
    "github.com/dapr/components-contrib/bindings"
    "github.com/dapr/kit/logger"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"

    "github.com/Shopify/sarama"

    "github.com/dapr/components-contrib/bindings"
    "github.com/dapr/kit/logger"
)

func TestParseMetadata(t *testing.T) {
|
@ -14,13 +14,14 @@ import (
    "syscall"
    "time"

    kubeclient "github.com/dapr/components-contrib/authentication/kubernetes"
    "github.com/dapr/components-contrib/bindings"
    "github.com/dapr/kit/logger"
    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/fields"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/cache"

    kubeclient "github.com/dapr/components-contrib/authentication/kubernetes"
    "github.com/dapr/components-contrib/bindings"
    "github.com/dapr/kit/logger"
)

type kubernetesInput struct {

@ -38,7 +39,7 @@ type EventResponse struct {

var _ = bindings.InputBinding(&kubernetesInput{})

// NewKubernetes returns a new Kubernetes event input binding
// NewKubernetes returns a new Kubernetes event input binding.
func NewKubernetes(logger logger.Logger) bindings.InputBinding {
    return &kubernetesInput{logger: logger}
}
@ -9,9 +9,10 @@ import (
    "testing"
    "time"

    "github.com/stretchr/testify/assert"

    "github.com/dapr/components-contrib/bindings"
    "github.com/dapr/kit/logger"
)

func TestParseMetadata(t *testing.T) {
|
@ -16,22 +16,23 @@ import (
    "strconv"

    securejoin "github.com/cyphar/filepath-securejoin"
    "github.com/google/uuid"

    "github.com/dapr/components-contrib/bindings"
    "github.com/dapr/kit/logger"
    "github.com/google/uuid"
)

const (
    fileNameMetadataKey = "fileName"
)

// LocalStorage allows saving files to disk
// LocalStorage allows saving files to disk.
type LocalStorage struct {
    metadata *Metadata
    logger   logger.Logger
}

// Metadata defines the metadata
// Metadata defines the metadata.
type Metadata struct {
    RootPath string `json:"rootPath"`
}

@ -40,12 +41,12 @@ type createResponse struct {
    FileName string `json:"fileName"`
}

// NewLocalStorage returns a new LocalStorage instance
// NewLocalStorage returns a new LocalStorage instance.
func NewLocalStorage(logger logger.Logger) *LocalStorage {
    return &LocalStorage{logger: logger}
}

// Init performs metadata parsing
// Init performs metadata parsing.
func (ls *LocalStorage) Init(metadata bindings.Metadata) error {
    m, err := ls.parseMetadata(metadata)
    if err != nil {

@ -77,7 +78,7 @@ func (ls *LocalStorage) parseMetadata(metadata bindings.Metadata) (*Metadata, er
    return &m, nil
}

// Operations enumerates supported binding operations
// Operations enumerates supported binding operations.
func (ls *LocalStorage) Operations() []bindings.OperationKind {
    return []bindings.OperationKind{
        bindings.CreateOperation,

@ -231,7 +232,7 @@ func walkPath(root string) ([]string, error) {
    return files, err
}

// Invoke is called for output bindings
// Invoke is called for output bindings.
func (ls *LocalStorage) Invoke(req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
    filename := ""
    if val, ok := req.Metadata[fileNameMetadataKey]; ok && val != "" {
|
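Reviewer note (not part of the diff): a sketch of driving this binding end to end with the metadata keys visible above ("rootPath" on Init, "fileName" on Invoke). The import path and all concrete values are assumptions for illustration, not taken from the diff.

package main

import (
    "github.com/dapr/components-contrib/bindings"
    "github.com/dapr/components-contrib/bindings/localstorage"
    "github.com/dapr/kit/logger"
)

func main() {
    ls := localstorage.NewLocalStorage(logger.NewLogger("sample"))
    // rootPath is the directory all files are confined to (see securejoin above).
    err := ls.Init(bindings.Metadata{
        Name:       "localstorage",
        Properties: map[string]string{"rootPath": "/tmp/files"},
    })
    if err != nil {
        panic(err)
    }
    // fileNameMetadataKey selects the file to create under rootPath.
    _, err = ls.Invoke(&bindings.InvokeRequest{
        Operation: bindings.CreateOperation,
        Data:      []byte("hello"),
        Metadata:  map[string]string{"fileName": "hello.txt"},
    })
    if err != nil {
        panic(err)
    }
}
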
@@ -8,9 +8,10 @@ package localstorage
 import (
 	"testing"
 
+	"github.com/stretchr/testify/assert"
+
 	"github.com/dapr/components-contrib/bindings"
 	"github.com/dapr/kit/logger"
-	"github.com/stretchr/testify/assert"
 )
 
 func TestParseMetadata(t *testing.T) {

@@ -5,7 +5,7 @@
 
 package bindings
 
-// Metadata represents a set of binding specific properties
+// Metadata represents a set of binding specific properties.
 type Metadata struct {
 	Name       string
 	Properties map[string]string `json:"properties"`

@@ -27,7 +27,7 @@ import (
 )
 
 const (
-	// Keys
+	// Keys.
 	mqttURL   = "url"
 	mqttTopic = "topic"
 	mqttQOS   = "qos"
@@ -39,17 +39,17 @@ const (
 	mqttClientKey         = "clientKey"
 	mqttBackOffMaxRetries = "backOffMaxRetries"
 
-	// errors
+	// errors.
 	errorMsgPrefix = "mqtt binding error:"
 
-	// Defaults
+	// Defaults.
 	defaultQOS          = 0
 	defaultRetain       = false
 	defaultWait         = 3 * time.Second
 	defaultCleanSession = true
 )
 
-// MQTT allows sending and receiving data to/from an MQTT broker
+// MQTT allows sending and receiving data to/from an MQTT broker.
 type MQTT struct {
 	producer mqtt.Client
 	consumer mqtt.Client
@@ -61,7 +61,7 @@ type MQTT struct {
 	backOff backoff.BackOff
 }
 
-// NewMQTT returns a new MQTT instance
+// NewMQTT returns a new MQTT instance.
 func NewMQTT(logger logger.Logger) *MQTT {
 	return &MQTT{logger: logger}
 }
@@ -153,7 +153,7 @@ func parseMQTTMetaData(md bindings.Metadata) (*metadata, error) {
 	return &m, nil
 }
 
-// Init does MQTT connection parsing
+// Init does MQTT connection parsing.
 func (m *MQTT) Init(metadata bindings.Metadata) error {
 	mqttMeta, err := parseMQTTMetaData(metadata)
 	if err != nil {

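Reviewer note (not part of the diff): a sketch of the defaulting pattern these constants support: fall back to the default when the metadata key is absent. The helper below is hypothetical; it assumes only the constants shown above plus the standard strconv and fmt packages.

// Hypothetical helper inside the mqtt package.
func qosWithDefault(props map[string]string) (byte, error) {
    val, ok := props[mqttQOS]
    if !ok || val == "" {
        return defaultQOS, nil // defaultQOS = 0
    }
    qos, err := strconv.ParseUint(val, 10, 8)
    if err != nil {
        return 0, fmt.Errorf("%s invalid qos %s: %v", errorMsgPrefix, val, err)
    }
    return byte(qos), nil
}
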
@@ -11,8 +11,9 @@ import (
 	"errors"
 	"testing"
 
-	"github.com/dapr/components-contrib/bindings"
 	"github.com/stretchr/testify/assert"
+
+	"github.com/dapr/components-contrib/bindings"
 )
 
 func getFakeProperties() map[string]string {

@@ -17,10 +17,11 @@ import (
 	"strconv"
 	"time"
 
-	"github.com/dapr/components-contrib/bindings"
-	"github.com/dapr/kit/logger"
 	"github.com/go-sql-driver/mysql"
 	"github.com/pkg/errors"
+
+	"github.com/dapr/components-contrib/bindings"
+	"github.com/dapr/kit/logger"
 )
 
 const (
@@ -29,7 +30,7 @@ const (
 	queryOperation bindings.OperationKind = "query"
 	closeOperation bindings.OperationKind = "close"
 
-	// configurations to connect to Mysql, either a data source name represent by URL
+	// configurations to connect to Mysql, either a data source name represent by URL.
 	connectionURLKey = "url"
 
 	// To connect to MySQL running in Azure over SSL you have to download a
@@ -38,19 +39,19 @@ const (
 	// When the user provides a pem path their connection string must end with
 	// &tls=custom
 	// The connection string should be in the following format
-	// "%s:%s@tcp(%s:3306)/%s?allowNativePasswords=true&tls=custom",'myadmin@mydemoserver', 'yourpassword', 'mydemoserver.mysql.database.azure.com', 'targetdb'
+	// "%s:%s@tcp(%s:3306)/%s?allowNativePasswords=true&tls=custom",'myadmin@mydemoserver', 'yourpassword', 'mydemoserver.mysql.database.azure.com', 'targetdb'.
 	pemPathKey = "pemPath"
 
-	// other general settings for DB connections
+	// other general settings for DB connections.
 	maxIdleConnsKey    = "maxIdleConns"
 	maxOpenConnsKey    = "maxOpenConns"
 	connMaxLifetimeKey = "connMaxLifetime"
 	connMaxIdleTimeKey = "connMaxIdleTime"
 
-	// keys from request's metadata
+	// keys from request's metadata.
 	commandSQLKey = "sql"
 
-	// keys from response's metadata
+	// keys from response's metadata.
 	respOpKey        = "operation"
 	respSQLKey       = "sql"
 	respStartTimeKey = "start-time"
@@ -59,7 +60,7 @@ const (
 	respDurationKey  = "duration"
 )
 
-// Mysql represents MySQL output bindings
+// Mysql represents MySQL output bindings.
 type Mysql struct {
 	db     *sql.DB
 	logger logger.Logger
@@ -67,12 +68,12 @@ type Mysql struct {
 
 var _ = bindings.OutputBinding(&Mysql{})
 
-// NewMysql returns a new MySQL output binding
+// NewMysql returns a new MySQL output binding.
 func NewMysql(logger logger.Logger) *Mysql {
 	return &Mysql{logger: logger}
 }
 
-// Init initializes the MySQL binding
+// Init initializes the MySQL binding.
 func (m *Mysql) Init(metadata bindings.Metadata) error {
 	m.logger.Debug("Initializing MySql binding")
 
@@ -117,7 +118,7 @@ func (m *Mysql) Init(metadata bindings.Metadata) error {
 	return nil
 }
 
-// Invoke handles all invoke operations
+// Invoke handles all invoke operations.
 func (m *Mysql) Invoke(req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
 	if req == nil {
 		return nil, errors.Errorf("invoke request required")
@@ -151,14 +152,14 @@ func (m *Mysql) Invoke(req *bindings.InvokeRequest) (*bindings.InvokeResponse, e
 	case execOperation:
 		r, err := m.exec(s)
 		if err != nil {
-			return nil, errors.Wrapf(err, "error executing %s with %v", s, err)
+			return nil, err
 		}
 		resp.Metadata[respRowsAffectedKey] = strconv.FormatInt(r, 10)
 
 	case queryOperation:
 		d, err := m.query(s)
 		if err != nil {
-			return nil, errors.Wrapf(err, "error executing %s with %v", s, err)
+			return nil, err
 		}
 		resp.Data = d
 
@@ -174,7 +175,7 @@ func (m *Mysql) Invoke(req *bindings.InvokeRequest) (*bindings.InvokeResponse, e
 	return resp, nil
 }
 
-// Operations returns list of operations supported by Mysql binding
+// Operations returns list of operations supported by Mysql binding.
 func (m *Mysql) Operations() []bindings.OperationKind {
 	return []bindings.OperationKind{
 		execOperation,
@@ -183,7 +184,7 @@ func (m *Mysql) Operations() []bindings.OperationKind {
 	}
 }
 
-// Close will close the DB
+// Close will close the DB.
 func (m *Mysql) Close() error {
 	if m.db != nil {
 		return m.db.Close()
@@ -192,12 +193,12 @@ func (m *Mysql) Close() error {
 	return nil
 }
 
-func (m *Mysql) query(s string) ([]byte, error) {
-	m.logger.Debugf("query: %s", s)
+func (m *Mysql) query(sql string) ([]byte, error) {
+	m.logger.Debugf("query: %s", sql)
 
-	rows, err := m.db.Query(s)
+	rows, err := m.db.Query(sql)
 	if err != nil {
-		return nil, errors.Wrapf(err, "error executing %s", s)
+		return nil, errors.Wrapf(err, "error executing %s", sql)
 	}
 
 	defer func() {
@@ -207,7 +208,7 @@ func (m *Mysql) query(s string) ([]byte, error) {
 
 	result, err := m.jsonify(rows)
 	if err != nil {
-		return nil, errors.Wrapf(err, "error marshalling query result for %s", s)
+		return nil, errors.Wrapf(err, "error marshalling query result for %s", sql)
 	}
 
 	return result, nil

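Reviewer note (not part of the diff): the pemPath comment above describes the Azure-over-SSL flow. A sketch of what that wiring typically looks like with go-sql-driver/mysql; the file path and DSN below are placeholders, not values from the diff.

package main

import (
    "crypto/tls"
    "crypto/x509"
    "database/sql"
    "errors"
    "io/ioutil"

    "github.com/go-sql-driver/mysql"
)

func openWithCustomTLS() (*sql.DB, error) {
    // Read the CA certificate the pemPath metadata points at.
    pem, err := ioutil.ReadFile("/path/to/ca.pem")
    if err != nil {
        return nil, err
    }
    pool := x509.NewCertPool()
    if !pool.AppendCertsFromPEM(pem) {
        return nil, errors.New("failed to append PEM")
    }
    // The name "custom" must match the &tls=custom suffix on the DSN.
    if err := mysql.RegisterTLSConfig("custom", &tls.Config{RootCAs: pool}); err != nil {
        return nil, err
    }
    return sql.Open("mysql", "user:password@tcp(server:3306)/db?allowNativePasswords=true&tls=custom")
}
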
@@ -12,12 +12,16 @@ import (
 	"testing"
 	"time"
 
+	"github.com/stretchr/testify/assert"
+
 	"github.com/dapr/components-contrib/bindings"
 	"github.com/dapr/kit/logger"
-	"github.com/stretchr/testify/assert"
 )
 
 const (
+	// MySQL doesn't accept RFC3339 formatted time, rejects trailing 'Z' for UTC indicator.
+	mySQLDateTimeFormat = "2006-01-02 15:04:05"
+
 	testCreateTable = `CREATE TABLE IF NOT EXISTS foo (
 		id bigint NOT NULL,
 		v1 character varying(50) NOT NULL,
@@ -84,7 +88,7 @@ func TestMysqlIntegration(t *testing.T) {
 	t.Run("Invoke insert", func(t *testing.T) {
 		req.Operation = execOperation
 		for i := 0; i < 10; i++ {
-			req.Metadata[commandSQLKey] = fmt.Sprintf(testInsert, i, i, true, time.Now().Format(time.RFC3339))
+			req.Metadata[commandSQLKey] = fmt.Sprintf(testInsert, i, i, true, time.Now().Format(mySQLDateTimeFormat))
 			res, err := b.Invoke(req)
 			assertResponse(t, res, err)
 		}
@@ -93,7 +97,7 @@ func TestMysqlIntegration(t *testing.T) {
 	t.Run("Invoke update", func(t *testing.T) {
 		req.Operation = execOperation
 		for i := 0; i < 10; i++ {
-			req.Metadata[commandSQLKey] = fmt.Sprintf(testUpdate, time.Now().Format(time.RFC3339), i)
+			req.Metadata[commandSQLKey] = fmt.Sprintf(testUpdate, time.Now().Format(mySQLDateTimeFormat), i)
 			res, err := b.Invoke(req)
 			assertResponse(t, res, err)
 		}
@@ -153,5 +157,7 @@ func TestMysqlIntegration(t *testing.T) {
 func assertResponse(t *testing.T, res *bindings.InvokeResponse, err error) {
 	assert.NoError(t, err)
 	assert.NotNil(t, res)
-	assert.NotNil(t, res.Metadata)
+	if res != nil {
+		assert.NotNil(t, res.Metadata)
+	}
 }

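Reviewer note (not part of the diff): two things worth calling out in this test fix. First, testify's assert.NotNil records a failure but does not stop the test, so the old code could still dereference a nil res and panic; the added if res != nil guard keeps the failure readable. Second, the layout swap matters because MySQL's DATETIME parser rejects RFC3339's 'T' separator and trailing 'Z', which the following standalone snippet demonstrates:

package main

import (
    "fmt"
    "time"
)

func main() {
    ts := time.Date(2021, 7, 1, 12, 30, 0, 0, time.UTC)
    fmt.Println(ts.Format(time.RFC3339))          // 2021-07-01T12:30:00Z  (rejected by MySQL DATETIME)
    fmt.Println(ts.Format("2006-01-02 15:04:05")) // 2021-07-01 12:30:00   (accepted)
}
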
@@ -7,9 +7,10 @@ import (
 	"time"
 
 	"github.com/DATA-DOG/go-sqlmock"
+	"github.com/stretchr/testify/assert"
+
 	"github.com/dapr/components-contrib/bindings"
 	"github.com/dapr/kit/logger"
-	"github.com/stretchr/testify/assert"
 )
 
 func TestQuery(t *testing.T) {

@@ -5,7 +5,7 @@
 
 package bindings
 
-// OutputBinding is the interface for an output binding, allowing users to invoke remote systems with optional payloads
+// OutputBinding is the interface for an output binding, allowing users to invoke remote systems with optional payloads.
 type OutputBinding interface {
 	Init(metadata Metadata) error
 	Invoke(req *InvokeRequest) (*InvokeResponse, error)

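Reviewer note (not part of the diff): a minimal sketch of what satisfying this interface looks like, assuming it also requires Operations() []OperationKind, as every output binding in this diff implements it. The echo behavior is illustrative only, not a component from this repository.

package bindings

type echoBinding struct{}

func (e *echoBinding) Init(metadata Metadata) error { return nil }

func (e *echoBinding) Operations() []OperationKind {
    return []OperationKind{CreateOperation}
}

func (e *echoBinding) Invoke(req *InvokeRequest) (*InvokeResponse, error) {
    // Echo the payload straight back to the caller.
    return &InvokeResponse{Data: req.Data, Metadata: req.Metadata}, nil
}
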
@@ -11,10 +11,11 @@ import (
 	"strconv"
 	"time"
 
-	"github.com/dapr/components-contrib/bindings"
-	"github.com/dapr/kit/logger"
 	"github.com/jackc/pgx/v4/pgxpool"
 	"github.com/pkg/errors"
+
+	"github.com/dapr/components-contrib/bindings"
+	"github.com/dapr/kit/logger"
 )
 
 // List of operations.
@@ -27,7 +28,7 @@ const (
 	commandSQLKey = "sql"
 )
 
-// Postgres represents PostgreSQL output binding
+// Postgres represents PostgreSQL output binding.
 type Postgres struct {
 	logger logger.Logger
 	db     *pgxpool.Pool
@@ -35,12 +36,12 @@ type Postgres struct {
 
 var _ = bindings.OutputBinding(&Postgres{})
 
-// NewPostgres returns a new PostgreSQL output binding
+// NewPostgres returns a new PostgreSQL output binding.
 func NewPostgres(logger logger.Logger) *Postgres {
 	return &Postgres{logger: logger}
 }
 
-// Init initializes the PostgreSql binding
+// Init initializes the PostgreSql binding.
 func (p *Postgres) Init(metadata bindings.Metadata) error {
 	url, ok := metadata.Properties[connectionURLKey]
 	if !ok || url == "" {
@@ -60,7 +61,7 @@ func (p *Postgres) Init(metadata bindings.Metadata) error {
 	return nil
 }
 
-// Operations returns list of operations supported by PostgreSql binding
+// Operations returns list of operations supported by PostgreSql binding.
 func (p *Postgres) Operations() []bindings.OperationKind {
 	return []bindings.OperationKind{
 		execOperation,
@@ -69,7 +70,7 @@ func (p *Postgres) Operations() []bindings.OperationKind {
 	}
 }
 
-// Invoke handles all invoke operations
+// Invoke handles all invoke operations.
 func (p *Postgres) Invoke(req *bindings.InvokeRequest) (resp *bindings.InvokeResponse, err error) {
 	if req == nil {
 		return nil, errors.Errorf("invoke request required")
@@ -129,7 +130,7 @@ func (p *Postgres) Invoke(req *bindings.InvokeRequest) (resp *bindings.InvokeRes
 	return resp, nil
 }
 
-// Close close PostgreSql instance
+// Close close PostgreSql instance.
 func (p *Postgres) Close() error {
 	if p.db == nil {
 		return nil

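Reviewer note (not part of the diff): a sketch of driving this binding with the metadata keys visible above ("url" for Init, "sql" for requests). The import path is an assumption and the connection string is a placeholder.

package main

import (
    "fmt"

    "github.com/dapr/components-contrib/bindings"
    "github.com/dapr/components-contrib/bindings/postgres"
    "github.com/dapr/kit/logger"
)

func main() {
    p := postgres.NewPostgres(logger.NewLogger("sample"))
    err := p.Init(bindings.Metadata{
        Name:       "postgres",
        Properties: map[string]string{"url": "postgres://user:pass@localhost:5432/db"},
    })
    if err != nil {
        panic(err)
    }
    defer p.Close()

    // queryOperation with the SQL statement carried in request metadata.
    res, err := p.Invoke(&bindings.InvokeRequest{
        Operation: "query",
        Metadata:  map[string]string{"sql": "SELECT 1"},
    })
    if err != nil {
        panic(err)
    }
    fmt.Println(string(res.Data))
}
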
@@ -11,9 +11,10 @@ import (
 	"testing"
 	"time"
 
+	"github.com/stretchr/testify/assert"
+
 	"github.com/dapr/components-contrib/bindings"
 	"github.com/dapr/kit/logger"
-	"github.com/stretchr/testify/assert"
 )
 
 const (

@@ -10,18 +10,19 @@ import (
 	"fmt"
 	"strconv"
 
+	"github.com/keighl/postmark"
+
 	"github.com/dapr/components-contrib/bindings"
 	"github.com/dapr/kit/logger"
-	"github.com/keighl/postmark"
 )
 
-// Postmark allows sending of emails using the 3rd party Postmark service
+// Postmark allows sending of emails using the 3rd party Postmark service.
 type Postmark struct {
 	metadata postmarkMetadata
 	logger   logger.Logger
 }
 
-// Our metadata holds standard email properties
+// Our metadata holds standard email properties.
 type postmarkMetadata struct {
 	ServerToken  string `json:"serverToken"`
 	AccountToken string `json:"accountToken"`
@@ -32,12 +33,12 @@ type postmarkMetadata struct {
 	EmailBcc string `json:"emailBcc"`
 }
 
-// NewPostmark returns a new Postmark bindings instance
+// NewPostmark returns a new Postmark bindings instance.
 func NewPostmark(logger logger.Logger) *Postmark {
 	return &Postmark{logger: logger}
 }
 
-// Helper to parse metadata
+// Helper to parse metadata.
 func (p *Postmark) parseMetadata(meta bindings.Metadata) (postmarkMetadata, error) {
 	pMeta := postmarkMetadata{}
 
@@ -63,7 +64,7 @@ func (p *Postmark) parseMetadata(meta bindings.Metadata) (postmarkMetadata, erro
 	return pMeta, nil
 }
 
-// Init does metadata parsing and not much else :)
+// Init does metadata parsing and not much else :).
 func (p *Postmark) Init(metadata bindings.Metadata) error {
 	// Parse input metadata
 	meta, err := p.parseMetadata(metadata)
@@ -77,12 +78,12 @@ func (p *Postmark) Init(metadata bindings.Metadata) error {
 	return nil
 }
 
-// Operations returns list of operations supported by Postmark binding
+// Operations returns list of operations supported by Postmark binding.
 func (p *Postmark) Operations() []bindings.OperationKind {
 	return []bindings.OperationKind{bindings.CreateOperation}
 }
 
-// Invoke does the work of sending message to Postmark API
+// Invoke does the work of sending message to Postmark API.
 func (p *Postmark) Invoke(req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
 	// We allow two possible sources of the properties we need,
 	// the component metadata or request metadata, request takes priority if present

@@ -8,9 +8,10 @@ package postmark
 import (
 	"testing"
 
+	"github.com/stretchr/testify/assert"
+
 	"github.com/dapr/components-contrib/bindings"
 	"github.com/dapr/kit/logger"
-	"github.com/stretchr/testify/assert"
 )
 
 func TestParseMetadata(t *testing.T) {

@@ -12,10 +12,11 @@ import (
 	"strconv"
 	"time"
 
+	"github.com/streadway/amqp"
+
 	"github.com/dapr/components-contrib/bindings"
+	contrib_metadata "github.com/dapr/components-contrib/metadata"
 	"github.com/dapr/kit/logger"
-	"github.com/streadway/amqp"
 )
 
 const (
@@ -32,7 +33,7 @@ const (
 	defaultBitSize = 0
 )
 
-// RabbitMQ allows sending/receiving data to/from RabbitMQ
+// RabbitMQ allows sending/receiving data to/from RabbitMQ.
 type RabbitMQ struct {
 	connection *amqp.Connection
 	channel    *amqp.Channel
@@ -41,7 +42,7 @@ type RabbitMQ struct {
 	queue amqp.Queue
 }
 
-// Metadata is the rabbitmq config
+// Metadata is the rabbitmq config.
 type rabbitMQMetadata struct {
 	Host      string `json:"host"`
 	QueueName string `json:"queueName"`
@@ -53,12 +54,12 @@ type rabbitMQMetadata struct {
 	defaultQueueTTL *time.Duration
 }
 
-// NewRabbitMQ returns a new rabbitmq instance
+// NewRabbitMQ returns a new rabbitmq instance.
 func NewRabbitMQ(logger logger.Logger) *RabbitMQ {
 	return &RabbitMQ{logger: logger}
 }
 
-// Init does metadata parsing and connection creation
+// Init does metadata parsing and connection creation.
 func (r *RabbitMQ) Init(metadata bindings.Metadata) error {
 	err := r.parseMetadata(metadata)
 	if err != nil {
@@ -99,6 +100,12 @@ func (r *RabbitMQ) Invoke(req *bindings.InvokeRequest) (*bindings.InvokeResponse
 		Body: req.Data,
 	}
 
+	contentType, ok := contrib_metadata.TryGetContentType(req.Metadata)
+
+	if ok {
+		pub.ContentType = contentType
+	}
+
 	ttl, ok, err := contrib_metadata.TryGetTTL(req.Metadata)
 	if err != nil {
 		return nil, err

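Reviewer note (not part of the diff): a caller-side sketch of the new content-type support in the hunk above. The "contentType" metadata key is an inference from TryGetContentType's name, not confirmed by this diff, and rmq stands for an already-initialized binding.

package main

import (
    "github.com/dapr/components-contrib/bindings"
)

// publishJSON sketches how a caller would exercise the content-type plumbing.
func publishJSON(rmq bindings.OutputBinding, payload []byte) error {
    req := &bindings.InvokeRequest{
        Operation: bindings.CreateOperation,
        Data:      payload,
        Metadata: map[string]string{
            // Copied into amqp.Publishing.ContentType by the hunk above.
            "contentType": "application/json",
        },
    }
    _, err := rmq.Invoke(req)
    return err
}
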
@@ -9,10 +9,11 @@ import (
 	"testing"
 	"time"
 
+	"github.com/stretchr/testify/assert"
+
 	"github.com/dapr/components-contrib/bindings"
 	"github.com/dapr/components-contrib/metadata"
 	"github.com/dapr/kit/logger"
-	"github.com/stretchr/testify/assert"
 )
 
 func TestParseMetadata(t *testing.T) {

@@ -17,7 +17,7 @@ import (
 	"github.com/dapr/kit/logger"
 )
 
-// Redis is a redis output binding
+// Redis is a redis output binding.
 type Redis struct {
 	client         redis.UniversalClient
 	clientSettings *rediscomponent.Settings
@@ -27,12 +27,12 @@ type Redis struct {
 	cancel context.CancelFunc
 }
 
-// NewRedis returns a new redis bindings instance
+// NewRedis returns a new redis bindings instance.
 func NewRedis(logger logger.Logger) *Redis {
 	return &Redis{logger: logger}
 }
 
-// Init performs metadata parsing and connection creation
+// Init performs metadata parsing and connection creation.
 func (r *Redis) Init(meta bindings.Metadata) (err error) {
 	r.client, r.clientSettings, err = rediscomponent.ParseClientFromProperties(meta.Properties, nil)
 	if err != nil {

@@ -10,14 +10,14 @@ import (
 	"strconv"
 )
 
-// InvokeRequest is the object given to a dapr output binding
+// InvokeRequest is the object given to a dapr output binding.
 type InvokeRequest struct {
 	Data      []byte            `json:"data"`
 	Metadata  map[string]string `json:"metadata"`
 	Operation OperationKind     `json:"operation"`
 }
 
-// OperationKind defines an output binding operation
+// OperationKind defines an output binding operation.
 type OperationKind string
 
 // Non exhaustive list of operations. A binding can add operations that are not in this list.
@@ -28,7 +28,7 @@ const (
 	ListOperation   OperationKind = "list"
 )
 
-// GetMetadataAsBool parses metadata as bool
+// GetMetadataAsBool parses metadata as bool.
 func (r *InvokeRequest) GetMetadataAsBool(key string) (bool, error) {
 	if val, ok := r.Metadata[key]; ok {
 		boolVal, err := strconv.ParseBool(val)
@@ -42,7 +42,7 @@ func (r *InvokeRequest) GetMetadataAsBool(key string) (bool, error) {
 	return false, nil
 }
 
-// GetMetadataAsBool parses metadata as int64
+// GetMetadataAsInt64 parses metadata as int64.
 func (r *InvokeRequest) GetMetadataAsInt64(key string, bitSize int) (int64, error) {
 	if val, ok := r.Metadata[key]; ok {
 		intVal, err := strconv.ParseInt(val, 10, bitSize)

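Reviewer note (not part of the diff): besides the punctuation, the last hunk fixes a copy-pasted doc comment (GetMetadataAsInt64 was documented as GetMetadataAsBool). A usage sketch with illustrative keys:

package main

import (
    "fmt"

    "github.com/dapr/components-contrib/bindings"
)

func main() {
    req := &bindings.InvokeRequest{
        Metadata: map[string]string{"retain": "true", "qos": "1"},
    }
    retain, err := req.GetMetadataAsBool("retain") // strconv.ParseBool underneath
    if err != nil {
        panic(err)
    }
    qos, err := req.GetMetadataAsInt64("qos", 8) // bitSize 8 rejects values outside int8 range
    if err != nil {
        panic(err)
    }
    fmt.Println(retain, qos)
}
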
@@ -9,13 +9,13 @@ import (
 	"github.com/dapr/components-contrib/state"
 )
 
-// ReadResponse is the return object from an dapr input binding
+// ReadResponse is the return object from an dapr input binding.
 type ReadResponse struct {
 	Data     []byte            `json:"data"`
 	Metadata map[string]string `json:"metadata"`
 }
 
-// AppResponse is the object describing the response from user code after a bindings event
+// AppResponse is the object describing the response from user code after a bindings event.
 type AppResponse struct {
 	Data        interface{} `json:"data"`
 	To          []string    `json:"to"`
@@ -24,7 +24,7 @@ type AppResponse struct {
 	Concurrency string `json:"concurrency"`
 }
 
-// InvokeResponse is the response object returned from an output binding
+// InvokeResponse is the response object returned from an output binding.
 type InvokeResponse struct {
 	Data     []byte            `json:"data"`
 	Metadata map[string]string `json:"metadata"`

@@ -12,9 +12,10 @@ import (
 	"time"
 
 	r "github.com/dancannon/gorethink"
+	"github.com/pkg/errors"
+
 	"github.com/dapr/components-contrib/bindings"
 	"github.com/dapr/kit/logger"
-	"github.com/pkg/errors"
 )
 
 // Binding represents RethinkDB change change state input binding which fires handler with
@@ -26,7 +27,7 @@ type Binding struct {
 	stopCh chan bool
 }
 
-// StateConfig is the binding config
+// StateConfig is the binding config.
 type StateConfig struct {
 	r.ConnectOpts
 	Table string `json:"table"`
@@ -34,7 +35,7 @@ type StateConfig struct {
 
 var _ = bindings.InputBinding(&Binding{})
 
-// NewRethinkDBStateChangeBinding returns a new RethinkDB actor event input binding
+// NewRethinkDBStateChangeBinding returns a new RethinkDB actor event input binding.
 func NewRethinkDBStateChangeBinding(logger logger.Logger) *Binding {
 	return &Binding{
 		logger: logger,
@@ -42,7 +43,7 @@ func NewRethinkDBStateChangeBinding(logger logger.Logger) *Binding {
 	}
 }
 
-// Init initializes the RethinkDB binding
+// Init initializes the RethinkDB binding.
 func (b *Binding) Init(metadata bindings.Metadata) error {
 	cfg, err := metadataToConfig(metadata.Properties, b.logger)
 	if err != nil {
@@ -59,7 +60,7 @@ func (b *Binding) Init(metadata bindings.Metadata) error {
 	return nil
 }
 
-// Read triggers the RethinkDB scheduler
+// Read triggers the RethinkDB scheduler.
 func (b *Binding) Read(handler func(*bindings.ReadResponse) ([]byte, error)) error {
 	b.logger.Infof("subscribing to state changes in %s.%s...", b.config.Database, b.config.Table)
 	cursor, err := r.DB(b.config.Database).Table(b.config.Table).Changes(r.ChangesOpts{

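Reviewer note (not part of the diff): a sketch of the callback shape Read expects; the logging is illustrative and b stands for an already-initialized Binding from the file above.

package main

import (
    "log"

    "github.com/dapr/components-contrib/bindings"
)

func subscribe(b bindings.InputBinding) error {
    return b.Read(func(res *bindings.ReadResponse) ([]byte, error) {
        log.Printf("state change: %s", res.Data) // res.Data carries the change document
        return nil, nil                          // no response payload
    })
}
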
@@ -10,9 +10,10 @@ import (
 	"testing"
 	"time"
 
+	"github.com/stretchr/testify/assert"
+
 	"github.com/dapr/components-contrib/bindings"
 	"github.com/dapr/kit/logger"
-	"github.com/stretchr/testify/assert"
 )
 
 func getTestMetadata() map[string]string {

@@ -12,9 +12,10 @@ import (
 	"strconv"
 	"strings"
 
+	"gopkg.in/gomail.v2"
+
 	"github.com/dapr/components-contrib/bindings"
 	"github.com/dapr/kit/logger"
-	"gopkg.in/gomail.v2"
 )
 
 const (
@@ -24,13 +25,13 @@ const (
 	mailSeparator = ";"
 )
 
-// Mailer allows sending of emails using the Simple Mail Transfer Protocol
+// Mailer allows sending of emails using the Simple Mail Transfer Protocol.
 type Mailer struct {
 	metadata Metadata
 	logger   logger.Logger
 }
 
-// Metadata holds standard email properties
+// Metadata holds standard email properties.
 type Metadata struct {
 	Host string `json:"host"`
 	Port int    `json:"port"`
@@ -45,12 +46,12 @@ type Metadata struct {
 	Priority int `json:"priority"`
 }
 
-// NewSMTP returns a new smtp binding instance
+// NewSMTP returns a new smtp binding instance.
 func NewSMTP(logger logger.Logger) *Mailer {
 	return &Mailer{logger: logger}
 }
 
-// Init smtp component (parse metadata)
+// Init smtp component (parse metadata).
 func (s *Mailer) Init(metadata bindings.Metadata) error {
 	// parse metadata
 	meta, err := s.parseMetadata(metadata)
@@ -62,12 +63,12 @@ func (s *Mailer) Init(metadata bindings.Metadata) error {
 	return nil
 }
 
-// Operations returns the allowed binding operations
+// Operations returns the allowed binding operations.
 func (s *Mailer) Operations() []bindings.OperationKind {
 	return []bindings.OperationKind{bindings.CreateOperation}
 }
 
-// Invoke sends an email message
+// Invoke sends an email message.
 func (s *Mailer) Invoke(req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
 	// Merge config metadata with request metadata
 	metadata, err := s.metadata.mergeWithRequestMetadata(req)
@@ -119,7 +120,7 @@ func (s *Mailer) Invoke(req *bindings.InvokeRequest) (*bindings.InvokeResponse,
 	return nil, nil
 }
 
-// Helper to parse metadata
+// Helper to parse metadata.
 func (s *Mailer) parseMetadata(meta bindings.Metadata) (Metadata, error) {
 	smtpMeta := Metadata{}
 
@@ -170,7 +171,7 @@ func (s *Mailer) parseMetadata(meta bindings.Metadata) (Metadata, error) {
 	return smtpMeta, nil
 }
 
-// Helper to merge config and request metadata
+// Helper to merge config and request metadata.
 func (metadata Metadata) mergeWithRequestMetadata(req *bindings.InvokeRequest) (Metadata, error) {
 	merged := metadata
 

@@ -8,9 +8,10 @@ package smtp
 import (
 	"testing"
 
+	"github.com/stretchr/testify/assert"
+
 	"github.com/dapr/components-contrib/bindings"
 	"github.com/dapr/kit/logger"
-	"github.com/stretchr/testify/assert"
 )
 
 func TestParseMetadata(t *testing.T) {

@@ -12,19 +12,20 @@ import (
 	"strconv"
 	"strings"
 
-	"github.com/dapr/components-contrib/bindings"
-	"github.com/dapr/kit/logger"
 	"github.com/sendgrid/sendgrid-go"
 	"github.com/sendgrid/sendgrid-go/helpers/mail"
+
+	"github.com/dapr/components-contrib/bindings"
+	"github.com/dapr/kit/logger"
 )
 
-// SendGrid allows sending of emails using the 3rd party SendGrid service
+// SendGrid allows sending of emails using the 3rd party SendGrid service.
 type SendGrid struct {
 	metadata sendGridMetadata
 	logger   logger.Logger
 }
 
-// Our metadata holds standard email properties
+// Our metadata holds standard email properties.
 type sendGridMetadata struct {
 	APIKey    string `json:"apiKey"`
 	EmailFrom string `json:"emailFrom"`
@@ -34,7 +35,7 @@ type sendGridMetadata struct {
 	EmailBcc string `json:"emailBcc"`
 }
 
-// Wrapper to help decode SendGrid API errors
+// Wrapper to help decode SendGrid API errors.
 type sendGridRestError struct {
 	Errors []struct {
 		Field interface{} `json:"field"`
@@ -43,12 +44,12 @@ type sendGridRestError struct {
 	} `json:"errors"`
 }
 
-// NewSendGrid returns a new SendGrid bindings instance
+// NewSendGrid returns a new SendGrid bindings instance.
 func NewSendGrid(logger logger.Logger) *SendGrid {
 	return &SendGrid{logger: logger}
 }
 
-// Helper to parse metadata
+// Helper to parse metadata.
 func (sg *SendGrid) parseMetadata(meta bindings.Metadata) (sendGridMetadata, error) {
 	sgMeta := sendGridMetadata{}
 
@@ -69,7 +70,7 @@ func (sg *SendGrid) parseMetadata(meta bindings.Metadata) (sendGridMetadata, err
 	return sgMeta, nil
 }
 
-// Init does metadata parsing and not much else :)
+// Init does metadata parsing and not much else :).
 func (sg *SendGrid) Init(metadata bindings.Metadata) error {
 	// Parse input metadata
 	meta, err := sg.parseMetadata(metadata)
@@ -87,7 +88,7 @@ func (sg *SendGrid) Operations() []bindings.OperationKind {
 	return []bindings.OperationKind{bindings.CreateOperation}
 }
 
-// Write does the work of sending message to SendGrid API
+// Write does the work of sending message to SendGrid API.
 func (sg *SendGrid) Invoke(req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
 	// We allow two possible sources of the properties we need,
 	// the component metadata or request metadata, request takes priority if present

@@ -8,9 +8,10 @@ package sendgrid
 import (
 	"testing"
 
+	"github.com/stretchr/testify/assert"
+
 	"github.com/dapr/components-contrib/bindings"
 	"github.com/dapr/kit/logger"
-	"github.com/stretchr/testify/assert"
 )
 
 func TestParseMetadata(t *testing.T) {

@@ -13,9 +13,10 @@ import (
 	"sync/atomic"
 	"testing"
 
+	"github.com/stretchr/testify/assert"
+
 	"github.com/dapr/components-contrib/bindings"
 	"github.com/dapr/kit/logger"
-	"github.com/stretchr/testify/assert"
 )
 
 type mockTransport struct {

@@ -14,14 +14,15 @@ import (
 	"syscall"
 	"time"
 
-	"github.com/dapr/components-contrib/bindings"
-	"github.com/dapr/kit/logger"
 	"github.com/dghubble/go-twitter/twitter"
 	"github.com/dghubble/oauth1"
 	"github.com/pkg/errors"
+
+	"github.com/dapr/components-contrib/bindings"
+	"github.com/dapr/kit/logger"
 )
 
-// Binding represents Twitter input/output binding
+// Binding represents Twitter input/output binding.
 type Binding struct {
 	client *twitter.Client
 	query  string
@@ -30,12 +31,12 @@ type Binding struct {
 
 var _ = bindings.InputBinding(&Binding{})
 
-// NewTwitter returns a new Twitter event input binding
+// NewTwitter returns a new Twitter event input binding.
 func NewTwitter(logger logger.Logger) *Binding {
 	return &Binding{logger: logger}
 }
 
-// Init initializes the Twitter binding
+// Init initializes the Twitter binding.
 func (t *Binding) Init(metadata bindings.Metadata) error {
 	ck, f := metadata.Properties["consumerKey"]
 	if !f || ck == "" {
@@ -70,12 +71,12 @@ func (t *Binding) Init(metadata bindings.Metadata) error {
 	return nil
 }
 
-// Operations returns list of operations supported by twitter binding
+// Operations returns list of operations supported by twitter binding.
 func (t *Binding) Operations() []bindings.OperationKind {
 	return []bindings.OperationKind{bindings.GetOperation}
 }
 
-// Read triggers the Twitter search and events on each result tweet
+// Read triggers the Twitter search and events on each result tweet.
 func (t *Binding) Read(handler func(*bindings.ReadResponse) ([]byte, error)) error {
 	if t.query == "" {
 		return nil
@@ -144,7 +145,7 @@ func (t *Binding) Read(handler func(*bindings.ReadResponse) ([]byte, error)) err
 	return nil
 }
 
-// Invoke handles all operations
+// Invoke handles all operations.
 func (t *Binding) Invoke(req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
 	t.logger.Debugf("operation: %v", req.Operation)
 	if req.Metadata == nil {

@@ -10,10 +10,11 @@ import (
 	"os"
 	"testing"
 
-	"github.com/dapr/components-contrib/bindings"
-	"github.com/dapr/kit/logger"
 	"github.com/dghubble/go-twitter/twitter"
 	"github.com/stretchr/testify/assert"
+
+	"github.com/dapr/components-contrib/bindings"
+	"github.com/dapr/kit/logger"
 )
 
 const (
@@ -44,7 +45,7 @@ func getRuntimeMetadata() map[string]string {
 	}
 }
 
-// go test -v -count=1 ./bindings/twitter/
+// go test -v -count=1 ./bindings/twitter/.
 func TestInit(t *testing.T) {
 	m := getTestMetadata()
 	tw := NewTwitter(logger.NewLogger("test"))
@@ -53,7 +54,7 @@ func TestInit(t *testing.T) {
 }
 
 // TestReadError excutes the Read method and fails before the Twitter API call
-// go test -v -count=1 -run TestReadError ./bindings/twitter/
+// go test -v -count=1 -run TestReadError ./bindings/twitter/.
 func TestReadError(t *testing.T) {
 	tw := NewTwitter(logger.NewLogger("test"))
 	m := getTestMetadata()
@@ -69,7 +70,7 @@ func TestReadError(t *testing.T) {
 }
 
 // TestRead executes the Read method which calls Twiter API
-// env RUN_LIVE_TW_TEST=true go test -v -count=1 -run TestReed ./bindings/twitter/
+// env RUN_LIVE_TW_TEST=true go test -v -count=1 -run TestReed ./bindings/twitter/.
 func TestReed(t *testing.T) {
 	if os.Getenv("RUN_LIVE_TW_TEST") != "true" {
 		t.SkipNow() // skip this test until able to read credentials in test infra
@@ -99,7 +100,7 @@ func TestReed(t *testing.T) {
 
 // TestInvoke executes the Invoke method which calls Twiter API
 // test tokens must be set
-// env RUN_LIVE_TW_TEST=true go test -v -count=1 -run TestInvoke ./bindings/twitter/
+// env RUN_LIVE_TW_TEST=true go test -v -count=1 -run TestInvoke ./bindings/twitter/.
 func TestInvoke(t *testing.T) {
 	if os.Getenv("RUN_LIVE_TW_TEST") != "true" {
 		t.SkipNow() // skip this test until able to read credentials in test infra

@@ -10,6 +10,7 @@ import (
 	"errors"
 
 	"github.com/camunda-cloud/zeebe/clients/go/pkg/zbc"
+
 	"github.com/dapr/components-contrib/bindings"
 	"github.com/dapr/components-contrib/metadata"
 	"github.com/dapr/kit/logger"
@@ -17,7 +18,7 @@ import (
 
 var ErrMissingGatewayAddr = errors.New("gatewayAddr is a required attribute")
 
-// ClientFactory enables injection for testing
+// ClientFactory enables injection for testing.
 type ClientFactory interface {
 	Get(metadata bindings.Metadata) (zbc.Client, error)
 }
@@ -34,7 +35,7 @@ type clientMetadata struct {
 	UsePlaintextConnection bool `json:"usePlainTextConnection,string"`
 }
 
-// NewClientFactoryImpl returns a new ClientFactory instance
+// NewClientFactoryImpl returns a new ClientFactory instance.
 func NewClientFactoryImpl(logger logger.Logger) *ClientFactoryImpl {
 	return &ClientFactoryImpl{logger: logger}
 }

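Reviewer note (not part of the diff): the ClientFactory interface above is the injection seam the mocked tests below rely on. A hypothetical mock implementation, sketched for clarity:

package main

import (
    "github.com/camunda-cloud/zeebe/clients/go/pkg/zbc"

    "github.com/dapr/components-contrib/bindings"
)

type mockClientFactory struct {
    client zbc.Client
    err    error
}

// Get satisfies ClientFactory without dialing a real gateway.
func (f *mockClientFactory) Get(metadata bindings.Metadata) (zbc.Client, error) {
    return f.client, f.err
}
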
@@ -9,9 +9,10 @@ import (
 	"testing"
 	"time"
 
+	"github.com/stretchr/testify/assert"
+
 	"github.com/dapr/components-contrib/bindings"
 	"github.com/dapr/kit/logger"
-	"github.com/stretchr/testify/assert"
 )
 
 func TestParseMetadata(t *testing.T) {

@@ -14,10 +14,11 @@ import (
 	"github.com/camunda-cloud/zeebe/clients/go/pkg/commands"
 	"github.com/camunda-cloud/zeebe/clients/go/pkg/entities"
 	"github.com/camunda-cloud/zeebe/clients/go/pkg/zbc"
+	"github.com/stretchr/testify/assert"
+
 	"github.com/dapr/components-contrib/bindings"
 	contrib_metadata "github.com/dapr/components-contrib/metadata"
 	"github.com/dapr/kit/logger"
-	"github.com/stretchr/testify/assert"
 )
 
 type mockActivateJobsClient struct {

@@ -13,9 +13,10 @@ import (
 	"github.com/camunda-cloud/zeebe/clients/go/pkg/commands"
 	"github.com/camunda-cloud/zeebe/clients/go/pkg/pb"
 	"github.com/camunda-cloud/zeebe/clients/go/pkg/zbc"
+	"github.com/stretchr/testify/assert"
+
 	"github.com/dapr/components-contrib/bindings"
 	"github.com/dapr/kit/logger"
-	"github.com/stretchr/testify/assert"
 )
 
 type mockCancelInstanceClient struct {

Some files were not shown because too many files have changed in this diff.