feature: add context to state API

Signed-off-by: 1046102779 <seachen@tencent.com>
This commit is contained in:
1046102779 2022-12-08 15:56:51 +08:00
commit 2dd7433a59
324 changed files with 13329 additions and 4205 deletions

View File

@ -16,6 +16,8 @@ for attempt in `seq $MAX_ATTEMPTS`; do
if vault status && if vault status &&
vault kv put secret/dapr/conftestsecret conftestsecret=abcd && vault kv put secret/dapr/conftestsecret conftestsecret=abcd &&
vault kv put secret/dapr/secondsecret secondsecret=efgh && vault kv put secret/dapr/secondsecret secondsecret=efgh &&
vault kv put secret/secretWithNoPrefix noPrefixKey=noProblem &&
vault kv put secret/alternativePrefix/secretUnderAlternativePrefix altPrefixKey=altPrefixValue &&
vault kv put secret/dapr/multiplekeyvaluessecret first=1 second=2 third=3; vault kv put secret/dapr/multiplekeyvaluessecret first=1 second=2 third=3;
then then
echo ✅ secrets set; echo ✅ secrets set;

View File

@ -33,3 +33,5 @@ require (
google.golang.org/protobuf v1.28.1 // indirect google.golang.org/protobuf v1.28.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect
) )
replace github.com/dapr/dapr => github.com/1046102779/dapr v0.0.0-20221021130037-635b70c24259

View File

@ -1,8 +1,9 @@
version: '3.9' version: '3.9'
# Use a YAML reference to define VAULT_TOKEN and DOCKER_IMAGE only once # Use a YAML reference to define VAULT_TOKEN and DOCKER_IMAGE only once
x-common-vaues: x-common-values:
# This should match tests/config/secrestore/hashicorp/vault/hashicorp-vault.yaml # This should match tests/config/secrestore/hashicorp/vault/hashicorp-vault.yaml
# This should match .github/infrastructure/conformance/hashicorp/vault_token_file.txt
vault_token: &VAULT_TOKEN "vault-dev-root-token-id" vault_token: &VAULT_TOKEN "vault-dev-root-token-id"
# Reuse the same docker image to save on resources and because the base vault image # Reuse the same docker image to save on resources and because the base vault image
# has everything we need for seeding the initial key values too. # has everything we need for seeding the initial key values too.

View File

@ -0,0 +1,8 @@
version: '2'
services:
  # KubeMQ community server used by the bindings.kubemq / pubsub.kubemq
  # conformance tests (started via docker-compose-kubemq.yml in the workflow).
  # NOTE: the service was previously misnamed "rabbitmq" — a copy-paste from
  # another compose file; tests reach it via the host-mapped ports below.
  kubemq:
    image: kubemq/kubemq-community:latest
    ports:
      - 8080:8080   # REST API
      - 9090:9090   # metrics / admin — TODO confirm against KubeMQ docs
      - 50000:50000 # gRPC endpoint

224
.github/scripts/dapr_bot.js vendored Normal file
View File

@ -0,0 +1,224 @@
// List of GitHub logins allowed to control the dapr-bot workflow.
// Frozen so no handler can accidentally mutate the shared allow-list at runtime.
// TODO: Read owners from OWNERS file.
const owners = Object.freeze([
  "yaron2",
  "berndverst",
  "artursouza",
  "mukundansundar",
  "halspang",
  "tanvigour",
  "pkedy",
  "amuluyavarote",
  "daixiang0",
  "ItalyPaleAle",
  "jjcollinge",
  "pravinpushkar",
  "shivamkm07",
  "shubham1172",
  "skyao",
  "msfussell",
  "Taction",
  "RyanLettieri",
  "DeepanshuA",
  "yash-nisar",
  "addjuarez",
  "tmacam",
]);
/**
 * Body for the auto-created dapr/docs issue.
 * @param {number} issueNumber components-contrib issue number that triggered it
 * @returns {string} markdown issue body
 */
function docsIssueBodyTpl(issueNumber) {
  return `This issue was automatically created by \
[Dapr Bot](https://github.com/dapr/dapr/blob/master/.github/workflows/dapr-bot.yml) because a \"documentation required\" label \
was added to dapr/components-contrib#${issueNumber}. \n\n\
TODO: Add more details as per [this template](.github/ISSUE_TEMPLATE/new-content-needed.md).`;
}

/**
 * Body for the auto-created dapr/dapr component-registration issue.
 * @param {number} issueNumber components-contrib issue number that triggered it
 * @returns {string} markdown issue body
 */
function newComponentBodyTpl(issueNumber) {
  return `This issue was automatically created by \
[Dapr Bot](https://github.com/dapr/dapr/blob/master/.github/workflows/dapr-bot.yml) because a \"new component\" label \
was added to dapr/components-contrib#${issueNumber}. \n\n\
Please register the component in [cmd/daprd/components](https://github.com/dapr/dapr/tree/master/cmd/daprd/components), \
similar to the ones in the folder (one file per component).`;
}
module.exports = async ({ github, context }) => {
if (context.eventName == "issue_comment" && context.payload.action == "created") {
await handleIssueCommentCreate({ github, context });
} else if ((context.eventName == "issues" || context.eventName == "pull_request") && context.payload.action == "labeled") {
await handleIssueOrPrLabeled({ github, context });
} else {
console.log(`[main] event ${context.eventName} not supported, exiting.`);
}
}
/**
 * Handle issue comment create event: parse the leading slash-command
 * from the comment body and dispatch it.
 * @param {*} github GitHub object reference
 * @param {*} context workflow context for the "issue_comment" event
 */
async function handleIssueCommentCreate({ github, context }) {
  const { payload } = context;
  const issue = context.issue;
  const username = context.actor;
  const isFromPulls = Boolean(payload.issue.pull_request);
  const commentBody = payload.comment.body;

  if (!commentBody) {
    console.log("[handleIssueCommentCreate] comment body not found, exiting.");
    return;
  }

  // The command is whatever precedes the first space.
  const [command] = commentBody.split(" ");

  // "/assign" is open to everyone.
  if (command === "/assign") {
    await cmdAssign(github, issue, username, isFromPulls);
    return;
  }

  // All remaining commands require the commenter to be an owner.
  if (!owners.includes(username)) {
    console.log(`[handleIssueCommentCreate] user ${username} is not an owner, exiting.`);
    return;
  }

  if (command === "/make-me-laugh") {
    await cmdMakeMeLaugh(github, issue);
  } else if (command === "/ok-to-test") {
    await cmdOkToTest(github, issue, isFromPulls);
  } else {
    console.log(`[handleIssueCommentCreate] command ${command} not found, exiting.`);
  }
}
/**
 * Handle issue or PR labeled event: open a follow-up issue in dapr/docs
 * ("documentation required") or dapr/dapr ("new component").
 * @param {*} github GitHub object reference
 * @param {*} context workflow context for the "issues" or "pull_request" event
 */
async function handleIssueOrPrLabeled({ github, context }) {
  const payload = context.payload;
  const label = payload.label.name;
  // "issues" events carry the number under payload.issue, but
  // "pull_request" events (also routed here by the workflow) carry it
  // under payload.pull_request — the original `payload.issue.number`
  // threw a TypeError for PR label events.
  const issueNumber = payload.issue?.number ?? payload.pull_request?.number;

  // This should not run in forks.
  if (context.repo.owner !== "dapr") {
    console.log("[handleIssueOrPrLabeled] not running in dapr repo, exiting.");
    return;
  }

  // Authorization is not required here because it's triggered by an issue label event.
  // Only authorized users can add labels to issues.
  if (label == "documentation required") {
    // Open a new docs issue
    await github.issues.create({
      owner: "dapr",
      repo: "docs",
      title: `New content needed for dapr/components-contrib#${issueNumber}`,
      labels: ["content/missing-information", "created-by/dapr-bot"],
      body: docsIssueBodyTpl(issueNumber),
    });
  } else if (label == "new component") {
    // Open a new dapr issue
    await github.issues.create({
      owner: "dapr",
      repo: "dapr",
      title: `Component registration for dapr/components-contrib#${issueNumber}`,
      labels: ["area/components", "created-by/dapr-bot"],
      body: newComponentBodyTpl(issueNumber),
    });
  } else {
    console.log(`[handleIssueOrPrLabeled] label ${label} not supported, exiting.`);
  }
}
/**
 * Assign the issue to the user who commented.
 * No-op for pull requests and for issues that already have assignees.
 * @param {*} github GitHub object reference
 * @param {*} issue GitHub issue object
 * @param {*} username GitHub user who commented
 * @param {boolean} isFromPulls is the workflow triggered by a pull request?
 */
async function cmdAssign(github, issue, username, isFromPulls) {
  if (isFromPulls) {
    console.log("[cmdAssign] pull requests unsupported, skipping command execution.");
    return;
  }
  if (issue.assignees && issue.assignees.length !== 0) {
    console.log("[cmdAssign] issue already has assignees, skipping command execution.");
    return;
  }
  const { owner, repo, number } = issue;
  await github.issues.addAssignees({
    owner,
    repo,
    issue_number: number,
    assignees: [username],
  });
}
/**
 * Comment a funny joke fetched from the joke API, falling back to a
 * canned line when the response is malformed.
 * @param {*} github GitHub object reference
 * @param {*} issue GitHub issue object
 */
async function cmdMakeMeLaugh(github, issue) {
  const result = await github.request("https://official-joke-api.appspot.com/random_joke");
  // Declare locals explicitly — the original leaked `jokedata` and `joke`
  // as implicit globals, which is a ReferenceError in strict mode / ESM.
  const jokedata = result.data;
  let joke = "I have a bad feeling about this.";
  if (jokedata && jokedata.setup && jokedata.punchline) {
    joke = `${jokedata.setup} - ${jokedata.punchline}`;
  }
  await github.issues.createComment({
    owner: issue.owner,
    repo: issue.repo,
    issue_number: issue.number,
    body: joke,
  });
}
/**
 * Trigger e2e test for the pull request: fire one repository_dispatch
 * event for the certification suite and one for the conformance suite.
 * @param {*} github GitHub object reference
 * @param {*} issue GitHub issue object
 * @param {boolean} isFromPulls is the workflow triggered by a pull request?
 */
async function cmdOkToTest(github, issue, isFromPulls) {
  if (!isFromPulls) {
    console.log("[cmdOkToTest] only pull requests supported, skipping command execution.");
    return;
  }

  // Look up the pull request to resolve its head commit and repository.
  const pull = await github.pulls.get({
    owner: issue.owner,
    repo: issue.repo,
    pull_number: issue.number,
  });
  if (!pull || !pull.data) {
    return;
  }

  const testPayload = {
    pull_head_ref: pull.data.head.sha,
    pull_head_repo: pull.data.head.repo.full_name,
    command: "ok-to-test",
    issue: issue,
  };

  // Fire repository_dispatch events: certification first, then conformance.
  for (const eventType of ["certification-test", "conformance-test"]) {
    await github.repos.createDispatchEvent({
      owner: issue.owner,
      repo: issue.repo,
      event_type: eventType,
      client_payload: testPayload,
    });
  }
  console.log(`[cmdOkToTest] triggered certification and conformance tests for ${JSON.stringify(testPayload)}`);
}

View File

@ -53,6 +53,7 @@ jobs:
- pubsub.mqtt - pubsub.mqtt
- state.mongodb - state.mongodb
- state.redis - state.redis
- state.cockroachdb
- state.postgresql - state.postgresql
- state.cassandra - state.cassandra
- state.memcached - state.memcached
@ -60,6 +61,7 @@ jobs:
- bindings.alicloud.dubbo - bindings.alicloud.dubbo
- bindings.kafka - bindings.kafka
- bindings.redis - bindings.redis
- bindings.cron
- secretstores.local.env - secretstores.local.env
- secretstores.local.file - secretstores.local.file
- secretstores.hashicorp.vault - secretstores.hashicorp.vault
@ -245,7 +247,7 @@ jobs:
working-directory: ${{ env.TEST_PATH }} working-directory: ${{ env.TEST_PATH }}
run: | run: |
echo "Running certification tests for ${{ matrix.component }} ... " echo "Running certification tests for ${{ matrix.component }} ... "
export GOLANG_PROTOBUF_REGISTRATION_CONFLICT=warn export GOLANG_PROTOBUF_REGISTRATION_CONFLICT=ignore
set +e set +e
gotestsum --jsonfile ${{ env.TEST_OUTPUT_FILE_PREFIX }}_certification.json \ gotestsum --jsonfile ${{ env.TEST_OUTPUT_FILE_PREFIX }}_certification.json \
--junitfile ${{ env.TEST_OUTPUT_FILE_PREFIX }}_certification.xml --format standard-quiet -- \ --junitfile ${{ env.TEST_OUTPUT_FILE_PREFIX }}_certification.xml --format standard-quiet -- \

View File

@ -35,7 +35,7 @@ jobs:
GOOS: ${{ matrix.target_os }} GOOS: ${{ matrix.target_os }}
GOARCH: ${{ matrix.target_arch }} GOARCH: ${{ matrix.target_arch }}
GOPROXY: https://proxy.golang.org GOPROXY: https://proxy.golang.org
GOLANGCI_LINT_VER: "v1.48.0" GOLANGCI_LINT_VER: "v1.50.1"
strategy: strategy:
matrix: matrix:
os: [ubuntu-latest, windows-latest, macOS-latest] os: [ubuntu-latest, windows-latest, macOS-latest]

View File

@ -48,6 +48,7 @@ jobs:
id: pr-components id: pr-components
run: | run: |
PR_COMPONENTS=$(yq -I0 --tojson eval - << EOF PR_COMPONENTS=$(yq -I0 --tojson eval - << EOF
- bindings.cron
- bindings.http - bindings.http
- bindings.influx - bindings.influx
- bindings.kafka-wurstmeister - bindings.kafka-wurstmeister
@ -57,6 +58,7 @@ jobs:
- bindings.mqtt-vernemq - bindings.mqtt-vernemq
- bindings.postgres - bindings.postgres
- bindings.redis - bindings.redis
- bindings.kubemq
- bindings.rabbitmq - bindings.rabbitmq
- pubsub.aws.snssqs - pubsub.aws.snssqs
- pubsub.hazelcast - pubsub.hazelcast
@ -70,6 +72,7 @@ jobs:
- pubsub.redis - pubsub.redis
- pubsub.kafka-wurstmeister - pubsub.kafka-wurstmeister
- pubsub.kafka-confluent - pubsub.kafka-confluent
- pubsub.kubemq
- secretstores.kubernetes - secretstores.kubernetes
- secretstores.localenv - secretstores.localenv
- secretstores.localfile - secretstores.localfile
@ -82,6 +85,7 @@ jobs:
- state.postgresql - state.postgresql
- state.redis - state.redis
- state.sqlserver - state.sqlserver
- state.in-memory
- state.cockroachdb - state.cockroachdb
- workflows.temporal - workflows.temporal
- state.rethinkdb - state.rethinkdb
@ -331,7 +335,7 @@ jobs:
if: contains(matrix.component, 'mysql.mariadb') if: contains(matrix.component, 'mysql.mariadb')
- name: Start KinD - name: Start KinD
uses: helm/kind-action@v1.0.0 uses: helm/kind-action@v1.4.0
if: contains(matrix.component, 'kubernetes') if: contains(matrix.component, 'kubernetes')
- name: Start postgresql - name: Start postgresql
@ -359,6 +363,10 @@ jobs:
docker-compose -f ./.github/infrastructure/docker-compose-rethinkdb.yml -p rethinkdb up -d docker-compose -f ./.github/infrastructure/docker-compose-rethinkdb.yml -p rethinkdb up -d
if: contains(matrix.component, 'rethinkdb') if: contains(matrix.component, 'rethinkdb')
- name: Start kubemq
run: docker-compose -f ./.github/infrastructure/docker-compose-kubemq.yml -p kubemq up -d
if: contains(matrix.component, 'kubemq')
- name: Setup KinD test data - name: Setup KinD test data
if: contains(matrix.component, 'kubernetes') if: contains(matrix.component, 'kubernetes')
run: | run: |

View File

@ -14,99 +14,24 @@
name: dapr-bot name: dapr-bot
on: on:
issue_comment: {types: created} issue_comment:
types: [created]
issues:
types: [labeled]
pull_request:
types: [labeled]
jobs: jobs:
daprbot: daprbot:
name: bot-processor name: bot-processor
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Comment analyzer - name: Checkout code
uses: actions/github-script@v1 uses: actions/checkout@v2 # required to make the script available for next step
- name: Issue analyzer
uses: actions/github-script@v4
with: with:
github-token: ${{secrets.DAPR_BOT_TOKEN}} github-token: ${{secrets.DAPR_BOT_TOKEN}}
script: | script: |
// list of owner who can control dapr-bot workflow const script = require('./.github/scripts/dapr_bot.js')
// TODO: Read owners from OWNERS file. await script({github, context})
const owners = [
"yaron2",
"berndverst",
"artursouza",
"mukundansundar",
"halspang",
"tanvigour",
"pkedy",
"amuluyavarote",
"daixiang0",
"ItalyPaleAle",
"jjcollinge",
"pravinpushkar",
"shivamkm07",
"shubham1172",
"skyao",
"msfussell",
"Taction",
"RyanLettieri",
"DeepanshuA",
"yash-nisar",
"addjuarez",
"tmacam",
];
const payload = context.payload;
const issue = context.issue;
const isFromPulls = !!payload.issue.pull_request;
const commentBody = payload.comment.body;
if (!isFromPulls && commentBody && commentBody.indexOf("/assign") == 0) {
if (!issue.assignees || issue.assignees.length === 0) {
await github.issues.addAssignees({
owner: issue.owner,
repo: issue.repo,
issue_number: issue.number,
assignees: [context.actor],
})
}
return;
}
// actions above this check are enabled for everyone.
if (owners.indexOf(context.actor) < 0) {
return;
}
if (isFromPulls && commentBody) {
if (commentBody.indexOf("/ok-to-test") == 0) {
// Get pull request
const pull = await github.pulls.get({
owner: issue.owner,
repo: issue.repo,
pull_number: issue.number
});
if (pull && pull.data) {
// Get commit id and repo from pull head
const testPayload = {
pull_head_ref: pull.data.head.sha,
pull_head_repo: pull.data.head.repo.full_name,
command: "ok-to-test",
issue: issue,
};
// Fire repository_dispatch event to trigger certification test
await github.repos.createDispatchEvent({
owner: issue.owner,
repo: issue.repo,
event_type: "certification-test",
client_payload: testPayload,
});
// Fire repository_dispatch event to trigger conformance test
await github.repos.createDispatchEvent({
owner: issue.owner,
repo: issue.repo,
event_type: "conformance-test",
client_payload: testPayload,
});
}
}
}

View File

@ -116,13 +116,14 @@ linters-settings:
# minimal occurrences count to trigger, 3 by default # minimal occurrences count to trigger, 3 by default
min-occurrences: 5 min-occurrences: 5
depguard: depguard:
list-type: blacklist list-type: denylist
include-go-root: false include-go-root: false
packages: packages-with-error-message:
- github.com/Sirupsen/logrus - "github.com/Sirupsen/logrus": "must use github.com/dapr/kit/logger"
packages-with-error-messages: - "github.com/agrea/ptr": "must use github.com/dapr/kit/ptr"
# specify an error message to output when a blacklisted package is used - "github.com/cenkalti/backoff": "must use github.com/cenkalti/backoff/v4"
github.com/Sirupsen/logrus: "must use github.com/dapr/kit/logger" - "github.com/cenkalti/backoff/v2": "must use github.com/cenkalti/backoff/v4"
- "github.com/cenkalti/backoff/v3": "must use github.com/cenkalti/backoff/v4"
misspell: misspell:
# Correct spellings using locale preferences for US or UK. # Correct spellings using locale preferences for US or UK.
# Default is to use a neutral variety of English. # Default is to use a neutral variety of English.
@ -276,3 +277,6 @@ linters:
- rowserrcheck - rowserrcheck
- sqlclosecheck - sqlclosecheck
- structcheck - structcheck
- deadcode
- nosnakecase
- varcheck

View File

@ -37,7 +37,7 @@ func TestPublishMsg(t *testing.T) { //nolint:paralleltest
w.WriteHeader(http.StatusOK) w.WriteHeader(http.StatusOK)
_, err := w.Write([]byte("{\"errcode\":0}")) _, err := w.Write([]byte("{\"errcode\":0}"))
require.NoError(t, err) require.NoError(t, err)
if r.Method != "POST" { if r.Method != http.MethodPost {
t.Errorf("Expected 'POST' request, got '%s'", r.Method) t.Errorf("Expected 'POST' request, got '%s'", r.Method)
} }
if r.URL.EscapedPath() != "/test" { if r.URL.EscapedPath() != "/test" {

View File

@ -20,18 +20,18 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"net/url"
"strconv" "strconv"
"strings"
"time"
"github.com/Azure/azure-storage-blob-go/azblob" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
"github.com/google/uuid" "github.com/google/uuid"
"github.com/dapr/components-contrib/bindings" "github.com/dapr/components-contrib/bindings"
azauth "github.com/dapr/components-contrib/internal/authentication/azure" storageinternal "github.com/dapr/components-contrib/internal/component/azure/blobstorage"
mdutils "github.com/dapr/components-contrib/metadata"
"github.com/dapr/kit/logger" "github.com/dapr/kit/logger"
"github.com/dapr/kit/ptr"
) )
const ( const (
@ -49,41 +49,23 @@ const (
// Defines the delete snapshots option for the delete operation. // Defines the delete snapshots option for the delete operation.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/delete-blob#request-headers // See: https://docs.microsoft.com/en-us/rest/api/storageservices/delete-blob#request-headers
metadataKeyDeleteSnapshots = "deleteSnapshots" metadataKeyDeleteSnapshots = "deleteSnapshots"
// HTTP headers to be associated with the blob.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob#request-headers-all-blob-types
metadataKeyContentType = "contentType"
metadataKeyContentMD5 = "contentMD5"
metadataKeyContentEncoding = "contentEncoding"
metadataKeyContentLanguage = "contentLanguage"
metadataKeyContentDisposition = "contentDisposition"
metadataKeyCacheControl = "cacheControl"
// Specifies the maximum number of HTTP GET requests that will be made while reading from a RetryReader. A value
// of zero means that no additional HTTP GET requests will be made.
defaultGetBlobRetryCount = 10
// Specifies the maximum number of blobs to return, including all BlobPrefix elements. If the request does not // Specifies the maximum number of blobs to return, including all BlobPrefix elements. If the request does not
// specify maxresults the server will return up to 5,000 items. // specify maxresults the server will return up to 5,000 items.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/list-blobs#uri-parameters // See: https://docs.microsoft.com/en-us/rest/api/storageservices/list-blobs#uri-parameters
maxResults = 5000 maxResults int32 = 5000
endpointKey = "endpoint"
) )
var ErrMissingBlobName = errors.New("blobName is a required attribute") var ErrMissingBlobName = errors.New("blobName is a required attribute")
// AzureBlobStorage allows saving blobs to an Azure Blob Storage account. // AzureBlobStorage allows saving blobs to an Azure Blob Storage account.
type AzureBlobStorage struct { type AzureBlobStorage struct {
metadata *blobStorageMetadata metadata *storageinternal.BlobStorageMetadata
containerURL azblob.ContainerURL containerClient *container.Client
logger logger.Logger logger logger.Logger
} }
type blobStorageMetadata struct {
AccountName string
Container string
GetBlobRetryCount int
DecodeBase64 bool
PublicAccessLevel azblob.PublicAccessType
}
type createResponse struct { type createResponse struct {
BlobURL string `json:"blobURL"` BlobURL string `json:"blobURL"`
BlobName string `json:"blobName"` BlobName string `json:"blobName"`
@ -111,90 +93,14 @@ func NewAzureBlobStorage(logger logger.Logger) bindings.OutputBinding {
// Init performs metadata parsing. // Init performs metadata parsing.
func (a *AzureBlobStorage) Init(metadata bindings.Metadata) error { func (a *AzureBlobStorage) Init(metadata bindings.Metadata) error {
m, err := a.parseMetadata(metadata) var err error
a.containerClient, a.metadata, err = storageinternal.CreateContainerStorageClient(a.logger, metadata.Properties)
if err != nil { if err != nil {
return err return err
} }
a.metadata = m
credential, env, err := azauth.GetAzureStorageBlobCredentials(a.logger, m.AccountName, metadata.Properties)
if err != nil {
return fmt.Errorf("invalid credentials with error: %s", err.Error())
}
userAgent := "dapr-" + logger.DaprVersion
options := azblob.PipelineOptions{
Telemetry: azblob.TelemetryOptions{Value: userAgent},
}
p := azblob.NewPipeline(credential, options)
var containerURL azblob.ContainerURL
customEndpoint, ok := mdutils.GetMetadataProperty(metadata.Properties, azauth.StorageEndpointKeys...)
if ok && customEndpoint != "" {
URL, parseErr := url.Parse(fmt.Sprintf("%s/%s/%s", customEndpoint, m.AccountName, m.Container))
if parseErr != nil {
return parseErr
}
containerURL = azblob.NewContainerURL(*URL, p)
} else {
URL, _ := url.Parse(fmt.Sprintf("https://%s.blob.%s/%s", m.AccountName, env.StorageEndpointSuffix, m.Container))
containerURL = azblob.NewContainerURL(*URL, p)
}
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
_, err = containerURL.Create(ctx, azblob.Metadata{}, m.PublicAccessLevel)
cancel()
// Don't return error, container might already exist
a.logger.Debugf("error creating container: %w", err)
a.containerURL = containerURL
return nil return nil
} }
func (a *AzureBlobStorage) parseMetadata(metadata bindings.Metadata) (*blobStorageMetadata, error) {
var m blobStorageMetadata
if val, ok := mdutils.GetMetadataProperty(metadata.Properties, azauth.StorageAccountNameKeys...); ok && val != "" {
m.AccountName = val
} else {
return nil, fmt.Errorf("missing or empty %s field from metadata", azauth.StorageAccountNameKeys[0])
}
if val, ok := mdutils.GetMetadataProperty(metadata.Properties, azauth.StorageContainerNameKeys...); ok && val != "" {
m.Container = val
} else {
return nil, fmt.Errorf("missing or empty %s field from metadata", azauth.StorageContainerNameKeys[0])
}
m.GetBlobRetryCount = defaultGetBlobRetryCount
if val, ok := metadata.Properties["getBlobRetryCount"]; ok {
n, err := strconv.Atoi(val)
if err != nil || n == 0 {
return nil, fmt.Errorf("invalid getBlobRetryCount field from metadata")
}
m.GetBlobRetryCount = n
}
m.DecodeBase64 = false
if val, ok := metadata.Properties["decodeBase64"]; ok {
n, err := strconv.ParseBool(val)
if err != nil {
return nil, fmt.Errorf("invalid decodeBase64 field from metadata")
}
m.DecodeBase64 = n
}
m.PublicAccessLevel = azblob.PublicAccessType(strings.ToLower(metadata.Properties["publicAccessLevel"]))
// per the Dapr documentation "none" is a valid value
if m.PublicAccessLevel == "none" {
m.PublicAccessLevel = ""
}
if !a.isValidPublicAccessType(m.PublicAccessLevel) {
return nil, fmt.Errorf("invalid public access level: %s; allowed: %s", m.PublicAccessLevel, azblob.PossiblePublicAccessTypeValues())
}
return &m, nil
}
func (a *AzureBlobStorage) Operations() []bindings.OperationKind { func (a *AzureBlobStorage) Operations() []bindings.OperationKind {
return []bindings.OperationKind{ return []bindings.OperationKind{
bindings.CreateOperation, bindings.CreateOperation,
@ -205,44 +111,21 @@ func (a *AzureBlobStorage) Operations() []bindings.OperationKind {
} }
func (a *AzureBlobStorage) create(ctx context.Context, req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) { func (a *AzureBlobStorage) create(ctx context.Context, req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
var blobHTTPHeaders azblob.BlobHTTPHeaders
var blobURL azblob.BlockBlobURL
var blobName string var blobName string
if val, ok := req.Metadata[metadataKeyBlobName]; ok && val != "" { if val, ok := req.Metadata[metadataKeyBlobName]; ok && val != "" {
blobName = val blobName = val
delete(req.Metadata, metadataKeyBlobName) delete(req.Metadata, metadataKeyBlobName)
} else { } else {
blobName = uuid.New().String() id, err := uuid.NewRandom()
if err != nil {
return nil, err
}
blobName = id.String()
} }
blobURL = a.getBlobURL(blobName)
if val, ok := req.Metadata[metadataKeyContentType]; ok && val != "" { blobHTTPHeaders, err := storageinternal.CreateBlobHTTPHeadersFromRequest(req.Metadata, nil, a.logger)
blobHTTPHeaders.ContentType = val if err != nil {
delete(req.Metadata, metadataKeyContentType) return nil, err
}
if val, ok := req.Metadata[metadataKeyContentMD5]; ok && val != "" {
sDec, err := b64.StdEncoding.DecodeString(val)
if err != nil || len(sDec) != 16 {
return nil, fmt.Errorf("the MD5 value specified in Content MD5 is invalid, MD5 value must be 128 bits and base64 encoded")
}
blobHTTPHeaders.ContentMD5 = sDec
delete(req.Metadata, metadataKeyContentMD5)
}
if val, ok := req.Metadata[metadataKeyContentEncoding]; ok && val != "" {
blobHTTPHeaders.ContentEncoding = val
delete(req.Metadata, metadataKeyContentEncoding)
}
if val, ok := req.Metadata[metadataKeyContentLanguage]; ok && val != "" {
blobHTTPHeaders.ContentLanguage = val
delete(req.Metadata, metadataKeyContentLanguage)
}
if val, ok := req.Metadata[metadataKeyContentDisposition]; ok && val != "" {
blobHTTPHeaders.ContentDisposition = val
delete(req.Metadata, metadataKeyContentDisposition)
}
if val, ok := req.Metadata[metadataKeyCacheControl]; ok && val != "" {
blobHTTPHeaders.CacheControl = val
delete(req.Metadata, metadataKeyCacheControl)
} }
d, err := strconv.Unquote(string(req.Data)) d, err := strconv.Unquote(string(req.Data))
@ -258,17 +141,21 @@ func (a *AzureBlobStorage) create(ctx context.Context, req *bindings.InvokeReque
req.Data = decoded req.Data = decoded
} }
_, err = azblob.UploadBufferToBlockBlob(ctx, req.Data, blobURL, azblob.UploadToBlockBlobOptions{ uploadOptions := azblob.UploadBufferOptions{
Parallelism: 16, Metadata: storageinternal.SanitizeMetadata(a.logger, req.Metadata),
Metadata: a.sanitizeMetadata(req.Metadata), HTTPHeaders: &blobHTTPHeaders,
BlobHTTPHeaders: blobHTTPHeaders, TransactionalContentMD5: &blobHTTPHeaders.BlobContentMD5,
}) }
blockBlobClient := a.containerClient.NewBlockBlobClient(blobName)
_, err = blockBlobClient.UploadBuffer(ctx, req.Data, &uploadOptions)
if err != nil { if err != nil {
return nil, fmt.Errorf("error uploading az blob: %w", err) return nil, fmt.Errorf("error uploading az blob: %w", err)
} }
resp := createResponse{ resp := createResponse{
BlobURL: blobURL.String(), BlobURL: blockBlobClient.URL(),
} }
b, err := json.Marshal(resp) b, err := json.Marshal(resp)
if err != nil { if err != nil {
@ -286,23 +173,26 @@ func (a *AzureBlobStorage) create(ctx context.Context, req *bindings.InvokeReque
} }
func (a *AzureBlobStorage) get(ctx context.Context, req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) { func (a *AzureBlobStorage) get(ctx context.Context, req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
var blobURL azblob.BlockBlobURL var blockBlobClient *blockblob.Client
if val, ok := req.Metadata[metadataKeyBlobName]; ok && val != "" { if val, ok := req.Metadata[metadataKeyBlobName]; ok && val != "" {
blobURL = a.getBlobURL(val) blockBlobClient = a.containerClient.NewBlockBlobClient(val)
} else { } else {
return nil, ErrMissingBlobName return nil, ErrMissingBlobName
} }
resp, err := blobURL.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false) downloadOptions := azblob.DownloadStreamOptions{
AccessConditions: &blob.AccessConditions{},
}
blobDownloadResponse, err := blockBlobClient.DownloadStream(ctx, &downloadOptions)
if err != nil { if err != nil {
return nil, fmt.Errorf("error downloading az blob: %w", err) return nil, fmt.Errorf("error downloading az blob: %w", err)
} }
reader := blobDownloadResponse.Body
bodyStream := resp.Body(azblob.RetryReaderOptions{MaxRetryRequests: a.metadata.GetBlobRetryCount}) defer reader.Close()
blobData, err := io.ReadAll(reader)
data, err := io.ReadAll(bodyStream)
if err != nil { if err != nil {
return nil, fmt.Errorf("error reading az blob body: %w", err) return nil, fmt.Errorf("error reading az blob: %w", err)
} }
var metadata map[string]string var metadata map[string]string
@ -311,45 +201,54 @@ func (a *AzureBlobStorage) get(ctx context.Context, req *bindings.InvokeRequest)
return nil, fmt.Errorf("error parsing metadata: %w", err) return nil, fmt.Errorf("error parsing metadata: %w", err)
} }
getPropertiesOptions := blob.GetPropertiesOptions{
AccessConditions: &blob.AccessConditions{},
}
if fetchMetadata { if fetchMetadata {
props, err := blobURL.GetProperties(ctx, azblob.BlobAccessConditions{}) props, err := blockBlobClient.GetProperties(ctx, &getPropertiesOptions)
if err != nil { if err != nil {
return nil, fmt.Errorf("error reading blob metadata: %w", err) return nil, fmt.Errorf("error reading blob metadata: %w", err)
} }
metadata = props.NewMetadata() metadata = props.Metadata
} }
return &bindings.InvokeResponse{ return &bindings.InvokeResponse{
Data: data, Data: blobData,
Metadata: metadata, Metadata: metadata,
}, nil }, nil
} }
func (a *AzureBlobStorage) delete(ctx context.Context, req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) { func (a *AzureBlobStorage) delete(ctx context.Context, req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
var blobURL azblob.BlockBlobURL var blockBlobClient *blockblob.Client
if val, ok := req.Metadata[metadataKeyBlobName]; ok && val != "" { val, ok := req.Metadata[metadataKeyBlobName]
blobURL = a.getBlobURL(val) if !ok || val == "" {
} else {
return nil, ErrMissingBlobName return nil, ErrMissingBlobName
} }
deleteSnapshotsOptions := azblob.DeleteSnapshotsOptionNone var deleteSnapshotsOptions blob.DeleteSnapshotsOptionType
if val, ok := req.Metadata[metadataKeyDeleteSnapshots]; ok && val != "" { if deleteSnapShotOption, ok := req.Metadata[metadataKeyDeleteSnapshots]; ok && val != "" {
deleteSnapshotsOptions = azblob.DeleteSnapshotsOptionType(val) deleteSnapshotsOptions = azblob.DeleteSnapshotsOptionType(deleteSnapShotOption)
if !a.isValidDeleteSnapshotsOptionType(deleteSnapshotsOptions) { if !a.isValidDeleteSnapshotsOptionType(deleteSnapshotsOptions) {
return nil, fmt.Errorf("invalid delete snapshot option type: %s; allowed: %s", return nil, fmt.Errorf("invalid delete snapshot option type: %s; allowed: %s",
deleteSnapshotsOptions, azblob.PossibleDeleteSnapshotsOptionTypeValues()) deleteSnapshotsOptions, azblob.PossibleDeleteSnapshotsOptionTypeValues())
} }
} }
_, err := blobURL.Delete(ctx, deleteSnapshotsOptions, azblob.BlobAccessConditions{}) deleteOptions := blob.DeleteOptions{
DeleteSnapshots: &deleteSnapshotsOptions,
AccessConditions: &blob.AccessConditions{},
}
blockBlobClient = a.containerClient.NewBlockBlobClient(val)
_, err := blockBlobClient.Delete(ctx, &deleteOptions)
return nil, err return nil, err
} }
func (a *AzureBlobStorage) list(ctx context.Context, req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) { func (a *AzureBlobStorage) list(ctx context.Context, req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
options := azblob.ListBlobsSegmentOptions{} options := container.ListBlobsFlatOptions{}
hasPayload := false hasPayload := false
var payload listPayload var payload listPayload
@ -360,50 +259,52 @@ func (a *AzureBlobStorage) list(ctx context.Context, req *bindings.InvokeRequest
} }
hasPayload = true hasPayload = true
} }
if hasPayload { if hasPayload {
options.Details.Copy = payload.Include.Copy options.Include.Copy = payload.Include.Copy
options.Details.Metadata = payload.Include.Metadata options.Include.Metadata = payload.Include.Metadata
options.Details.Snapshots = payload.Include.Snapshots options.Include.Snapshots = payload.Include.Snapshots
options.Details.UncommittedBlobs = payload.Include.UncommittedBlobs options.Include.UncommittedBlobs = payload.Include.UncommittedBlobs
options.Details.Deleted = payload.Include.Deleted options.Include.Deleted = payload.Include.Deleted
} }
if hasPayload && payload.MaxResults != int32(0) { if hasPayload && payload.MaxResults > 0 {
options.MaxResults = payload.MaxResults options.MaxResults = &payload.MaxResults
} else { } else {
options.MaxResults = maxResults options.MaxResults = ptr.Of(maxResults) // cannot get address of constant directly
} }
if hasPayload && payload.Prefix != "" { if hasPayload && payload.Prefix != "" {
options.Prefix = payload.Prefix options.Prefix = &payload.Prefix
} }
var initialMarker azblob.Marker var initialMarker string
if hasPayload && payload.Marker != "" { if hasPayload && payload.Marker != "" {
initialMarker = azblob.Marker{Val: &payload.Marker} initialMarker = payload.Marker
} else { } else {
initialMarker = azblob.Marker{} initialMarker = ""
} }
options.Marker = &initialMarker
var blobs []azblob.BlobItem
metadata := map[string]string{} metadata := map[string]string{}
for currentMaker := initialMarker; currentMaker.NotDone(); { blobs := []*container.BlobItem{}
var listBlob *azblob.ListBlobsFlatSegmentResponse pager := a.containerClient.NewListBlobsFlatPager(&options)
listBlob, err := a.containerURL.ListBlobsFlatSegment(ctx, currentMaker, options)
for pager.More() {
resp, err := pager.NextPage(ctx)
if err != nil { if err != nil {
return nil, fmt.Errorf("error listing blobs: %w", err) return nil, fmt.Errorf("error listing blobs: %w", err)
} }
blobs = append(blobs, listBlob.Segment.BlobItems...) blobs = append(blobs, resp.Segment.BlobItems...)
numBlobs := len(blobs) numBlobs := len(blobs)
currentMaker = listBlob.NextMarker
metadata[metadataKeyMarker] = *currentMaker.Val
metadata[metadataKeyNumber] = strconv.FormatInt(int64(numBlobs), 10) metadata[metadataKeyNumber] = strconv.FormatInt(int64(numBlobs), 10)
metadata[metadataKeyMarker] = ""
if resp.Marker != nil {
metadata[metadataKeyMarker] = *resp.Marker
}
if options.MaxResults-maxResults > 0 { if *options.MaxResults-maxResults > 0 {
options.MaxResults -= maxResults *options.MaxResults -= maxResults
} else { } else {
break break
} }
@ -435,23 +336,6 @@ func (a *AzureBlobStorage) Invoke(ctx context.Context, req *bindings.InvokeReque
} }
} }
func (a *AzureBlobStorage) getBlobURL(name string) azblob.BlockBlobURL {
blobURL := a.containerURL.NewBlockBlobURL(name)
return blobURL
}
func (a *AzureBlobStorage) isValidPublicAccessType(accessType azblob.PublicAccessType) bool {
validTypes := azblob.PossiblePublicAccessTypeValues()
for _, item := range validTypes {
if item == accessType {
return true
}
}
return false
}
func (a *AzureBlobStorage) isValidDeleteSnapshotsOptionType(accessType azblob.DeleteSnapshotsOptionType) bool { func (a *AzureBlobStorage) isValidDeleteSnapshotsOptionType(accessType azblob.DeleteSnapshotsOptionType) bool {
validTypes := azblob.PossibleDeleteSnapshotsOptionTypeValues() validTypes := azblob.PossibleDeleteSnapshotsOptionTypeValues()
for _, item := range validTypes { for _, item := range validTypes {
@ -462,41 +346,3 @@ func (a *AzureBlobStorage) isValidDeleteSnapshotsOptionType(accessType azblob.De
return false return false
} }
func (a *AzureBlobStorage) sanitizeMetadata(metadata map[string]string) map[string]string {
for key, val := range metadata {
// Keep only letters and digits
n := 0
newKey := make([]byte, len(key))
for i := 0; i < len(key); i++ {
if (key[i] >= 'A' && key[i] <= 'Z') ||
(key[i] >= 'a' && key[i] <= 'z') ||
(key[i] >= '0' && key[i] <= '9') {
newKey[n] = key[i]
n++
}
}
if n != len(key) {
nks := string(newKey[:n])
a.logger.Warnf("metadata key %s contains disallowed characters, sanitized to %s", key, nks)
delete(metadata, key)
metadata[nks] = val
key = nks
}
// Remove all non-ascii characters
n = 0
newVal := make([]byte, len(val))
for i := 0; i < len(val); i++ {
if val[i] > 127 {
continue
}
newVal[n] = val[i]
n++
}
metadata[key] = string(newVal[:n])
}
return metadata
}

View File

@ -17,83 +17,12 @@ import (
"context" "context"
"testing" "testing"
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/dapr/components-contrib/bindings" "github.com/dapr/components-contrib/bindings"
"github.com/dapr/kit/logger" "github.com/dapr/kit/logger"
) )
func TestParseMetadata(t *testing.T) {
m := bindings.Metadata{}
blobStorage := NewAzureBlobStorage(logger.NewLogger("test")).(*AzureBlobStorage)
t.Run("parse all metadata", func(t *testing.T) {
m.Properties = map[string]string{
"storageAccount": "account",
"storageAccessKey": "key",
"container": "test",
"getBlobRetryCount": "5",
"decodeBase64": "true",
}
meta, err := blobStorage.parseMetadata(m)
assert.Nil(t, err)
assert.Equal(t, "test", meta.Container)
assert.Equal(t, "account", meta.AccountName)
// storageAccessKey is parsed in the azauth package
assert.Equal(t, true, meta.DecodeBase64)
assert.Equal(t, 5, meta.GetBlobRetryCount)
assert.Equal(t, azblob.PublicAccessNone, meta.PublicAccessLevel)
})
t.Run("parse metadata with publicAccessLevel = blob", func(t *testing.T) {
m.Properties = map[string]string{
"storageAccount": "account",
"storageAccessKey": "key",
"container": "test",
"publicAccessLevel": "blob",
}
meta, err := blobStorage.parseMetadata(m)
assert.Nil(t, err)
assert.Equal(t, azblob.PublicAccessBlob, meta.PublicAccessLevel)
})
t.Run("parse metadata with publicAccessLevel = container", func(t *testing.T) {
m.Properties = map[string]string{
"storageAccount": "account",
"storageAccessKey": "key",
"container": "test",
"publicAccessLevel": "container",
}
meta, err := blobStorage.parseMetadata(m)
assert.Nil(t, err)
assert.Equal(t, azblob.PublicAccessContainer, meta.PublicAccessLevel)
})
t.Run("parse metadata with invalid publicAccessLevel", func(t *testing.T) {
m.Properties = map[string]string{
"storageAccount": "account",
"storageAccessKey": "key",
"container": "test",
"publicAccessLevel": "invalid",
}
_, err := blobStorage.parseMetadata(m)
assert.Error(t, err)
})
t.Run("sanitize metadata if necessary", func(t *testing.T) {
m.Properties = map[string]string{
"somecustomfield": "some-custom-value",
"specialfield": "special:valueÜ",
"not-allowed:": "not-allowed",
}
meta := blobStorage.sanitizeMetadata(m.Properties)
assert.Equal(t, meta["somecustomfield"], "some-custom-value")
assert.Equal(t, meta["specialfield"], "special:value")
assert.Equal(t, meta["notallowed"], "not-allowed")
})
}
func TestGetOption(t *testing.T) { func TestGetOption(t *testing.T) {
blobStorage := NewAzureBlobStorage(logger.NewLogger("test")).(*AzureBlobStorage) blobStorage := NewAzureBlobStorage(logger.NewLogger("test")).(*AzureBlobStorage)

View File

@ -20,6 +20,7 @@ import (
"strings" "strings"
"time" "time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/data/azcosmos" "github.com/Azure/azure-sdk-for-go/sdk/data/azcosmos"
"github.com/dapr/components-contrib/bindings" "github.com/dapr/components-contrib/bindings"
@ -60,6 +61,14 @@ func (c *CosmosDB) Init(metadata bindings.Metadata) error {
c.partitionKey = m.PartitionKey c.partitionKey = m.PartitionKey
opts := azcosmos.ClientOptions{
ClientOptions: policy.ClientOptions{
Telemetry: policy.TelemetryOptions{
ApplicationID: "dapr-" + logger.DaprVersion,
},
},
}
// Create the client; first, try authenticating with a master key, if present // Create the client; first, try authenticating with a master key, if present
var client *azcosmos.Client var client *azcosmos.Client
if m.MasterKey != "" { if m.MasterKey != "" {
@ -67,7 +76,7 @@ func (c *CosmosDB) Init(metadata bindings.Metadata) error {
if keyErr != nil { if keyErr != nil {
return keyErr return keyErr
} }
client, err = azcosmos.NewClientWithKey(m.URL, cred, nil) client, err = azcosmos.NewClientWithKey(m.URL, cred, &opts)
if err != nil { if err != nil {
return err return err
} }
@ -81,7 +90,7 @@ func (c *CosmosDB) Init(metadata bindings.Metadata) error {
if errToken != nil { if errToken != nil {
return errToken return errToken
} }
client, err = azcosmos.NewClient(m.URL, token, nil) client, err = azcosmos.NewClient(m.URL, token, &opts)
if err != nil { if err != nil {
return err return err
} }

View File

@ -25,9 +25,9 @@ import (
"github.com/dapr/components-contrib/bindings" "github.com/dapr/components-contrib/bindings"
azauth "github.com/dapr/components-contrib/internal/authentication/azure" azauth "github.com/dapr/components-contrib/internal/authentication/azure"
"github.com/dapr/components-contrib/internal/utils"
contribMetadata "github.com/dapr/components-contrib/metadata" contribMetadata "github.com/dapr/components-contrib/metadata"
"github.com/dapr/kit/logger" "github.com/dapr/kit/logger"
"github.com/dapr/kit/ptr"
) )
const ( const (
@ -50,6 +50,8 @@ type AzureQueueHelper struct {
queueURL azqueue.QueueURL queueURL azqueue.QueueURL
logger logger.Logger logger logger.Logger
decodeBase64 bool decodeBase64 bool
encodeBase64 bool
visibilityTimeout time.Duration
} }
// Init sets up this helper. // Init sets up this helper.
@ -73,6 +75,8 @@ func (d *AzureQueueHelper) Init(metadata bindings.Metadata) (*storageQueuesMetad
p := azqueue.NewPipeline(credential, pipelineOptions) p := azqueue.NewPipeline(credential, pipelineOptions)
d.decodeBase64 = m.DecodeBase64 d.decodeBase64 = m.DecodeBase64
d.encodeBase64 = m.EncodeBase64
d.visibilityTimeout = *m.VisibilityTimeout
if m.QueueEndpoint != "" { if m.QueueEndpoint != "" {
URL, parseErr := url.Parse(fmt.Sprintf("%s/%s/%s", m.QueueEndpoint, m.AccountName, m.QueueName)) URL, parseErr := url.Parse(fmt.Sprintf("%s/%s/%s", m.QueueEndpoint, m.AccountName, m.QueueName))
@ -103,6 +107,10 @@ func (d *AzureQueueHelper) Write(ctx context.Context, data []byte, ttl *time.Dur
s = string(data) s = string(data)
} }
if d.encodeBase64 {
s = base64.StdEncoding.EncodeToString([]byte(s))
}
if ttl == nil { if ttl == nil {
ttlToUse := defaultTTL ttlToUse := defaultTTL
ttl = &ttlToUse ttl = &ttlToUse
@ -114,7 +122,7 @@ func (d *AzureQueueHelper) Write(ctx context.Context, data []byte, ttl *time.Dur
func (d *AzureQueueHelper) Read(ctx context.Context, consumer *consumer) error { func (d *AzureQueueHelper) Read(ctx context.Context, consumer *consumer) error {
messagesURL := d.queueURL.NewMessagesURL() messagesURL := d.queueURL.NewMessagesURL()
res, err := messagesURL.Dequeue(ctx, 1, time.Second*30) res, err := messagesURL.Dequeue(ctx, 1, d.visibilityTimeout)
if err != nil { if err != nil {
return err return err
} }
@ -174,7 +182,9 @@ type storageQueuesMetadata struct {
QueueEndpoint string QueueEndpoint string
AccountName string AccountName string
DecodeBase64 bool DecodeBase64 bool
EncodeBase64 bool
ttl *time.Duration ttl *time.Duration
VisibilityTimeout *time.Duration
} }
// NewAzureStorageQueues returns a new AzureStorageQueues instance. // NewAzureStorageQueues returns a new AzureStorageQueues instance.
@ -192,29 +202,31 @@ func (a *AzureStorageQueues) Init(metadata bindings.Metadata) (err error) {
return nil return nil
} }
func parseMetadata(metadata bindings.Metadata) (*storageQueuesMetadata, error) { func parseMetadata(meta bindings.Metadata) (*storageQueuesMetadata, error) {
var m storageQueuesMetadata m := storageQueuesMetadata{
VisibilityTimeout: ptr.Of(time.Second * 30),
}
// AccountKey is parsed in azauth // AccountKey is parsed in azauth
if val, ok := contribMetadata.GetMetadataProperty(metadata.Properties, azauth.StorageAccountNameKeys...); ok && val != "" { contribMetadata.DecodeMetadata(meta.Properties, &m)
if val, ok := contribMetadata.GetMetadataProperty(meta.Properties, azauth.StorageAccountNameKeys...); ok && val != "" {
m.AccountName = val m.AccountName = val
} else { } else {
return nil, fmt.Errorf("missing or empty %s field from metadata", azauth.StorageAccountNameKeys[0]) return nil, fmt.Errorf("missing or empty %s field from metadata", azauth.StorageAccountNameKeys[0])
} }
if val, ok := contribMetadata.GetMetadataProperty(metadata.Properties, azauth.StorageQueueNameKeys...); ok && val != "" { if val, ok := contribMetadata.GetMetadataProperty(meta.Properties, azauth.StorageQueueNameKeys...); ok && val != "" {
m.QueueName = val m.QueueName = val
} else { } else {
return nil, fmt.Errorf("missing or empty %s field from metadata", azauth.StorageQueueNameKeys[0]) return nil, fmt.Errorf("missing or empty %s field from metadata", azauth.StorageQueueNameKeys[0])
} }
if val, ok := contribMetadata.GetMetadataProperty(metadata.Properties, azauth.StorageEndpointKeys...); ok && val != "" { if val, ok := contribMetadata.GetMetadataProperty(meta.Properties, azauth.StorageEndpointKeys...); ok && val != "" {
m.QueueEndpoint = val m.QueueEndpoint = val
} }
m.DecodeBase64 = utils.IsTruthy(metadata.Properties["decodeBase64"]) ttl, ok, err := contribMetadata.TryGetTTL(meta.Properties)
ttl, ok, err := contribMetadata.TryGetTTL(metadata.Properties)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -25,6 +25,7 @@ import (
"github.com/dapr/components-contrib/bindings" "github.com/dapr/components-contrib/bindings"
"github.com/dapr/components-contrib/metadata" "github.com/dapr/components-contrib/metadata"
"github.com/dapr/kit/logger" "github.com/dapr/kit/logger"
"github.com/dapr/kit/ptr"
) )
type MockHelper struct { type MockHelper struct {
@ -297,6 +298,7 @@ func TestParseMetadata(t *testing.T) {
expectedQueueName string expectedQueueName string
expectedQueueEndpointURL string expectedQueueEndpointURL string
expectedTTL *time.Duration expectedTTL *time.Duration
expectedVisibilityTimeout *time.Duration
}{ }{
{ {
name: "Account and key", name: "Account and key",
@ -304,6 +306,7 @@ func TestParseMetadata(t *testing.T) {
// expectedAccountKey: "myKey", // expectedAccountKey: "myKey",
expectedQueueName: "queue1", expectedQueueName: "queue1",
expectedQueueEndpointURL: "", expectedQueueEndpointURL: "",
expectedVisibilityTimeout: ptr.Of(30 * time.Second),
}, },
{ {
name: "Accout, key, and endpoint", name: "Accout, key, and endpoint",
@ -311,6 +314,7 @@ func TestParseMetadata(t *testing.T) {
// expectedAccountKey: "myKey", // expectedAccountKey: "myKey",
expectedQueueName: "queue1", expectedQueueName: "queue1",
expectedQueueEndpointURL: "https://foo.example.com:10001", expectedQueueEndpointURL: "https://foo.example.com:10001",
expectedVisibilityTimeout: ptr.Of(30 * time.Second),
}, },
{ {
name: "Empty TTL", name: "Empty TTL",
@ -318,6 +322,7 @@ func TestParseMetadata(t *testing.T) {
// expectedAccountKey: "myKey", // expectedAccountKey: "myKey",
expectedQueueName: "queue1", expectedQueueName: "queue1",
expectedQueueEndpointURL: "", expectedQueueEndpointURL: "",
expectedVisibilityTimeout: ptr.Of(30 * time.Second),
}, },
{ {
name: "With TTL", name: "With TTL",
@ -326,6 +331,13 @@ func TestParseMetadata(t *testing.T) {
expectedQueueName: "queue1", expectedQueueName: "queue1",
expectedTTL: &oneSecondDuration, expectedTTL: &oneSecondDuration,
expectedQueueEndpointURL: "", expectedQueueEndpointURL: "",
expectedVisibilityTimeout: ptr.Of(30 * time.Second),
},
{
name: "With visibility timeout",
properties: map[string]string{"accessKey": "myKey", "storageAccountQueue": "queue1", "storageAccount": "devstoreaccount1", "visibilityTimeout": "5s"},
expectedQueueName: "queue1",
expectedVisibilityTimeout: ptr.Of(5 * time.Second),
}, },
} }
@ -341,6 +353,7 @@ func TestParseMetadata(t *testing.T) {
assert.Equal(t, tt.expectedQueueName, meta.QueueName) assert.Equal(t, tt.expectedQueueName, meta.QueueName)
assert.Equal(t, tt.expectedTTL, meta.ttl) assert.Equal(t, tt.expectedTTL, meta.ttl)
assert.Equal(t, tt.expectedQueueEndpointURL, meta.QueueEndpoint) assert.Equal(t, tt.expectedQueueEndpointURL, meta.QueueEndpoint)
assert.Equal(t, tt.expectedVisibilityTimeout, meta.VisibilityTimeout)
}) })
} }
} }

View File

@ -18,8 +18,10 @@ import (
"fmt" "fmt"
"time" "time"
"github.com/benbjohnson/clock"
"github.com/pkg/errors" "github.com/pkg/errors"
cron "github.com/robfig/cron/v3"
cron "github.com/dapr/kit/cron"
"github.com/dapr/components-contrib/bindings" "github.com/dapr/components-contrib/bindings"
"github.com/dapr/kit/logger" "github.com/dapr/kit/logger"
@ -31,14 +33,18 @@ type Binding struct {
name string name string
schedule string schedule string
parser cron.Parser parser cron.Parser
runningCtx context.Context clk clock.Clock
runningCancel context.CancelFunc
} }
// NewCron returns a new Cron event input binding. // NewCron returns a new Cron event input binding.
func NewCron(logger logger.Logger) bindings.InputOutputBinding { func NewCron(logger logger.Logger) bindings.InputBinding {
return NewCronWithClock(logger, clock.New())
}
func NewCronWithClock(logger logger.Logger, clk clock.Clock) bindings.InputBinding {
return &Binding{ return &Binding{
logger: logger, logger: logger,
clk: clk,
parser: cron.NewParser( parser: cron.NewParser(
cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor, cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor,
), ),
@ -62,14 +68,12 @@ func (b *Binding) Init(metadata bindings.Metadata) error {
} }
b.schedule = s b.schedule = s
b.resetContext()
return nil return nil
} }
// Read triggers the Cron scheduler. // Read triggers the Cron scheduler.
func (b *Binding) Read(ctx context.Context, handler bindings.Handler) error { func (b *Binding) Read(ctx context.Context, handler bindings.Handler) error {
c := cron.New(cron.WithParser(b.parser)) c := cron.New(cron.WithParser(b.parser), cron.WithClock(b.clk))
id, err := c.AddFunc(b.schedule, func() { id, err := c.AddFunc(b.schedule, func() {
b.logger.Debugf("name: %s, schedule fired: %v", b.name, time.Now()) b.logger.Debugf("name: %s, schedule fired: %v", b.name, time.Now())
handler(ctx, &bindings.ReadResponse{ handler(ctx, &bindings.ReadResponse{
@ -86,50 +90,11 @@ func (b *Binding) Read(ctx context.Context, handler bindings.Handler) error {
b.logger.Debugf("name: %s, next run: %v", b.name, time.Until(c.Entry(id).Next)) b.logger.Debugf("name: %s, next run: %v", b.name, time.Until(c.Entry(id).Next))
go func() { go func() {
// Wait for a context to be canceled // Wait for context to be canceled
select { <-ctx.Done()
case <-b.runningCtx.Done():
// Do nothing
case <-ctx.Done():
b.resetContext()
}
b.logger.Debugf("name: %s, stopping schedule: %s", b.name, b.schedule) b.logger.Debugf("name: %s, stopping schedule: %s", b.name, b.schedule)
c.Stop() c.Stop()
}() }()
return nil return nil
} }
// Invoke exposes way to stop previously started cron.
func (b *Binding) Invoke(ctx context.Context, req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
b.logger.Debugf("name: %s, operation: %v", b.name, req.Operation)
switch req.Operation {
case bindings.DeleteOperation:
b.resetContext()
return &bindings.InvokeResponse{
Metadata: map[string]string{
"schedule": b.schedule,
"stopTimeUTC": time.Now().UTC().String(),
},
}, nil
default:
return nil, fmt.Errorf("invalid operation: '%v', only '%v' supported",
req.Operation, bindings.DeleteOperation)
}
}
// Operations method returns the supported operations by this binding.
func (b *Binding) Operations() []bindings.OperationKind {
return []bindings.OperationKind{
bindings.DeleteOperation,
}
}
// Resets the runningCtx
func (b *Binding) resetContext() {
if b.runningCancel != nil {
b.runningCancel()
}
b.runningCtx, b.runningCancel = context.WithCancel(context.Background())
}

View File

@ -19,6 +19,7 @@ import (
"testing" "testing"
"time" "time"
"github.com/benbjohnson/clock"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/dapr/components-contrib/bindings" "github.com/dapr/components-contrib/bindings"
@ -35,90 +36,112 @@ func getTestMetadata(schedule string) bindings.Metadata {
} }
func getNewCron() *Binding { func getNewCron() *Binding {
clk := clock.New()
return getNewCronWithClock(clk)
}
func getNewCronWithClock(clk clock.Clock) *Binding {
l := logger.NewLogger("cron") l := logger.NewLogger("cron")
if os.Getenv("DEBUG") != "" { if os.Getenv("DEBUG") != "" {
l.SetOutputLevel(logger.DebugLevel) l.SetOutputLevel(logger.DebugLevel)
} }
return NewCronWithClock(l, clk).(*Binding)
return NewCron(l).(*Binding)
} }
// go test -v -timeout 15s -count=1 ./bindings/cron/. // go test -v -timeout 15s -count=1 ./bindings/cron/.
//
//nolint:dupword
func TestCronInitSuccess(t *testing.T) { func TestCronInitSuccess(t *testing.T) {
c := getNewCron() initTests := []struct {
err := c.Init(getTestMetadata("@every 1h")) schedule string
assert.NoErrorf(t, err, "error initializing valid schedule") errorExpected bool
} }{
{
schedule: "@every 1s", // macro cron format
errorExpected: false,
},
{
schedule: "*/3 * * * * *", // non standard cron format
errorExpected: false,
},
{
schedule: "*/15 * * * *", // standard cron format
errorExpected: false,
},
{
schedule: "0 0 1 * *", // standard cron format
errorExpected: false,
},
{
schedule: "0 0 */6 ? * *", // quartz cron format
errorExpected: false,
},
{
schedule: "INVALID_SCHEDULE", // invalid cron format
errorExpected: true,
},
}
func TestCronInitWithSeconds(t *testing.T) { for _, test := range initTests {
c := getNewCron() c := getNewCron()
err := c.Init(getTestMetadata("15 * * * * *")) err := c.Init(getTestMetadata(test.schedule))
assert.NoErrorf(t, err, "error initializing schedule with seconds") if test.errorExpected {
} assert.Errorf(t, err, "Got no error while initializing an invalid schedule: %s", test.schedule)
} else {
func TestCronInitFailure(t *testing.T) { assert.NoErrorf(t, err, "error initializing valid schedule: %s", test.schedule)
c := getNewCron() }
err := c.Init(getTestMetadata("invalid schedule")) }
assert.Errorf(t, err, "no error while initializing invalid schedule")
} }
// TestLongRead // TestLongRead
// go test -v -count=1 -timeout 15s -run TestLongRead ./bindings/cron/. // go test -v -count=1 -timeout 15s -run TestLongRead ./bindings/cron/.
func TestCronReadWithDeleteInvoke(t *testing.T) { func TestCronRead(t *testing.T) {
c := getNewCron() clk := clock.NewMock()
c := getNewCronWithClock(clk)
schedule := "@every 1s" schedule := "@every 1s"
assert.NoErrorf(t, c.Init(getTestMetadata(schedule)), "error initializing valid schedule") assert.NoErrorf(t, c.Init(getTestMetadata(schedule)), "error initializing valid schedule")
testsNum := 3 expectedCount := 5
i := 0 observedCount := 0
err := c.Read(context.Background(), func(ctx context.Context, res *bindings.ReadResponse) ([]byte, error) { err := c.Read(context.Background(), func(ctx context.Context, res *bindings.ReadResponse) ([]byte, error) {
assert.NotNil(t, res) assert.NotNil(t, res)
assert.LessOrEqualf(t, i, testsNum, "Invoke didn't stop the schedule") observedCount++
i++
if i == testsNum {
resp, err := c.Invoke(context.Background(), &bindings.InvokeRequest{
Operation: bindings.DeleteOperation,
})
assert.NoError(t, err)
scheduleVal, exists := resp.Metadata["schedule"]
assert.Truef(t, exists, "Response metadata doesn't include the expected 'schedule' key")
assert.Equal(t, schedule, scheduleVal)
}
return nil, nil return nil, nil
}) })
time.Sleep(time.Duration(testsNum+3) * time.Second) // Check if cron triggers 5 times in 5 seconds
assert.Equal(t, testsNum, i) for i := 0; i < expectedCount; i++ {
// Add time to mock clock in 1 second intervals using loop to allow cron go routine to run
clk.Add(time.Second)
}
// Wait for 1 second after adding the last second to mock clock to allow cron to finish triggering
time.Sleep(1 * time.Second)
assert.Equal(t, expectedCount, observedCount, "Cron did not trigger expected number of times, expected %d, got %d", expectedCount, observedCount)
assert.NoErrorf(t, err, "error on read") assert.NoErrorf(t, err, "error on read")
} }
func TestCronReadWithContextCancellation(t *testing.T) { func TestCronReadWithContextCancellation(t *testing.T) {
c := getNewCron() clk := clock.NewMock()
c := getNewCronWithClock(clk)
schedule := "@every 1s" schedule := "@every 1s"
assert.NoErrorf(t, c.Init(getTestMetadata(schedule)), "error initializing valid schedule") assert.NoErrorf(t, c.Init(getTestMetadata(schedule)), "error initializing valid schedule")
testsNum := 3 expectedCount := 5
i := 0 observedCount := 0
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
err := c.Read(ctx, func(ctx context.Context, res *bindings.ReadResponse) ([]byte, error) { err := c.Read(ctx, func(ctx context.Context, res *bindings.ReadResponse) ([]byte, error) {
assert.NotNil(t, res) assert.NotNil(t, res)
assert.LessOrEqualf(t, i, testsNum, "Invoke didn't stop the schedule") assert.LessOrEqualf(t, observedCount, expectedCount, "Invoke didn't stop the schedule")
i++ observedCount++
if i == testsNum { if observedCount == expectedCount {
// Cancel context after 5 triggers
cancel() cancel()
} }
return nil, nil return nil, nil
}) })
time.Sleep(time.Duration(testsNum+3) * time.Second) // Check if cron triggers only 5 times in 10 seconds since context should be cancelled after 5 triggers
assert.Equal(t, testsNum, i) for i := 0; i < 10; i++ {
// Add time to mock clock in 1 second intervals using loop to allow cron go routine to run
clk.Add(time.Second)
}
time.Sleep(1 * time.Second)
assert.Equal(t, expectedCount, observedCount, "Cron did not trigger expected number of times, expected %d, got %d", expectedCount, observedCount)
assert.NoErrorf(t, err, "error on read") assert.NoErrorf(t, err, "error on read")
} }
func TestCronInvokeInvalidOperation(t *testing.T) {
c := getNewCron()
initErr := c.Init(getTestMetadata("@every 1s"))
assert.NoErrorf(t, initErr, "Error on Init")
_, err := c.Invoke(context.Background(), &bindings.InvokeRequest{
Operation: bindings.CreateOperation,
})
assert.Error(t, err)
}

View File

@ -196,7 +196,7 @@ func (g *GCPStorage) create(ctx context.Context, req *bindings.InvokeRequest) (*
func (g *GCPStorage) get(ctx context.Context, req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) { func (g *GCPStorage) get(ctx context.Context, req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
metadata, err := g.metadata.mergeWithRequestMetadata(req) metadata, err := g.metadata.mergeWithRequestMetadata(req)
if err != nil { if err != nil {
return nil, fmt.Errorf("gcp binding binding error. error merge metadata : %w", err) return nil, fmt.Errorf("gcp binding error. error merge metadata : %w", err)
} }
var key string var key string

View File

@ -28,6 +28,7 @@ import (
"github.com/mitchellh/mapstructure" "github.com/mitchellh/mapstructure"
"github.com/dapr/components-contrib/bindings" "github.com/dapr/components-contrib/bindings"
"github.com/dapr/components-contrib/internal/utils"
"github.com/dapr/kit/logger" "github.com/dapr/kit/logger"
) )
@ -37,7 +38,7 @@ import (
type HTTPSource struct { type HTTPSource struct {
metadata httpMetadata metadata httpMetadata
client *http.Client client *http.Client
errorIfNot2XX bool
logger logger.Logger logger logger.Logger
} }
@ -70,6 +71,13 @@ func (h *HTTPSource) Init(metadata bindings.Metadata) error {
Transport: netTransport, Transport: netTransport,
} }
if val, ok := metadata.Properties["errorIfNot2XX"]; ok {
h.errorIfNot2XX = utils.IsTruthy(val)
} else {
// Default behavior
h.errorIfNot2XX = true
}
return nil return nil
} }
@ -91,6 +99,9 @@ func (h *HTTPSource) Operations() []bindings.OperationKind {
// Invoke performs an HTTP request to the configured HTTP endpoint. // Invoke performs an HTTP request to the configured HTTP endpoint.
func (h *HTTPSource) Invoke(ctx context.Context, req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) { func (h *HTTPSource) Invoke(ctx context.Context, req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
u := h.metadata.URL u := h.metadata.URL
errorIfNot2XX := h.errorIfNot2XX // Default to the component config (default is true)
if req.Metadata != nil { if req.Metadata != nil {
if path, ok := req.Metadata["path"]; ok { if path, ok := req.Metadata["path"]; ok {
// Simplicity and no "../../.." type exploits. // Simplicity and no "../../.." type exploits.
@ -99,6 +110,13 @@ func (h *HTTPSource) Invoke(ctx context.Context, req *bindings.InvokeRequest) (*
return nil, fmt.Errorf("invalid path: %s", path) return nil, fmt.Errorf("invalid path: %s", path)
} }
} }
if _, ok := req.Metadata["errorIfNot2XX"]; ok {
errorIfNot2XX = utils.IsTruthy(req.Metadata["errorIfNot2XX"])
}
} else {
// Prevent things below from failing if req.Metadata is nil.
req.Metadata = make(map[string]string)
} }
var body io.Reader var body io.Reader
@ -164,8 +182,8 @@ func (h *HTTPSource) Invoke(ctx context.Context, req *bindings.InvokeRequest) (*
metadata[key] = strings.Join(values, ", ") metadata[key] = strings.Join(values, ", ")
} }
// Create an error for non-200 status codes. // Create an error for non-200 status codes unless suppressed.
if resp.StatusCode/100 != 2 { if errorIfNot2XX && resp.StatusCode/100 != 2 {
err = fmt.Errorf("received status code %d", resp.StatusCode) err = fmt.Errorf("received status code %d", resp.StatusCode)
} }

View File

@ -18,6 +18,7 @@ import (
"io" "io"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
"strconv"
"strings" "strings"
"testing" "testing"
@ -45,12 +46,38 @@ func TestOperations(t *testing.T) {
}, opers) }, opers)
} }
func TestInit(t *testing.T) { type TestCase struct {
var path string input string
operation string
metadata map[string]string
path string
err string
statusCode int
}
func (tc TestCase) ToInvokeRequest() bindings.InvokeRequest {
requestMetadata := tc.metadata
if requestMetadata == nil {
requestMetadata = map[string]string{}
}
requestMetadata["X-Status-Code"] = strconv.Itoa(tc.statusCode)
return bindings.InvokeRequest{
Data: []byte(tc.input),
Metadata: requestMetadata,
Operation: bindings.OperationKind(tc.operation),
}
}
type HTTPHandler struct {
Path string
}
func (h *HTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
h.Path = req.URL.Path
s := httptest.NewServer(
http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
path = req.URL.Path
input := req.Method input := req.Method
if req.Body != nil { if req.Body != nil {
defer req.Body.Close() defer req.Body.Close()
@ -63,37 +90,67 @@ func TestInit(t *testing.T) {
if inputFromHeader != "" { if inputFromHeader != "" {
input = inputFromHeader input = inputFromHeader
} }
w.Header().Set("Content-Type", "text/plain")
if input == "internal server error" {
w.WriteHeader(http.StatusInternalServerError)
}
w.Write([]byte(strings.ToUpper(input)))
}),
)
defer s.Close()
w.Header().Set("Content-Type", "text/plain")
statusCode := req.Header.Get("X-Status-Code")
if statusCode != "" {
code, _ := strconv.Atoi(statusCode)
w.WriteHeader(code)
}
w.Write([]byte(strings.ToUpper(input)))
}
func NewHTTPHandler() *HTTPHandler {
return &HTTPHandler{
Path: "/",
}
}
func InitBinding(s *httptest.Server, extraProps map[string]string) (bindings.OutputBinding, error) {
m := bindings.Metadata{Base: metadata.Base{ m := bindings.Metadata{Base: metadata.Base{
Properties: map[string]string{ Properties: map[string]string{
"url": s.URL, "url": s.URL,
}, },
}} }}
if extraProps != nil {
for k, v := range extraProps {
m.Properties[k] = v
}
}
hs := bindingHttp.NewHTTP(logger.NewLogger("test")) hs := bindingHttp.NewHTTP(logger.NewLogger("test"))
err := hs.Init(m) err := hs.Init(m)
return hs, err
}
func TestInit(t *testing.T) {
handler := NewHTTPHandler()
s := httptest.NewServer(handler)
defer s.Close()
_, err := InitBinding(s, nil)
require.NoError(t, err)
}
func TestDefaultBehavior(t *testing.T) {
handler := NewHTTPHandler()
s := httptest.NewServer(handler)
defer s.Close()
hs, err := InitBinding(s, nil)
require.NoError(t, err) require.NoError(t, err)
tests := map[string]struct { tests := map[string]TestCase{
input string
operation string
metadata map[string]string
path string
err string
}{
"get": { "get": {
input: "GET", input: "GET",
operation: "get", operation: "get",
metadata: nil, metadata: nil,
path: "/", path: "/",
err: "", err: "",
statusCode: 200,
}, },
"request headers": { "request headers": {
input: "OVERRIDE", input: "OVERRIDE",
@ -101,6 +158,7 @@ func TestInit(t *testing.T) {
metadata: map[string]string{"X-Input": "override"}, metadata: map[string]string{"X-Input": "override"},
path: "/", path: "/",
err: "", err: "",
statusCode: 200,
}, },
"post": { "post": {
input: "expected", input: "expected",
@ -108,10 +166,12 @@ func TestInit(t *testing.T) {
metadata: map[string]string{"path": "/test"}, metadata: map[string]string{"path": "/test"},
path: "/test", path: "/test",
err: "", err: "",
statusCode: 201,
}, },
"put": { "put": {
input: "expected", input: "expected",
operation: "put", operation: "put",
statusCode: 204,
metadata: map[string]string{"path": "/test"}, metadata: map[string]string{"path": "/test"},
path: "/test", path: "/test",
err: "", err: "",
@ -122,6 +182,7 @@ func TestInit(t *testing.T) {
metadata: map[string]string{"path": "/test"}, metadata: map[string]string{"path": "/test"},
path: "/test", path: "/test",
err: "", err: "",
statusCode: 206,
}, },
"delete": { "delete": {
input: "DELETE", input: "DELETE",
@ -129,6 +190,7 @@ func TestInit(t *testing.T) {
metadata: nil, metadata: nil,
path: "/", path: "/",
err: "", err: "",
statusCode: 200,
}, },
"options": { "options": {
input: "OPTIONS", input: "OPTIONS",
@ -136,6 +198,7 @@ func TestInit(t *testing.T) {
metadata: nil, metadata: nil,
path: "/", path: "/",
err: "", err: "",
statusCode: 200,
}, },
"trace": { "trace": {
input: "TRACE", input: "TRACE",
@ -143,6 +206,7 @@ func TestInit(t *testing.T) {
metadata: nil, metadata: nil,
path: "/", path: "/",
err: "", err: "",
statusCode: 200,
}, },
"backward compatibility": { "backward compatibility": {
input: "expected", input: "expected",
@ -150,6 +214,7 @@ func TestInit(t *testing.T) {
metadata: map[string]string{"path": "/test"}, metadata: map[string]string{"path": "/test"},
path: "/test", path: "/test",
err: "", err: "",
statusCode: 200,
}, },
"invalid path": { "invalid path": {
input: "expected", input: "expected",
@ -157,6 +222,7 @@ func TestInit(t *testing.T) {
metadata: map[string]string{"path": "/../test"}, metadata: map[string]string{"path": "/../test"},
path: "", path: "",
err: "invalid path: /../test", err: "invalid path: /../test",
statusCode: 400,
}, },
"invalid operation": { "invalid operation": {
input: "notvalid", input: "notvalid",
@ -164,6 +230,7 @@ func TestInit(t *testing.T) {
metadata: map[string]string{"path": "/test"}, metadata: map[string]string{"path": "/test"},
path: "/test", path: "/test",
err: "invalid operation: notvalid", err: "invalid operation: notvalid",
statusCode: 400,
}, },
"internal server error": { "internal server error": {
input: "internal server error", input: "internal server error",
@ -171,20 +238,124 @@ func TestInit(t *testing.T) {
metadata: map[string]string{"path": "/"}, metadata: map[string]string{"path": "/"},
path: "/", path: "/",
err: "received status code 500", err: "received status code 500",
statusCode: 500,
},
"internal server error suppressed": {
input: "internal server error", // trigger 500 downstream
operation: "post",
metadata: map[string]string{"path": "/", "errorIfNot2XX": "false"},
path: "/",
err: "",
statusCode: 500,
},
"redirect should not yield an error": {
input: "show me the treasure!",
operation: "post",
metadata: map[string]string{"path": "/", "errorIfNot2XX": "false"},
path: "/",
err: "",
statusCode: 302,
},
"redirect results in an error if not suppressed": {
input: "show me the treasure!",
operation: "post",
metadata: map[string]string{"path": "/"},
path: "/",
err: "received status code 302",
statusCode: 302,
}, },
} }
for name, tc := range tests { for name, tc := range tests {
t.Run(name, func(t *testing.T) { t.Run(name, func(t *testing.T) {
response, err := hs.Invoke(context.TODO(), &bindings.InvokeRequest{ req := tc.ToInvokeRequest()
Data: []byte(tc.input), response, err := hs.Invoke(context.TODO(), &req)
Metadata: tc.metadata,
Operation: bindings.OperationKind(tc.operation),
})
if tc.err == "" { if tc.err == "" {
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, tc.path, path) assert.Equal(t, tc.path, handler.Path)
if tc.statusCode != 204 {
// 204 will return no content, so we should skip checking
assert.Equal(t, strings.ToUpper(tc.input), string(response.Data)) assert.Equal(t, strings.ToUpper(tc.input), string(response.Data))
}
assert.Equal(t, "text/plain", response.Metadata["Content-Type"])
} else {
require.Error(t, err)
assert.Equal(t, tc.err, err.Error())
}
})
}
}
func TestNon2XXErrorsSuppressed(t *testing.T) {
handler := NewHTTPHandler()
s := httptest.NewServer(handler)
defer s.Close()
hs, err := InitBinding(s, map[string]string{"errorIfNot2XX": "false"})
require.NoError(t, err)
tests := map[string]TestCase{
"internal server error": {
input: "internal server error",
operation: "post",
metadata: map[string]string{"path": "/"},
path: "/",
err: "",
statusCode: 500,
},
"internal server error overridden": {
input: "internal server error",
operation: "post",
metadata: map[string]string{"path": "/", "errorIfNot2XX": "true"},
path: "/",
err: "received status code 500",
statusCode: 500,
},
"internal server error suppressed by request and component": {
input: "internal server error", // trigger 500
operation: "post",
metadata: map[string]string{"path": "/", "errorIfNot2XX": "false"},
path: "/",
err: "",
statusCode: 500,
},
"trace": {
input: "TRACE",
operation: "trace",
metadata: nil,
path: "/",
err: "",
statusCode: 200,
},
"backward compatibility": {
input: "expected",
operation: "create",
metadata: map[string]string{"path": "/test"},
path: "/test",
err: "",
statusCode: 200,
},
"invalid path": {
input: "expected",
operation: "POST",
metadata: map[string]string{"path": "/../test"},
path: "",
err: "invalid path: /../test",
statusCode: 400,
},
}
for name, tc := range tests {
t.Run(name, func(t *testing.T) {
req := tc.ToInvokeRequest()
response, err := hs.Invoke(context.TODO(), &req)
if tc.err == "" {
require.NoError(t, err)
assert.Equal(t, tc.path, handler.Path)
if tc.statusCode != 204 {
// 204 will return no content, so we should skip checking
assert.Equal(t, strings.ToUpper(tc.input), string(response.Data))
}
assert.Equal(t, "text/plain", response.Metadata["Content-Type"]) assert.Equal(t, "text/plain", response.Metadata["Content-Type"])
} else { } else {
require.Error(t, err) require.Error(t, err)

143
bindings/kubemq/kubemq.go Normal file
View File

@ -0,0 +1,143 @@
package kubemq
import (
"context"
"fmt"
"strings"
"time"
qs "github.com/kubemq-io/kubemq-go/queues_stream"
"github.com/dapr/components-contrib/bindings"
"github.com/dapr/kit/logger"
)
// Kubemq combines the input and output binding interfaces.
// Declared as an interface to allow unit testing with mocks.
type Kubemq interface {
	bindings.InputBinding
	bindings.OutputBinding
}

// kubeMQ is the KubeMQ queues-stream binding implementation.
type kubeMQ struct {
	client    *qs.QueuesStreamClient // connection created in Init
	opts      *options               // parsed component metadata
	logger    logger.Logger
	ctx       context.Context    // component lifetime context, set in Init
	ctxCancel context.CancelFunc // cancels ctx, set in Init
}
// NewKubeMQ returns an uninitialized KubeMQ queue binding.
// Init must be called before the binding can be used; until then the
// client, options, and contexts remain at their zero values.
func NewKubeMQ(logger logger.Logger) Kubemq {
	return &kubeMQ{logger: logger}
}
// Init parses the component metadata and establishes the KubeMQ
// queues-stream client connection. It returns an error if the metadata is
// invalid or the connection check fails.
func (k *kubeMQ) Init(metadata bindings.Metadata) error {
	opts, err := createOptions(metadata)
	if err != nil {
		return err
	}
	k.opts = opts
	// Create the component's lifetime context exactly once. The previous
	// code called context.WithCancel a second time after the client was
	// created, leaking the first context and discarding its cancel func.
	k.ctx, k.ctxCancel = context.WithCancel(context.Background())
	client, err := qs.NewQueuesStreamClient(k.ctx,
		qs.WithAddress(opts.host, opts.port),
		qs.WithCheckConnection(true),
		qs.WithAuthToken(opts.authToken),
		qs.WithAutoReconnect(true),
		qs.WithReconnectInterval(time.Second))
	if err != nil {
		k.logger.Errorf("error init kubemq client error: %s", err.Error())
		return err
	}
	k.client = client
	return nil
}
// Read starts a background poll loop that delivers queue messages to
// handler. Errors from a poll cycle are logged and retried after a
// one-second pause. The loop exits when either the component's lifetime
// context (created in Init) or the caller-supplied ctx is canceled; the
// original only checked k.ctx, so callers could not stop the loop via ctx.
func (k *kubeMQ) Read(ctx context.Context, handler bindings.Handler) error {
	go func() {
		for {
			err := k.processQueueMessage(k.ctx, handler)
			if err != nil {
				k.logger.Error(err.Error())
				time.Sleep(time.Second)
			}
			if k.ctx.Err() != nil || ctx.Err() != nil {
				return
			}
		}
	}()
	return nil
}
// Invoke publishes req.Data to the configured channel. Per-message policy
// values (delay, expiration, max receive count, dead-letter queue) are read
// from req.Metadata. Returns an error if the send fails or the server
// reports a per-message error.
func (k *kubeMQ) Invoke(ctx context.Context, req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
	queueMessage := qs.NewQueueMessage().
		SetChannel(k.opts.channel).
		SetBody(req.Data).
		SetPolicyDelaySeconds(parsePolicyDelaySeconds(req.Metadata)).
		SetPolicyExpirationSeconds(parsePolicyExpirationSeconds(req.Metadata)).
		SetPolicyMaxReceiveCount(parseSetPolicyMaxReceiveCount(req.Metadata)).
		SetPolicyMaxReceiveQueue(parsePolicyMaxReceiveQueue(req.Metadata))
	// Use the caller-supplied ctx (the original passed k.ctx) so that
	// per-request deadlines and cancellation are honored on Send.
	result, err := k.client.Send(ctx, queueMessage)
	if err != nil {
		return nil, err
	}
	if len(result.Results) > 0 {
		if result.Results[0].IsError {
			return nil, fmt.Errorf("error sending queue message: %s", result.Results[0].Error)
		}
	}
	return &bindings.InvokeResponse{
		Data:     nil,
		Metadata: nil,
	}, nil
}
// Operations declares that this binding supports only the "create"
// (publish) output operation.
func (k *kubeMQ) Operations() []bindings.OperationKind {
	return []bindings.OperationKind{bindings.CreateOperation}
}
// processQueueMessage polls one batch of messages from the configured
// channel and feeds each to handler, acking on success and nacking (then
// pausing one second) when the handler returns an error. A poll timeout is
// treated as "no messages" rather than an error.
func (k *kubeMQ) processQueueMessage(ctx context.Context, handler bindings.Handler) error {
	pollReq := qs.NewPollRequest().
		SetChannel(k.opts.channel).
		SetMaxItems(k.opts.pollMaxItems).
		SetWaitTimeout(k.opts.pollTimeoutSeconds).
		SetAutoAck(k.opts.autoAcknowledged)
	pollResp, err := k.client.Poll(ctx, pollReq)
	if err != nil {
		// NOTE: the misspelling matches the error string the KubeMQ server
		// actually returns for a poll timeout — do not "fix" it.
		if strings.Contains(err.Error(), "timout waiting response") {
			return nil
		}
		return err
	}
	if !pollResp.HasMessages() {
		return nil
	}
	for _, msg := range pollResp.Messages {
		_, handlerErr := handler(ctx, &bindings.ReadResponse{Data: msg.Body})
		if handlerErr != nil {
			k.logger.Errorf("error received from response handler: %s", handlerErr.Error())
			if nackErr := msg.NAck(); nackErr != nil {
				k.logger.Errorf("error processing nack message error: %s", nackErr.Error())
			}
			time.Sleep(time.Second)
			continue
		}
		if ackErr := msg.Ack(); ackErr != nil {
			k.logger.Errorf("error processing ack queue message error: %s", ackErr.Error())
			continue
		}
	}
	return nil
}

View File

@ -0,0 +1,194 @@
//go:build integration_test
// +build integration_test
package kubemq
import (
"context"
"fmt"
"os"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/dapr/components-contrib/bindings"
"github.com/dapr/components-contrib/metadata"
"github.com/dapr/kit/logger"
)
const (
// Environment variable containing the host name for KubeMQ integration tests
// To run using docker: docker run -d --hostname -kubemq --name test-kubemq -p 50000:50000 kubemq/kubemq-community:latest
// In that case the address string will be: "localhost:50000"
testKubeMQHostEnvKey = "DAPR_TEST_KUBEMQ_HOST"
)
func getTestKubeMQHost() string {
host := os.Getenv(testKubeMQHostEnvKey)
if host == "" {
host = "localhost:50000"
}
return host
}
// getDefaultMetadata builds the standard binding metadata used by the
// integration tests, targeting the given channel with single-item polling,
// auto-ack, and a short poll timeout.
func getDefaultMetadata(channel string) bindings.Metadata {
	props := map[string]string{
		"address":            getTestKubeMQHost(),
		"channel":            channel,
		"pollMaxItems":       "1",
		"autoAcknowledged":   "true",
		"pollTimeoutSeconds": "2",
	}
	return bindings.Metadata{
		Base: metadata.Base{
			Name:       "kubemq",
			Properties: props,
		},
	}
}
// Test_kubeMQ_Init exercises Init against a live KubeMQ server (this file
// is gated by the integration_test build tag): valid options connect
// successfully, while unreachable hosts or malformed addresses error.
func Test_kubeMQ_Init(t *testing.T) {
	tests := []struct {
		name    string
		meta    bindings.Metadata
		wantErr bool
	}{
		{
			name: "init with valid options",
			meta: bindings.Metadata{
				Base: metadata.Base{
					Name: "kubemq",
					Properties: map[string]string{
						"address":            getTestKubeMQHost(),
						"channel":            "test",
						"pollMaxItems":       "1",
						"autoAcknowledged":   "true",
						"pollTimeoutSeconds": "2",
					},
				},
			},
			wantErr: false,
		},
		{
			// Host does not resolve, so the connection check in Init fails.
			name: "init with invalid options",
			meta: bindings.Metadata{
				Base: metadata.Base{
					Name: "kubemq",
					Properties: map[string]string{
						"address":            "localhost-bad:50000",
						"channel":            "test",
						"pollMaxItems":       "1",
						"autoAcknowledged":   "true",
						"pollTimeoutSeconds": "2",
					},
				},
			},
			wantErr: true,
		},
		{
			// Address lacks the host:port form, so metadata parsing fails.
			name: "init with invalid parsing options",
			meta: bindings.Metadata{
				Base: metadata.Base{
					Name: "kubemq",
					Properties: map[string]string{
						"address":            "bad",
						"channel":            "test",
						"pollMaxItems":       "1",
						"autoAcknowledged":   "true",
						"pollTimeoutSeconds": "2",
					},
				},
			},
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			kubemq := NewKubeMQ(logger.NewLogger("test"))
			err := kubemq.Init(tt.meta)
			if tt.wantErr {
				require.Error(t, err)
			} else {
				require.NoError(t, err)
			}
		})
	}
}
// Test_kubeMQ_Invoke_Read_Single_Message publishes one message via Invoke
// and verifies that the Read handler receives the same payload before the
// 5-second test deadline.
func Test_kubeMQ_Invoke_Read_Single_Message(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
	defer cancel()
	kubemq := NewKubeMQ(logger.NewLogger("test"))
	err := kubemq.Init(getDefaultMetadata("test.read.single"))
	require.NoError(t, err)
	dataReadCh := make(chan []byte)
	invokeRequest := &bindings.InvokeRequest{
		Data:     []byte("test"),
		Metadata: map[string]string{},
	}
	_, err = kubemq.Invoke(ctx, invokeRequest)
	require.NoError(t, err)
	// Handler forwards the received payload to the channel for assertion.
	_ = kubemq.Read(ctx, func(ctx context.Context, req *bindings.ReadResponse) ([]byte, error) {
		dataReadCh <- req.Data
		return req.Data, nil
	})
	select {
	case <-ctx.Done():
		require.Fail(t, "timeout waiting for read response")
	case data := <-dataReadCh:
		require.Equal(t, invokeRequest.Data, data)
	}
}
// Test_kubeMQ_Invoke_Read_Single_MessageWithHandlerError verifies that with
// autoAcknowledged=false a message rejected by a failing handler (NAck) is
// redelivered and can be consumed by a later, successful Read.
func Test_kubeMQ_Invoke_Read_Single_MessageWithHandlerError(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
	defer cancel()
	kubemq := NewKubeMQ(logger.NewLogger("test"))
	md := getDefaultMetadata("test.read.single.error")
	md.Properties["autoAcknowledged"] = "false"
	err := kubemq.Init(md)
	require.NoError(t, err)
	invokeRequest := &bindings.InvokeRequest{
		Data:     []byte("test"),
		Metadata: map[string]string{},
	}
	_, err = kubemq.Invoke(ctx, invokeRequest)
	require.NoError(t, err)
	// First reader always fails, forcing a NAck so the message is redelivered.
	firstReadCtx, firstReadCancel := context.WithTimeout(context.Background(), time.Second*3)
	defer firstReadCancel()
	_ = kubemq.Read(firstReadCtx, func(ctx context.Context, req *bindings.ReadResponse) ([]byte, error) {
		return nil, fmt.Errorf("handler error")
	})
	<-firstReadCtx.Done()
	// Second reader should receive the redelivered message.
	dataReadCh := make(chan []byte)
	secondReadCtx, secondReadCancel := context.WithTimeout(context.Background(), time.Second*3)
	defer secondReadCancel()
	_ = kubemq.Read(secondReadCtx, func(ctx context.Context, req *bindings.ReadResponse) ([]byte, error) {
		dataReadCh <- req.Data
		return req.Data, nil
	})
	select {
	case <-secondReadCtx.Done():
		require.Fail(t, "timeout waiting for read response")
	case data := <-dataReadCh:
		require.Equal(t, invokeRequest.Data, data)
	}
}
// Test_kubeMQ_Invoke_Error verifies that publishing to a channel whose name
// is invalid ("***test***") surfaces an error from Invoke.
func Test_kubeMQ_Invoke_Error(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
	defer cancel()
	kubemq := NewKubeMQ(logger.NewLogger("test"))
	err := kubemq.Init(getDefaultMetadata("***test***"))
	require.NoError(t, err)
	invokeRequest := &bindings.InvokeRequest{
		Data:     []byte("test"),
		Metadata: map[string]string{},
	}
	_, err = kubemq.Invoke(ctx, invokeRequest)
	require.Error(t, err)
}

View File

@ -0,0 +1,404 @@
package kubemq
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/dapr/components-contrib/bindings"
"github.com/dapr/components-contrib/metadata"
)
// Test_createOptions covers metadata parsing: one fully specified valid
// configuration plus every rejection path — bad host, bad port, empty or
// malformed address, missing channel, and invalid optional bool/int
// properties (autoAcknowledged, pollMaxItems, pollTimeoutSeconds).
func Test_createOptions(t *testing.T) {
	tests := []struct {
		name    string
		meta    bindings.Metadata
		want    *options
		wantErr bool
	}{
		{
			name: "create valid opts",
			meta: bindings.Metadata{
				Base: metadata.Base{
					Name: "kubemq",
					Properties: map[string]string{
						"address":            "localhost:50000",
						"channel":            "test",
						"authToken":          "authToken",
						"pollMaxItems":       "10",
						"autoAcknowledged":   "true",
						"pollTimeoutSeconds": "10",
					},
				},
			},
			want: &options{
				host:               "localhost",
				port:               50000,
				authToken:          "authToken",
				channel:            "test",
				autoAcknowledged:   true,
				pollMaxItems:       10,
				pollTimeoutSeconds: 10,
			},
			wantErr: false,
		},
		{
			// ":50000" has an empty host component.
			name: "create invalid opts with bad host",
			meta: bindings.Metadata{
				Base: metadata.Base{
					Name: "kubemq",
					Properties: map[string]string{
						"address":  ":50000",
						"clientId": "clientId",
					},
				},
			},
			want:    nil,
			wantErr: true,
		},
		{
			// Port component is not numeric.
			name: "create invalid opts with bad port",
			meta: bindings.Metadata{
				Base: metadata.Base{
					Name: "kubemq",
					Properties: map[string]string{
						"address":  "localhost:badport",
						"clientId": "clientId",
					},
				},
			},
			want:    nil,
			wantErr: true,
		},
		{
			name: "create invalid opts with empty address",
			meta: bindings.Metadata{
				Base: metadata.Base{
					Name: "kubemq",
					Properties: map[string]string{
						"address":  "",
						"clientId": "clientId",
					},
				},
			},
			want:    nil,
			wantErr: true,
		},
		{
			// Missing the ":" separator entirely.
			name: "create invalid opts with bad address format",
			meta: bindings.Metadata{Base: metadata.Base{
				Name: "kubemq",
				Properties: map[string]string{
					"address": "localhost50000",
				},
			}},
			want:    nil,
			wantErr: true,
		},
		{
			name: "create invalid opts with no channel",
			meta: bindings.Metadata{Base: metadata.Base{
				Name: "kubemq",
				Properties: map[string]string{
					"address": "localhost:50000",
				},
			}},
			want:    nil,
			wantErr: true,
		},
		{
			name: "create invalid opts with bad autoAcknowledged",
			meta: bindings.Metadata{Base: metadata.Base{
				Name: "kubemq",
				Properties: map[string]string{
					"address":          "localhost:50000",
					"channel":          "test",
					"autoAcknowledged": "bad",
				},
			}},
			want:    nil,
			wantErr: true,
		},
		{
			// Value parses but is below the minimum of 1.
			name: "create invalid opts with invalid pollMaxItems",
			meta: bindings.Metadata{Base: metadata.Base{
				Name: "kubemq",
				Properties: map[string]string{
					"address":      "localhost:50000",
					"channel":      "test",
					"pollMaxItems": "0",
				},
			}},
			want:    nil,
			wantErr: true,
		},
		{
			name: "create invalid opts with bad pollMaxItems format",
			meta: bindings.Metadata{Base: metadata.Base{
				Name: "kubemq",
				Properties: map[string]string{
					"address":      "localhost:50000",
					"channel":      "test",
					"pollMaxItems": "bad",
				},
			}},
			want:    nil,
			wantErr: true,
		},
		{
			// Value parses but is below the minimum of 1.
			name: "create invalid opts with invalid pollTimeoutSeconds",
			meta: bindings.Metadata{Base: metadata.Base{
				Name: "kubemq",
				Properties: map[string]string{
					"address":            "localhost:50000",
					"channel":            "test",
					"pollTimeoutSeconds": "0",
				},
			}},
			want:    nil,
			wantErr: true,
		},
		{
			name: "create invalid opts with bad format pollTimeoutSeconds",
			meta: bindings.Metadata{Base: metadata.Base{
				Name: "kubemq",
				Properties: map[string]string{
					"address":            "localhost:50000",
					"channel":            "test",
					"pollTimeoutSeconds": "bad",
				},
			}},
			want:    nil,
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := createOptions(tt.meta)
			if tt.wantErr {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)
				assert.Equal(t, tt.want, got)
			}
		})
	}
}
// Test_parsePolicyDelaySeconds: nil, empty, malformed, and negative
// metadata values all yield 0; a valid non-negative integer is returned.
func Test_parsePolicyDelaySeconds(t *testing.T) {
	type args struct {
		md map[string]string
	}
	tests := []struct {
		name string
		args args
		want int
	}{
		{
			name: "parse policy delay seconds - nil",
			args: args{
				md: nil,
			},
			want: 0,
		},
		{
			name: "parse policy delay seconds - empty",
			args: args{
				md: map[string]string{},
			},
			want: 0,
		},
		{
			name: "parse policy delay seconds",
			args: args{
				md: map[string]string{
					"delaySeconds": "10",
				},
			},
			want: 10,
		},
		{
			name: "parse policy delay seconds with bad format",
			args: args{
				md: map[string]string{
					"delaySeconds": "bad",
				},
			},
			want: 0,
		},
		{
			name: "parse policy delay seconds with negative value",
			args: args{
				md: map[string]string{
					"delaySeconds": "-10",
				},
			},
			want: 0,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			assert.Equalf(t, tt.want, parsePolicyDelaySeconds(tt.args.md), "parsePolicyDelaySeconds(%v)", tt.args.md)
		})
	}
}
// Test_parsePolicyExpirationSeconds: nil, empty, malformed, and negative
// metadata values all yield 0; a valid non-negative integer is returned.
func Test_parsePolicyExpirationSeconds(t *testing.T) {
	type args struct {
		md map[string]string
	}
	tests := []struct {
		name string
		args args
		want int
	}{
		{
			name: "parse policy expiration seconds - nil",
			args: args{
				md: nil,
			},
			want: 0,
		},
		{
			name: "parse policy expiration seconds - empty",
			args: args{
				md: map[string]string{},
			},
			want: 0,
		},
		{
			name: "parse policy expiration seconds",
			args: args{
				md: map[string]string{
					"expirationSeconds": "10",
				},
			},
			want: 10,
		},
		{
			name: "parse policy expiration seconds with bad format",
			args: args{
				md: map[string]string{
					"expirationSeconds": "bad",
				},
			},
			want: 0,
		},
		{
			name: "parse policy expiration seconds with negative value",
			args: args{
				md: map[string]string{
					"expirationSeconds": "-10",
				},
			},
			want: 0,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			assert.Equalf(t, tt.want, parsePolicyExpirationSeconds(tt.args.md), "parsePolicyExpirationSeconds(%v)", tt.args.md)
		})
	}
}
// Test_parseSetPolicyMaxReceiveCount: nil, empty, malformed, and negative
// metadata values all yield 0; a valid non-negative integer is returned.
func Test_parseSetPolicyMaxReceiveCount(t *testing.T) {
	type args struct {
		md map[string]string
	}
	tests := []struct {
		name string
		args args
		want int
	}{
		{
			name: "parse policy max receive count nil",
			args: args{
				md: nil,
			},
			want: 0,
		},
		{
			name: "parse policy max receive count empty",
			args: args{
				md: map[string]string{},
			},
			want: 0,
		},
		{
			name: "parse policy max receive count",
			args: args{
				md: map[string]string{
					"maxReceiveCount": "10",
				},
			},
			want: 10,
		},
		{
			name: "parse policy max receive count with bad format",
			args: args{
				md: map[string]string{
					"maxReceiveCount": "bad",
				},
			},
			want: 0,
		},
		{
			name: "parse policy max receive count with negative value",
			args: args{
				md: map[string]string{
					"maxReceiveCount": "-10",
				},
			},
			want: 0,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			assert.Equalf(t, tt.want, parseSetPolicyMaxReceiveCount(tt.args.md), "parseSetPolicyMaxReceiveCount(%v)", tt.args.md)
		})
	}
}
// Test_parsePolicyMaxReceiveQueue: nil or empty metadata yields "";
// otherwise the "maxReceiveQueue" value is returned verbatim.
func Test_parsePolicyMaxReceiveQueue(t *testing.T) {
	type args struct {
		md map[string]string
	}
	tests := []struct {
		name string
		args args
		want string
	}{
		{
			name: "parse policy max receive queue nil",
			args: args{
				md: nil,
			},
			want: "",
		},
		{
			name: "parse policy max receive queue empty",
			args: args{
				md: map[string]string{},
			},
			want: "",
		},
		{
			name: "parse policy max receive queue",
			args: args{
				md: map[string]string{
					"maxReceiveQueue": "some-queue",
				},
			},
			want: "some-queue",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			assert.Equalf(t, tt.want, parsePolicyMaxReceiveQueue(tt.args.md), "parsePolicyMaxReceiveQueue(%v)", tt.args.md)
		})
	}
}

162
bindings/kubemq/options.go Normal file
View File

@ -0,0 +1,162 @@
package kubemq
import (
"fmt"
"strconv"
"strings"
"github.com/dapr/components-contrib/bindings"
)
// options holds the parsed KubeMQ binding configuration (see createOptions
// for defaults and validation).
type options struct {
	host               string // server host, from the "address" property
	port               int    // server port, from the "address" property
	channel            string // queue channel to publish/poll; required
	authToken          string // optional auth token
	autoAcknowledged   bool   // auto-ack polled messages; default false
	pollMaxItems       int    // max messages per poll; default 1
	pollTimeoutSeconds int    // poll wait timeout in seconds; default 3600
}
// parseAddress splits a "host:port" address into its parts, validating that
// exactly one colon is present, the host is non-empty, and the port is an
// integer. Errors identify which part of the address was rejected.
func parseAddress(address string) (string, int, error) {
	parts := strings.Split(address, ":")
	if len(parts) != 2 {
		return "", 0, fmt.Errorf("invalid kubemq address, address format is invalid")
	}
	host := parts[0]
	if host == "" {
		return "", 0, fmt.Errorf("invalid kubemq address, host is empty")
	}
	port, err := strconv.Atoi(parts[1])
	if err != nil {
		return "", 0, fmt.Errorf("invalid kubemq address, port is invalid")
	}
	return host, port, nil
}
// createOptions builds the component options from the binding metadata.
// "address" (host:port) and "channel" are required; authToken,
// autoAcknowledged, pollMaxItems (>= 1), and pollTimeoutSeconds (>= 1) are
// optional with the defaults shown in the initializer below.
func createOptions(md bindings.Metadata) (*options, error) {
	result := &options{
		host:               "",
		port:               0,
		channel:            "",
		authToken:          "",
		autoAcknowledged:   false,
		pollMaxItems:       1,
		pollTimeoutSeconds: 3600,
	}
	if val, found := md.Properties["address"]; found && val != "" {
		var err error
		result.host, result.port, err = parseAddress(val)
		if err != nil {
			return nil, err
		}
	} else {
		return nil, fmt.Errorf("invalid kubemq address, address is empty")
	}
	if val, ok := md.Properties["channel"]; ok && val != "" {
		result.channel = val
	} else {
		return nil, fmt.Errorf("invalid kubemq channel, channel is empty")
	}
	// authToken is optional. (The original re-tested found && val != ""
	// inside the branch; that redundant inner check is removed.)
	if val, found := md.Properties["authToken"]; found && val != "" {
		result.authToken = val
	}
	if val, found := md.Properties["autoAcknowledged"]; found && val != "" {
		autoAcknowledged, err := strconv.ParseBool(val)
		if err != nil {
			return nil, fmt.Errorf("invalid kubemq autoAcknowledged value, %s", err.Error())
		}
		result.autoAcknowledged = autoAcknowledged
	}
	if val, found := md.Properties["pollMaxItems"]; found && val != "" {
		pollMaxItems, err := strconv.Atoi(val)
		if err != nil {
			return nil, fmt.Errorf("invalid kubemq pollMaxItems value, %s", err.Error())
		}
		if pollMaxItems < 1 {
			return nil, fmt.Errorf("invalid kubemq pollMaxItems value, value must be greater than 0")
		}
		result.pollMaxItems = pollMaxItems
	}
	if val, found := md.Properties["pollTimeoutSeconds"]; found && val != "" {
		// Flattened from the original's error-else structure for
		// consistency with the pollMaxItems handling above.
		timeoutSeconds, err := strconv.Atoi(val)
		if err != nil {
			return nil, fmt.Errorf("invalid kubemq pollTimeoutSeconds value, %s", err.Error())
		}
		if timeoutSeconds < 1 {
			return nil, fmt.Errorf("invalid kubemq pollTimeoutSeconds value, value must be greater than 0")
		}
		result.pollTimeoutSeconds = timeoutSeconds
	}
	return result, nil
}
// parsePolicyDelaySeconds reads "delaySeconds" from the request metadata.
// Absent, empty, malformed, or negative values all yield 0.
func parsePolicyDelaySeconds(md map[string]string) int {
	val, ok := md["delaySeconds"] // reading from a nil map is safe in Go
	if !ok || val == "" {
		return 0
	}
	n, err := strconv.Atoi(val)
	if err != nil || n < 0 {
		return 0
	}
	return n
}
// parsePolicyExpirationSeconds reads "expirationSeconds" from the request
// metadata. Absent, empty, malformed, or negative values all yield 0.
func parsePolicyExpirationSeconds(md map[string]string) int {
	val, ok := md["expirationSeconds"] // reading from a nil map is safe in Go
	if !ok || val == "" {
		return 0
	}
	n, err := strconv.Atoi(val)
	if err != nil || n < 0 {
		return 0
	}
	return n
}
// parseSetPolicyMaxReceiveCount reads "maxReceiveCount" from the request
// metadata. Absent, empty, malformed, or negative values all yield 0.
func parseSetPolicyMaxReceiveCount(md map[string]string) int {
	val, ok := md["maxReceiveCount"] // reading from a nil map is safe in Go
	if !ok || val == "" {
		return 0
	}
	n, err := strconv.Atoi(val)
	if err != nil || n < 0 {
		return 0
	}
	return n
}
// parsePolicyMaxReceiveQueue returns the "maxReceiveQueue" metadata value.
// A nil map, a missing key, and an empty value all yield "" — which is
// exactly what a direct lookup on a (possibly nil) map produces in Go.
func parsePolicyMaxReceiveQueue(md map[string]string) string {
	return md["maxReceiveQueue"]
}

View File

@ -20,6 +20,7 @@ import (
"database/sql" "database/sql"
"database/sql/driver" "database/sql/driver"
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"os" "os"
"reflect" "reflect"
@ -27,7 +28,6 @@ import (
"time" "time"
"github.com/go-sql-driver/mysql" "github.com/go-sql-driver/mysql"
"github.com/pkg/errors"
"github.com/dapr/components-contrib/bindings" "github.com/dapr/components-contrib/bindings"
"github.com/dapr/kit/logger" "github.com/dapr/kit/logger"
@ -117,7 +117,7 @@ func (m *Mysql) Init(metadata bindings.Metadata) error {
err = db.Ping() err = db.Ping()
if err != nil { if err != nil {
return errors.Wrap(err, "unable to ping the DB") return fmt.Errorf("unable to ping the DB: %w", err)
} }
m.db = db m.db = db
@ -128,7 +128,7 @@ func (m *Mysql) Init(metadata bindings.Metadata) error {
// Invoke handles all invoke operations. // Invoke handles all invoke operations.
func (m *Mysql) Invoke(ctx context.Context, req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) { func (m *Mysql) Invoke(ctx context.Context, req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
if req == nil { if req == nil {
return nil, errors.Errorf("invoke request required") return nil, errors.New("invoke request required")
} }
if req.Operation == closeOperation { if req.Operation == closeOperation {
@ -136,13 +136,13 @@ func (m *Mysql) Invoke(ctx context.Context, req *bindings.InvokeRequest) (*bindi
} }
if req.Metadata == nil { if req.Metadata == nil {
return nil, errors.Errorf("metadata required") return nil, errors.New("metadata required")
} }
m.logger.Debugf("operation: %v", req.Operation) m.logger.Debugf("operation: %v", req.Operation)
s, ok := req.Metadata[commandSQLKey] s, ok := req.Metadata[commandSQLKey]
if !ok || s == "" { if !ok || s == "" {
return nil, errors.Errorf("required metadata not set: %s", commandSQLKey) return nil, fmt.Errorf("required metadata not set: %s", commandSQLKey)
} }
startTime := time.Now() startTime := time.Now()
@ -171,7 +171,7 @@ func (m *Mysql) Invoke(ctx context.Context, req *bindings.InvokeRequest) (*bindi
resp.Data = d resp.Data = d
default: default:
return nil, errors.Errorf("invalid operation type: %s. Expected %s, %s, or %s", return nil, fmt.Errorf("invalid operation type: %s. Expected %s, %s, or %s",
req.Operation, execOperation, queryOperation, closeOperation) req.Operation, execOperation, queryOperation, closeOperation)
} }
@ -201,11 +201,9 @@ func (m *Mysql) Close() error {
} }
func (m *Mysql) query(ctx context.Context, sql string) ([]byte, error) { func (m *Mysql) query(ctx context.Context, sql string) ([]byte, error) {
m.logger.Debugf("query: %s", sql)
rows, err := m.db.QueryContext(ctx, sql) rows, err := m.db.QueryContext(ctx, sql)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "error executing %s", sql) return nil, fmt.Errorf("error executing query: %w", err)
} }
defer func() { defer func() {
@ -215,7 +213,7 @@ func (m *Mysql) query(ctx context.Context, sql string) ([]byte, error) {
result, err := m.jsonify(rows) result, err := m.jsonify(rows)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "error marshalling query result for %s", sql) return nil, fmt.Errorf("error marshalling query result for query: %w", err)
} }
return result, nil return result, nil
@ -226,7 +224,7 @@ func (m *Mysql) exec(ctx context.Context, sql string) (int64, error) {
res, err := m.db.ExecContext(ctx, sql) res, err := m.db.ExecContext(ctx, sql)
if err != nil { if err != nil {
return 0, errors.Wrapf(err, "error executing %s", sql) return 0, fmt.Errorf("error executing query: %w", err)
} }
return res.RowsAffected() return res.RowsAffected()
@ -237,7 +235,7 @@ func propertyToInt(props map[string]string, key string, setter func(int)) error
if i, err := strconv.Atoi(v); err == nil { if i, err := strconv.Atoi(v); err == nil {
setter(i) setter(i)
} else { } else {
return errors.Wrapf(err, "error converitng %s:%s to int", key, v) return fmt.Errorf("error converting %s:%s to int: %w", key, v, err)
} }
} }
@ -249,7 +247,7 @@ func propertyToDuration(props map[string]string, key string, setter func(time.Du
if d, err := time.ParseDuration(v); err == nil { if d, err := time.ParseDuration(v); err == nil {
setter(d) setter(d)
} else { } else {
return errors.Wrapf(err, "error converitng %s:%s to time duration", key, v) return fmt.Errorf("error converting %s:%s to duration: %w", key, v, err)
} }
} }
@ -258,14 +256,14 @@ func propertyToDuration(props map[string]string, key string, setter func(time.Du
func initDB(url, pemPath string) (*sql.DB, error) { func initDB(url, pemPath string) (*sql.DB, error) {
if _, err := mysql.ParseDSN(url); err != nil { if _, err := mysql.ParseDSN(url); err != nil {
return nil, errors.Wrapf(err, "illegal Data Source Name (DNS) specified by %s", connectionURLKey) return nil, fmt.Errorf("illegal Data Source Name (DSN) specified by %s", connectionURLKey)
} }
if pemPath != "" { if pemPath != "" {
rootCertPool := x509.NewCertPool() rootCertPool := x509.NewCertPool()
pem, err := os.ReadFile(pemPath) pem, err := os.ReadFile(pemPath)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "Error reading PEM file from %s", pemPath) return nil, fmt.Errorf("error reading PEM file from %s: %w", pemPath, err)
} }
ok := rootCertPool.AppendCertsFromPEM(pem) ok := rootCertPool.AppendCertsFromPEM(pem)
@ -275,13 +273,13 @@ func initDB(url, pemPath string) (*sql.DB, error) {
err = mysql.RegisterTLSConfig("custom", &tls.Config{RootCAs: rootCertPool, MinVersion: tls.VersionTLS12}) err = mysql.RegisterTLSConfig("custom", &tls.Config{RootCAs: rootCertPool, MinVersion: tls.VersionTLS12})
if err != nil { if err != nil {
return nil, errors.Wrap(err, "Error register TLS config") return nil, fmt.Errorf("error register TLS config: %w", err)
} }
} }
db, err := sql.Open("mysql", url) db, err := sql.Open("mysql", url)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "error opening DB connection") return nil, fmt.Errorf("error opening DB connection: %w", err)
} }
return db, nil return db, nil

View File

@ -130,6 +130,7 @@ func TestPublishingWithTTL(t *testing.T) {
const maxGetDuration = ttlInSeconds * time.Second const maxGetDuration = ttlInSeconds * time.Second
metadata := bindings.Metadata{ metadata := bindings.Metadata{
Base: contribMetadata.Base{
Name: "testQueue", Name: "testQueue",
Properties: map[string]string{ Properties: map[string]string{
"queueName": queueName, "queueName": queueName,
@ -137,6 +138,7 @@ func TestPublishingWithTTL(t *testing.T) {
"deleteWhenUnused": strconv.FormatBool(exclusive), "deleteWhenUnused": strconv.FormatBool(exclusive),
"durable": strconv.FormatBool(durable), "durable": strconv.FormatBool(durable),
}, },
},
} }
logger := logger.NewLogger("test") logger := logger.NewLogger("test")
@ -162,7 +164,7 @@ func TestPublishingWithTTL(t *testing.T) {
}, },
} }
_, err = rabbitMQBinding1.Invoke(context.Backgound(), &writeRequest) _, err = rabbitMQBinding1.Invoke(context.Background(), &writeRequest)
assert.Nil(t, err) assert.Nil(t, err)
time.Sleep(time.Second + (ttlInSeconds * time.Second)) time.Sleep(time.Second + (ttlInSeconds * time.Second))
@ -183,7 +185,7 @@ func TestPublishingWithTTL(t *testing.T) {
contribMetadata.TTLMetadataKey: strconv.Itoa(ttlInSeconds * 1000), contribMetadata.TTLMetadataKey: strconv.Itoa(ttlInSeconds * 1000),
}, },
} }
_, err = rabbitMQBinding2.Invoke(context.Backgound(), &writeRequest) _, err = rabbitMQBinding2.Invoke(context.Background(), &writeRequest)
assert.Nil(t, err) assert.Nil(t, err)
msg, ok, err := getMessageWithRetries(ch, queueName, maxGetDuration) msg, ok, err := getMessageWithRetries(ch, queueName, maxGetDuration)
@ -204,6 +206,7 @@ func TestExclusiveQueue(t *testing.T) {
const maxGetDuration = ttlInSeconds * time.Second const maxGetDuration = ttlInSeconds * time.Second
metadata := bindings.Metadata{ metadata := bindings.Metadata{
Base: contribMetadata.Base{
Name: "testQueue", Name: "testQueue",
Properties: map[string]string{ Properties: map[string]string{
"queueName": queueName, "queueName": queueName,
@ -213,6 +216,7 @@ func TestExclusiveQueue(t *testing.T) {
"exclusive": strconv.FormatBool(exclusive), "exclusive": strconv.FormatBool(exclusive),
contribMetadata.TTLMetadataKey: strconv.FormatInt(ttlInSeconds, 10), contribMetadata.TTLMetadataKey: strconv.FormatInt(ttlInSeconds, 10),
}, },
},
} }
logger := logger.NewLogger("test") logger := logger.NewLogger("test")
@ -257,6 +261,7 @@ func TestPublishWithPriority(t *testing.T) {
const maxPriority = 10 const maxPriority = 10
metadata := bindings.Metadata{ metadata := bindings.Metadata{
Base: contribMetadata.Base{
Name: "testQueue", Name: "testQueue",
Properties: map[string]string{ Properties: map[string]string{
"queueName": queueName, "queueName": queueName,
@ -265,6 +270,7 @@ func TestPublishWithPriority(t *testing.T) {
"durable": strconv.FormatBool(durable), "durable": strconv.FormatBool(durable),
"maxPriority": strconv.FormatInt(maxPriority, 10), "maxPriority": strconv.FormatInt(maxPriority, 10),
}, },
},
} }
logger := logger.NewLogger("test") logger := logger.NewLogger("test")
@ -283,7 +289,7 @@ func TestPublishWithPriority(t *testing.T) {
defer ch.Close() defer ch.Close()
const middlePriorityMsgContent = "middle" const middlePriorityMsgContent = "middle"
_, err = r.Invoke(context.Backgound(), &bindings.InvokeRequest{ _, err = r.Invoke(context.Background(), &bindings.InvokeRequest{
Metadata: map[string]string{ Metadata: map[string]string{
contribMetadata.PriorityMetadataKey: "5", contribMetadata.PriorityMetadataKey: "5",
}, },
@ -292,7 +298,7 @@ func TestPublishWithPriority(t *testing.T) {
assert.Nil(t, err) assert.Nil(t, err)
const lowPriorityMsgContent = "low" const lowPriorityMsgContent = "low"
_, err = r.Invoke(context.Backgound(), &bindings.InvokeRequest{ _, err = r.Invoke(context.Background(), &bindings.InvokeRequest{
Metadata: map[string]string{ Metadata: map[string]string{
contribMetadata.PriorityMetadataKey: "1", contribMetadata.PriorityMetadataKey: "1",
}, },
@ -301,7 +307,7 @@ func TestPublishWithPriority(t *testing.T) {
assert.Nil(t, err) assert.Nil(t, err)
const highPriorityMsgContent = "high" const highPriorityMsgContent = "high"
_, err = r.Invoke(context.Backgound(), &bindings.InvokeRequest{ _, err = r.Invoke(context.Background(), &bindings.InvokeRequest{
Metadata: map[string]string{ Metadata: map[string]string{
contribMetadata.PriorityMetadataKey: "10", contribMetadata.PriorityMetadataKey: "10",
}, },

View File

@ -15,10 +15,10 @@ package redis
import ( import (
"context" "context"
"errors"
"fmt" "fmt"
"github.com/go-redis/redis/v8" "github.com/go-redis/redis/v8"
"github.com/pkg/errors"
"github.com/dapr/components-contrib/bindings" "github.com/dapr/components-contrib/bindings"
rediscomponent "github.com/dapr/components-contrib/internal/component/redis" rediscomponent "github.com/dapr/components-contrib/internal/component/redis"
@ -66,21 +66,40 @@ func (r *Redis) Ping() error {
} }
func (r *Redis) Operations() []bindings.OperationKind { func (r *Redis) Operations() []bindings.OperationKind {
return []bindings.OperationKind{bindings.CreateOperation} return []bindings.OperationKind{
bindings.CreateOperation,
bindings.DeleteOperation,
bindings.GetOperation,
}
} }
func (r *Redis) Invoke(ctx context.Context, req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) { func (r *Redis) Invoke(ctx context.Context, req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
if val, ok := req.Metadata["key"]; ok && val != "" { if key, ok := req.Metadata["key"]; ok && key != "" {
key := val switch req.Operation {
case bindings.DeleteOperation:
err := r.client.Del(ctx, key).Err()
if err != nil {
return nil, err
}
case bindings.GetOperation:
data, err := r.client.Get(ctx, key).Result()
if err != nil {
return nil, err
}
rep := &bindings.InvokeResponse{}
rep.Data = []byte(data)
return rep, nil
case bindings.CreateOperation:
_, err := r.client.Do(ctx, "SET", key, req.Data).Result() _, err := r.client.Do(ctx, "SET", key, req.Data).Result()
if err != nil { if err != nil {
return nil, err return nil, err
} }
default:
return nil, fmt.Errorf("invalid operation type: %s", req.Operation)
}
return nil, nil return nil, nil
} }
return nil, errors.New("redis binding: missing key in request metadata")
return nil, errors.New("redis binding: missing key on write request metadata")
} }
func (r *Redis) Close() error { func (r *Redis) Close() error {

View File

@ -30,7 +30,7 @@ const (
testKey = "test" testKey = "test"
) )
func TestInvoke(t *testing.T) { func TestInvokeCreate(t *testing.T) {
s, c := setupMiniredis() s, c := setupMiniredis()
defer s.Close() defer s.Close()
@ -46,6 +46,7 @@ func TestInvoke(t *testing.T) {
bindingRes, err := bind.Invoke(context.TODO(), &bindings.InvokeRequest{ bindingRes, err := bind.Invoke(context.TODO(), &bindings.InvokeRequest{
Data: []byte(testData), Data: []byte(testData),
Metadata: map[string]string{"key": testKey}, Metadata: map[string]string{"key": testKey},
Operation: bindings.CreateOperation,
}) })
assert.Equal(t, nil, err) assert.Equal(t, nil, err)
assert.Equal(t, true, bindingRes == nil) assert.Equal(t, true, bindingRes == nil)
@ -55,6 +56,56 @@ func TestInvoke(t *testing.T) {
assert.Equal(t, true, getRes == testData) assert.Equal(t, true, getRes == testData)
} }
func TestInvokeGet(t *testing.T) {
s, c := setupMiniredis()
defer s.Close()
bind := &Redis{
client: c,
logger: logger.NewLogger("test"),
}
bind.ctx, bind.cancel = context.WithCancel(context.Background())
_, err := c.Do(context.Background(), "SET", testKey, testData).Result()
assert.Equal(t, nil, err)
bindingRes, err := bind.Invoke(context.TODO(), &bindings.InvokeRequest{
Metadata: map[string]string{"key": testKey},
Operation: bindings.GetOperation,
})
assert.Equal(t, nil, err)
assert.Equal(t, true, string(bindingRes.Data) == testData)
}
func TestInvokeDelete(t *testing.T) {
s, c := setupMiniredis()
defer s.Close()
bind := &Redis{
client: c,
logger: logger.NewLogger("test"),
}
bind.ctx, bind.cancel = context.WithCancel(context.Background())
_, err := c.Do(context.Background(), "SET", testKey, testData).Result()
assert.Equal(t, nil, err)
getRes, err := c.Do(context.Background(), "GET", testKey).Result()
assert.Equal(t, nil, err)
assert.Equal(t, true, getRes == testData)
_, err = bind.Invoke(context.TODO(), &bindings.InvokeRequest{
Metadata: map[string]string{"key": testKey},
Operation: bindings.DeleteOperation,
})
assert.Equal(t, nil, err)
rgetRep, err := c.Do(context.Background(), "GET", testKey).Result()
assert.Equal(t, redis.Nil, err)
assert.Equal(t, nil, rgetRep)
}
func setupMiniredis() (*miniredis.Miniredis, *redis.Client) { func setupMiniredis() (*miniredis.Miniredis, *redis.Client) {
s, err := miniredis.Run() s, err := miniredis.Run()
if err != nil { if err != nil {

View File

@ -28,7 +28,7 @@ import (
"github.com/dapr/kit/logger" "github.com/dapr/kit/logger"
) )
// Binding represents RethinkDB change change state input binding which fires handler with // Binding represents RethinkDB change state input binding which fires handler with
// both the previous and current state store content each time there is a change. // both the previous and current state store content each time there is a change.
type Binding struct { type Binding struct {
logger logger.Logger logger logger.Logger

View File

@ -15,6 +15,7 @@ package appconfig
import ( import (
"context" "context"
"errors"
"fmt" "fmt"
"strconv" "strconv"
"sync" "sync"
@ -189,24 +190,19 @@ func parseMetadata(meta configuration.Metadata) (metadata, error) {
} }
func (r *ConfigurationStore) Get(ctx context.Context, req *configuration.GetRequest) (*configuration.GetResponse, error) { func (r *ConfigurationStore) Get(ctx context.Context, req *configuration.GetRequest) (*configuration.GetResponse, error) {
timeoutContext, cancel := context.WithTimeout(ctx, r.metadata.requestTimeout)
defer cancel()
keys := req.Keys keys := req.Keys
var items map[string]*configuration.Item var items map[string]*configuration.Item
if len(keys) == 0 { if len(keys) == 0 {
var err error var err error
if items, err = r.getAll(timeoutContext, req); err != nil { if items, err = r.getAll(ctx, req); err != nil {
return &configuration.GetResponse{}, err return &configuration.GetResponse{}, err
} }
} else { } else {
items = make(map[string]*configuration.Item, len(keys)) items = make(map[string]*configuration.Item, len(keys))
for _, key := range keys { for _, key := range keys {
// TODO: here contxt.TODO() is used because the SDK panics when a cancelled context is passed in GetSetting resp, err := r.getSettings(
// Issue - https://github.com/Azure/azure-sdk-for-go/issues/19223 . Needs to be modified to use timeoutContext once the SDK is fixed ctx,
resp, err := r.client.GetSetting(
context.TODO(),
key, key,
&azappconfig.GetSettingOptions{ &azappconfig.GetSettingOptions{
Label: r.getLabelFromMetadata(req.Metadata), Label: r.getLabelFromMetadata(req.Metadata),
@ -248,10 +244,10 @@ func (r *ConfigurationStore) getAll(ctx context.Context, req *configuration.GetR
}, },
nil) nil)
// TODO: here contxt.TODO() is used because the SDK panics when a cancelled context is passed in NextPage
// Issue - https://github.com/Azure/azure-sdk-for-go/issues/19223 . It needs to be modified to use ctx once the SDK is fixed
for allSettingsPgr.More() { for allSettingsPgr.More() {
if revResp, err := allSettingsPgr.NextPage(context.TODO()); err == nil { timeoutContext, cancel := context.WithTimeout(ctx, r.metadata.requestTimeout)
defer cancel()
if revResp, err := allSettingsPgr.NextPage(timeoutContext); err == nil {
for _, setting := range revResp.Settings { for _, setting := range revResp.Settings {
item := &configuration.Item{ item := &configuration.Item{
Metadata: map[string]string{}, Metadata: map[string]string{},
@ -295,20 +291,33 @@ func (r *ConfigurationStore) Subscribe(ctx context.Context, req *configuration.S
} }
func (r *ConfigurationStore) doSubscribe(ctx context.Context, req *configuration.SubscribeRequest, handler configuration.UpdateHandler, sentinelKey string, id string) { func (r *ConfigurationStore) doSubscribe(ctx context.Context, req *configuration.SubscribeRequest, handler configuration.UpdateHandler, sentinelKey string, id string) {
var etagVal *azcore.ETag
for { for {
// get sentinel key changes // get sentinel key changes.
_, err := r.Get(ctx, &configuration.GetRequest{ resp, err := r.getSettings(
Keys: []string{sentinelKey}, ctx,
Metadata: req.Metadata, sentinelKey,
}) &azappconfig.GetSettingOptions{
Label: r.getLabelFromMetadata(req.Metadata),
OnlyIfChanged: etagVal,
},
)
if err != nil { if err != nil {
r.logger.Debugf("azure appconfig error: fail to get sentinel key changes or sentinel key's value is unchanged: %s", err) if errors.Is(err, context.Canceled) {
return
}
r.logger.Debugf("azure appconfig error: fail to get sentinel key or sentinel's key %s value is unchanged: %s", sentinelKey, err)
} else { } else {
// if sentinel key has changed then update the Etag value.
etagVal = resp.ETag
items, err := r.Get(ctx, &configuration.GetRequest{ items, err := r.Get(ctx, &configuration.GetRequest{
Keys: req.Keys, Keys: req.Keys,
Metadata: req.Metadata, Metadata: req.Metadata,
}) })
if err != nil { if err != nil {
if errors.Is(err, context.Canceled) {
return
}
r.logger.Errorf("azure appconfig error: fail to get configuration key changes: %s", err) r.logger.Errorf("azure appconfig error: fail to get configuration key changes: %s", err)
} else { } else {
r.handleSubscribedChange(ctx, handler, items, id) r.handleSubscribedChange(ctx, handler, items, id)
@ -322,6 +331,13 @@ func (r *ConfigurationStore) doSubscribe(ctx context.Context, req *configuration
} }
} }
func (r *ConfigurationStore) getSettings(ctx context.Context, key string, getSettingsOptions *azappconfig.GetSettingOptions) (azappconfig.GetSettingResponse, error) {
timeoutContext, cancel := context.WithTimeout(ctx, r.metadata.requestTimeout)
defer cancel()
resp, err := r.client.GetSetting(timeoutContext, key, getSettingsOptions)
return resp, err
}
func (r *ConfigurationStore) handleSubscribedChange(ctx context.Context, handler configuration.UpdateHandler, items *configuration.GetResponse, id string) { func (r *ConfigurationStore) handleSubscribedChange(ctx context.Context, handler configuration.UpdateHandler, items *configuration.GetResponse, id string) {
e := &configuration.UpdateEvent{ e := &configuration.UpdateEvent{
Items: items.Items, Items: items.Items,

View File

@ -16,11 +16,11 @@ package redis
import "time" import "time"
type metadata struct { type metadata struct {
host string Host string
password string Password string
sentinelMasterName string SentinelMasterName string
maxRetries int MaxRetries int
maxRetryBackoff time.Duration MaxRetryBackoff time.Duration
enableTLS bool EnableTLS bool
failover bool Failover bool
} }

View File

@ -78,40 +78,40 @@ func parseRedisMetadata(meta configuration.Metadata) (metadata, error) {
m := metadata{} m := metadata{}
if val, ok := meta.Properties[host]; ok && val != "" { if val, ok := meta.Properties[host]; ok && val != "" {
m.host = val m.Host = val
} else { } else {
return m, errors.New("redis store error: missing host address") return m, errors.New("redis store error: missing host address")
} }
if val, ok := meta.Properties[password]; ok && val != "" { if val, ok := meta.Properties[password]; ok && val != "" {
m.password = val m.Password = val
} }
m.enableTLS = defaultEnableTLS m.EnableTLS = defaultEnableTLS
if val, ok := meta.Properties[enableTLS]; ok && val != "" { if val, ok := meta.Properties[enableTLS]; ok && val != "" {
tls, err := strconv.ParseBool(val) tls, err := strconv.ParseBool(val)
if err != nil { if err != nil {
return m, fmt.Errorf("redis store error: can't parse enableTLS field: %s", err) return m, fmt.Errorf("redis store error: can't parse enableTLS field: %s", err)
} }
m.enableTLS = tls m.EnableTLS = tls
} }
m.maxRetries = defaultMaxRetries m.MaxRetries = defaultMaxRetries
if val, ok := meta.Properties[maxRetries]; ok && val != "" { if val, ok := meta.Properties[maxRetries]; ok && val != "" {
parsedVal, err := strconv.ParseInt(val, defaultBase, defaultBitSize) parsedVal, err := strconv.ParseInt(val, defaultBase, defaultBitSize)
if err != nil { if err != nil {
return m, fmt.Errorf("redis store error: can't parse maxRetries field: %s", err) return m, fmt.Errorf("redis store error: can't parse maxRetries field: %s", err)
} }
m.maxRetries = int(parsedVal) m.MaxRetries = int(parsedVal)
} }
m.maxRetryBackoff = defaultMaxRetryBackoff m.MaxRetryBackoff = defaultMaxRetryBackoff
if val, ok := meta.Properties[maxRetryBackoff]; ok && val != "" { if val, ok := meta.Properties[maxRetryBackoff]; ok && val != "" {
parsedVal, err := strconv.ParseInt(val, defaultBase, defaultBitSize) parsedVal, err := strconv.ParseInt(val, defaultBase, defaultBitSize)
if err != nil { if err != nil {
return m, fmt.Errorf("redis store error: can't parse maxRetryBackoff field: %s", err) return m, fmt.Errorf("redis store error: can't parse maxRetryBackoff field: %s", err)
} }
m.maxRetryBackoff = time.Duration(parsedVal) m.MaxRetryBackoff = time.Duration(parsedVal)
} }
if val, ok := meta.Properties[failover]; ok && val != "" { if val, ok := meta.Properties[failover]; ok && val != "" {
@ -119,13 +119,13 @@ func parseRedisMetadata(meta configuration.Metadata) (metadata, error) {
if err != nil { if err != nil {
return m, fmt.Errorf("redis store error: can't parse failover field: %s", err) return m, fmt.Errorf("redis store error: can't parse failover field: %s", err)
} }
m.failover = failover m.Failover = failover
} }
// set the sentinelMasterName only with failover == true. // set the sentinelMasterName only with failover == true.
if m.failover { if m.Failover {
if val, ok := meta.Properties[sentinelMasterName]; ok && val != "" { if val, ok := meta.Properties[sentinelMasterName]; ok && val != "" {
m.sentinelMasterName = val m.SentinelMasterName = val
} else { } else {
return m, errors.New("redis store error: missing sentinelMasterName") return m, errors.New("redis store error: missing sentinelMasterName")
} }
@ -142,14 +142,14 @@ func (r *ConfigurationStore) Init(metadata configuration.Metadata) error {
} }
r.metadata = m r.metadata = m
if r.metadata.failover { if r.metadata.Failover {
r.client = r.newFailoverClient(m) r.client = r.newFailoverClient(m)
} else { } else {
r.client = r.newClient(m) r.client = r.newClient(m)
} }
if _, err = r.client.Ping(context.TODO()).Result(); err != nil { if _, err = r.client.Ping(context.TODO()).Result(); err != nil {
return fmt.Errorf("redis store: error connecting to redis at %s: %s", m.host, err) return fmt.Errorf("redis store: error connecting to redis at %s: %s", m.Host, err)
} }
r.replicas, err = r.getConnectedSlaves() r.replicas, err = r.getConnectedSlaves()
@ -159,18 +159,18 @@ func (r *ConfigurationStore) Init(metadata configuration.Metadata) error {
func (r *ConfigurationStore) newClient(m metadata) *redis.Client { func (r *ConfigurationStore) newClient(m metadata) *redis.Client {
opts := &redis.Options{ opts := &redis.Options{
Addr: m.host, Addr: m.Host,
Password: m.password, Password: m.Password,
DB: defaultDB, DB: defaultDB,
MaxRetries: m.maxRetries, MaxRetries: m.MaxRetries,
MaxRetryBackoff: m.maxRetryBackoff, MaxRetryBackoff: m.MaxRetryBackoff,
} }
// tell the linter to skip a check here. // tell the linter to skip a check here.
/* #nosec */ /* #nosec */
if m.enableTLS { if m.EnableTLS {
opts.TLSConfig = &tls.Config{ opts.TLSConfig = &tls.Config{
InsecureSkipVerify: m.enableTLS, InsecureSkipVerify: m.EnableTLS,
} }
} }
@ -179,17 +179,17 @@ func (r *ConfigurationStore) newClient(m metadata) *redis.Client {
func (r *ConfigurationStore) newFailoverClient(m metadata) *redis.Client { func (r *ConfigurationStore) newFailoverClient(m metadata) *redis.Client {
opts := &redis.FailoverOptions{ opts := &redis.FailoverOptions{
MasterName: r.metadata.sentinelMasterName, MasterName: r.metadata.SentinelMasterName,
SentinelAddrs: []string{r.metadata.host}, SentinelAddrs: []string{r.metadata.Host},
DB: defaultDB, DB: defaultDB,
MaxRetries: m.maxRetries, MaxRetries: m.MaxRetries,
MaxRetryBackoff: m.maxRetryBackoff, MaxRetryBackoff: m.MaxRetryBackoff,
} }
/* #nosec */ /* #nosec */
if m.enableTLS { if m.EnableTLS {
opts.TLSConfig = &tls.Config{ opts.TLSConfig = &tls.Config{
InsecureSkipVerify: m.enableTLS, InsecureSkipVerify: m.EnableTLS,
} }
} }

View File

@ -264,13 +264,13 @@ func Test_parseRedisMetadata(t *testing.T) {
}}, }},
}, },
want: metadata{ want: metadata{
host: "testHost", Host: "testHost",
password: "testPassword", Password: "testPassword",
enableTLS: true, EnableTLS: true,
maxRetries: 10, MaxRetries: 10,
maxRetryBackoff: time.Second, MaxRetryBackoff: time.Second,
failover: true, Failover: true,
sentinelMasterName: "tesSentinelMasterName", SentinelMasterName: "tesSentinelMasterName",
}, },
}, },
} }

37
go.mod
View File

@ -13,11 +13,12 @@ require (
github.com/Azure/azure-sdk-for-go v65.0.0+incompatible github.com/Azure/azure-sdk-for-go v65.0.0+incompatible
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.4 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.4
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0
github.com/Azure/azure-sdk-for-go/sdk/data/azappconfig v0.4.3 github.com/Azure/azure-sdk-for-go/sdk/data/azappconfig v0.5.0
github.com/Azure/azure-sdk-for-go/sdk/data/azcosmos v0.3.2 github.com/Azure/azure-sdk-for-go/sdk/data/azcosmos v0.3.2
github.com/Azure/azure-sdk-for-go/sdk/data/aztables v1.0.1 github.com/Azure/azure-sdk-for-go/sdk/data/aztables v1.0.1
github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets v0.10.1 github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets v0.10.1
github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.1.1 github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.1.1
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.1
github.com/Azure/azure-storage-blob-go v0.10.0 github.com/Azure/azure-storage-blob-go v0.10.0
github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd
github.com/Azure/go-amqp v0.17.5 github.com/Azure/go-amqp v0.17.5
@ -28,7 +29,6 @@ require (
github.com/DATA-DOG/go-sqlmock v1.5.0 github.com/DATA-DOG/go-sqlmock v1.5.0
github.com/Shopify/sarama v1.37.2 github.com/Shopify/sarama v1.37.2
github.com/aerospike/aerospike-client-go v4.5.2+incompatible github.com/aerospike/aerospike-client-go v4.5.2+incompatible
github.com/agrea/ptr v0.0.0-20180711073057-77a518d99b7b
github.com/alibaba/sentinel-golang v1.0.4 github.com/alibaba/sentinel-golang v1.0.4
github.com/alibabacloud-go/darabonba-openapi v0.2.1 github.com/alibabacloud-go/darabonba-openapi v0.2.1
github.com/alibabacloud-go/oos-20190601 v1.0.4 github.com/alibabacloud-go/oos-20190601 v1.0.4
@ -40,16 +40,17 @@ require (
github.com/aliyun/aliyun-tablestore-go-sdk v1.7.7 github.com/aliyun/aliyun-tablestore-go-sdk v1.7.7
github.com/apache/dubbo-go-hessian2 v1.11.3 github.com/apache/dubbo-go-hessian2 v1.11.3
github.com/apache/pulsar-client-go v0.9.0 github.com/apache/pulsar-client-go v0.9.0
github.com/apache/rocketmq-client-go/v2 v2.1.1-rc2 github.com/apache/rocketmq-client-go/v2 v2.1.0
github.com/aws/aws-sdk-go v1.44.128 github.com/aws/aws-sdk-go v1.44.128
github.com/benbjohnson/clock v1.3.0
github.com/bradfitz/gomemcache v0.0.0-20221031212613-62deef7fc822 github.com/bradfitz/gomemcache v0.0.0-20221031212613-62deef7fc822
github.com/camunda/zeebe/clients/go/v8 v8.1.3 github.com/camunda/zeebe/clients/go/v8 v8.1.3
github.com/cenkalti/backoff/v4 v4.1.3 github.com/cenkalti/backoff/v4 v4.2.0
github.com/cinience/go_rocketmq v0.0.2 github.com/cinience/go_rocketmq v0.0.2
github.com/coreos/go-oidc v2.2.1+incompatible github.com/coreos/go-oidc v2.2.1+incompatible
github.com/cyphar/filepath-securejoin v0.2.3 github.com/cyphar/filepath-securejoin v0.2.3
github.com/dancannon/gorethink v4.0.0+incompatible github.com/dancannon/gorethink v4.0.0+incompatible
github.com/dapr/kit v0.0.2 github.com/dapr/kit v0.0.3
github.com/denisenkom/go-mssqldb v0.12.3 github.com/denisenkom/go-mssqldb v0.12.3
github.com/dghubble/go-twitter v0.0.0-20221024160433-0cc1e72ed6d8 github.com/dghubble/go-twitter v0.0.0-20221024160433-0cc1e72ed6d8
github.com/dghubble/oauth1 v0.7.1 github.com/dghubble/oauth1 v0.7.1
@ -70,11 +71,13 @@ require (
github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/go-multierror v1.1.1
github.com/hashicorp/golang-lru v0.5.4 github.com/hashicorp/golang-lru v0.5.4
github.com/hazelcast/hazelcast-go-client v0.0.0-20190530123621-6cf767c2f31a github.com/hazelcast/hazelcast-go-client v0.0.0-20190530123621-6cf767c2f31a
github.com/http-wasm/http-wasm-host-go v0.2.0
github.com/huaweicloud/huaweicloud-sdk-go-obs v3.21.12+incompatible github.com/huaweicloud/huaweicloud-sdk-go-obs v3.21.12+incompatible
github.com/huaweicloud/huaweicloud-sdk-go-v3 v0.1.6 github.com/huaweicloud/huaweicloud-sdk-go-v3 v0.1.6
github.com/influxdata/influxdb-client-go v1.4.0 github.com/influxdata/influxdb-client-go v1.4.0
github.com/jackc/pgx/v5 v5.0.4 github.com/jackc/pgx/v5 v5.0.4
github.com/json-iterator/go v1.1.12 github.com/json-iterator/go v1.1.12
github.com/kubemq-io/kubemq-go v1.7.6
github.com/labd/commercetools-go-sdk v1.1.0 github.com/labd/commercetools-go-sdk v1.1.0
github.com/machinebox/graphql v0.2.2 github.com/machinebox/graphql v0.2.2
github.com/matoous/go-nanoid/v2 v2.0.0 github.com/matoous/go-nanoid/v2 v2.0.0
@ -82,7 +85,7 @@ require (
github.com/mrz1836/postmark v1.3.0 github.com/mrz1836/postmark v1.3.0
github.com/nacos-group/nacos-sdk-go/v2 v2.1.2 github.com/nacos-group/nacos-sdk-go/v2 v2.1.2
github.com/nats-io/nats-server/v2 v2.9.4 github.com/nats-io/nats-server/v2 v2.9.4
github.com/nats-io/nats.go v1.19.0 github.com/nats-io/nats.go v1.19.1
github.com/nats-io/nkeys v0.3.0 github.com/nats-io/nkeys v0.3.0
github.com/nats-io/stan.go v0.10.3 github.com/nats-io/stan.go v0.10.3
github.com/open-policy-agent/opa v0.45.0 github.com/open-policy-agent/opa v0.45.0
@ -91,7 +94,6 @@ require (
github.com/patrickmn/go-cache v2.1.0+incompatible github.com/patrickmn/go-cache v2.1.0+incompatible
github.com/pkg/errors v0.9.1 github.com/pkg/errors v0.9.1
github.com/rabbitmq/amqp091-go v1.5.0 github.com/rabbitmq/amqp091-go v1.5.0
github.com/robfig/cron/v3 v3.0.1
github.com/samuel/go-zookeeper v0.0.0-20201211165307-7117e9ea2414 github.com/samuel/go-zookeeper v0.0.0-20201211165307-7117e9ea2414
github.com/sendgrid/sendgrid-go v3.12.0+incompatible github.com/sendgrid/sendgrid-go v3.12.0+incompatible
github.com/sijms/go-ora/v2 v2.5.3 github.com/sijms/go-ora/v2 v2.5.3
@ -99,9 +101,9 @@ require (
github.com/supplyon/gremcos v0.1.38 github.com/supplyon/gremcos v0.1.38
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.527 github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.527
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/ssm v1.0.527 github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/ssm v1.0.527
github.com/tetratelabs/wazero v1.0.0-pre.4
github.com/valyala/fasthttp v1.41.0 github.com/valyala/fasthttp v1.41.0
github.com/vmware/vmware-go-kcl v1.5.0 github.com/vmware/vmware-go-kcl v1.5.0
github.com/wapc/wapc-go v0.5.5
github.com/xdg-go/scram v1.1.1 github.com/xdg-go/scram v1.1.1
go.mongodb.org/mongo-driver v1.10.3 go.mongodb.org/mongo-driver v1.10.3
go.temporal.io/api v1.12.0 go.temporal.io/api v1.12.0
@ -132,7 +134,7 @@ require (
github.com/99designs/keyring v1.2.1 // indirect github.com/99designs/keyring v1.2.1 // indirect
github.com/AthenZ/athenz v1.10.39 // indirect github.com/AthenZ/athenz v1.10.39 // indirect
github.com/Azure/azure-pipeline-go v0.2.3 // indirect github.com/Azure/azure-pipeline-go v0.2.3 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.1 // indirect
github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.0 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 // indirect github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 // indirect
@ -191,7 +193,7 @@ require (
github.com/emicklei/go-restful/v3 v3.8.0 // indirect github.com/emicklei/go-restful/v3 v3.8.0 // indirect
github.com/emirpasic/gods v1.12.0 // indirect github.com/emirpasic/gods v1.12.0 // indirect
github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a // indirect github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a // indirect
github.com/fatih/color v1.9.0 // indirect github.com/fatih/color v1.13.0 // indirect
github.com/gavv/httpexpect v2.0.0+incompatible // indirect github.com/gavv/httpexpect v2.0.0+incompatible // indirect
github.com/go-kit/kit v0.10.0 // indirect github.com/go-kit/kit v0.10.0 // indirect
github.com/go-kit/log v0.2.0 // indirect github.com/go-kit/log v0.2.0 // indirect
@ -205,6 +207,7 @@ require (
github.com/go-playground/locales v0.14.0 // indirect github.com/go-playground/locales v0.14.0 // indirect
github.com/go-playground/universal-translator v0.18.0 // indirect github.com/go-playground/universal-translator v0.18.0 // indirect
github.com/go-playground/validator/v10 v10.11.0 // indirect github.com/go-playground/validator/v10 v10.11.0 // indirect
github.com/go-resty/resty/v2 v2.7.0 // indirect
github.com/gobwas/glob v0.2.3 // indirect github.com/gobwas/glob v0.2.3 // indirect
github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect
github.com/gofrs/uuid v3.3.0+incompatible // indirect github.com/gofrs/uuid v3.3.0+incompatible // indirect
@ -225,12 +228,12 @@ require (
github.com/google/go-querystring v1.1.0 // indirect github.com/google/go-querystring v1.1.0 // indirect
github.com/google/gofuzz v1.1.0 // indirect github.com/google/gofuzz v1.1.0 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.2.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.2.0 // indirect
github.com/gorilla/websocket v1.4.2 // indirect github.com/gorilla/websocket v1.5.0 // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect
github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.1 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-hclog v1.1.0 // indirect github.com/hashicorp/go-hclog v1.1.0 // indirect
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
github.com/hashicorp/go-rootcerts v1.0.2 // indirect github.com/hashicorp/go-rootcerts v1.0.2 // indirect
@ -256,6 +259,7 @@ require (
github.com/kataras/go-serializer v0.0.4 // indirect github.com/kataras/go-serializer v0.0.4 // indirect
github.com/klauspost/compress v1.15.11 // indirect github.com/klauspost/compress v1.15.11 // indirect
github.com/knadh/koanf v1.4.1 // indirect github.com/knadh/koanf v1.4.1 // indirect
github.com/kubemq-io/protobuf v1.3.1 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect
github.com/labstack/echo/v4 v4.9.0 // indirect github.com/labstack/echo/v4 v4.9.0 // indirect
github.com/labstack/gommon v0.3.1 // indirect github.com/labstack/gommon v0.3.1 // indirect
@ -264,7 +268,7 @@ require (
github.com/magiconair/properties v1.8.6 // indirect github.com/magiconair/properties v1.8.6 // indirect
github.com/mailru/easyjson v0.7.6 // indirect github.com/mailru/easyjson v0.7.6 // indirect
github.com/matryer/is v1.4.0 // indirect github.com/matryer/is v1.4.0 // indirect
github.com/mattn/go-colorable v0.1.11 // indirect github.com/mattn/go-colorable v0.1.12 // indirect
github.com/mattn/go-ieproxy v0.0.1 // indirect github.com/mattn/go-ieproxy v0.0.1 // indirect
github.com/mattn/go-isatty v0.0.14 // indirect github.com/mattn/go-isatty v0.0.14 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
@ -287,7 +291,7 @@ require (
github.com/nats-io/nuid v1.0.1 // indirect github.com/nats-io/nuid v1.0.1 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/pborman/uuid v1.2.1 // indirect github.com/pborman/uuid v1.2.1 // indirect
github.com/pelletier/go-toml v1.9.3 // indirect github.com/pelletier/go-toml v1.9.4 // indirect
github.com/pierrec/lz4 v2.6.0+incompatible // indirect github.com/pierrec/lz4 v2.6.0+incompatible // indirect
github.com/pierrec/lz4/v4 v4.1.17 // indirect github.com/pierrec/lz4/v4 v4.1.17 // indirect
github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 // indirect github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 // indirect
@ -309,12 +313,10 @@ require (
github.com/sirupsen/logrus v1.9.0 // indirect github.com/sirupsen/logrus v1.9.0 // indirect
github.com/sony/gobreaker v0.4.2-0.20210216022020-dd874f9dd33b // indirect github.com/sony/gobreaker v0.4.2-0.20210216022020-dd874f9dd33b // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/spf13/cast v1.3.1 // indirect github.com/spf13/cast v1.4.1 // indirect
github.com/spf13/pflag v1.0.5 // indirect github.com/spf13/pflag v1.0.5 // indirect
github.com/stathat/consistent v1.0.0 // indirect
github.com/stretchr/objx v0.5.0 // indirect github.com/stretchr/objx v0.5.0 // indirect
github.com/tchap/go-patricia/v2 v2.3.1 // indirect github.com/tchap/go-patricia/v2 v2.3.1 // indirect
github.com/tetratelabs/wazero v1.0.0-pre.3 // indirect
github.com/tidwall/gjson v1.13.0 // indirect github.com/tidwall/gjson v1.13.0 // indirect
github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/match v1.1.1 // indirect
github.com/tidwall/pretty v1.2.0 // indirect github.com/tidwall/pretty v1.2.0 // indirect
@ -364,6 +366,7 @@ require (
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
sigs.k8s.io/yaml v1.2.0 // indirect sigs.k8s.io/yaml v1.2.0 // indirect
stathat.com/c/consistent v1.0.0 // indirect
) )
replace github.com/gobwas/pool => github.com/gobwas/pool v0.2.1 replace github.com/gobwas/pool => github.com/gobwas/pool v0.2.1

101
go.sum
View File

@ -51,6 +51,7 @@ cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1
cloud.google.com/go/datastore v1.8.0 h1:2qo2G7hABSeqswa+5Ga3+QB8/ZwKOJmDsCISM9scmsU= cloud.google.com/go/datastore v1.8.0 h1:2qo2G7hABSeqswa+5Ga3+QB8/ZwKOJmDsCISM9scmsU=
cloud.google.com/go/datastore v1.8.0/go.mod h1:q1CpHVByTlXppdqTcu4LIhCsTn3fhtZ5R7+TajciO+M= cloud.google.com/go/datastore v1.8.0/go.mod h1:q1CpHVByTlXppdqTcu4LIhCsTn3fhtZ5R7+TajciO+M=
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY=
cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY=
cloud.google.com/go/iam v0.6.0 h1:nsqQC88kT5Iwlm4MeNGTpfMWddp6NB/UOLFTH6m1QfQ= cloud.google.com/go/iam v0.6.0 h1:nsqQC88kT5Iwlm4MeNGTpfMWddp6NB/UOLFTH6m1QfQ=
cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc=
@ -102,21 +103,23 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.4/go.mod h1:uGG2W01BaETf0Ozp+Q
github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.11.0/go.mod h1:HcM1YX14R7CJcghJGOYCgdezslRSVzqwLf/q+4Y2r/0= github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.11.0/go.mod h1:HcM1YX14R7CJcghJGOYCgdezslRSVzqwLf/q+4Y2r/0=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 h1:QkAcEIAKbNL4KoFr4SathZPhDhF4mVwpBMFlYjyAqy8= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 h1:QkAcEIAKbNL4KoFr4SathZPhDhF4mVwpBMFlYjyAqy8=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0/go.mod h1:bhXu1AjYL+wutSL/kpSq6s7733q2Rb0yuot9Zgfqa/0= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0/go.mod h1:bhXu1AjYL+wutSL/kpSq6s7733q2Rb0yuot9Zgfqa/0=
github.com/Azure/azure-sdk-for-go/sdk/data/azappconfig v0.4.3 h1:QzjiMJn/pBxOq1xA3F6ODUvO1agmt7+mI+DZEx6dPtc= github.com/Azure/azure-sdk-for-go/sdk/data/azappconfig v0.5.0 h1:OrKZybbyagpgJiREiIVzH5mV/z9oS4rXqdX7i31DSF0=
github.com/Azure/azure-sdk-for-go/sdk/data/azappconfig v0.4.3/go.mod h1:p74+tP95m8830ypJk53L93+BEsjTKY4SKQ75J2NmS5U= github.com/Azure/azure-sdk-for-go/sdk/data/azappconfig v0.5.0/go.mod h1:p74+tP95m8830ypJk53L93+BEsjTKY4SKQ75J2NmS5U=
github.com/Azure/azure-sdk-for-go/sdk/data/azcosmos v0.3.2 h1:yJegJqjhrMJ3Oe5s43jOTGL2AsE7pJyx+7Yqls/65tw= github.com/Azure/azure-sdk-for-go/sdk/data/azcosmos v0.3.2 h1:yJegJqjhrMJ3Oe5s43jOTGL2AsE7pJyx+7Yqls/65tw=
github.com/Azure/azure-sdk-for-go/sdk/data/azcosmos v0.3.2/go.mod h1:Fy3bbChFm4cZn6oIxYYqKB2FG3rBDxk3NZDLDJCHl+Q= github.com/Azure/azure-sdk-for-go/sdk/data/azcosmos v0.3.2/go.mod h1:Fy3bbChFm4cZn6oIxYYqKB2FG3rBDxk3NZDLDJCHl+Q=
github.com/Azure/azure-sdk-for-go/sdk/data/aztables v1.0.1 h1:bFa9IcjvrCber6gGgDAUZ+I2bO8J7s8JxXmu9fhi2ss= github.com/Azure/azure-sdk-for-go/sdk/data/aztables v1.0.1 h1:bFa9IcjvrCber6gGgDAUZ+I2bO8J7s8JxXmu9fhi2ss=
github.com/Azure/azure-sdk-for-go/sdk/data/aztables v1.0.1/go.mod h1:l3wvZkG9oW07GLBW5Cd0WwG5asOfJ8aqE8raUvNzLpk= github.com/Azure/azure-sdk-for-go/sdk/data/aztables v1.0.1/go.mod h1:l3wvZkG9oW07GLBW5Cd0WwG5asOfJ8aqE8raUvNzLpk=
github.com/Azure/azure-sdk-for-go/sdk/internal v0.7.0/go.mod h1:yqy467j36fJxcRV2TzfVZ1pCb5vxm4BtZPUdYWe/Xo8= github.com/Azure/azure-sdk-for-go/sdk/internal v0.7.0/go.mod h1:yqy467j36fJxcRV2TzfVZ1pCb5vxm4BtZPUdYWe/Xo8=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0 h1:jp0dGvZ7ZK0mgqnTSClMxa5xuRL7NZgHameVYF6BurY= github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.1 h1:XUNQ4mw+zJmaA2KXzP9JlQiecy1SI+Eog7xVkPiqIbg=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w= github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.1/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w=
github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets v0.10.1 h1:AhZnZn4kUKz36bHJ8AK/FH2tH/q3CAkG+Gme+2ibuak= github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets v0.10.1 h1:AhZnZn4kUKz36bHJ8AK/FH2tH/q3CAkG+Gme+2ibuak=
github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets v0.10.1/go.mod h1:S78i9yTr4o/nXlH76bKjGUye9Z2wSxO5Tz7GoDr4vfI= github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets v0.10.1/go.mod h1:S78i9yTr4o/nXlH76bKjGUye9Z2wSxO5Tz7GoDr4vfI=
github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.0 h1:Lg6BW0VPmCwcMlvOviL3ruHFO+H9tZNqscK0AeuFjGM= github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.0 h1:Lg6BW0VPmCwcMlvOviL3ruHFO+H9tZNqscK0AeuFjGM=
github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.0/go.mod h1:9V2j0jn9jDEkCkv8w/bKTNppX/d0FVA1ud77xCIP4KA= github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.0/go.mod h1:9V2j0jn9jDEkCkv8w/bKTNppX/d0FVA1ud77xCIP4KA=
github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.1.1 h1:Zm7A6yKHT3evC/0lquPWJ9hrkRGVIeZOmIvHPv6xV9Q= github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.1.1 h1:Zm7A6yKHT3evC/0lquPWJ9hrkRGVIeZOmIvHPv6xV9Q=
github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.1.1/go.mod h1:LH9XQnMr2ZYxQdVdCrzLO9mxeDyrDFa6wbSI3x5zCZk= github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.1.1/go.mod h1:LH9XQnMr2ZYxQdVdCrzLO9mxeDyrDFa6wbSI3x5zCZk=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.1 h1:BMTdr+ib5ljLa9MxTJK8x/Ds0MbBb4MfuW5BL0zMJnI=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.1/go.mod h1:c6WvOhtmjNUWbLfOG1qxM/q0SPvQNSVJvolm+C52dIU=
github.com/Azure/azure-storage-blob-go v0.6.0/go.mod h1:oGfmITT1V6x//CswqY2gtAHND+xIP64/qL7a5QJix0Y= github.com/Azure/azure-storage-blob-go v0.6.0/go.mod h1:oGfmITT1V6x//CswqY2gtAHND+xIP64/qL7a5QJix0Y=
github.com/Azure/azure-storage-blob-go v0.10.0 h1:evCwGreYo3XLeBV4vSxLbLiYb6e0SzsJiXQVRGsRXxs= github.com/Azure/azure-storage-blob-go v0.10.0 h1:evCwGreYo3XLeBV4vSxLbLiYb6e0SzsJiXQVRGsRXxs=
github.com/Azure/azure-storage-blob-go v0.10.0/go.mod h1:ep1edmW+kNQx4UfWM9heESNmQdijykocJ0YOxmMX8SE= github.com/Azure/azure-storage-blob-go v0.10.0/go.mod h1:ep1edmW+kNQx4UfWM9heESNmQdijykocJ0YOxmMX8SE=
@ -208,8 +211,6 @@ github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia
github.com/agiledragon/gomonkey v2.0.2+incompatible/go.mod h1:2NGfXu1a80LLr2cmWXGBDaHEjb1idR6+FVlX5T3D9hw= github.com/agiledragon/gomonkey v2.0.2+incompatible/go.mod h1:2NGfXu1a80LLr2cmWXGBDaHEjb1idR6+FVlX5T3D9hw=
github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8= github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8=
github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo= github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo=
github.com/agrea/ptr v0.0.0-20180711073057-77a518d99b7b h1:WMhlIaJkDgEQSVJQM06YV+cYUl1r5OY5//ijMXJNqtA=
github.com/agrea/ptr v0.0.0-20180711073057-77a518d99b7b/go.mod h1:Tie46d3UWzXpj+Fh9+DQTyaUxEpFBPOLXrnx7nxlKRo=
github.com/ajg/form v1.5.1 h1:t9c7v8JUKu/XxOGBU0yjNpaMloxGEJhUkqFRq0ibGeU= github.com/ajg/form v1.5.1 h1:t9c7v8JUKu/XxOGBU0yjNpaMloxGEJhUkqFRq0ibGeU=
github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY=
github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
@ -282,9 +283,8 @@ github.com/apache/pulsar-client-go v0.9.0 h1:L5jvGFXJm0JNA/PgUiJctTVHHttCe4wIEFD
github.com/apache/pulsar-client-go v0.9.0/go.mod h1:fSAcBipgz4KQ/VgwZEJtQ71cCXMKm8ezznstrozrngw= github.com/apache/pulsar-client-go v0.9.0/go.mod h1:fSAcBipgz4KQ/VgwZEJtQ71cCXMKm8ezznstrozrngw=
github.com/apache/rocketmq-client-go v1.2.5 h1:2hPoLHpMJy1a57HDNmx7PZKgvlgVYO1Alz925oeqphQ= github.com/apache/rocketmq-client-go v1.2.5 h1:2hPoLHpMJy1a57HDNmx7PZKgvlgVYO1Alz925oeqphQ=
github.com/apache/rocketmq-client-go v1.2.5/go.mod h1:Kap8oXIVLlHF50BGUbN9z97QUp1GaK1nOoCfsZnR2bw= github.com/apache/rocketmq-client-go v1.2.5/go.mod h1:Kap8oXIVLlHF50BGUbN9z97QUp1GaK1nOoCfsZnR2bw=
github.com/apache/rocketmq-client-go/v2 v2.1.0 h1:3eABKfxc1WmS2lLTTbKMe1gZfZV6u1Sx9orFnOfABV0=
github.com/apache/rocketmq-client-go/v2 v2.1.0/go.mod h1:oEZKFDvS7sz/RWU0839+dQBupazyBV7WX5cP6nrio0Q= github.com/apache/rocketmq-client-go/v2 v2.1.0/go.mod h1:oEZKFDvS7sz/RWU0839+dQBupazyBV7WX5cP6nrio0Q=
github.com/apache/rocketmq-client-go/v2 v2.1.1-rc2 h1:UQHWhwyw3tSLRhp0lVn/r/uNUzDnBZcDekGSzaXfz0M=
github.com/apache/rocketmq-client-go/v2 v2.1.1-rc2/go.mod h1:DDYjQ9wxYmJLjgNK4+RqyFE8/13gLK/Bugz4U6zD5MI=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/apache/thrift v0.13.0 h1:5hryIiq9gtn+MiLVn0wP37kb/uTeRZgN08WoCsAhIhI= github.com/apache/thrift v0.13.0 h1:5hryIiq9gtn+MiLVn0wP37kb/uTeRZgN08WoCsAhIhI=
github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
@ -325,8 +325,9 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm
github.com/awslabs/kinesis-aggregation/go v0.0.0-20210630091500-54e17340d32f h1:Pf0BjJDga7C98f0vhw+Ip5EaiE07S3lTKpIYPNS0nMo= github.com/awslabs/kinesis-aggregation/go v0.0.0-20210630091500-54e17340d32f h1:Pf0BjJDga7C98f0vhw+Ip5EaiE07S3lTKpIYPNS0nMo=
github.com/awslabs/kinesis-aggregation/go v0.0.0-20210630091500-54e17340d32f/go.mod h1:SghidfnxvX7ribW6nHI7T+IBbc9puZ9kk5Tx/88h8P4= github.com/awslabs/kinesis-aggregation/go v0.0.0-20210630091500-54e17340d32f/go.mod h1:SghidfnxvX7ribW6nHI7T+IBbc9puZ9kk5Tx/88h8P4=
github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk= github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@ -353,9 +354,10 @@ github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n
github.com/cenkalti/backoff v2.0.0+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff v2.0.0+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/cenkalti/backoff/v4 v4.2.0 h1:HN5dHm3WBOgndBH6E8V0q2jIYIR3s9yglV8k/+MN3u4=
github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/cenkalti/backoff/v4 v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
@ -385,6 +387,7 @@ github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWH
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
@ -416,8 +419,8 @@ github.com/dancannon/gorethink v4.0.0+incompatible h1:KFV7Gha3AuqT+gr0B/eKvGhbjm
github.com/dancannon/gorethink v4.0.0+incompatible/go.mod h1:BLvkat9KmZc1efyYwhz3WnybhRZtgF1K929FD8z1avU= github.com/dancannon/gorethink v4.0.0+incompatible/go.mod h1:BLvkat9KmZc1efyYwhz3WnybhRZtgF1K929FD8z1avU=
github.com/danieljoos/wincred v1.1.2 h1:QLdCxFs1/Yl4zduvBdcHB8goaYk9RARS2SgLLRuAyr0= github.com/danieljoos/wincred v1.1.2 h1:QLdCxFs1/Yl4zduvBdcHB8goaYk9RARS2SgLLRuAyr0=
github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnGqR5Vl2tAx0= github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnGqR5Vl2tAx0=
github.com/dapr/kit v0.0.2 h1:VNg6RWrBMOdtY0/ZLztyAa/RjyFLaskdO9wt2HIREwk= github.com/dapr/kit v0.0.3 h1:1FCnWjIuAS3OJzJhOlUKNzLxoazYhYIS4oCOOULmWBA=
github.com/dapr/kit v0.0.2/go.mod h1:Q4TWm9+vcPZFGehaJUZt2hvA805wJm7FIuoArytWJ8o= github.com/dapr/kit v0.0.3/go.mod h1:+vh2UIRT0KzFm5YJWfj7az4XVSdodys1OCz1WzNe1Eo=
github.com/dave/jennifer v1.4.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= github.com/dave/jennifer v1.4.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@ -494,8 +497,10 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.m
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/go-control-plane v0.10.0/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= github.com/envoyproxy/go-control-plane v0.10.0/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ=
github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ=
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws=
github.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw=
github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA=
@ -506,8 +511,9 @@ github.com/fasthttp-contrib/sessions v0.0.0-20160905201309-74f6ac73d5d5/go.mod h
github.com/fastly/go-utils v0.0.0-20180712184237-d95a45783239/go.mod h1:Gdwt2ce0yfBxPvZrHkprdPPTTS3N5rwmLE8T22KBXlw= github.com/fastly/go-utils v0.0.0-20180712184237-d95a45783239/go.mod h1:Gdwt2ce0yfBxPvZrHkprdPPTTS3N5rwmLE8T22KBXlw=
github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
@ -522,8 +528,9 @@ github.com/frankban/quicktest v1.10.2 h1:19ARM85nVi4xH7xPXuc5eM/udya5ieh7b/Sv+d8
github.com/frankban/quicktest v1.10.2/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s= github.com/frankban/quicktest v1.10.2/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/gavv/httpexpect v2.0.0+incompatible h1:1X9kcRshkSKEjNJJxX9Y9mQ5BRfbxU5kORdjhlA1yX8= github.com/gavv/httpexpect v2.0.0+incompatible h1:1X9kcRshkSKEjNJJxX9Y9mQ5BRfbxU5kORdjhlA1yX8=
github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc=
github.com/getkin/kin-openapi v0.2.0/go.mod h1:V1z9xl9oF5Wt7v32ne4FmiF1alpS4dM6mNzoywPOXlk= github.com/getkin/kin-openapi v0.2.0/go.mod h1:V1z9xl9oF5Wt7v32ne4FmiF1alpS4dM6mNzoywPOXlk=
@ -578,6 +585,7 @@ github.com/go-playground/validator/v10 v10.11.0 h1:0W+xRM511GY47Yy3bZUbJVitCNg2B
github.com/go-playground/validator/v10 v10.11.0/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU= github.com/go-playground/validator/v10 v10.11.0/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU=
github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=
github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY=
github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I= github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
@ -764,8 +772,9 @@ github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+
github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grandcat/zeroconf v1.0.0 h1:uHhahLBKqwWBV6WZUDAT71044vwOTL+McW0mBJvo6kE= github.com/grandcat/zeroconf v1.0.0 h1:uHhahLBKqwWBV6WZUDAT71044vwOTL+McW0mBJvo6kE=
github.com/grandcat/zeroconf v1.0.0/go.mod h1:lTKmG1zh86XyCoUeIHSA4FJMBwCJiQmGfcP2PdzytEs= github.com/grandcat/zeroconf v1.0.0/go.mod h1:lTKmG1zh86XyCoUeIHSA4FJMBwCJiQmGfcP2PdzytEs=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
@ -785,6 +794,7 @@ github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0=
github.com/hashicorp/consul/api v1.13.0 h1:2hnLQ0GjQvw7f3O61jMO8gbasZviZTrt9R8WzgiirHc= github.com/hashicorp/consul/api v1.13.0 h1:2hnLQ0GjQvw7f3O61jMO8gbasZviZTrt9R8WzgiirHc=
github.com/hashicorp/consul/api v1.13.0/go.mod h1:ZlVrynguJKcYr54zGaDbaL3fOvKC9m72FhPvA8T35KQ= github.com/hashicorp/consul/api v1.13.0/go.mod h1:ZlVrynguJKcYr54zGaDbaL3fOvKC9m72FhPvA8T35KQ=
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
@ -795,14 +805,16 @@ github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brv
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI=
github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-hclog v1.1.0 h1:QsGcniKx5/LuX2eYoeL+Np3UKYPNaN7YKpTh29h8rbw= github.com/hashicorp/go-hclog v1.1.0 h1:QsGcniKx5/LuX2eYoeL+Np3UKYPNaN7YKpTh29h8rbw=
github.com/hashicorp/go-hclog v1.1.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v1.1.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
@ -869,12 +881,15 @@ github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKe
github.com/hazelcast/hazelcast-go-client v0.0.0-20190530123621-6cf767c2f31a h1:j6SSiw7fWemWfrJL801xiQ6xRT7ZImika50xvmPN+tg= github.com/hazelcast/hazelcast-go-client v0.0.0-20190530123621-6cf767c2f31a h1:j6SSiw7fWemWfrJL801xiQ6xRT7ZImika50xvmPN+tg=
github.com/hazelcast/hazelcast-go-client v0.0.0-20190530123621-6cf767c2f31a/go.mod h1:VhwtcZ7sg3xq7REqGzEy7ylSWGKz4jZd05eCJropNzI= github.com/hazelcast/hazelcast-go-client v0.0.0-20190530123621-6cf767c2f31a/go.mod h1:VhwtcZ7sg3xq7REqGzEy7ylSWGKz4jZd05eCJropNzI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/http-wasm/http-wasm-host-go v0.2.0 h1:BEu3SsCtx8JwVTCdITsvod5XlgjF9UQVJ8TxjFQJNs8=
github.com/http-wasm/http-wasm-host-go v0.2.0/go.mod h1:OTNlRT3nkPc+WpuxZe1lgZ+X31GaoghBg01SQkPKMjs=
github.com/huaweicloud/huaweicloud-sdk-go-obs v3.21.12+incompatible h1:tANYIteuFrosKbRYUk1Yo/OGJjbt4x3OVg211Qc60M0= github.com/huaweicloud/huaweicloud-sdk-go-obs v3.21.12+incompatible h1:tANYIteuFrosKbRYUk1Yo/OGJjbt4x3OVg211Qc60M0=
github.com/huaweicloud/huaweicloud-sdk-go-obs v3.21.12+incompatible/go.mod h1:l7VUhRbTKCzdOacdT4oWCwATKyvZqUOlOqr0Ous3k4s= github.com/huaweicloud/huaweicloud-sdk-go-obs v3.21.12+incompatible/go.mod h1:l7VUhRbTKCzdOacdT4oWCwATKyvZqUOlOqr0Ous3k4s=
github.com/huaweicloud/huaweicloud-sdk-go-v3 v0.1.6 h1:18ZrfdnOMi2tx59TioYV5zFuCfD4YzoEz62ktQBOEeU= github.com/huaweicloud/huaweicloud-sdk-go-v3 v0.1.6 h1:18ZrfdnOMi2tx59TioYV5zFuCfD4YzoEz62ktQBOEeU=
github.com/huaweicloud/huaweicloud-sdk-go-v3 v0.1.6/go.mod h1:QpZ96CRqyqd5fEODVmnzDNp3IWi5W95BFmWz1nfkq+s= github.com/huaweicloud/huaweicloud-sdk-go-v3 v0.1.6/go.mod h1:QpZ96CRqyqd5fEODVmnzDNp3IWi5W95BFmWz1nfkq+s=
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE= github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE=
github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
@ -979,6 +994,10 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kubemq-io/kubemq-go v1.7.6 h1:AKQb6jbWzJRiNub/9wLHdkUnsBPtc8TImtiSNlKxug8=
github.com/kubemq-io/kubemq-go v1.7.6/go.mod h1:oJVQFu794S9Df5AoEbaeM7s0knMjbKJs66PTLZzvk4g=
github.com/kubemq-io/protobuf v1.3.1 h1:b4QcnpujV8U3go8pa2+FTESl6ygU6hY8APYibRtyemo=
github.com/kubemq-io/protobuf v1.3.1/go.mod h1:mzbGBI05R+GhFLD520xweEIvDM+m4nI7ruJDhgEncas=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/labd/commercetools-go-sdk v1.1.0 h1:iX0JDkfAsZPXs0FUioYoFZ3Gm/GG9dEOqkv8vz439MM= github.com/labd/commercetools-go-sdk v1.1.0 h1:iX0JDkfAsZPXs0FUioYoFZ3Gm/GG9dEOqkv8vz439MM=
@ -999,6 +1018,7 @@ github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-b
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
github.com/linkedin/goavro/v2 v2.9.8 h1:jN50elxBsGBDGVDEKqUlDuU1cFwJ11K/yrJCBMe/7Wg= github.com/linkedin/goavro/v2 v2.9.8 h1:jN50elxBsGBDGVDEKqUlDuU1cFwJ11K/yrJCBMe/7Wg=
github.com/linkedin/goavro/v2 v2.9.8/go.mod h1:UgQUb2N/pmueQYH9bfqFioWxzYCZXSfF8Jw03O5sjqA= github.com/linkedin/goavro/v2 v2.9.8/go.mod h1:UgQUb2N/pmueQYH9bfqFioWxzYCZXSfF8Jw03O5sjqA=
github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w=
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
github.com/machinebox/graphql v0.2.2 h1:dWKpJligYKhYKO5A2gvNhkJdQMNZeChZYyBbrZkBZfo= github.com/machinebox/graphql v0.2.2 h1:dWKpJligYKhYKO5A2gvNhkJdQMNZeChZYyBbrZkBZfo=
github.com/machinebox/graphql v0.2.2/go.mod h1:F+kbVMHuwrQ5tYgU9JXlnskM8nOaFxCAEolaQybkjWA= github.com/machinebox/graphql v0.2.2/go.mod h1:F+kbVMHuwrQ5tYgU9JXlnskM8nOaFxCAEolaQybkjWA=
@ -1021,8 +1041,10 @@ github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVc
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.11 h1:nQ+aFkoE2TMGc0b68U2OKSexC+eq46+XwZzWXHRmPYs= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40=
github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI= github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI=
github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=
@ -1068,6 +1090,7 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh
github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 h1:BpfhmLKZf+SjVanKKhCgf3bg+511DmU9eDQTen7LLbY= github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 h1:BpfhmLKZf+SjVanKKhCgf3bg+511DmU9eDQTen7LLbY=
github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
@ -1116,8 +1139,8 @@ github.com/nats-io/nats-streaming-server v0.25.2/go.mod h1:bRbgx+iCG6EZEXpqVMroR
github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
github.com/nats-io/nats.go v1.16.0/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= github.com/nats-io/nats.go v1.16.0/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w=
github.com/nats-io/nats.go v1.17.0/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= github.com/nats-io/nats.go v1.17.0/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w=
github.com/nats-io/nats.go v1.19.0 h1:H6j8aBnTQFoVrTGB6Xjd903UMdE7jz6DS4YkmAqgZ9Q= github.com/nats-io/nats.go v1.19.1 h1:pDQZthDfxRMSJ0ereExAM9ODf3JyS42Exk7iCMdbpec=
github.com/nats-io/nats.go v1.19.0/go.mod h1:tLqubohF7t4z3du1QDPYJIQQyhb4wl6DhjxEajSI7UA= github.com/nats-io/nats.go v1.19.1/go.mod h1:tLqubohF7t4z3du1QDPYJIQQyhb4wl6DhjxEajSI7UA=
github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nkeys v0.3.0 h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8= github.com/nats-io/nkeys v0.3.0 h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8=
@ -1178,8 +1201,9 @@ github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw=
github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
github.com/pelletier/go-toml v1.9.3 h1:zeC5b1GviRUyKYd6OJPvBU/mcVDVoL1OhT17FCt5dSQ=
github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM=
github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
@ -1268,7 +1292,6 @@ github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqn
github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA=
github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ= github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ=
github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
@ -1287,6 +1310,7 @@ github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
github.com/sagikazarmark/crypt v0.4.0/go.mod h1:ALv2SRj7GxYV4HO9elxH9nS6M9gW+xDNxqmyJ6RfDFM=
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
github.com/samuel/go-zookeeper v0.0.0-20201211165307-7117e9ea2414 h1:AJNDS0kP60X8wwWFvbLPwDuojxubj9pbfK7pjHw0vKg= github.com/samuel/go-zookeeper v0.0.0-20201211165307-7117e9ea2414 h1:AJNDS0kP60X8wwWFvbLPwDuojxubj9pbfK7pjHw0vKg=
github.com/samuel/go-zookeeper v0.0.0-20201211165307-7117e9ea2414/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/samuel/go-zookeeper v0.0.0-20201211165307-7117e9ea2414/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
@ -1332,11 +1356,13 @@ github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0b
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY=
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA=
github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
@ -1349,10 +1375,9 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/spf13/viper v1.8.1 h1:Kq1fyeebqsBfbjZj4EL7gj2IO0mMaiyjYUWcUsl2O44=
github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
github.com/stathat/consistent v1.0.0 h1:ZFJ1QTRn8npNBKW065raSZ8xfOqhpb8vLOkfp4CcL/U= github.com/spf13/viper v1.10.1 h1:nuJZuYpG7gTj/XqiUwg8bA0cp1+M2mC3J4g5luUYBKk=
github.com/stathat/consistent v1.0.0/go.mod h1:uajTPbgSygZBJ+V+0mY7meZ8i0XAcZs7AQ6V121XSxw= github.com/spf13/viper v1.10.1/go.mod h1:IGlFPqhNAPKRxohIzWpI5QEy4kuI7tcl5WvR+8qy1rU=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
@ -1386,20 +1411,17 @@ github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.527 h1:hpZM
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.527/go.mod h1:7sCQWVkxcsR38nffDW057DRGk8mUjK1Ing/EFOK8s8Y= github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.527/go.mod h1:7sCQWVkxcsR38nffDW057DRGk8mUjK1Ing/EFOK8s8Y=
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/ssm v1.0.527 h1:WdSZURoWsFkpnMPyOZortjaL2z6jjk1erNPVdFB319o= github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/ssm v1.0.527 h1:WdSZURoWsFkpnMPyOZortjaL2z6jjk1erNPVdFB319o=
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/ssm v1.0.527/go.mod h1:nzU/d/X/76YkHxzwfQY9+DtH53Wegy2ZZQ52W3nnMZE= github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/ssm v1.0.527/go.mod h1:nzU/d/X/76YkHxzwfQY9+DtH53Wegy2ZZQ52W3nnMZE=
github.com/tetratelabs/wazero v1.0.0-pre.3 h1:Z5fbogMUGcERzaQb9mQU8+yJSy0bVvv2ce3dfR4wcZg= github.com/tetratelabs/wazero v1.0.0-pre.4 h1:RBJQT5OzmORkSp6MmZDWoFEr0zXjk4pmvMKAdeUnsaI=
github.com/tetratelabs/wazero v1.0.0-pre.3/go.mod h1:M8UDNECGm/HVjOfq0EOe4QfCY9Les1eq54IChMLETbc= github.com/tetratelabs/wazero v1.0.0-pre.4/go.mod h1:u8wrFmpdrykiFK0DFPiFm5a4+0RzsdmXYVtijBKqUVo=
github.com/tevid/gohamcrest v1.1.1/go.mod h1:3UvtWlqm8j5JbwYZh80D/PVBt0mJ1eJiYgZMibh0H/k= github.com/tevid/gohamcrest v1.1.1/go.mod h1:3UvtWlqm8j5JbwYZh80D/PVBt0mJ1eJiYgZMibh0H/k=
github.com/tidwall/gjson v1.2.1/go.mod h1:c/nTNbUr0E0OrXEhq1pwa8iEgc2DOt4ZZqAt1HtCkPA= github.com/tidwall/gjson v1.2.1/go.mod h1:c/nTNbUr0E0OrXEhq1pwa8iEgc2DOt4ZZqAt1HtCkPA=
github.com/tidwall/gjson v1.8.1/go.mod h1:5/xDoumyyDNerp2U36lyolv46b3uF/9Bu6OfyQ9GImk=
github.com/tidwall/gjson v1.13.0 h1:3TFY9yxOQShrvmjdM76K+jc66zJeT6D3/VFFYCGQf7M= github.com/tidwall/gjson v1.13.0 h1:3TFY9yxOQShrvmjdM76K+jc66zJeT6D3/VFFYCGQf7M=
github.com/tidwall/gjson v1.13.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/gjson v1.13.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E=
github.com/tidwall/match v1.0.3/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
github.com/tidwall/pretty v0.0.0-20190325153808-1166b9ac2b65/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/pretty v0.0.0-20190325153808-1166b9ac2b65/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tidwall/pretty v1.1.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
github.com/tinylib/msgp v1.1.5/go.mod h1:eQsjooMTnV42mHu917E26IogZ2930nFyBQdofk10Udg= github.com/tinylib/msgp v1.1.5/go.mod h1:eQsjooMTnV42mHu917E26IogZ2930nFyBQdofk10Udg=
@ -1434,9 +1456,6 @@ github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV
github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc= github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
github.com/vmware/vmware-go-kcl v1.5.0 h1:lTptJptznhVOHS7CSuhd/2yDJa7deTBRHaj3zAvhJt8= github.com/vmware/vmware-go-kcl v1.5.0 h1:lTptJptznhVOHS7CSuhd/2yDJa7deTBRHaj3zAvhJt8=
github.com/vmware/vmware-go-kcl v1.5.0/go.mod h1:P92YfaWfQyudNf62BNx+E2rJn9pd165MhHsRt8ajkpM= github.com/vmware/vmware-go-kcl v1.5.0/go.mod h1:P92YfaWfQyudNf62BNx+E2rJn9pd165MhHsRt8ajkpM=
github.com/wapc/wapc-go v0.5.5 h1:FVMcscIUvARubkXS5hU2l7Z6k03cRE1mxc7Vpv56fIQ=
github.com/wapc/wapc-go v0.5.5/go.mod h1:7hCuY3L3Kz0BHzqPzgnkf8tUDRqKP3rfh38jhx5ZI4g=
github.com/wasmerio/wasmer-go v1.0.4 h1:MnqHoOGfiQ8MMq2RF6wyCeebKOe84G88h5yv+vmxJgs=
github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
github.com/xdg-go/scram v1.1.1 h1:VOMT+81stJgXW3CpHyqHN3AXDYIMsx56mEFrB37Mb/E= github.com/xdg-go/scram v1.1.1 h1:VOMT+81stJgXW3CpHyqHN3AXDYIMsx56mEFrB37Mb/E=
@ -1481,11 +1500,14 @@ go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
go.etcd.io/etcd/api/v3 v3.5.0-alpha.0/go.mod h1:mPcW6aZJukV6Aa81LSKpBjQXTWlXB5r74ymPoSWa3Sw= go.etcd.io/etcd/api/v3 v3.5.0-alpha.0/go.mod h1:mPcW6aZJukV6Aa81LSKpBjQXTWlXB5r74ymPoSWa3Sw=
go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A=
go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
go.etcd.io/etcd/client/v2 v2.305.0-alpha.0/go.mod h1:kdV+xzCJ3luEBSIeQyB/OEKkWKd8Zkux4sbDeANrosU= go.etcd.io/etcd/client/v2 v2.305.0-alpha.0/go.mod h1:kdV+xzCJ3luEBSIeQyB/OEKkWKd8Zkux4sbDeANrosU=
go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
go.etcd.io/etcd/client/v2 v2.305.1/go.mod h1:pMEacxZW7o8pg4CrFE7pquyCJJzZvkvdD2RibOCCCGs=
go.etcd.io/etcd/client/v3 v3.5.0-alpha.0/go.mod h1:wKt7jgDgf/OfKiYmCq5WFGxOFAkVMLxiiXgLDFhECr8= go.etcd.io/etcd/client/v3 v3.5.0-alpha.0/go.mod h1:wKt7jgDgf/OfKiYmCq5WFGxOFAkVMLxiiXgLDFhECr8=
go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY= go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY=
go.etcd.io/etcd/pkg/v3 v3.5.0-alpha.0/go.mod h1:tV31atvwzcybuqejDoY3oaNRTtlD2l/Ot78Pc9w7DMY= go.etcd.io/etcd/pkg/v3 v3.5.0-alpha.0/go.mod h1:tV31atvwzcybuqejDoY3oaNRTtlD2l/Ot78Pc9w7DMY=
@ -1564,6 +1586,7 @@ golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
@ -1588,6 +1611,7 @@ golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw=
golang.org/x/exp v0.0.0-20221028150844-83b7d23a625f h1:Al51T6tzvuh3oiwX11vex3QgJ2XTedFPGmbEVh8cdoc=
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
@ -1614,6 +1638,7 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -1673,6 +1698,7 @@ golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
@ -1706,6 +1732,7 @@ golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
@ -1816,10 +1843,12 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211106132015-ebca88c72f68/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211106132015-ebca88c72f68/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@ -1991,6 +2020,7 @@ google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6
google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU=
google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo=
google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g=
@ -2078,6 +2108,8 @@ google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEc
google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211028162531-8db9c33dc351/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211104193956-4c6863e31247/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211104193956-4c6863e31247/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
@ -2145,6 +2177,7 @@ google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9K
google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=

View File

@ -0,0 +1,111 @@
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package blobstorage
import (
"context"
"fmt"
"net/url"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
azauth "github.com/dapr/components-contrib/internal/authentication/azure"
mdutils "github.com/dapr/components-contrib/metadata"
"github.com/dapr/kit/logger"
)
const (
	// defaultBlobRetryCount is used when the component metadata does not
	// provide a getBlobRetryCount value.
	// Specifies the maximum number of HTTP requests that will be made to retry blob operations. A value
	// of zero means that no additional HTTP requests will be made.
	defaultBlobRetryCount = 3
)
// CreateContainerStorageClient parses the component metadata, builds an Azure
// Blob Storage container client (using shared-key credentials when an account
// key is supplied, falling back to AAD otherwise) and attempts to create the
// container, ignoring the error if the container already exists.
// It returns the container client together with the parsed metadata.
func CreateContainerStorageClient(log logger.Logger, meta map[string]string) (*container.Client, *BlobStorageMetadata, error) {
	m, err := parseMetadata(meta)
	if err != nil {
		return nil, nil, err
	}

	userAgent := "dapr-" + logger.DaprVersion
	options := container.ClientOptions{
		ClientOptions: azcore.ClientOptions{
			Retry: policy.RetryOptions{
				MaxRetries: m.RetryCount,
			},
			Telemetry: policy.TelemetryOptions{
				ApplicationID: userAgent,
			},
		},
	}

	settings, err := azauth.NewEnvironmentSettings("storage", meta)
	if err != nil {
		return nil, nil, err
	}

	var customEndpoint string
	if val, ok := mdutils.GetMetadataProperty(meta, azauth.StorageEndpointKeys...); ok && val != "" {
		customEndpoint = val
	}
	var URL *url.URL
	if customEndpoint != "" {
		var parseErr error
		URL, parseErr = url.Parse(fmt.Sprintf("%s/%s/%s", customEndpoint, m.AccountName, m.ContainerName))
		if parseErr != nil {
			return nil, nil, parseErr
		}
	} else {
		env := settings.AzureEnvironment
		URL, _ = url.Parse(fmt.Sprintf("https://%s.blob.%s/%s", m.AccountName, env.StorageEndpointSuffix, m.ContainerName))
	}

	var clientErr error
	var client *container.Client
	// Try using shared key credentials first
	if m.AccountKey != "" {
		credential, newSharedKeyErr := azblob.NewSharedKeyCredential(m.AccountName, m.AccountKey)
		// Bug fix: previously tested the (nil) outer `err` instead of
		// newSharedKeyErr, so invalid shared-key credentials were never caught.
		if newSharedKeyErr != nil {
			return nil, nil, fmt.Errorf("invalid shared key credentials with error: %w", newSharedKeyErr)
		}
		client, clientErr = container.NewClientWithSharedKeyCredential(URL.String(), credential, &options)
		if clientErr != nil {
			// Bug fix: wrap clientErr (the actual failure) instead of `err`.
			return nil, nil, fmt.Errorf("cannot init Blobstorage container client: %w", clientErr)
		}
	} else {
		// fallback to AAD
		credential, tokenErr := settings.GetTokenCredential()
		// Bug fix: previously tested the (nil) outer `err` instead of tokenErr.
		if tokenErr != nil {
			return nil, nil, fmt.Errorf("invalid token credentials with error: %w", tokenErr)
		}
		client, clientErr = container.NewClient(URL.String(), credential, &options)
	}
	if clientErr != nil {
		return nil, nil, fmt.Errorf("cannot init Blobstorage client: %w", clientErr)
	}

	createContainerOptions := container.CreateOptions{
		Access:   &m.PublicAccessLevel,
		Metadata: map[string]string{},
	}
	timeoutCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	_, err = client.Create(timeoutCtx, &createContainerOptions)
	cancel()
	// Don't return the error: the container might already exist. Only log
	// when creation actually failed, to avoid a meaningless "error: <nil>" line.
	if err != nil {
		log.Debugf("error creating container: %v", err)
	}

	return client, m, nil
}

View File

@ -0,0 +1,64 @@
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package blobstorage
import (
"testing"
"github.com/stretchr/testify/assert"
azauth "github.com/dapr/components-contrib/internal/authentication/azure"
"github.com/dapr/kit/logger"
)
// scenario describes one failing CreateContainerStorageClient call: the
// metadata to pass in and a substring that the returned error must contain.
type scenario struct {
	metadata                 map[string]string // component metadata under test
	expectedFailureSubString string            // substring expected in err.Error()
}
// TestClientInitFailures verifies that CreateContainerStorageClient rejects
// metadata that is missing required fields with a descriptive error.
func TestClientInitFailures(t *testing.T) {
	testLogger := logger.NewLogger("test")

	cases := map[string]scenario{
		"missing accountName": {
			metadata:                 createTestMetadata(false, true, true),
			expectedFailureSubString: "missing or empty accountName field from metadata",
		},
		"missing container": {
			metadata:                 createTestMetadata(true, true, false),
			expectedFailureSubString: "missing or empty containerName field from metadata",
		},
	}

	for name, tc := range cases {
		tc := tc
		t.Run(name, func(t *testing.T) {
			_, _, err := CreateContainerStorageClient(testLogger, tc.metadata)
			assert.Contains(t, err.Error(), tc.expectedFailureSubString)
		})
	}
}
// createTestMetadata builds a metadata map containing only the requested
// Azure Storage fields, so tests can simulate missing configuration.
func createTestMetadata(accountName bool, accountKey bool, container bool) map[string]string {
	meta := make(map[string]string, 3)
	if accountName {
		meta[azauth.StorageAccountNameKeys[0]] = "account"
	}
	if accountKey {
		meta[azauth.StorageAccountKeyKeys[0]] = "key"
	}
	if container {
		meta[azauth.StorageContainerNameKeys[0]] = "test"
	}
	return meta
}

View File

@ -0,0 +1,88 @@
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package blobstorage
import (
"fmt"
"strconv"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
azauth "github.com/dapr/components-contrib/internal/authentication/azure"
mdutils "github.com/dapr/components-contrib/metadata"
)
// BlobStorageMetadata holds the parsed configuration for the Azure Blob
// Storage component.
type BlobStorageMetadata struct {
	AccountName   string // Azure Storage account name (required)
	AccountKey    string // shared key; when empty, AAD credentials are used instead
	ContainerName string // blob container to operate on (required)
	// RetryCount defaults to defaultBlobRetryCount; also settable via the
	// legacy "getBlobRetryCount" metadata key.
	RetryCount   int32 `json:"retryCount,string"`
	DecodeBase64 bool  `json:"decodeBase64,string"`
	// PublicAccessLevel must be empty or one of
	// azblob.PossiblePublicAccessTypeValues(); "none" is mapped to empty.
	PublicAccessLevel azblob.PublicAccessType
}
// parseMetadata converts raw component metadata into a validated
// BlobStorageMetadata. accountName and containerName are required;
// publicAccessLevel, when set, must be a value the SDK recognizes
// ("none" is accepted for backwards compatibility and mapped to empty).
func parseMetadata(meta map[string]string) (*BlobStorageMetadata, error) {
	m := BlobStorageMetadata{
		RetryCount: defaultBlobRetryCount,
	}
	// Bug fix: the error returned by DecodeMetadata was previously ignored,
	// silently accepting malformed metadata values.
	err := mdutils.DecodeMetadata(meta, &m)
	if err != nil {
		return nil, err
	}

	if val, ok := mdutils.GetMetadataProperty(meta, azauth.StorageAccountNameKeys...); ok && val != "" {
		m.AccountName = val
	} else {
		return nil, fmt.Errorf("missing or empty %s field from metadata", azauth.StorageAccountNameKeys[0])
	}

	if val, ok := mdutils.GetMetadataProperty(meta, azauth.StorageContainerNameKeys...); ok && val != "" {
		m.ContainerName = val
	} else {
		return nil, fmt.Errorf("missing or empty %s field from metadata", azauth.StorageContainerNameKeys[0])
	}

	if val, ok := mdutils.GetMetadataProperty(meta, azauth.StorageAccountKeyKeys...); ok && val != "" {
		m.AccountKey = val
	}

	// per the Dapr documentation "none" is a valid value
	if m.PublicAccessLevel == "none" {
		m.PublicAccessLevel = ""
	}
	if m.PublicAccessLevel != "" && !isValidPublicAccessType(m.PublicAccessLevel) {
		return nil, fmt.Errorf("invalid public access level: %s; allowed: %s",
			m.PublicAccessLevel, azblob.PossiblePublicAccessTypeValues())
	}

	// we need this key for backwards compatibility
	if val, ok := meta["getBlobRetryCount"]; ok && val != "" {
		// convert val from string to int32
		parseInt, err := strconv.ParseInt(val, 10, 32)
		if err != nil {
			return nil, err
		}
		m.RetryCount = int32(parseInt)
	}

	return &m, nil
}
// isValidPublicAccessType reports whether accessType is one of the public
// access levels recognized by the Azure Blob Storage SDK.
func isValidPublicAccessType(accessType azblob.PublicAccessType) bool {
	for _, allowed := range azblob.PossiblePublicAccessTypeValues() {
		if allowed == accessType {
			return true
		}
	}
	return false
}

View File

@ -0,0 +1,79 @@
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package blobstorage
import (
"testing"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
"github.com/stretchr/testify/assert"
)
// TestParseMetadata exercises parseMetadata with valid, default, and invalid
// publicAccessLevel configurations.
func TestParseMetadata(t *testing.T) {
	// baseMetadata returns the minimal valid metadata merged with extra keys.
	baseMetadata := func(extra map[string]string) map[string]string {
		md := map[string]string{
			"storageAccount":   "account",
			"storageAccessKey": "key",
			"container":        "test",
		}
		for k, v := range extra {
			md[k] = v
		}
		return md
	}

	t.Run("parse all metadata", func(t *testing.T) {
		meta, err := parseMetadata(baseMetadata(map[string]string{
			"getBlobRetryCount": "5",
			"decodeBase64":      "true",
		}))
		assert.Nil(t, err)
		assert.Equal(t, "test", meta.ContainerName)
		assert.Equal(t, "account", meta.AccountName)
		// storageAccessKey is parsed in the azauth package
		assert.Equal(t, true, meta.DecodeBase64)
		assert.Equal(t, int32(5), meta.RetryCount)
		assert.Equal(t, "", string(meta.PublicAccessLevel))
	})

	t.Run("parse metadata with publicAccessLevel = blob", func(t *testing.T) {
		meta, err := parseMetadata(baseMetadata(map[string]string{"publicAccessLevel": "blob"}))
		assert.Nil(t, err)
		assert.Equal(t, azblob.PublicAccessTypeBlob, meta.PublicAccessLevel)
	})

	t.Run("parse metadata with publicAccessLevel = container", func(t *testing.T) {
		meta, err := parseMetadata(baseMetadata(map[string]string{"publicAccessLevel": "container"}))
		assert.Nil(t, err)
		assert.Equal(t, azblob.PublicAccessTypeContainer, meta.PublicAccessLevel)
	})

	t.Run("parse metadata with invalid publicAccessLevel", func(t *testing.T) {
		_, err := parseMetadata(baseMetadata(map[string]string{"publicAccessLevel": "invalid"}))
		assert.Error(t, err)
	})
}

View File

@ -0,0 +1,118 @@
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package blobstorage
import (
b64 "encoding/base64"
"fmt"
"strings"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
"github.com/dapr/kit/logger"
)
const (
	// Metadata keys (compared case-insensitively in
	// CreateBlobHTTPHeadersFromRequest) that map to Azure blob HTTP headers.
	contentTypeKey        = "contenttype"
	contentMD5Key         = "contentmd5"
	contentEncodingKey    = "contentencoding"
	contentLanguageKey    = "contentlanguage"
	contentDispositionKey = "contentdisposition"
	cacheControlKey       = "cachecontrol"
)
// CreateBlobHTTPHeadersFromRequest maps the well-known keys from the request
// metadata (contentType, contentMD5, contentEncoding, contentLanguage,
// contentDisposition, cacheControl — matched case-insensitively) onto Azure
// blob HTTP headers, removing each consumed key from meta.
// When the explicit contentType argument is non-nil it takes precedence over
// a contentType found in meta (a warning is logged on conflict).
// Returns an error when contentMD5 is not a base64-encoded 128-bit value.
func CreateBlobHTTPHeadersFromRequest(meta map[string]string, contentType *string, log logger.Logger) (blob.HTTPHeaders, error) {
	// build map to support arbitrary case
	caseMap := make(map[string]string)
	for k := range meta {
		caseMap[strings.ToLower(k)] = k
	}

	blobHTTPHeaders := blob.HTTPHeaders{}
	if val, ok := meta[caseMap[contentTypeKey]]; ok && val != "" {
		blobHTTPHeaders.BlobContentType = &val
		delete(meta, caseMap[contentTypeKey])
	}
	if contentType != nil {
		if blobHTTPHeaders.BlobContentType != nil {
			// Bug fix: dereference the *string — formatting the pointer itself
			// with %s would print its pointer representation, not the value.
			log.Warnf("ContentType received from request Metadata %s, as well as ContentType property %s, choosing value from contentType property", *blobHTTPHeaders.BlobContentType, *contentType)
		}
		blobHTTPHeaders.BlobContentType = contentType
	}
	if val, ok := meta[caseMap[contentMD5Key]]; ok && val != "" {
		sDec, err := b64.StdEncoding.DecodeString(val)
		// MD5 digests are always 16 bytes (128 bits).
		if err != nil || len(sDec) != 16 {
			return blob.HTTPHeaders{}, fmt.Errorf("the MD5 value specified in Content MD5 is invalid, MD5 value must be 128 bits and base64 encoded")
		}
		blobHTTPHeaders.BlobContentMD5 = sDec
		delete(meta, caseMap[contentMD5Key])
	}
	if val, ok := meta[caseMap[contentEncodingKey]]; ok && val != "" {
		blobHTTPHeaders.BlobContentEncoding = &val
		delete(meta, caseMap[contentEncodingKey])
	}
	if val, ok := meta[caseMap[contentLanguageKey]]; ok && val != "" {
		blobHTTPHeaders.BlobContentLanguage = &val
		delete(meta, caseMap[contentLanguageKey])
	}
	if val, ok := meta[caseMap[contentDispositionKey]]; ok && val != "" {
		blobHTTPHeaders.BlobContentDisposition = &val
		delete(meta, caseMap[contentDispositionKey])
	}
	if val, ok := meta[caseMap[cacheControlKey]]; ok && val != "" {
		blobHTTPHeaders.BlobCacheControl = &val
		delete(meta, caseMap[cacheControlKey])
	}
	return blobHTTPHeaders, nil
}
// SanitizeMetadata makes the metadata map acceptable to Azure Blob Storage:
// keys are reduced to ASCII letters and digits only (renames are logged as
// warnings) and values are stripped of non-ASCII bytes.
// The map is modified in place and also returned for convenience.
func SanitizeMetadata(log logger.Logger, metadata map[string]string) map[string]string {
	// Bug fix: snapshot the keys before mutating. The loop deletes entries and
	// inserts sanitized replacements, and the Go spec leaves it unspecified
	// whether keys added during a range over a map are visited in that range.
	keys := make([]string, 0, len(metadata))
	for key := range metadata {
		keys = append(keys, key)
	}

	for _, key := range keys {
		val := metadata[key]

		// Keep only letters and digits in the key.
		n := 0
		newKey := make([]byte, len(key))
		for i := 0; i < len(key); i++ {
			if (key[i] >= 'A' && key[i] <= 'Z') ||
				(key[i] >= 'a' && key[i] <= 'z') ||
				(key[i] >= '0' && key[i] <= '9') {
				newKey[n] = key[i]
				n++
			}
		}

		if n != len(key) {
			nks := string(newKey[:n])
			log.Warnf("metadata key %s contains disallowed characters, sanitized to %s", key, nks)
			delete(metadata, key)
			metadata[nks] = val
			key = nks
		}

		// Remove all non-ascii characters
		n = 0
		newVal := make([]byte, len(val))
		for i := 0; i < len(val); i++ {
			if val[i] > 127 {
				continue
			}
			newVal[n] = val[i]
			n++
		}
		metadata[key] = string(newVal[:n])
	}

	return metadata
}

View File

@ -0,0 +1,68 @@
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package blobstorage
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/dapr/kit/logger"
)
// TestBlobHTTPHeaderGeneration checks how the explicit ContentType argument
// and the contentType metadata key are combined into blob HTTP headers.
func TestBlobHTTPHeaderGeneration(t *testing.T) {
	testLogger := logger.NewLogger("test")

	t.Run("Content type is set from request, forward compatibility", func(t *testing.T) {
		ct := "application/json"
		headers, err := CreateBlobHTTPHeadersFromRequest(map[string]string{}, &ct, testLogger)
		assert.Nil(t, err)
		assert.Equal(t, "application/json", *headers.BlobContentType)
	})

	t.Run("Content type and metadata provided (conflict), content type chosen", func(t *testing.T) {
		ct := "application/json"
		meta := map[string]string{
			contentTypeKey: "text/plain",
		}
		headers, err := CreateBlobHTTPHeadersFromRequest(meta, &ct, testLogger)
		assert.Nil(t, err)
		assert.Equal(t, "application/json", *headers.BlobContentType)
	})

	t.Run("ContentType not provided, metadata provided set backward compatibility", func(t *testing.T) {
		meta := map[string]string{
			contentTypeKey: "text/plain",
		}
		headers, err := CreateBlobHTTPHeadersFromRequest(meta, nil, testLogger)
		assert.Nil(t, err)
		assert.Equal(t, "text/plain", *headers.BlobContentType)
	})
}
// TestSanitizeRequestMetadata verifies that disallowed key characters and
// non-ASCII value characters are stripped by SanitizeMetadata.
func TestSanitizeRequestMetadata(t *testing.T) {
	testLogger := logger.NewLogger("test")
	t.Run("sanitize metadata if necessary", func(t *testing.T) {
		input := map[string]string{
			"somecustomfield": "some-custom-value",
			"specialfield":    "special:valueÜ",
			"not-allowed:":    "not-allowed",
		}
		sanitized := SanitizeMetadata(testLogger, input)
		assert.Equal(t, sanitized["somecustomfield"], "some-custom-value")
		assert.Equal(t, sanitized["specialfield"], "special:value")
		assert.Equal(t, sanitized["notallowed"], "not-allowed")
	})
}

View File

@ -16,11 +16,16 @@ package metadata
import ( import (
"fmt" "fmt"
"math" "math"
"reflect"
"strconv" "strconv"
"strings"
"time" "time"
"github.com/mitchellh/mapstructure" "github.com/mitchellh/mapstructure"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/dapr/components-contrib/internal/utils"
"github.com/dapr/kit/ptr"
) )
const ( const (
@ -140,7 +145,10 @@ func GetMetadataProperty(props map[string]string, keys ...string) (val string, o
func DecodeMetadata(input interface{}, result interface{}) error { func DecodeMetadata(input interface{}, result interface{}) error {
decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
DecodeHook: mapstructure.ComposeDecodeHookFunc( DecodeHook: mapstructure.ComposeDecodeHookFunc(
toTimeDurationHookFunc()), toTimeDurationHookFunc(),
toTruthyBoolHookFunc(),
toStringArrayHookFunc(),
),
Metadata: nil, Metadata: nil,
Result: result, Result: result,
WeaklyTypedInput: true, WeaklyTypedInput: true,
@ -151,3 +159,71 @@ func DecodeMetadata(input interface{}, result interface{}) error {
err = decoder.Decode(input) err = decoder.Decode(input)
return err return err
} }
// toTruthyBoolHookFunc returns a mapstructure decode hook that converts a
// metadata string into a bool (or *bool) destination field using
// utils.IsTruthy, so values like "on"/"1"/"true" decode to true.
func toTruthyBoolHookFunc() mapstructure.DecodeHookFunc {
	stringType := reflect.TypeOf("")
	boolType := reflect.TypeOf(true)
	boolPtrType := reflect.TypeOf(ptr.Of(true))
	return func(
		f reflect.Type,
		t reflect.Type,
		data interface{},
	) (interface{}, error) {
		if f == stringType && t == boolType {
			return utils.IsTruthy(data.(string)), nil
		}
		// Bug fix: the previous check compared t against
		// reflect.TypeOf(reflect.TypeOf(ptr.Of(true))) — i.e. the type of a
		// reflect.Type value — so *bool destinations were never matched here.
		if f == stringType && t == boolPtrType {
			return ptr.Of(utils.IsTruthy(data.(string))), nil
		}
		return data, nil
	}
}
// toStringArrayHookFunc returns a mapstructure decode hook that splits a
// comma-separated metadata string into a []string (or *[]string) field.
func toStringArrayHookFunc() mapstructure.DecodeHookFunc {
	stringType := reflect.TypeOf("")
	sliceType := reflect.TypeOf([]string{})
	slicePtrType := reflect.TypeOf(ptr.Of([]string{}))
	return func(
		f reflect.Type,
		t reflect.Type,
		data interface{},
	) (interface{}, error) {
		if f != stringType {
			return data, nil
		}
		switch t {
		case sliceType:
			return strings.Split(data.(string), ","), nil
		case slicePtrType:
			return ptr.Of(strings.Split(data.(string), ",")), nil
		}
		return data, nil
	}
}
// GetMetadataInfoFromStructType converts a struct to a map of field name (or struct tag) to field type.
// This is used to generate metadata documentation for components.
func GetMetadataInfoFromStructType(t reflect.Type, metadataMap *map[string]string) error {
// Return if not struct or pointer to struct.
if t.Kind() == reflect.Ptr {
t = t.Elem()
}
if t.Kind() != reflect.Struct {
return fmt.Errorf("not a struct: %s", t.Kind().String())
}
for i := 0; i < t.NumField(); i++ {
currentField := t.Field(i)
mapStructureTag := currentField.Tag.Get("mapstructure")
tags := strings.Split(mapStructureTag, ",")
numTags := len(tags)
if numTags > 1 && tags[numTags-1] == "squash" && currentField.Anonymous {
// traverse embedded struct
GetMetadataInfoFromStructType(currentField.Type, metadataMap)
continue
}
var fieldName string
if numTags > 0 && tags[0] != "" {
fieldName = tags[0]
} else {
fieldName = currentField.Name
}
(*metadataMap)[fieldName] = currentField.Type.String()
}
return nil
}

View File

@ -14,6 +14,7 @@ limitations under the License.
package metadata package metadata
import ( import (
"reflect"
"testing" "testing"
"time" "time"
@ -124,4 +125,116 @@ func TestMetadataDecode(t *testing.T) {
assert.Equal(t, 6*time.Minute, m.MyRegularDuration) assert.Equal(t, 6*time.Minute, m.MyRegularDuration)
assert.Equal(t, Duration{Duration: 3 * time.Second}, m.Myduration) assert.Equal(t, Duration{Duration: 3 * time.Second}, m.Myduration)
}) })
// Verifies the truthy-bool decode hook wired into DecodeMetadata: strings such
// as "on", "1" and "true" decode to true for bool and *bool fields, any other
// string decodes to false, and a *bool field with no matching key stays nil.
t.Run("Test metadata decode hook for truthy values", func(t *testing.T) {
	type testMetadata struct {
		BoolPointer            *bool
		BoolPointerNotProvided *bool
		BoolValueOn            bool
		BoolValue1             bool
		BoolValueTrue          bool
		BoolValue0             bool
		BoolValueFalse         bool
		BoolValueNonsense      bool
	}
	var m testMetadata
	testData := make(map[string]string)
	testData["boolpointer"] = "on"
	testData["boolvalueon"] = "on"
	testData["boolvalue1"] = "1"
	testData["boolvaluetrue"] = "true"
	testData["boolvalue0"] = "0"
	testData["boolvaluefalse"] = "false"
	testData["boolvaluenonsense"] = "nonsense"
	err := DecodeMetadata(testData, &m)
	assert.NoError(t, err)
	assert.True(t, *m.BoolPointer)
	assert.True(t, m.BoolValueOn)
	assert.True(t, m.BoolValue1)
	assert.True(t, m.BoolValueTrue)
	assert.False(t, m.BoolValue0)
	assert.False(t, m.BoolValueFalse)
	assert.False(t, m.BoolValueNonsense)
	assert.Nil(t, m.BoolPointerNotProvided)
})
// Verifies the comma-split string-array decode hook for []string and
// *[]string fields, including empty-string and trailing-comma inputs
// (strings.Split semantics: "" -> [""], "a," -> ["a", ""]).
t.Run("Test metadata decode for string arrays", func(t *testing.T) {
	type testMetadata struct {
		StringArray                           []string
		StringArrayPointer                    *[]string
		EmptyStringArray                      []string
		EmptyStringArrayPointer               *[]string
		EmptyStringArrayWithComma             []string
		EmptyStringArrayPointerWithComma      *[]string
		StringArrayOneElement                 []string
		StringArrayOneElementPointer          *[]string
		StringArrayOneElementWithComma        []string
		StringArrayOneElementPointerWithComma *[]string
	}
	var m testMetadata
	testData := make(map[string]string)
	testData["stringarray"] = "one,two,three"
	testData["stringarraypointer"] = "one,two,three"
	testData["emptystringarray"] = ""
	testData["emptystringarraypointer"] = ""
	testData["stringarrayoneelement"] = "test"
	testData["stringarrayoneelementpointer"] = "test"
	testData["stringarrayoneelementwithcomma"] = "test,"
	testData["stringarrayoneelementpointerwithcomma"] = "test,"
	testData["emptystringarraywithcomma"] = ","
	testData["emptystringarraypointerwithcomma"] = ","
	err := DecodeMetadata(testData, &m)
	assert.NoError(t, err)
	assert.Equal(t, []string{"one", "two", "three"}, m.StringArray)
	assert.Equal(t, []string{"one", "two", "three"}, *m.StringArrayPointer)
	assert.Equal(t, []string{""}, m.EmptyStringArray)
	assert.Equal(t, []string{""}, *m.EmptyStringArrayPointer)
	assert.Equal(t, []string{"test"}, m.StringArrayOneElement)
	assert.Equal(t, []string{"test"}, *m.StringArrayOneElementPointer)
	assert.Equal(t, []string{"test", ""}, m.StringArrayOneElementWithComma)
	assert.Equal(t, []string{"test", ""}, *m.StringArrayOneElementPointerWithComma)
	assert.Equal(t, []string{"", ""}, m.EmptyStringArrayWithComma)
	assert.Equal(t, []string{"", ""}, *m.EmptyStringArrayPointerWithComma)
})
}
// TestMetadataStructToStringMap verifies GetMetadataInfoFromStructType:
// field names (or mapstructure tag overrides) map to Go type strings, and
// squash-embedded structs are flattened rather than listed as fields.
func TestMetadataStructToStringMap(t *testing.T) {
	t.Run("Test metadata struct to metadata info conversion", func(t *testing.T) {
		type NestedStruct struct {
			NestedStringCustom string `mapstructure:"nested_string_custom"`
			NestedString       string
		}
		type testMetadata struct {
			NestedStruct            `mapstructure:",squash"`
			Mystring                string
			Myduration              Duration
			Myinteger               int
			Myfloat64               float64
			Mybool                  *bool `json:",omitempty"`
			MyRegularDuration       time.Duration
			SomethingWithCustomName string `mapstructure:"something_with_custom_name"`
		}
		m := testMetadata{}
		metadatainfo := map[string]string{}
		// NOTE(review): the error return is ignored here; acceptable in a test
		// against a struct that is known to be valid.
		GetMetadataInfoFromStructType(reflect.TypeOf(m), &metadatainfo)
		assert.Equal(t, "string", metadatainfo["Mystring"])
		assert.Equal(t, "metadata.Duration", metadatainfo["Myduration"])
		assert.Equal(t, "int", metadatainfo["Myinteger"])
		assert.Equal(t, "float64", metadatainfo["Myfloat64"])
		assert.Equal(t, "*bool", metadatainfo["Mybool"])
		assert.Equal(t, "time.Duration", metadatainfo["MyRegularDuration"])
		assert.Equal(t, "string", metadatainfo["something_with_custom_name"])
		assert.NotContains(t, metadatainfo, "NestedStruct")
		assert.NotContains(t, metadatainfo, "SomethingWithCustomName")
		assert.Equal(t, "string", metadatainfo["nested_string_custom"])
		assert.Equal(t, "string", metadatainfo["NestedString"])
	})
} }

View File

@ -1,5 +1,23 @@
## WebAssembly Middleware

This component lets you manipulate an incoming request or serve a response with custom logic compiled using the [http-wasm](https://http-wasm.io/) Application Binary Interface (ABI). The `handle_request` function receives an incoming request and can manipulate it or serve a response as necessary.

Please see the [documentation](https://github.com/dapr/docs/blob/v1.9/daprdocs/content/en/reference/components-reference/supported-middleware/middleware-wasm.md) for general configuration.
### Generating Wasm
To compile your wasm, you must compile source using an SDK such as [http-wasm-guest-tinygo](https://github.com/http-wasm/http-wasm-guest-tinygo). You can also make a copy of [hello.go](./example/example.go) and replace the `handler.HandleFn` function with your custom logic.
If using TinyGo, compile like so and set the `path` attribute to the output:
```bash
tinygo build -o router.wasm -scheduler=none --no-debug -target=wasi router.go
```
### Notes
* This is an alpha feature, so configuration is subject to change.
* This module implements the host side of the http-wasm handler protocol.
* This uses [wazero](https://wazero.io) for the WebAssembly runtime as it has no dependencies,
nor relies on CGO. This allows installation without shared libraries.
* Many WebAssembly compilers leave memory unbounded and/or set to 16MB. To
avoid resource exhaustion, assign [concurrency controls](https://docs.dapr.io/operations/configuration/control-concurrency/).

View File

@ -1,37 +0,0 @@
## Basic WebAssembly Middleware
WebAssembly is a way to safely run code compiled in other languages. Runtimes
execute WebAssembly Modules (Wasm), which are most often binaries with a `.wasm`
extension.
This component allows you to rewrite a request URI with custom logic compiled
to a Wasm using the [waPC protocol][1].
Please see the [documentation][2] for general configuration.
### Generating Wasm
To compile your wasm, you must compile source using a wapc-go guest SDK such as
[TinyGo][3]. You can also make a copy of [hello.go](./example/example.go) and
replace the function `rewrite` with your custom logic.
If using TinyGo, compile like so and set the `path` attribute to the output:
```bash
tinygo build -o example.wasm -scheduler=none --no-debug -target=wasi example.go`
```
### Notes
* This is an alpha feature, so configuration is subject to change.
* This module implements the host side of the waPC protocol using [wapc-go][4].
* This uses [wazero][5] for the WebAssembly runtime as it has no dependencies,
nor relies on CGO. This allows installation without shared libraries.
* Many WebAssembly compilers leave memory unbounded and/or set to 16MB. Do not
set a large pool size without considering memory amplification.
[1]: https://wapc.io/docs/spec/
[2]: https://github.com/dapr/docs/blob/v1.8/daprdocs/content/en/reference/components-reference/supported-middleware/middleware-wasm.md
[3]: https://github.com/wapc/wapc-guest-tinygo
[4]: https://github.com/wapc/wapc-go
[5]: https://wazero.io

View File

@ -1,198 +0,0 @@
package basic
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"os"
"runtime"
"strconv"
"time"
"github.com/wapc/wapc-go"
"github.com/wapc/wapc-go/engines/wazero"
"github.com/dapr/components-contrib/internal/httputils"
"github.com/dapr/components-contrib/middleware"
"github.com/dapr/kit/logger"
)
// ctx substitutes for context propagation until middleware APIs support it.
var ctx = context.Background()
// middlewareMetadata includes configuration used for the WebAssembly handler.
// Detailed notes are in README.md for visibility.
//
// Note: When changing this, you must update the docs with summary comments per
// field.
// https://github.com/dapr/docs/blob/v1.8/daprdocs/content/en/reference/components-reference/supported-middleware/middleware-wasm.md
type middlewareMetadata struct {
// Path is where to load a `%.wasm` file that implements the guest side of
// the waPC protocol. No default.
Path string `json:"path"`
// PoolSize determines the amount of modules at the given path to load, per
// request handler. Default: 10
PoolSize poolSizeJSON `json:"poolSize"`
// guest is WebAssembly binary implementing the waPC guest, loaded from Path.
guest []byte
}
// poolSizeJSON is needed because go cannot unmarshal an integer from a string.
type poolSizeJSON uint32
// UnmarshalJSON allows decoding of a quoted uint32
func (s *poolSizeJSON) UnmarshalJSON(b []byte) error {
var n json.Number
if err := json.Unmarshal(b, &n); err != nil {
return fmt.Errorf("invalid poolSize: %w", err)
}
if i, err := strconv.ParseUint(string(n), 10, 32); err != nil {
return fmt.Errorf("invalid poolSize: %w", err)
} else {
*s = poolSizeJSON(i)
return nil
}
}
// wapcMiddleware is a wasm basic middleware.
type wapcMiddleware struct {
logger logger.Logger
}
// NewMiddleware returns a new wasm basic middleware.
func NewMiddleware(logger logger.Logger) middleware.Middleware {
return &wapcMiddleware{logger: logger}
}
// GetHandler returns the HTTP handler provided by wasm basic middleware.
func (m *wapcMiddleware) GetHandler(metadata middleware.Metadata) (func(next http.Handler) http.Handler, error) {
	rh, err := m.getHandler(metadata)
	if err != nil {
		return nil, err
	}
	// Release the wasm pool via finalizer because dapr has no middleware close
	// hook yet; Close clears this finalizer if called explicitly.
	runtime.SetFinalizer(rh, (*wapcRequestHandler).Close)
	return rh.requestHandler, nil
}
// getHandler is extracted for unit testing. It parses the component metadata,
// compiles the guest wasm, and builds a pool of module instances sized by
// PoolSize. Returns the handler wiring the pool to captured stdout/stderr
// buffers, or an error prefixed "wasm basic:" describing which step failed.
func (m *wapcMiddleware) getHandler(metadata middleware.Metadata) (*wapcRequestHandler, error) {
	meta, err := m.getMetadata(metadata)
	if err != nil {
		return nil, fmt.Errorf("wasm basic: failed to parse metadata: %w", err)
	}
	var stdout, stderr bytes.Buffer
	config := &wapc.ModuleConfig{
		Logger: m.log,    // waPC messages go here
		Stdout: &stdout,  // reset per request
		Stderr: &stderr,
	}
	// This is a simple case, so the binary does not need any callbacks.
	mod, err := wazero.Engine().New(ctx, wapc.NoOpHostCallHandler, meta.guest, config)
	if err != nil {
		return nil, fmt.Errorf("wasm basic: error compiling wasm at %s: %w", meta.Path, err)
	}
	// WebAssembly modules are not goroutine safe (because they have no atomics
	// to implement garbage collection safely). Hence, we need a pool.
	pool, err := wapc.NewPool(ctx, mod, uint64(meta.PoolSize))
	if err != nil {
		// "wasm basic:" prefix added for consistency with the other errors
		// returned from this component.
		return nil, fmt.Errorf("wasm basic: error creating module pool from wasm at %s: %w", meta.Path, err)
	}
	return &wapcRequestHandler{mod: mod, logger: m.logger, stdout: &stdout, stderr: &stderr, pool: pool}, nil
}
// log implements wapc.Logger.
func (m *wapcMiddleware) log(msg string) {
m.logger.Info(msg)
}
// getMetadata decodes the component properties into middlewareMetadata by
// round-tripping them through JSON, validates that a wasm path was supplied,
// and eagerly reads the guest binary from disk so configuration errors
// surface at init time rather than per request. PoolSize defaults to 10.
func (m *wapcMiddleware) getMetadata(metadata middleware.Metadata) (*middlewareMetadata, error) {
	b, err := json.Marshal(metadata.Properties)
	if err != nil {
		return nil, err
	}
	var data middlewareMetadata
	err = json.Unmarshal(b, &data)
	if err != nil {
		return nil, err
	}
	if data.Path == "" {
		return nil, errors.New("missing path")
	}
	data.guest, err = os.ReadFile(data.Path)
	if err != nil {
		return nil, fmt.Errorf("error reading path: %w", err)
	}
	if data.PoolSize == 0 {
		data.PoolSize = 10 // Default
	}
	return &data, nil
}
type wapcRequestHandler struct {
mod wapc.Module
logger logger.Logger
stdout, stderr *bytes.Buffer
pool *wapc.Pool
}
// requestHandler wraps next in a handler that routes each request URI through
// the guest's "rewrite" function before delegating to next. Guest errors or
// an exhausted pool produce a 500 response instead of calling next.
func (rh *wapcRequestHandler) requestHandler(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Modules are not goroutine safe, so each request needs exclusive use
		// of an instance; wait up to a second for one to free up.
		instance, err := rh.pool.Get(1 * time.Second)
		if err != nil {
			httputils.RespondWithErrorAndMessage(w, http.StatusInternalServerError, "wasm pool busy")
			return
		}
		// Runs after the stdout/stderr reads below: clear the shared buffers
		// and hand the instance back for the next request.
		defer func() {
			rh.stdout.Reset()
			rh.stderr.Reset()
			_ = rh.pool.Return(instance)
		}()
		err = rh.handle(r, instance)
		// Surface anything the guest printed, at debug level.
		if stdout := rh.stdout.String(); len(stdout) > 0 {
			rh.logger.Debugf("wasm stdout: %s", stdout)
		}
		if stderr := rh.stderr.String(); len(stderr) > 0 {
			rh.logger.Debugf("wasm stderr: %s", stderr)
		}
		if err != nil {
			httputils.RespondWithErrorAndMessage(w, http.StatusInternalServerError, err.Error())
		} else {
			next.ServeHTTP(w, r)
		}
	})
}
// handle is like http.Handler, except it accepts a waPC instance and returns
// an error. It invokes the guest's "rewrite" function with the current
// request URI and installs whatever URI the guest returns.
func (rh *wapcRequestHandler) handle(r *http.Request, instance wapc.Instance) error {
	rewritten, err := instance.Invoke(ctx, "rewrite", []byte(httputils.RequestURI(r)))
	if err != nil {
		return err
	}
	return httputils.SetRequestURI(r, string(rewritten))
}
// Close implements io.Closer, releasing the module pool and compiled module.
func (rh *wapcRequestHandler) Close() error {
	// TODO: we have to use a finalizer as there's no way in dapr to close middleware, yet.
	// See https://github.com/dapr/dapr/pull/3088
	// Clear the finalizer first so an explicit Close doesn't run twice.
	runtime.SetFinalizer(rh, nil)
	rh.pool.Close(ctx)
	return rh.mod.Close(ctx)
}

View File

@ -1,177 +0,0 @@
package basic
import (
"fmt"
"log"
"net/http"
"net/http/httptest"
"os"
"testing"
"github.com/dapr/components-contrib/internal/httputils"
"github.com/dapr/components-contrib/metadata"
"github.com/stretchr/testify/require"
"github.com/dapr/components-contrib/middleware"
"github.com/dapr/components-contrib/middleware/http/wasm/basic/internal/test"
"github.com/dapr/kit/logger"
)
var exampleWasm []byte
// TestMain ensures we can read the example wasm prior to running unit tests.
func TestMain(m *testing.M) {
var err error
exampleWasm, err = os.ReadFile("example/example.wasm")
if err != nil {
log.Panicln(err)
}
os.Exit(m.Run())
}
func Test_NewMiddleWare(t *testing.T) {
l := test.NewLogger()
require.Equal(t, &wapcMiddleware{logger: l}, NewMiddleware(l))
}
func Test_wapcMiddleware_log(t *testing.T) {
l := test.NewLogger()
m := &wapcMiddleware{logger: l}
message := "alert"
m.log(message)
require.Equal(t, "Info(alert)\n", l.(fmt.Stringer).String())
}
func Test_wapcMiddleware_getMetadata(t *testing.T) {
m := &wapcMiddleware{}
type testCase struct {
name string
metadata metadata.Base
expected *middlewareMetadata
expectedErr string
}
tests := []testCase{
{
name: "empty path",
metadata: metadata.Base{Properties: map[string]string{}},
expectedErr: "missing path",
},
{
name: "path dir not file",
metadata: metadata.Base{Properties: map[string]string{
"path": "./example",
}},
// Below ends in "is a directory" in unix, and "The handle is invalid." in windows.
expectedErr: "error reading path: read ./example: ",
},
{
name: "poolSize defaults to 10",
metadata: metadata.Base{Properties: map[string]string{
"path": "./example/example.wasm",
}},
expected: &middlewareMetadata{Path: "./example/example.wasm", PoolSize: 10, guest: exampleWasm},
},
{
name: "poolSize",
metadata: metadata.Base{Properties: map[string]string{
"path": "./example/example.wasm",
"poolSize": "1",
}},
expected: &middlewareMetadata{Path: "./example/example.wasm", PoolSize: 1, guest: exampleWasm},
},
{
name: "poolSize invalid",
metadata: metadata.Base{Properties: map[string]string{
"path": "./example/example.wasm",
"poolSize": "-1",
}},
expectedErr: `invalid poolSize: strconv.ParseUint: parsing "-1": invalid syntax`,
},
}
for _, tt := range tests {
tc := tt
t.Run(tc.name, func(t *testing.T) {
md, err := m.getMetadata(middleware.Metadata{Base: tc.metadata})
if tc.expectedErr == "" {
require.NoError(t, err)
require.Equal(t, tc.expected, md)
} else {
// Use substring match as the error can be different in Windows.
require.Contains(t, err.Error(), tc.expectedErr)
}
})
}
}
func Test_wapcMiddleware_getHandler(t *testing.T) {
m := &wapcMiddleware{logger: logger.NewLogger(t.Name())}
type testCase struct {
name string
metadata metadata.Base
expectedErr string
}
tests := []testCase{
// This just tests the error message prefixes properly. Otherwise, it is
// redundant to Test_wapcMiddleware_getMetadata
{
name: "requires path metadata",
metadata: metadata.Base{Properties: map[string]string{}},
expectedErr: "wasm basic: failed to parse metadata: missing path",
},
// This is more than Test_wapcMiddleware_getMetadata, as it ensures the
// contents are actually wasm.
{
name: "path not wasm",
metadata: metadata.Base{Properties: map[string]string{
"path": "./example/example.go",
}},
expectedErr: "wasm basic: error compiling wasm at ./example/example.go: invalid binary",
},
{
name: "ok",
metadata: metadata.Base{Properties: map[string]string{
"path": "./example/example.wasm",
}},
},
}
for _, tt := range tests {
tc := tt
t.Run(tc.name, func(t *testing.T) {
h, err := m.getHandler(middleware.Metadata{Base: tc.metadata})
if tc.expectedErr == "" {
require.NoError(t, err)
require.NotNil(t, h.mod)
require.NotNil(t, h.pool)
} else {
require.EqualError(t, err, tc.expectedErr)
}
})
}
}
func Test_Example(t *testing.T) {
meta := metadata.Base{Properties: map[string]string{
// example.wasm was compiled via the following:
// tinygo build -o example.wasm -scheduler=none --no-debug -target=wasi hello.go`
"path": "./example/example.wasm",
"poolSize": "2",
}}
l := test.NewLogger()
handlerFn, err := NewMiddleware(l).GetHandler(middleware.Metadata{Base: meta})
require.NoError(t, err)
handler := handlerFn(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}))
r := httptest.NewRequest(http.MethodGet, "/v1.0/hi", nil)
w := httptest.NewRecorder()
handler.ServeHTTP(w, r)
require.Equal(t, "/v1.0/hello", httputils.RequestURI(r))
require.Empty(t, l.(fmt.Stringer).String())
}

View File

@ -1,3 +0,0 @@
build:
@echo "Building Example Guest Wasm"
@tinygo build -o example.wasm -scheduler=none --no-debug -target=wasi example.go

View File

@ -1,15 +0,0 @@
package main
import "github.com/wapc/wapc-guest-tinygo"
func main() {
wapc.RegisterFunctions(wapc.Functions{"rewrite": rewrite})
}
// rewrite returns a new URI if necessary: "/v1.0/hi" becomes "/v1.0/hello";
// every other URI passes through unchanged.
func rewrite(requestURI []byte) ([]byte, error) {
	switch string(requestURI) {
	case "/v1.0/hi":
		return []byte("/v1.0/hello"), nil
	default:
		return requestURI, nil
	}
}

View File

@ -1,2 +0,0 @@
github.com/wapc/wapc-guest-tinygo v0.3.3 h1:jLebiwjVSHLGnS+BRabQ6+XOV7oihVWAc05Hf1SbeR0=
github.com/wapc/wapc-guest-tinygo v0.3.3/go.mod h1:mzM3CnsdSYktfPkaBdZ8v88ZlfUDEy5Jh5XBOV3fYcw=

View File

@ -1,2 +0,0 @@
github.com/wapc/wapc-guest-tinygo v0.3.3 h1:jLebiwjVSHLGnS+BRabQ6+XOV7oihVWAc05Hf1SbeR0=
github.com/wapc/wapc-guest-tinygo v0.3.3/go.mod h1:mzM3CnsdSYktfPkaBdZ8v88ZlfUDEy5Jh5XBOV3fYcw=

View File

@ -1,135 +0,0 @@
package internal_test
import (
"fmt"
"log"
"net/http"
"net/http/httptest"
"os"
"path"
"strconv"
"testing"
"github.com/dapr/components-contrib/metadata"
"github.com/stretchr/testify/require"
"github.com/dapr/components-contrib/middleware"
"github.com/dapr/components-contrib/middleware/http/wasm/basic"
"github.com/dapr/components-contrib/middleware/http/wasm/basic/internal/test"
)
var guestWasm map[string][]byte
const (
guestWasmOutput = "output"
)
// TestMain ensures we can read the test wasm prior to running e2e tests.
func TestMain(m *testing.M) {
wasms := []string{guestWasmOutput}
guestWasm = make(map[string][]byte, len(wasms))
for _, name := range wasms {
if wasm, err := os.ReadFile(path.Join("e2e-guests", name, "main.wasm")); err != nil {
log.Panicln(err)
} else {
guestWasm[name] = wasm
}
}
os.Exit(m.Run())
}
func Test_EndToEnd(t *testing.T) {
type testCase struct {
name string
guest []byte
poolSize int
test func(t *testing.T, handler http.Handler, log fmt.Stringer)
}
tests := []testCase{
{
name: "consoleLog stdout and stderr",
guest: guestWasm[guestWasmOutput],
test: func(t *testing.T, handler http.Handler, log fmt.Stringer) {
r := httptest.NewRequest(http.MethodGet, "/", nil)
w := httptest.NewRecorder()
handler.ServeHTTP(w, r)
// First, we expect any console logging written inline from
// init (main) and the request (rewrite) funcs to info level.
//
// Then, we expect to see stdout and stderr from both scopes
// at debug level.
require.Equal(t, `Info(main ConsoleLog)
Info(request[0] ConsoleLog)
Debug(wasm stdout: main Stdout
request[0] Stdout
)
Debug(wasm stderr: main Stderr
request[0] Stderr
)
`, log.String())
},
},
{
name: "multiple requests",
guest: guestWasm[guestWasmOutput],
poolSize: 2,
test: func(t *testing.T, handler http.Handler, log fmt.Stringer) {
// Service more requests than the pool size to ensure it works properly.
for i := 0; i < 3; i++ {
r := httptest.NewRequest(http.MethodGet, "/", nil)
w := httptest.NewRecorder()
handler.ServeHTTP(w, r)
}
// We expect to see initialization (main) twice, once for each
// module in the pool. We expect to see request[1] which shows
// round-robin back to the first module in the pool.
require.Equal(t, `Info(main ConsoleLog)
Info(main ConsoleLog)
Info(request[0] ConsoleLog)
Debug(wasm stdout: main Stdout
main Stdout
request[0] Stdout
)
Debug(wasm stderr: main Stderr
main Stderr
request[0] Stderr
)
Info(request[0] ConsoleLog)
Debug(wasm stdout: request[0] Stdout
)
Debug(wasm stderr: request[0] Stderr
)
Info(request[1] ConsoleLog)
Debug(wasm stdout: request[1] Stdout
)
Debug(wasm stderr: request[1] Stderr
)
`, log.String())
},
},
}
h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})
for _, tt := range tests {
tc := tt
t.Run(tc.name, func(t *testing.T) {
poolSize := "1"
if tc.poolSize > 0 {
poolSize = strconv.Itoa(tc.poolSize)
}
wasmPath := path.Join(t.TempDir(), "guest.wasm")
require.NoError(t, os.WriteFile(wasmPath, tc.guest, 0o600))
meta := metadata.Base{Properties: map[string]string{"path": wasmPath, "poolSize": poolSize}}
l := test.NewLogger()
handlerFn, err := basic.NewMiddleware(l).GetHandler(middleware.Metadata{Base: meta})
require.NoError(t, err)
tc.test(t, handlerFn(h), l.(fmt.Stringer))
})
}
}

View File

@ -1,81 +0,0 @@
package test
import (
"bytes"
"fmt"
"github.com/dapr/kit/logger"
)
// compile-time check to ensure testLogger implements logger.testLogger.
var _ logger.Logger = &testLogger{}
func NewLogger() logger.Logger {
var buf bytes.Buffer
return &testLogger{buf: &buf}
}
type testLogger struct {
buf *bytes.Buffer
}
func (l *testLogger) String() string {
return l.buf.String()
}
func (l *testLogger) EnableJSONOutput(enabled bool) {
fmt.Fprintf(l.buf, "EnableJSONOutput(%v)\n", enabled)
}
func (l *testLogger) SetAppID(id string) {
fmt.Fprintf(l.buf, "SetAppID(%v)\n", id)
}
func (l *testLogger) SetOutputLevel(outputLevel logger.LogLevel) {
fmt.Fprintf(l.buf, "SetOutputLevel(%v)\n", outputLevel)
}
func (l *testLogger) WithLogType(logType string) logger.Logger {
fmt.Fprintf(l.buf, "WithLogType(%v)\n", logType)
return l
}
func (l *testLogger) Info(args ...interface{}) {
fmt.Fprintf(l.buf, "Info(%v)\n", fmt.Sprint(args...))
}
func (l *testLogger) Infof(format string, args ...interface{}) {
fmt.Fprintf(l.buf, "Info(%v)\n", fmt.Sprintf(format, args...))
}
func (l *testLogger) Debug(args ...interface{}) {
fmt.Fprintf(l.buf, "Debug(%v)\n", fmt.Sprint(args...))
}
func (l *testLogger) Debugf(format string, args ...interface{}) {
fmt.Fprintf(l.buf, "Debug(%v)\n", fmt.Sprintf(format, args...))
}
func (l *testLogger) Warn(args ...interface{}) {
fmt.Fprintf(l.buf, "Warn(%v)\n", fmt.Sprint(args...))
}
func (l *testLogger) Warnf(format string, args ...interface{}) {
fmt.Fprintf(l.buf, "Warn(%v)\n", fmt.Sprintf(format, args...))
}
func (l *testLogger) Error(args ...interface{}) {
fmt.Fprintf(l.buf, "Error(%v)\n", fmt.Sprint(args...))
}
func (l *testLogger) Errorf(format string, args ...interface{}) {
fmt.Fprintf(l.buf, "Error(%v)\n", fmt.Sprintf(format, args...))
}
func (l *testLogger) Fatal(args ...interface{}) {
fmt.Fprintf(l.buf, "Fatal(%v)\n", fmt.Sprint(args...))
}
func (l *testLogger) Fatalf(format string, args ...interface{}) {
fmt.Fprintf(l.buf, "Fatal(%v)\n", fmt.Sprintf(format, args...))
}

View File

@ -0,0 +1,140 @@
package wasm
import (
"fmt"
"io"
"net/http"
"net/url"
"testing"
dapr "github.com/dapr/components-contrib/middleware"
"github.com/dapr/kit/logger"
"github.com/dapr/components-contrib/internal/httputils"
"github.com/dapr/components-contrib/metadata"
)
const parallel = 10
func BenchmarkNative(b *testing.B) {
benchmarkAll(b, func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if httputils.RequestURI(r) == "/v1.0/hi?name=panda" {
httputils.SetRequestURI(r, "/v1.0/hello?name=teddy")
}
next.ServeHTTP(w, r)
})
})
}
func BenchmarkTinygo(b *testing.B) {
path := "./internal/e2e-guests/rewrite/main.wasm"
benchmarkMiddleware(b, path)
}
// BenchmarkWat gives baseline performance for the same handler by
// writing it directly in WebAssembly Text Format.
func BenchmarkWat(b *testing.B) {
path := "./internal/testdata/rewrite.wasm"
benchmarkMiddleware(b, path)
}
func benchmarkMiddleware(b *testing.B, path string) {
md := metadata.Base{Properties: map[string]string{"path": path}}
l := logger.NewLogger(b.Name())
l.SetOutput(io.Discard)
handlerFn, err := NewMiddleware(l).GetHandler(dapr.Metadata{Base: md})
if err != nil {
b.Fatal(err)
}
benchmarkAll(b, handlerFn)
}
var benches = map[string]struct {
newRequest func() *http.Request
test http.Handler
}{
"rewrite": {
newRequest: func() *http.Request {
u, err := url.Parse("https://test.io/v1.0/hi?name=panda")
if err != nil {
panic(err)
}
return &http.Request{
Method: http.MethodGet,
URL: u,
Host: "test.io",
Header: map[string][]string{
"User-Agent": {"curl/7.79.1"},
"Accept": {"*/*"},
},
}
},
test: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if method := r.Method; method != http.MethodGet {
body := fmt.Sprintf("Unexpected request method: %q", method)
httputils.RespondWithErrorAndMessage(w, http.StatusInternalServerError, body)
}
if path := r.URL.Path; path != "/v1.0/hello" {
body := fmt.Sprintf("Expected wasm to rewrite path: %s", path)
httputils.RespondWithErrorAndMessage(w, http.StatusInternalServerError, body)
}
if query := r.URL.RawQuery; query != "name=teddy" {
body := fmt.Sprintf("Expected wasm to retain query: %s", query)
httputils.RespondWithErrorAndMessage(w, http.StatusInternalServerError, body)
}
w.Header().Set("Content-Type", "text/plain")
if _, err := w.Write([]byte("Hello, world!")); err != nil {
panic(err)
}
}),
},
}
func benchmarkAll(b *testing.B, mw func(http.Handler) http.Handler) {
for n, s := range benches {
b.Run(n, func(b *testing.B) {
b.SetParallelism(parallel)
benchmark(b, mw, n, s.newRequest, s.test)
})
}
}
func benchmark(
b *testing.B,
mw func(http.Handler) http.Handler,
name string,
newRequest func() *http.Request,
test http.Handler,
) {
h := mw(test)
b.Run(name, func(b *testing.B) {
// We don't report allocations because memory allocations for TinyGo are
// in wasm which isn't visible to the Go benchmark.
for i := 0; i < b.N; i++ {
h.ServeHTTP(fakeResponseWriter{}, newRequest())
}
})
}
// Compile-time check that fakeResponseWriter satisfies http.ResponseWriter.
var _ http.ResponseWriter = fakeResponseWriter{}

// fakeResponseWriter discards everything written to it; the benchmarks only
// need a sink, not a real response.
type fakeResponseWriter struct{}

// Header returns an empty, throwaway header map.
func (rw fakeResponseWriter) Header() http.Header {
	return http.Header{}
}

// Write pretends the whole payload was written successfully.
func (rw fakeResponseWriter) Write(b []byte) (int, error) {
	return len(b), nil
}

// WriteHeader panics on any non-200 status: none of the benchmark fixtures
// should produce a failure response, so one indicates bad test data.
func (rw fakeResponseWriter) WriteHeader(statusCode int) {
	if statusCode != 200 {
		panic(statusCode)
	}
}

View File

@ -0,0 +1,3 @@
build:
@echo "Building Example Guest Wasm"
@tinygo build -o router.wasm -scheduler=none --no-debug -target=wasi router.go

View File

@ -2,6 +2,6 @@ module github.com/dapr/components-contrib/middleware/wasm/example
go 1.19 go 1.19
require github.com/wapc/wapc-guest-tinygo v0.3.3
replace github.com/dapr/dapr => github.com/1046102779/dapr v0.0.0-20221021130037-635b70c24259 replace github.com/dapr/dapr => github.com/1046102779/dapr v0.0.0-20221021130037-635b70c24259
require github.com/http-wasm/http-wasm-guest-tinygo v0.1.0

View File

@ -0,0 +1,2 @@
github.com/http-wasm/http-wasm-guest-tinygo v0.1.0 h1:vcYHJkbfQ2G0bD/zupIzHe/h1LZQJiVGdn5eZZTJM88=
github.com/http-wasm/http-wasm-guest-tinygo v0.1.0/go.mod h1:/3UO8OXP9nxe7d2qJ5ifTVkqM7KjaXxUZLoqBsDXpy0=

View File

@ -0,0 +1,27 @@
package main
import (
"strings"
"github.com/http-wasm/http-wasm-guest-tinygo/handler"
"github.com/http-wasm/http-wasm-guest-tinygo/handler/api"
)
func main() {
handler.HandleRequestFn = handleRequest
}
// handleRequest implements a simple HTTP router.
func handleRequest(req api.Request, resp api.Response) (next bool, reqCtx uint32) {
// If the URI starts with /host, trim it and dispatch to the next handler.
if uri := req.GetURI(); strings.HasPrefix(uri, "/host") {
req.SetURI(uri[5:])
next = true // proceed to the next handler on the host.
return
}
// Serve a static response
resp.Headers().Set("Content-Type", "text/plain")
resp.Body().WriteString("hello")
return // skip the next handler, as we wrote a response.
}

Binary file not shown.

View File

@ -0,0 +1,169 @@
package wasm
import (
"bytes"
"context"
"crypto/rand"
"encoding/json"
"errors"
"fmt"
"net/http"
"os"
"github.com/http-wasm/http-wasm-host-go/handler"
wasmnethttp "github.com/http-wasm/http-wasm-host-go/handler/nethttp"
"github.com/http-wasm/http-wasm-host-go/api"
"github.com/tetratelabs/wazero"
dapr "github.com/dapr/components-contrib/middleware"
"github.com/dapr/kit/logger"
)
// ctx substitutes for context propagation until middleware APIs support it.
var ctx = context.Background()
// middlewareMetadata includes configuration used for the WebAssembly handler.
// Detailed notes are in README.md for visibility.
//
// Note: When changing this, you must update the docs with summary comments per
// field.
// https://github.com/dapr/docs/blob/v1.9/daprdocs/content/en/reference/components-reference/supported-middleware/middleware-wasm.md
type middlewareMetadata struct {
// Path is where to load a `%.wasm` file that implements the guest side of
// the handler protocol. No default.
Path string `json:"path"`
// guest is WebAssembly binary implementing the waPC guest, loaded from Path.
guest []byte
}
type middleware struct {
logger logger.Logger
}
func NewMiddleware(logger logger.Logger) dapr.Middleware {
return &middleware{logger: logger}
}
func (m *middleware) GetHandler(metadata dapr.Metadata) (func(next http.Handler) http.Handler, error) {
rh, err := m.getHandler(metadata)
if err != nil {
return nil, err
}
return rh.requestHandler, nil
}
// getHandler is extracted for unit testing. It parses the component metadata,
// then compiles the guest wasm into an http-wasm middleware whose stdout and
// stderr are captured into buffers drained per request by requestHandler.
func (m *middleware) getHandler(metadata dapr.Metadata) (*requestHandler, error) {
	meta, err := m.getMetadata(metadata)
	if err != nil {
		return nil, fmt.Errorf("wasm basic: failed to parse metadata: %w", err)
	}

	var stdout, stderr bytes.Buffer
	mw, err := wasmnethttp.NewMiddleware(ctx, meta.guest,
		handler.Logger(m),
		handler.ModuleConfig(wazero.NewModuleConfig().
			WithStdout(&stdout). // reset per request
			WithStderr(&stderr). // reset per request
			// The below violate sand-boxing, but allow code to behave as expected.
			WithRandSource(rand.Reader).
			WithSysWalltime().
			WithSysNanosleep())) // fixed: WithSysNanosleep was previously chained twice
	if err != nil {
		return nil, err
	}

	return &requestHandler{mw: mw, logger: m.logger, stdout: &stdout, stderr: &stderr}, nil
}
// IsEnabled implements the same method as documented on api.Logger. It maps
// the guest's level onto the dapr logger's level and reports whether output
// at that level would actually be emitted.
func (m *middleware) IsEnabled(level api.LogLevel) bool {
	var mapped logger.LogLevel
	switch level {
	case api.LogLevelDebug:
		mapped = logger.DebugLevel
	case api.LogLevelInfo:
		mapped = logger.InfoLevel
	case api.LogLevelWarn:
		mapped = logger.WarnLevel
	case api.LogLevelError:
		mapped = logger.ErrorLevel
	default: // same as api.LogLevelNone
		return false
	}
	return m.logger.IsOutputLevelEnabled(mapped)
}
// Log implements the same method as documented on api.Logger, forwarding the
// guest's message to the dapr logger at the equivalent level. Unknown levels
// (same as api.LogLevelNone) are dropped.
func (m *middleware) Log(_ context.Context, level api.LogLevel, message string) {
	switch level {
	case api.LogLevelError:
		m.logger.Error(message)
	case api.LogLevelWarn:
		m.logger.Warn(message)
	case api.LogLevelInfo:
		m.logger.Info(message)
	case api.LogLevelDebug:
		m.logger.Debug(message)
	default: // same as api.LogLevelNone
		return
	}
}
// getMetadata decodes the component properties into middlewareMetadata by
// round-tripping them through JSON, validates that a wasm path was supplied,
// and eagerly reads the guest binary so configuration errors surface at
// init time rather than per request.
func (m *middleware) getMetadata(metadata dapr.Metadata) (*middlewareMetadata, error) {
	b, err := json.Marshal(metadata.Properties)
	if err != nil {
		return nil, err
	}
	var data middlewareMetadata
	err = json.Unmarshal(b, &data)
	if err != nil {
		return nil, err
	}
	if data.Path == "" {
		return nil, errors.New("missing path")
	}
	data.guest, err = os.ReadFile(data.Path)
	if err != nil {
		return nil, fmt.Errorf("error reading path: %w", err)
	}
	return &data, nil
}
type requestHandler struct {
mw wasmnethttp.Middleware
logger logger.Logger
stdout, stderr *bytes.Buffer
}
// requestHandler wraps next with the compiled wasm middleware and drains the
// guest's stdout/stderr into debug logs after each request.
func (rh *requestHandler) requestHandler(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		h := rh.mw.NewHandler(r.Context(), next)
		// Runs after the reads below, clearing the shared buffers for the
		// next request.
		// NOTE(review): stdout/stderr buffers are shared across requests, so
		// concurrent requests could interleave output — confirm serialization
		// is handled upstream.
		defer func() {
			rh.stdout.Reset()
			rh.stderr.Reset()
		}()
		h.ServeHTTP(w, r)
		if stdout := rh.stdout.String(); len(stdout) > 0 {
			rh.logger.Debugf("wasm stdout: %s", stdout)
		}
		if stderr := rh.stderr.String(); len(stderr) > 0 {
			rh.logger.Debugf("wasm stderr: %s", stderr)
		}
	})
}
// Close implements io.Closer, releasing the resources held by the compiled
// http-wasm middleware.
func (rh *requestHandler) Close() error {
	return rh.mw.Close(ctx)
}

View File

@ -0,0 +1,148 @@
package wasm
import (
"bytes"
_ "embed"
"net/http"
"net/http/httptest"
"testing"
"github.com/dapr/components-contrib/internal/httputils"
"github.com/dapr/components-contrib/metadata"
"github.com/http-wasm/http-wasm-host-go/api"
"github.com/stretchr/testify/require"
dapr "github.com/dapr/components-contrib/middleware"
"github.com/dapr/kit/logger"
)
func Test_NewMiddleWare(t *testing.T) {
l := logger.NewLogger(t.Name())
require.Equal(t, &middleware{logger: l}, NewMiddleware(l))
}
func Test_middleware_log(t *testing.T) {
l := logger.NewLogger(t.Name())
var buf bytes.Buffer
l.SetOutput(&buf)
m := &middleware{logger: l}
message := "alert"
m.Log(ctx, api.LogLevelInfo, message)
require.Contains(t, buf.String(), `level=info msg=alert`)
}
func Test_middleware_getMetadata(t *testing.T) {
m := &middleware{}
type testCase struct {
name string
metadata metadata.Base
expected *middlewareMetadata
expectedErr string
}
tests := []testCase{
{
name: "empty path",
metadata: metadata.Base{Properties: map[string]string{}},
expectedErr: "missing path",
},
{
name: "path dir not file",
metadata: metadata.Base{Properties: map[string]string{
"path": "./example",
}},
// Below ends in "is a directory" in unix, and "The handle is invalid." in windows.
expectedErr: "error reading path: read ./example: ",
},
}
for _, tt := range tests {
tc := tt
t.Run(tc.name, func(t *testing.T) {
md, err := m.getMetadata(dapr.Metadata{Base: tc.metadata})
if tc.expectedErr == "" {
require.NoError(t, err)
require.Equal(t, tc.expected, md)
} else {
// Use substring match as the error can be different in Windows.
require.Contains(t, err.Error(), tc.expectedErr)
}
})
}
}
func Test_middleware_getHandler(t *testing.T) {
m := &middleware{logger: logger.NewLogger(t.Name())}
type testCase struct {
name string
metadata metadata.Base
expectedErr string
}
tests := []testCase{
// This just tests the error message prefixes properly. Otherwise, it is
// redundant to Test_middleware_getMetadata
{
name: "requires path metadata",
metadata: metadata.Base{Properties: map[string]string{}},
expectedErr: "wasm basic: failed to parse metadata: missing path",
},
// This is more than Test_middleware_getMetadata, as it ensures the
// contents are actually wasm.
{
name: "path not wasm",
metadata: metadata.Base{Properties: map[string]string{
"path": "./example/router.go",
}},
expectedErr: "wasm: error compiling guest: invalid binary",
},
{
name: "ok",
metadata: metadata.Base{Properties: map[string]string{
"path": "./example/router.wasm",
}},
},
}
for _, tt := range tests {
tc := tt
t.Run(tc.name, func(t *testing.T) {
h, err := m.getHandler(dapr.Metadata{Base: tc.metadata})
if tc.expectedErr == "" {
require.NoError(t, err)
require.NotNil(t, h.mw)
} else {
require.EqualError(t, err, tc.expectedErr)
}
})
}
}
// Test_Example runs the example router.wasm guest end to end, asserting that
// it rewrites the request URI in-process without emitting any log output.
func Test_Example(t *testing.T) {
	log := logger.NewLogger(t.Name())
	var logged bytes.Buffer
	log.SetOutput(&logged)

	props := map[string]string{
		// router.wasm was compiled via the following:
		// tinygo build -o router.wasm -scheduler=none --no-debug -target=wasi router.go`
		"path": "./example/router.wasm",
	}
	handlerFn, err := NewMiddleware(log).GetHandler(dapr.Metadata{Base: metadata.Base{Properties: props}})
	require.NoError(t, err)

	next := http.HandlerFunc(func(http.ResponseWriter, *http.Request) {})
	req := httptest.NewRequest(http.MethodGet, "/host/hi?name=panda", nil)
	handlerFn(next).ServeHTTP(httptest.NewRecorder(), req)

	require.Equal(t, "/hi?name=panda", httputils.RequestURI(req))
	require.Empty(t, logged.String())
}

View File

@ -2,6 +2,6 @@ module github.com/dapr/components-contrib/middleware/wasm/internal
go 1.19 go 1.19
require github.com/wapc/wapc-guest-tinygo v0.3.3
replace github.com/dapr/dapr => github.com/1046102779/dapr v0.0.0-20221021130037-635b70c24259 replace github.com/dapr/dapr => github.com/1046102779/dapr v0.0.0-20221021130037-635b70c24259
require github.com/http-wasm/http-wasm-guest-tinygo v0.1.0

View File

@ -0,0 +1,2 @@
github.com/http-wasm/http-wasm-guest-tinygo v0.1.0 h1:vcYHJkbfQ2G0bD/zupIzHe/h1LZQJiVGdn5eZZTJM88=
github.com/http-wasm/http-wasm-guest-tinygo v0.1.0/go.mod h1:/3UO8OXP9nxe7d2qJ5ifTVkqM7KjaXxUZLoqBsDXpy0=

View File

@ -6,22 +6,24 @@ import (
"fmt" "fmt"
"os" "os"
"github.com/wapc/wapc-guest-tinygo" "github.com/http-wasm/http-wasm-guest-tinygo/handler"
"github.com/http-wasm/http-wasm-guest-tinygo/handler/api"
) )
func main() { func main() {
fmt.Fprintln(os.Stdout, "main Stdout") fmt.Fprintln(os.Stdout, "main Stdout")
fmt.Fprintln(os.Stderr, "main Stderr") fmt.Fprintln(os.Stderr, "main Stderr")
wapc.ConsoleLog("main ConsoleLog") handler.Host.Log(api.LogLevelInfo, "main ConsoleLog")
wapc.RegisterFunctions(wapc.Functions{"rewrite": rewrite}) handler.HandleRequestFn = log
} }
var requestCount int var requestCount int
func rewrite(requestURI []byte) ([]byte, error) { func log(api.Request, api.Response) (next bool, reqCtx uint32) {
fmt.Fprintf(os.Stdout, "request[%d] Stdout\n", requestCount) fmt.Fprintf(os.Stdout, "request[%d] Stdout\n", requestCount)
fmt.Fprintf(os.Stderr, "request[%d] Stderr\n", requestCount) fmt.Fprintf(os.Stderr, "request[%d] Stderr\n", requestCount)
wapc.ConsoleLog(fmt.Sprintf("request[%d] ConsoleLog", requestCount)) handler.Host.Log(api.LogLevelInfo, fmt.Sprintf("request[%d] ConsoleLog", requestCount))
requestCount++ requestCount++
return requestURI, nil next = true
return
} }

Binary file not shown.

View File

@ -0,0 +1,21 @@
package main
import (
"github.com/http-wasm/http-wasm-guest-tinygo/handler"
"github.com/http-wasm/http-wasm-guest-tinygo/handler/api"
)
// main registers handleRequest as the http-wasm request handler. The wasm
// runtime calls main once, when the guest module is instantiated.
func main() {
	handler.HandleRequestFn = handleRequest
}
// handleRequest rewrites the request URI before dispatching to the next handler.
//
// Note: This is not a redirect, rather in-process routing.
func handleRequest(req api.Request, _ api.Response) (bool, uint32) {
	const matchURI = "/v1.0/hi?name=panda"
	const rewrittenURI = "/v1.0/hello?name=teddy"

	if req.GetURI() == matchURI {
		req.SetURI(rewrittenURI)
	}
	return true, 0 // continue to the next handler, no request context needed
}

Binary file not shown.

View File

@ -0,0 +1,148 @@
package internal_test
import (
"bytes"
"log"
"net/http"
"net/http/httptest"
"net/url"
"os"
"path"
"testing"
"github.com/dapr/components-contrib/middleware/http/wasm"
"github.com/dapr/kit/logger"
"github.com/dapr/components-contrib/metadata"
"github.com/stretchr/testify/require"
"github.com/dapr/components-contrib/middleware"
)
// guestWasm caches the compiled test guests, keyed by the names below.
// It is populated exactly once, in TestMain, before any test runs.
var guestWasm map[string][]byte

// Keys into guestWasm; each matches a directory under e2e-guests/.
const (
	guestWasmOutput  = "output"
	guestWasmRewrite = "rewrite"
)
// TestMain ensures we can read the test wasm prior to running e2e tests.
func TestMain(m *testing.M) {
wasms := []string{guestWasmOutput, guestWasmRewrite}
guestWasm = make(map[string][]byte, len(wasms))
for _, name := range wasms {
if wasm, err := os.ReadFile(path.Join("e2e-guests", name, "main.wasm")); err != nil {
log.Panicln(err)
} else {
guestWasm[name] = wasm
}
}
os.Exit(m.Run())
}
// Test_EndToEnd exercises the wasm HTTP middleware with real compiled guests,
// asserting both the log plumbing (console/stdout/stderr at the expected
// levels) and in-process URI rewriting.
func Test_EndToEnd(t *testing.T) {
	l := logger.NewLogger(t.Name())
	var buf bytes.Buffer
	l.SetOutputLevel(logger.DebugLevel)
	l.SetOutput(&buf)

	type testCase struct {
		name  string
		guest []byte
		test  func(t *testing.T, handler http.Handler, log *bytes.Buffer)
	}

	tests := []testCase{
		{
			name:  "consoleLog stdout and stderr",
			guest: guestWasm[guestWasmOutput],
			test: func(t *testing.T, handler http.Handler, log *bytes.Buffer) {
				// Fix: serve a real request/recorder instead of a nil
				// http.ResponseWriter and a zero-value http.Request, which
				// only worked because nothing dereferenced them.
				r := httptest.NewRequest(http.MethodGet, "/", nil)
				handler.ServeHTTP(httptest.NewRecorder(), r)

				// First, we expect any console logging written inline from
				// init (main) and the request[0-9] funcs to info level.
				//
				// Then, we expect to see stdout and stderr from both scopes
				// at debug level.
				for _, s := range []string{
					`level=info msg="main ConsoleLog"`,
					`level=info msg="request[0] ConsoleLog"`,
					`level=debug msg="wasm stdout: main Stdout\nrequest[0] Stdout\n"`,
					`level=debug msg="wasm stderr: main Stderr\nrequest[0] Stderr\n"`,
				} {
					require.Contains(t, log.String(), s)
				}
			},
		},
		{
			name:  "consoleLog multiple requests",
			guest: guestWasm[guestWasmOutput],
			test: func(t *testing.T, handler http.Handler, log *bytes.Buffer) {
				// Service more requests than one to ensure pooling works properly.
				for i := 0; i < 3; i++ {
					r := httptest.NewRequest(http.MethodGet, "/", nil)
					w := httptest.NewRecorder()
					handler.ServeHTTP(w, r)
				}

				// We expect to see initialization (main) twice, once for each
				// module in the pool. We expect to see request[1] which shows
				// round-robin back to the first module in the pool.
				for _, s := range []string{
					`level=info msg="main ConsoleLog"`,
					`level=info msg="request[0] ConsoleLog"`,
					`level=debug msg="wasm stdout: main Stdout\nrequest[0] Stdout\n"`,
					`level=debug msg="wasm stderr: main Stderr\nrequest[0] Stderr\n"`,
					`level=info msg="request[1] ConsoleLog"`,
					`level=debug msg="wasm stdout: request[1] Stdout\n"`,
					`level=debug msg="wasm stderr: request[1] Stderr\n"`,
				} {
					require.Contains(t, log.String(), s)
				}
			},
		},
		{
			name:  "rewrite",
			guest: guestWasm[guestWasmRewrite],
			test: func(t *testing.T, handler http.Handler, log *bytes.Buffer) {
				u, err := url.Parse("https://test.io/v1.0/hi?name=panda")
				if err != nil {
					panic(err)
				}
				r := &http.Request{
					Method: http.MethodGet,
					URL:    u,
					Host:   "test.io",
					Header: map[string][]string{
						"User-Agent": {"curl/7.79.1"},
						"Accept":     {"*/*"},
					},
				}
				// Fix: use a recorder rather than a nil ResponseWriter.
				handler.ServeHTTP(httptest.NewRecorder(), r)

				// The guest rewrites the URI in-process (not a redirect).
				require.Equal(t, "/v1.0/hello?name=teddy", r.URL.RequestURI())
			},
		},
	}

	for _, tt := range tests {
		tc := tt
		t.Run(tc.name, func(t *testing.T) {
			defer buf.Reset()

			// Each case writes its guest to a fresh temp dir so the
			// middleware loads exactly the wasm under test.
			wasmPath := path.Join(t.TempDir(), "guest.wasm")
			require.NoError(t, os.WriteFile(wasmPath, tc.guest, 0o600))

			meta := metadata.Base{Properties: map[string]string{"path": wasmPath}}
			handlerFn, err := wasm.NewMiddleware(l).GetHandler(middleware.Metadata{Base: meta})
			require.NoError(t, err)

			handler := handlerFn(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}))
			tc.test(t, handler, &buf)
		})
	}
}

Binary file not shown.

View File

@ -0,0 +1,92 @@
;; This is the same logic as ../e2e-guests/rewrite/main.go, but written in
;; WebAssembly to establish baseline performance. For example, TinyGo should be
;; slower than this, but other languages are unlikely to be faster.
(module $rewrite

  ;; get_uri writes the request URI value to memory, if it isn't larger than
  ;; the buffer size limit. The result is the actual URI length in bytes.
  (import "http_handler" "get_uri" (func $get_uri
    (param $buf i32) (param $buf_limit i32)
    (result (; uri_len ;) i32)))

  ;; set_uri overwrites the request URI with one read from memory.
  (import "http_handler" "set_uri" (func $set_uri
    (param $uri i32) (param $uri_len i32)))

  ;; http-wasm guests are required to export "memory", so that imported
  ;; functions like "log" can read memory.
  (memory (export "memory") 1 (; 1 page==64KB ;))

  ;; define the URI we expect to rewrite
  (global $match_uri i32 (i32.const 0))
  (data (i32.const 0) "/v1.0/hi?name=panda")
  (global $match_uri_len i32 (i32.const 19))

  ;; define the URI we rewrite matching requests to
  (global $new_uri i32 (i32.const 32))
  (data (i32.const 32) "/v1.0/hello?name=teddy")
  (global $new_uri_len i32 (i32.const 22))

  ;; buf is an arbitrary area to write data.
  (global $buf i32 (i32.const 1024))

  ;; clear_buf zeroes the scratch buffer.
  ;;
  ;; Fix: memory.fill's folded operands are (dest, value, length); the
  ;; original passed (dest, length, value), i.e. "fill 0 bytes with 19",
  ;; which made this function a no-op.
  (func $clear_buf
    (memory.fill
      (global.get $buf)
      (i32.const 0)
      (global.get $match_uri_len)))

  ;; handle rewrites the HTTP request URI
  (func (export "handle_request") (result (; ctx_next ;) i64)
    (local $uri_len i32)

    ;; First, read the uri into memory if not larger than our limit.

    ;; uri_len = get_uri(uri, match_uri_len)
    (local.set $uri_len
      (call $get_uri (global.get $buf) (global.get $match_uri_len)))

    ;; Next, if the length read is the same as our match uri, check to see if
    ;; the characters are the same.

    ;; if uri_len != match_uri_len { next() }
    (if (i32.eq (local.get $uri_len) (global.get $match_uri_len))
      (then (if (call $memeq ;; uri == match_uri
              (global.get $buf)
              (global.get $match_uri)
              (global.get $match_uri_len)) (then
        ;; Call the imported function that sets the HTTP uri.
        (call $set_uri ;; uri = new_uri
          (global.get $new_uri)
          (global.get $new_uri_len))))))

    ;; dispatch with the possibly rewritten uri.
    (call $clear_buf)
    (return (i64.const 1)))

  ;; handle_response is no-op as this is a request-only handler.
  (func (export "handle_response") (param $reqCtx i32) (param $is_error i32))

  ;; memeq is like memcmp except it returns 0 (ne) or 1 (eq)
  (func $memeq (param $ptr1 i32) (param $ptr2 i32) (param $len i32) (result i32)
    (local $i1 i32)
    (local $i2 i32)
    (local.set $i1 (local.get $ptr1)) ;; i1 := ptr1
    (local.set $i2 (local.get $ptr2)) ;; i2 := ptr2

    (loop $len_gt_zero
      ;; if mem[i1] != mem[i2]
      (if (i32.ne (i32.load8_u (local.get $i1)) (i32.load8_u (local.get $i2)))
        (then (return (i32.const 0)))) ;; return 0

      (local.set $i1 (i32.add (local.get $i1) (i32.const 1))) ;; i1++
      (local.set $i2 (i32.add (local.get $i2) (i32.const 1))) ;; i2++
      (local.set $len (i32.sub (local.get $len) (i32.const 1))) ;; $len--

      ;; if $len > 0 { continue } else { break }
      (br_if $len_gt_zero (i32.gt_s (local.get $len) (i32.const 0))))
    (i32.const 1)) ;; return 1
)

View File

@ -45,12 +45,15 @@ func (k *resolver) Init(metadata nameresolution.Metadata) error {
if err != nil { if err != nil {
return err return err
} }
if config, ok := configInterface.(map[string]string); ok { if config, ok := configInterface.(map[string]interface{}); ok {
clusterDomain := config[ClusterDomainKey] clusterDomainPtr := config[ClusterDomainKey]
if clusterDomainPtr != nil {
clusterDomain, _ := clusterDomainPtr.(string)
if clusterDomain != "" { if clusterDomain != "" {
k.clusterDomain = clusterDomain k.clusterDomain = clusterDomain
} }
} }
}
return nil return nil
} }

View File

@ -36,7 +36,7 @@ func TestResolve(t *testing.T) {
func TestResolveWithCustomClusterDomain(t *testing.T) { func TestResolveWithCustomClusterDomain(t *testing.T) {
resolver := NewResolver(logger.NewLogger("test")) resolver := NewResolver(logger.NewLogger("test"))
_ = resolver.Init(nameresolution.Metadata{ _ = resolver.Init(nameresolution.Metadata{
Configuration: map[string]string{ Configuration: map[string]interface{}{
"clusterDomain": "mydomain.com", "clusterDomain": "mydomain.com",
}, },
}) })

View File

@ -406,22 +406,6 @@ func (s *snsSqs) createSnsSqsSubscription(parentCtx context.Context, queueArn, t
return *subscribeOutput.SubscriptionArn, nil return *subscribeOutput.SubscriptionArn, nil
} }
func (s *snsSqs) removeSnsSqsSubscription(parentCtx context.Context, subscriptionArn string) error {
ctx, cancel := context.WithTimeout(parentCtx, s.opsTimeout)
_, err := s.snsClient.UnsubscribeWithContext(ctx, &sns.UnsubscribeInput{
SubscriptionArn: aws.String(subscriptionArn),
})
cancel()
if err != nil {
wrappedErr := fmt.Errorf("error unsubscribing to arn: %s %w", subscriptionArn, err)
s.logger.Error(wrappedErr)
return wrappedErr
}
return nil
}
func (s *snsSqs) getSnsSqsSubscriptionArn(parentCtx context.Context, topicArn string) (string, error) { func (s *snsSqs) getSnsSqsSubscriptionArn(parentCtx context.Context, topicArn string) (string, error) {
ctx, cancel := context.WithTimeout(parentCtx, s.opsTimeout) ctx, cancel := context.WithTimeout(parentCtx, s.opsTimeout)
listSubscriptionsOutput, err := s.snsClient.ListSubscriptionsByTopicWithContext(ctx, &sns.ListSubscriptionsByTopicInput{TopicArn: aws.String(topicArn)}) listSubscriptionsOutput, err := s.snsClient.ListSubscriptionsByTopicWithContext(ctx, &sns.ListSubscriptionsByTopicInput{TopicArn: aws.String(topicArn)})
@ -817,7 +801,7 @@ func (s *snsSqs) Subscribe(subscribeCtx context.Context, req pubsub.SubscribeReq
} }
// subscription creation is idempotent. Subscriptions are unique by topic/queue. // subscription creation is idempotent. Subscriptions are unique by topic/queue.
subscriptionArn, err := s.getOrCreateSnsSqsSubscription(subscribeCtx, queueInfo.arn, topicArn) _, err = s.getOrCreateSnsSqsSubscription(subscribeCtx, queueInfo.arn, topicArn)
if err != nil { if err != nil {
wrappedErr := fmt.Errorf("error subscribing topic: %s, to queue: %s, with error: %w", topicArn, queueInfo.arn, err) wrappedErr := fmt.Errorf("error subscribing topic: %s, to queue: %s, with error: %w", topicArn, queueInfo.arn, err)
s.logger.Error(wrappedErr) s.logger.Error(wrappedErr)
@ -855,13 +839,6 @@ func (s *snsSqs) Subscribe(subscribeCtx context.Context, req pubsub.SubscribeReq
// Remove the handler // Remove the handler
delete(s.topicHandlers, sanitizedName) delete(s.topicHandlers, sanitizedName)
// If we can perform management operations, remove the subscription entirely
if !s.metadata.disableEntityManagement {
// Use a background context because subscribeCtx is canceled already
// Error is logged already
_ = s.removeSnsSqsSubscription(s.ctx, subscriptionArn)
}
// If we don't have any topic left, close the poller // If we don't have any topic left, close the poller
if len(s.topicHandlers) == 0 { if len(s.topicHandlers) == 0 {
s.pollerCancel() s.pollerCancel()

View File

@ -52,11 +52,11 @@ func createIotHubPubsubMetadata() pubsub.Metadata {
metadata := pubsub.Metadata{ metadata := pubsub.Metadata{
Base: metadata.Base{ Base: metadata.Base{
Properties: map[string]string{ Properties: map[string]string{
connectionString: os.Getenv(iotHubConnectionStringEnvKey), "connectionString": os.Getenv(iotHubConnectionStringEnvKey),
consumerID: os.Getenv(iotHubConsumerGroupEnvKey), "consumerID": os.Getenv(iotHubConsumerGroupEnvKey),
storageAccountName: os.Getenv(storageAccountNameEnvKey), "storageAccountName": os.Getenv(storageAccountNameEnvKey),
storageAccountKey: os.Getenv(storageAccountKeyEnvKey), "storageAccountKey": os.Getenv(storageAccountKeyEnvKey),
storageContainerName: testStorageContainerName, "storageContainerName": testStorageContainerName,
}, },
}, },
} }

View File

@ -57,6 +57,9 @@ func (js *jetstreamPubSub) Init(metadata pubsub.Metadata) error {
} else if js.meta.tlsClientCert != "" && js.meta.tlsClientKey != "" { } else if js.meta.tlsClientCert != "" && js.meta.tlsClientKey != "" {
js.l.Debug("Configure nats for tls client authentication") js.l.Debug("Configure nats for tls client authentication")
opts = append(opts, nats.ClientCert(js.meta.tlsClientCert, js.meta.tlsClientKey)) opts = append(opts, nats.ClientCert(js.meta.tlsClientCert, js.meta.tlsClientKey))
} else if js.meta.token != "" {
js.l.Debug("Configure nats for token authentication")
opts = append(opts, nats.Token(js.meta.token))
} }
js.nc, err = nats.Connect(js.meta.natsURL, opts...) js.nc, err = nats.Connect(js.meta.natsURL, opts...)
@ -88,57 +91,78 @@ func (js *jetstreamPubSub) Features() []pubsub.Feature {
} }
func (js *jetstreamPubSub) Publish(req *pubsub.PublishRequest) error { func (js *jetstreamPubSub) Publish(req *pubsub.PublishRequest) error {
js.l.Debugf("Publishing topic %v with data: %v", req.Topic, req.Data) var opts []nats.PubOpt
_, err := js.jsc.Publish(req.Topic, req.Data) var msgID string
event, err := pubsub.FromCloudEvent(req.Data, "", "", "", "")
if err != nil {
js.l.Debugf("error unmarshalling cloudevent: %v", err)
} else {
// Use the cloudevent id as the Nats-MsgId for deduplication
if id, ok := event["id"].(string); ok {
msgID = id
opts = append(opts, nats.MsgId(msgID))
}
}
if msgID == "" {
js.l.Warn("empty message ID, Jetstream deduplication will not be possible")
}
js.l.Debugf("Publishing to topic %v id: %s", req.Topic, msgID)
_, err = js.jsc.Publish(req.Topic, req.Data, opts...)
return err return err
} }
func (js *jetstreamPubSub) Subscribe(ctx context.Context, req pubsub.SubscribeRequest, handler pubsub.Handler) error { func (js *jetstreamPubSub) Subscribe(ctx context.Context, req pubsub.SubscribeRequest, handler pubsub.Handler) error {
var opts []nats.SubOpt var consumerConfig nats.ConsumerConfig
consumerConfig.DeliverSubject = nats.NewInbox()
if v := js.meta.durableName; v != "" { if v := js.meta.durableName; v != "" {
opts = append(opts, nats.Durable(v)) consumerConfig.Durable = v
} }
if v := js.meta.startTime; !v.IsZero() { if v := js.meta.startTime; !v.IsZero() {
opts = append(opts, nats.StartTime(v)) consumerConfig.OptStartTime = &v
} else if v := js.meta.startSequence; v > 0 { } else if v := js.meta.startSequence; v > 0 {
opts = append(opts, nats.StartSequence(v)) consumerConfig.OptStartSeq = v
} else if js.meta.deliverAll { } else if js.meta.deliverAll {
opts = append(opts, nats.DeliverAll()) consumerConfig.DeliverPolicy = nats.DeliverAllPolicy
} else { } else {
opts = append(opts, nats.DeliverLast()) consumerConfig.DeliverPolicy = nats.DeliverLastPolicy
} }
if js.meta.flowControl { if js.meta.flowControl {
opts = append(opts, nats.EnableFlowControl()) consumerConfig.FlowControl = true
} }
if js.meta.ackWait != 0 { if js.meta.ackWait != 0 {
opts = append(opts, nats.AckWait(js.meta.ackWait)) consumerConfig.AckWait = js.meta.ackWait
} }
if js.meta.maxDeliver != 0 { if js.meta.maxDeliver != 0 {
opts = append(opts, nats.MaxDeliver(js.meta.maxDeliver)) consumerConfig.MaxDeliver = js.meta.maxDeliver
} }
if len(js.meta.backOff) != 0 { if len(js.meta.backOff) != 0 {
opts = append(opts, nats.BackOff(js.meta.backOff)) consumerConfig.BackOff = js.meta.backOff
} }
if js.meta.maxAckPending != 0 { if js.meta.maxAckPending != 0 {
opts = append(opts, nats.MaxAckPending(js.meta.maxAckPending)) consumerConfig.MaxAckPending = js.meta.maxAckPending
} }
if js.meta.replicas != 0 { if js.meta.replicas != 0 {
opts = append(opts, nats.ConsumerReplicas(js.meta.replicas)) consumerConfig.Replicas = js.meta.replicas
} }
if js.meta.memoryStorage { if js.meta.memoryStorage {
opts = append(opts, nats.ConsumerMemoryStorage()) consumerConfig.MemoryStorage = true
} }
if js.meta.rateLimit != 0 { if js.meta.rateLimit != 0 {
opts = append(opts, nats.RateLimit(js.meta.rateLimit)) consumerConfig.RateLimit = js.meta.rateLimit
} }
if js.meta.hearbeat != 0 { if js.meta.hearbeat != 0 {
opts = append(opts, nats.IdleHeartbeat(js.meta.hearbeat)) consumerConfig.Heartbeat = js.meta.hearbeat
} }
consumerConfig.FilterSubject = req.Topic
natsHandler := func(m *nats.Msg) { natsHandler := func(m *nats.Msg) {
jsm, err := m.Metadata() jsm, err := m.Metadata()
@ -176,14 +200,27 @@ func (js *jetstreamPubSub) Subscribe(ctx context.Context, req pubsub.SubscribeRe
} }
var err error var err error
streamName := js.meta.streamName
if streamName == "" {
streamName, err = js.jsc.StreamNameBySubject(req.Topic)
if err != nil {
return err
}
}
var subscription *nats.Subscription var subscription *nats.Subscription
consumerInfo, err := js.jsc.AddConsumer(streamName, &consumerConfig)
if err != nil {
return err
}
if queue := js.meta.queueGroupName; queue != "" { if queue := js.meta.queueGroupName; queue != "" {
js.l.Debugf("nats: subscribed to subject %s with queue group %s", js.l.Debugf("nats: subscribed to subject %s with queue group %s",
req.Topic, js.meta.queueGroupName) req.Topic, js.meta.queueGroupName)
subscription, err = js.jsc.QueueSubscribe(req.Topic, queue, natsHandler, opts...) subscription, err = js.jsc.QueueSubscribe(req.Topic, queue, natsHandler, nats.Bind(streamName, consumerInfo.Name))
} else { } else {
js.l.Debugf("nats: subscribed to subject %s", req.Topic) js.l.Debugf("nats: subscribed to subject %s", req.Topic)
subscription, err = js.jsc.Subscribe(req.Topic, natsHandler, opts...) subscription, err = js.jsc.Subscribe(req.Topic, natsHandler, nats.Bind(streamName, consumerInfo.Name))
} }
if err != nil { if err != nil {
return err return err

View File

@ -27,11 +27,13 @@ type metadata struct {
jwt string jwt string
seedKey string seedKey string
token string
tlsClientCert string tlsClientCert string
tlsClientKey string tlsClientKey string
name string name string
streamName string
durableName string durableName string
queueGroupName string queueGroupName string
startSequence uint64 startSequence uint64
@ -57,6 +59,7 @@ func parseMetadata(psm pubsub.Metadata) (metadata, error) {
return metadata{}, fmt.Errorf("missing nats URL") return metadata{}, fmt.Errorf("missing nats URL")
} }
m.token = psm.Properties["token"]
m.jwt = psm.Properties["jwt"] m.jwt = psm.Properties["jwt"]
m.seedKey = psm.Properties["seedKey"] m.seedKey = psm.Properties["seedKey"]
@ -141,5 +144,7 @@ func parseMetadata(psm pubsub.Metadata) (metadata, error) {
m.hearbeat = v m.hearbeat = v
} }
m.streamName = psm.Properties["streamName"]
return m, nil return m, nil
} }

View File

@ -71,6 +71,50 @@ func TestParseMetadata(t *testing.T) {
}, },
expectErr: false, expectErr: false,
}, },
{
desc: "Valid Metadata with token",
input: pubsub.Metadata{Base: mdata.Base{
Properties: map[string]string{
"natsURL": "nats://localhost:4222",
"name": "myName",
"durableName": "myDurable",
"queueGroupName": "myQueue",
"startSequence": "1",
"startTime": "1629328511",
"deliverAll": "true",
"flowControl": "true",
"ackWait": "2s",
"maxDeliver": "10",
"backOff": "500ms, 2s, 10s",
"maxAckPending": "5000",
"replicas": "3",
"memoryStorage": "true",
"rateLimit": "20000",
"hearbeat": "1s",
"token": "myToken",
},
}},
want: metadata{
natsURL: "nats://localhost:4222",
name: "myName",
durableName: "myDurable",
queueGroupName: "myQueue",
startSequence: 1,
startTime: time.Unix(1629328511, 0),
deliverAll: true,
flowControl: true,
ackWait: 2 * time.Second,
maxDeliver: 10,
backOff: []time.Duration{time.Millisecond * 500, time.Second * 2, time.Second * 10},
maxAckPending: 5000,
replicas: 3,
memoryStorage: true,
rateLimit: 20000,
hearbeat: time.Second * 1,
token: "myToken",
},
expectErr: false,
},
{ {
desc: "Invalid metadata with missing seed key", desc: "Invalid metadata with missing seed key",
input: pubsub.Metadata{Base: mdata.Base{ input: pubsub.Metadata{Base: mdata.Base{
@ -126,7 +170,7 @@ func TestParseMetadata(t *testing.T) {
expectErr: true, expectErr: true,
}, },
{ {
desc: "Invalid metadata with missing tls client client", desc: "Invalid metadata with missing tls client",
input: pubsub.Metadata{Base: mdata.Base{ input: pubsub.Metadata{Base: mdata.Base{
Properties: map[string]string{ Properties: map[string]string{
"natsURL": "nats://localhost:4222", "natsURL": "nats://localhost:4222",

81
pubsub/kubemq/kubemq.go Normal file
View File

@ -0,0 +1,81 @@
package kubemq
import (
"context"
"fmt"
"time"
"github.com/google/uuid"
"github.com/dapr/components-contrib/pubsub"
"github.com/dapr/kit/logger"
)
// kubeMQ is the Dapr pub/sub component for KubeMQ. Depending on the parsed
// metadata it delegates to either the events client or the events-store
// client; exactly one of the two is non-nil after Init.
type kubeMQ struct {
	metadata         *metadata          // parsed component configuration, set in Init
	logger           logger.Logger
	ctx              context.Context    // component lifetime context, created in Init
	ctxCancel        context.CancelFunc // cancels ctx; NOTE(review): never invoked by Close — confirm intended
	eventsClient     *kubeMQEvents      // used when metadata.isStore is false
	eventStoreClient *kubeMQEventStore  // used when metadata.isStore is true
}
// NewKubeMQ creates a new, uninitialized KubeMQ pub/sub component; call Init
// before use.
func NewKubeMQ(log logger.Logger) pubsub.PubSub {
	k := &kubeMQ{logger: log}
	return k
}
// Init parses the component metadata and constructs the underlying KubeMQ
// client: an events-store client when metadata.isStore is set, otherwise a
// plain events client.
//
// NOTE(review): errors from the inner Init calls are deliberately discarded.
// For the events client this is safe because it retries initialization
// lazily on Publish/Subscribe; confirm the events-store client does the same.
func (k *kubeMQ) Init(metadata pubsub.Metadata) error {
	meta, err := createMetadata(metadata)
	if err != nil {
		k.logger.Errorf("error init kubemq client error: %s", err.Error())
		return err
	}
	k.metadata = meta
	k.ctx, k.ctxCancel = context.WithCancel(context.Background())
	if meta.isStore {
		k.eventStoreClient = newKubeMQEventsStore(k.logger)
		_ = k.eventStoreClient.Init(meta)
	} else {
		k.eventsClient = newkubeMQEvents(k.logger)
		_ = k.eventsClient.Init(meta)
	}
	return nil
}
// Features returns nil: this component advertises no optional pub/sub
// features.
func (k *kubeMQ) Features() []pubsub.Feature {
	return nil
}
// Publish forwards the request to the events-store client when the component
// is configured as a store, otherwise to the plain events client.
func (k *kubeMQ) Publish(req *pubsub.PublishRequest) error {
	if k.metadata.isStore {
		return k.eventStoreClient.Publish(req)
	}
	return k.eventsClient.Publish(req)
}
// Subscribe forwards the subscription to the events-store client when the
// component is configured as a store, otherwise to the plain events client.
func (k *kubeMQ) Subscribe(ctx context.Context, req pubsub.SubscribeRequest, handler pubsub.Handler) error {
	if k.metadata.isStore {
		return k.eventStoreClient.Subscribe(ctx, req, handler)
	}
	return k.eventsClient.Subscribe(ctx, req, handler)
}
// Close cancels the component context and closes the underlying client.
//
// Fix: the original never invoked ctxCancel, leaking the context created in
// Init (go vet's lostcancel). It is now canceled, guarded for the case where
// Init was never called.
func (k *kubeMQ) Close() error {
	if k.ctxCancel != nil {
		k.ctxCancel()
	}
	if k.metadata.isStore {
		return k.eventStoreClient.Close()
	}
	return k.eventsClient.Close()
}
// getRandomID returns a fresh UUID string, falling back to the current
// UnixNano timestamp when UUID generation fails.
func getRandomID() string {
	if id, err := uuid.NewRandom(); err == nil {
		return id.String()
	}
	return fmt.Sprintf("%d", time.Now().UnixNano())
}

View File

@ -0,0 +1,177 @@
package kubemq
import (
"context"
"sync"
"time"
"github.com/kubemq-io/kubemq-go"
"github.com/dapr/components-contrib/pubsub"
"github.com/dapr/kit/logger"
)
// kubemqEventsClient abstracts the subset of the kubemq-go events client used
// by this component, allowing it to be mocked in unit tests.
type kubemqEventsClient interface {
	Stream(ctx context.Context, onError func(err error)) (func(msg *kubemq.Event) error, error)
	Subscribe(ctx context.Context, request *kubemq.EventsSubscription, onEvent func(msg *kubemq.Event, err error)) error
	Close() error
}
// kubeMQEvents implements pub/sub over plain KubeMQ "events" channels.
// Initialization is lazy: init() runs on first Publish/Subscribe and is
// guarded by lock/isInitialized.
type kubeMQEvents struct {
	lock                 sync.RWMutex
	client               kubemqEventsClient
	metadata             *metadata
	logger               logger.Logger
	publishFunc          func(event *kubemq.Event) error // send side of the client stream, set by setPublishStream
	resultChan           chan error                      // asynchronous publish results from the stream
	waitForResultTimeout time.Duration
	ctx                  context.Context
	ctxCancel            context.CancelFunc
	isInitialized        bool // guarded by lock
}
// newkubeMQEvents returns an uninitialized events client; the connection is
// established lazily by init() on first use.
func newkubeMQEvents(log logger.Logger) *kubeMQEvents {
	return &kubeMQEvents{
		logger:               log,
		resultChan:           make(chan error, 1),
		waitForResultTimeout: 60 * time.Second,
	}
}
// init lazily connects to KubeMQ and opens the publish stream. It is safe to
// call repeatedly: once initialization succeeds it becomes a no-op.
//
// Fixes over the original:
//   - re-checks isInitialized after acquiring the write lock, so two
//     concurrent callers cannot both build a client;
//   - creates the cancelable context exactly once (the original created a
//     second context after connecting, leaking the first CancelFunc and
//     leaving the client bound to a context that could never be canceled);
//   - logs the stream-setup error with %s (the original used %w, which is
//     only meaningful for fmt.Errorf, on an already-stringified error).
func (k *kubeMQEvents) init() error {
	k.lock.RLock()
	isInit := k.isInitialized
	k.lock.RUnlock()
	if isInit {
		return nil
	}
	k.lock.Lock()
	defer k.lock.Unlock()
	if k.isInitialized {
		// Another goroutine finished initialization while we waited.
		return nil
	}
	k.ctx, k.ctxCancel = context.WithCancel(context.Background())
	clientID := k.metadata.clientID
	if clientID == "" {
		clientID = getRandomID()
	}
	client, err := kubemq.NewEventsClient(k.ctx,
		kubemq.WithAddress(k.metadata.host, k.metadata.port),
		kubemq.WithClientId(clientID),
		kubemq.WithTransportType(kubemq.TransportTypeGRPC),
		kubemq.WithCheckConnection(true),
		kubemq.WithAuthToken(k.metadata.authToken),
		kubemq.WithAutoReconnect(true),
		kubemq.WithReconnectInterval(time.Second))
	if err != nil {
		k.logger.Errorf("error init kubemq client error: %s", err.Error())
		return err
	}
	k.client = client
	if err := k.setPublishStream(); err != nil {
		k.logger.Errorf("error init kubemq client error: %s", err.Error())
		return err
	}
	k.isInitialized = true
	return nil
}
// Init stores the metadata and eagerly attempts to connect. The connection
// error is intentionally discarded: init() is retried on every
// Publish/Subscribe, so a KubeMQ server that is temporarily unreachable at
// startup does not fail component initialization.
func (k *kubeMQEvents) Init(meta *metadata) error {
	k.metadata = meta
	_ = k.init()
	return nil
}
// setPublishStream opens the client's publish stream and wires stream errors
// into resultChan, dropping a result when the channel is already full.
func (k *kubeMQEvents) setPublishStream() error {
	onError := func(streamErr error) {
		select {
		case k.resultChan <- streamErr:
		default:
		}
	}
	publish, err := k.client.Stream(k.ctx, onError)
	k.publishFunc = publish
	return err
}
// Publish sends req.Data to the req.Topic channel as a KubeMQ event,
// (re)initializing the client on first use.
func (k *kubeMQEvents) Publish(req *pubsub.PublishRequest) error {
	if err := k.init(); err != nil {
		return err
	}
	k.logger.Debugf("kubemq pub/sub: publishing message to %s", req.Topic)
	event := kubemq.Event{
		Channel:  req.Topic,
		Body:     req.Data,
		ClientId: k.metadata.clientID,
		Tags:     map[string]string{},
	}
	err := k.publishFunc(&event)
	if err == nil {
		return nil
	}
	k.logger.Errorf("kubemq pub/sub error: publishing to %s failed with error: %s", req.Topic, err.Error())
	return err
}
// Features returns nil: the events client advertises no optional pub/sub
// features.
func (k *kubeMQEvents) Features() []pubsub.Feature {
	return nil
}
// Subscribe registers handler for events on req.Topic. When the handler
// fails, the message is re-published to the same topic (unless
// disableReDelivery is set), since plain KubeMQ events carry no broker-side
// ack/nack.
func (k *kubeMQEvents) Subscribe(ctx context.Context, req pubsub.SubscribeRequest, handler pubsub.Handler) error {
	if err := k.init(); err != nil {
		return err
	}
	clientID := k.metadata.clientID
	if clientID == "" {
		// Each anonymous subscriber gets its own unique client id.
		clientID = getRandomID()
	}
	k.logger.Debugf("kubemq pub/sub: subscribing to %s", req.Topic)
	err := k.client.Subscribe(ctx, &kubemq.EventsSubscription{
		Channel:  req.Topic,
		Group:    k.metadata.group,
		ClientId: clientID,
	}, func(event *kubemq.Event, err error) {
		if err != nil {
			k.logger.Errorf("kubemq pub/sub error: subscribing to %s failed with error: %s", req.Topic, err.Error())
			return
		}
		// Drop messages that arrive after the subscription context is done.
		if ctx.Err() != nil {
			return
		}
		msg := &pubsub.NewMessage{
			Data:  event.Body,
			Topic: req.Topic,
		}
		// NOTE(review): the handler runs with the client's lifetime context
		// (k.ctx) rather than the subscription context — confirm intended.
		if err := handler(k.ctx, msg); err != nil {
			k.logger.Errorf("kubemq events pub/sub error: error handling message from topic '%s', %s", req.Topic, err.Error())
			if k.metadata.disableReDelivery {
				return
			}
			// Best-effort redelivery by republishing the failed message.
			if err := k.Publish(&pubsub.PublishRequest{
				Data:  msg.Data,
				Topic: msg.Topic,
			}); err != nil {
				k.logger.Errorf("kubemq pub/sub error: error resending message from topic '%s', %s", req.Topic, err.Error())
			}
		}
	})
	if err != nil {
		k.logger.Errorf("kubemq events pub/sub error: error subscribing to topic '%s', %s", req.Topic, err.Error())
		return err
	}
	// NOTE(review): fixed sleep, presumably to give the server time to
	// establish the subscription before returning — confirm whether this
	// delay is actually required; a synchronization signal would be better.
	time.Sleep(1 * time.Second)
	k.logger.Debugf("kubemq pub/sub: subscribed to %s completed", req.Topic)
	return nil
}
// Close cancels the client context and closes the KubeMQ connection.
//
// Fix: guard against a nil client — the original dereferenced k.client
// unconditionally and would panic when Close was called before a successful
// init (e.g. when the initial connection attempt in Init failed).
func (k *kubeMQEvents) Close() error {
	if k.ctxCancel != nil {
		k.ctxCancel()
	}
	if k.client == nil {
		return nil
	}
	return k.client.Close()
}

View File

@ -0,0 +1,205 @@
package kubemq
import (
"context"
"fmt"
"testing"
"time"
"github.com/kubemq-io/kubemq-go"
"github.com/stretchr/testify/assert"
"github.com/dapr/components-contrib/pubsub"
"github.com/dapr/kit/logger"
)
// kubemqEventsMock is a test double for kubemqEventsClient. The set* methods
// script its behavior: synchronous/asynchronous publish outcomes, subscribe
// errors, and an artificial delay before the async result is delivered.
type kubemqEventsMock struct {
	resultError    error         // result delivered asynchronously after publish
	subscribeErr   error         // returned by Subscribe
	resultCh       chan error    // carries resultError to the Stream callback
	publishError   error         // returned synchronously by publish
	publishTimeout time.Duration // delay before resultError is delivered
}
// publish mimics the stream's send function: it fails fast with publishError,
// otherwise delivers resultError on resultCh after publishTimeout.
func (k *kubemqEventsMock) publish(msg *kubemq.Event) error {
	if k.publishError != nil {
		return k.publishError
	}
	go func() {
		if k.publishTimeout > 0 {
			time.Sleep(k.publishTimeout)
		}
		k.resultCh <- k.resultError
	}()
	return nil
}
// Stream returns the mock publish function and pumps queued results into
// onError until ctx is canceled, mirroring the real client's stream loop.
func (k *kubemqEventsMock) Stream(ctx context.Context, onError func(err error)) (func(msg *kubemq.Event) error, error) {
	go func() {
		for {
			select {
			case <-ctx.Done():
				return
			case result := <-k.resultCh:
				onError(result)
			}
		}
	}()
	return k.publish, nil
}
// Subscribe returns the scripted subscribeErr and never delivers events.
func (k *kubemqEventsMock) Subscribe(ctx context.Context, request *kubemq.EventsSubscription, onEvent func(msg *kubemq.Event, err error)) error {
	return k.subscribeErr
}
// Close always succeeds; the mock holds no resources.
func (k *kubemqEventsMock) Close() error {
	return nil
}
// setResultError scripts the asynchronous result delivered after publish.
func (k *kubemqEventsMock) setResultError(err error) *kubemqEventsMock {
	k.resultError = err
	return k
}

// setSubscribeError scripts the error returned by Subscribe.
func (k *kubemqEventsMock) setSubscribeError(err error) *kubemqEventsMock {
	k.subscribeErr = err
	return k
}

// setPublishTimeout delays delivery of the asynchronous publish result.
func (k *kubemqEventsMock) setPublishTimeout(timeout time.Duration) *kubemqEventsMock {
	k.publishTimeout = timeout
	return k
}

// setPublishError scripts the synchronous error returned by publish.
func (k *kubemqEventsMock) setPublishError(err error) *kubemqEventsMock {
	k.publishError = err
	return k
}
// newKubemqEventsMock returns a mock with a buffered result channel so a
// single result can be queued without blocking the caller.
func newKubemqEventsMock() *kubemqEventsMock {
	return &kubemqEventsMock{
		resultError:  nil,
		subscribeErr: nil,
		resultCh:     make(chan error, 1),
	}
}
// Test_kubeMQEvents_Publish drives Publish against the mocked stream client,
// covering the success path and a synchronous publish failure.
func Test_kubeMQEvents_Publish(t *testing.T) {
	tests := []struct {
		name        string
		req         *pubsub.PublishRequest
		timeout     time.Duration // when set, delays the async result from the mock
		publishErr  error         // synchronous error from the stream send
		resultError error         // asynchronous result delivered by the stream
		wantErr     bool
	}{
		{
			name: "publish with no error",
			req: &pubsub.PublishRequest{
				Data:  []byte("data"),
				Topic: "some-topic",
			},
			resultError: nil,
			wantErr:     false,
		},
		{
			name: "publish with publish error",
			req: &pubsub.PublishRequest{
				Data:  []byte("data"),
				Topic: "some-topic",
			},
			resultError: nil,
			publishErr:  fmt.Errorf("some error"),
			wantErr:     true,
		},
	}
	for _, tt := range tests {
		k := newkubeMQEvents(logger.NewLogger("kubemq-test"))
		k.ctx, k.ctxCancel = context.WithCancel(context.Background())
		client := newKubemqEventsMock().
			setResultError(tt.resultError).
			setPublishError(tt.publishErr)
		// Mark initialized so Publish skips the real connection logic.
		k.isInitialized = true
		k.metadata = &metadata{
			host:      "",
			port:      0,
			clientID:  "some-client-id",
			authToken: "",
			group:     "",
			isStore:   false,
		}
		if tt.timeout > 0 {
			k.waitForResultTimeout = tt.timeout - 1*time.Second
			client.setPublishTimeout(tt.timeout)
		}
		k.client = client
		_ = k.setPublishStream()
		err := k.Publish(tt.req)
		if tt.wantErr {
			assert.Error(t, err)
		} else {
			assert.NoError(t, err)
		}
		_ = k.Features()
		_ = k.Close()
	}
}
// Test_kubeMQEvents_Subscribe verifies subscribing through the events
// client against a mocked KubeMQ connection, covering the success and
// subscribe-error paths.
func Test_kubeMQEvents_Subscribe(t *testing.T) {
	tests := []struct {
		name             string
		reqMsg           *pubsub.NewMessage
		subscribeError   error
		subscribeHandler pubsub.Handler
		wantErr          bool
	}{
		{
			name: "subscribe with no error",
			reqMsg: &pubsub.NewMessage{
				Data:  []byte("data"),
				Topic: "some-topic",
			},
			subscribeHandler: func(ctx context.Context, msg *pubsub.NewMessage) error {
				return nil
			},
			subscribeError: nil,
			wantErr:        false,
		}, {
			name: "subscribe with error",
			reqMsg: &pubsub.NewMessage{
				Data:  []byte("data"),
				Topic: "some-topic",
			},
			subscribeHandler: func(ctx context.Context, msg *pubsub.NewMessage) error {
				return nil
			},
			subscribeError: fmt.Errorf("some error"),
			wantErr:        true,
		},
	}
	for _, tt := range tests {
		tt := tt
		// Run each case as a named subtest so failures are attributable
		// to a specific scenario (tt.name was previously unused).
		t.Run(tt.name, func(t *testing.T) {
			k := newkubeMQEvents(logger.NewLogger("kubemq-test"))
			k.ctx, k.ctxCancel = context.WithCancel(context.Background())
			k.client = newKubemqEventsMock().
				setSubscribeError(tt.subscribeError)
			k.isInitialized = true
			k.metadata = &metadata{
				host:      "",
				port:      0,
				clientID:  "some-client-id",
				authToken: "",
				group:     "",
				isStore:   false,
			}
			err := k.Subscribe(k.ctx, pubsub.SubscribeRequest{Topic: "some-topic"}, tt.subscribeHandler)
			if tt.wantErr {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)
			}
			_ = k.Features()
			_ = k.Close()
		})
	}
}

View File

@ -0,0 +1,194 @@
package kubemq
import (
"context"
"fmt"
"sync"
"time"
"github.com/kubemq-io/kubemq-go"
"github.com/dapr/components-contrib/pubsub"
"github.com/dapr/kit/logger"
)
// interface used to allow unit testing.
type kubemqEventsStoreClient interface {
	// Stream opens the publish stream; the returned function sends one
	// EventStore message, and outcomes are reported via onResult.
	Stream(ctx context.Context, onResult func(result *kubemq.EventStoreResult, err error)) (func(msg *kubemq.EventStore) error, error)
	// Subscribe registers onEvent for messages matching the subscription.
	Subscribe(ctx context.Context, request *kubemq.EventsStoreSubscription, onEvent func(msg *kubemq.EventStoreReceive, err error)) error
	// Close tears down the underlying client connection.
	Close() error
}
// kubeMQEventStore is the pubsub implementation backed by the KubeMQ
// events-store (persistent) channel type.
type kubeMQEventStore struct {
	lock                 sync.RWMutex // guards isInitialized and the lazy setup done in init()
	client               kubemqEventsStoreClient
	metadata             *metadata
	logger               logger.Logger
	publishFunc          func(msg *kubemq.EventStore) error // send side of the publish stream
	resultChan           chan *kubemq.EventStoreResult      // publish results forwarded by the stream callback
	waitForResultTimeout time.Duration                      // how long Publish waits for a broker result
	ctx                  context.Context
	ctxCancel            context.CancelFunc
	isInitialized        bool
}
// newKubeMQEventsStore returns an unconnected events-store client; the
// actual connection is established later by init(). All remaining
// fields start at their zero values.
func newKubeMQEventsStore(log logger.Logger) *kubeMQEventStore {
	return &kubeMQEventStore{
		logger:               log,
		resultChan:           make(chan *kubemq.EventStoreResult, 1),
		waitForResultTimeout: 60 * time.Second,
	}
}
// init lazily connects to the KubeMQ server and wires up the publish
// stream. It is safe for concurrent use: a fast read-locked check
// skips work when already initialized, and the flag is re-checked
// under the write lock (double-checked locking) so only one goroutine
// performs the setup.
func (k *kubeMQEventStore) init() error {
	k.lock.RLock()
	isInit := k.isInitialized
	k.lock.RUnlock()
	if isInit {
		return nil
	}
	k.lock.Lock()
	defer k.lock.Unlock()
	// Re-check under the write lock: another goroutine may have
	// completed initialization between RUnlock and Lock above.
	if k.isInitialized {
		return nil
	}
	// Create the lifetime context exactly once; the previous code
	// created it twice, leaking the first cancel func.
	k.ctx, k.ctxCancel = context.WithCancel(context.Background())
	clientID := k.metadata.clientID
	if clientID == "" {
		clientID = getRandomID()
	}
	client, err := kubemq.NewEventsStoreClient(k.ctx,
		kubemq.WithAddress(k.metadata.host, k.metadata.port),
		kubemq.WithClientId(clientID),
		kubemq.WithTransportType(kubemq.TransportTypeGRPC),
		kubemq.WithCheckConnection(true),
		kubemq.WithAuthToken(k.metadata.authToken),
		kubemq.WithAutoReconnect(true),
		kubemq.WithReconnectInterval(time.Second))
	if err != nil {
		k.logger.Errorf("error init kubemq client error: %s", err.Error())
		return err
	}
	k.client = client
	if err := k.setPublishStream(); err != nil {
		// %s, not %w: Errorf logging does not support error wrapping.
		k.logger.Errorf("error init kubemq client error: %s", err.Error())
		return err
	}
	k.isInitialized = true
	return nil
}
// Init stores the component metadata and eagerly attempts to connect.
// The init() error is deliberately discarded so the component can come
// up before the KubeMQ server is reachable; init() is retried lazily
// on the first Publish/Subscribe call.
// NOTE(review): confirm that swallowing the connection error here is
// intentional rather than an oversight.
func (k *kubeMQEventStore) Init(meta *metadata) error {
	k.metadata = meta
	_ = k.init()
	return nil
}
// setPublishStream opens the client's publish stream and installs a
// result callback that forwards each outcome to resultChan without
// ever blocking the stream goroutine.
func (k *kubeMQEventStore) setPublishStream() error {
	onResult := func(result *kubemq.EventStoreResult, err error) {
		// Drop the result if nobody is waiting (Publish may have timed out).
		select {
		case k.resultChan <- result:
		default:
		}
	}
	fn, err := k.client.Stream(k.ctx, onResult)
	k.publishFunc = fn
	if err != nil {
		return err
	}
	return nil
}
// Publish sends req.Data to the req.Topic channel through the publish
// stream and synchronously waits for the broker's result, up to
// waitForResultTimeout.
func (k *kubeMQEventStore) Publish(req *pubsub.PublishRequest) error {
	if err := k.init(); err != nil {
		return err
	}
	k.logger.Debugf("kubemq pub/sub: publishing message to %s", req.Topic)
	event := &kubemq.EventStore{
		Id:       "",
		Channel:  req.Topic,
		Metadata: "",
		Body:     req.Data,
		ClientId: k.metadata.clientID,
		Tags:     map[string]string{},
	}
	if err := k.publishFunc(event); err != nil {
		k.logger.Errorf("kubemq pub/sub error: publishing to %s failed with error: %s", req.Topic, err.Error())
		return err
	}
	// Wait for the asynchronous stream result; results arrive on
	// resultChan via the callback installed in setPublishStream.
	// NOTE(review): with concurrent publishers a result on this channel
	// is not correlated to a specific message — confirm callers
	// serialize Publish.
	select {
	case res := <-k.resultChan:
		if res.Err != nil {
			return res.Err
		}
	case <-time.After(k.waitForResultTimeout):
		return fmt.Errorf("kubemq pub/sub error: timeout waiting for response")
	}
	return nil
}
// Features reports the optional pubsub features supported by the
// events-store client; none are advertised.
func (k *kubeMQEventStore) Features() []pubsub.Feature {
	var feats []pubsub.Feature
	return feats
}
// Subscribe registers handler for new messages on req.Topic. When the
// handler returns an error the message is re-published to the same
// topic (best-effort redelivery) unless disableReDelivery is set.
func (k *kubeMQEventStore) Subscribe(ctx context.Context, req pubsub.SubscribeRequest, handler pubsub.Handler) error {
	if err := k.init(); err != nil {
		return err
	}
	clientID := k.metadata.clientID
	if clientID == "" {
		// Anonymous subscribers each get a unique random ID.
		clientID = getRandomID()
	}
	k.logger.Debugf("kubemq pub/sub: subscribing to %s", req.Topic)
	err := k.client.Subscribe(ctx, &kubemq.EventsStoreSubscription{
		Channel:          req.Topic,
		Group:            k.metadata.group,
		ClientId:         clientID,
		SubscriptionType: kubemq.StartFromNewEvents(),
	}, func(event *kubemq.EventStoreReceive, err error) {
		if err != nil {
			k.logger.Errorf("kubemq pub/sub error: subscribing to %s failed with error: %s", req.Topic, err.Error())
			return
		}
		// Drop messages arriving after the subscription context ended.
		if ctx.Err() != nil {
			return
		}
		msg := &pubsub.NewMessage{
			Data:        event.Body,
			Topic:       req.Topic,
			Metadata:    nil,
			ContentType: nil,
		}
		if err := handler(ctx, msg); err != nil {
			k.logger.Errorf("kubemq pub/sub error: error handling message from topic '%s', %s, resending...", req.Topic, err.Error())
			if k.metadata.disableReDelivery {
				return
			}
			// Redeliver by re-publishing to the back of the channel.
			// NOTE(review): a permanently failing handler would loop
			// forever here — confirm this is intended.
			if err := k.Publish(&pubsub.PublishRequest{
				Data:  msg.Data,
				Topic: msg.Topic,
			}); err != nil {
				k.logger.Errorf("kubemq pub/sub error: error resending message from topic '%s', %s", req.Topic, err.Error())
			}
		}
	})
	if err != nil {
		k.logger.Errorf("kubemq pub/sub error: error subscribing to topic '%s', %s", req.Topic, err.Error())
		return err
	}
	// Give the asynchronous subscription a moment to become active
	// before returning. NOTE(review): fixed sleep is a race-prone
	// heuristic — confirm there is no readiness signal to wait on.
	time.Sleep(1 * time.Second)
	k.logger.Debugf("kubemq pub/sub: subscribed to %s completed", req.Topic)
	return nil
}
// Close cancels the store's background context and closes the
// underlying KubeMQ client connection. Returns any error reported by
// the client, or nil when the client was never created.
func (k *kubeMQEventStore) Close() error {
	if k.ctxCancel != nil {
		k.ctxCancel()
	}
	// The client is created lazily in init(); guard against a nil
	// dereference when the component was never successfully initialized.
	if k.client == nil {
		return nil
	}
	return k.client.Close()
}

View File

@ -0,0 +1,228 @@
package kubemq
import (
"context"
"fmt"
"testing"
"time"
"github.com/kubemq-io/kubemq-go"
"github.com/stretchr/testify/assert"
"github.com/dapr/components-contrib/pubsub"
"github.com/dapr/kit/logger"
)
// kubemqEventsStoreMock is an in-memory stand-in for the events-store
// client used by the unit tests.
type kubemqEventsStoreMock struct {
	resultError    error         // result delivered after each publish
	subscribeErr   error         // error returned from Subscribe
	resultCh       chan error    // carries resultError from publish to the Stream loop
	publishError   error         // error returned synchronously from publish
	publishTimeout time.Duration // artificial delay before the result is delivered
}
// publish mimics the stream's send function: it fails fast when a
// publish error is configured, otherwise delivers the configured
// result asynchronously, optionally after an artificial delay.
func (m *kubemqEventsStoreMock) publish(msg *kubemq.EventStore) error {
	if m.publishError != nil {
		return m.publishError
	}
	go func() {
		if m.publishTimeout > 0 {
			time.Sleep(m.publishTimeout)
		}
		m.resultCh <- m.resultError
	}()
	return nil
}
// Stream returns the mock publish function and starts a goroutine that
// converts each error pushed to resultCh into an EventStoreResult for
// the caller's onResult callback, until ctx is cancelled.
func (k *kubemqEventsStoreMock) Stream(ctx context.Context, onResult func(result *kubemq.EventStoreResult, err error)) (func(msg *kubemq.EventStore) error, error) {
	go func() {
		for {
			select {
			case <-ctx.Done():
				return
			case result := <-k.resultCh:
				onResult(&kubemq.EventStoreResult{
					Id:   "",
					Sent: false,
					Err:  result,
				}, nil)
			}
		}
	}()
	return k.publish, nil
}
// Subscribe implements the events-store-client interface for the mock;
// it never delivers events and only reports the configured error.
func (m *kubemqEventsStoreMock) Subscribe(ctx context.Context, request *kubemq.EventsStoreSubscription, onEvent func(msg *kubemq.EventStoreReceive, err error)) error {
	return m.subscribeErr
}
// Close is a no-op for the mock; there is no real connection to tear down.
func (m *kubemqEventsStoreMock) Close() error {
	return nil
}
// setResultError configures the result delivered after a publish;
// returns the mock for call chaining.
func (m *kubemqEventsStoreMock) setResultError(err error) *kubemqEventsStoreMock {
	m.resultError = err
	return m
}
// setSubscribeError configures the error returned by Subscribe;
// returns the mock for call chaining.
func (m *kubemqEventsStoreMock) setSubscribeError(err error) *kubemqEventsStoreMock {
	m.subscribeErr = err
	return m
}
// setPublishTimeout configures an artificial delay before the publish
// result is delivered; returns the mock for call chaining.
func (m *kubemqEventsStoreMock) setPublishTimeout(timeout time.Duration) *kubemqEventsStoreMock {
	m.publishTimeout = timeout
	return m
}
// setPublishError configures the error returned synchronously from
// publish; returns the mock for call chaining.
func (m *kubemqEventsStoreMock) setPublishError(err error) *kubemqEventsStoreMock {
	m.publishError = err
	return m
}
// newKubemqEventsStoreMock constructs a mock with a buffered result
// channel and no errors configured (zero values for all error fields).
func newKubemqEventsStoreMock() *kubemqEventsStoreMock {
	m := &kubemqEventsStoreMock{}
	m.resultCh = make(chan error, 1)
	return m
}
// Test_kubeMQEventsStore_Publish verifies publishing through the
// events-store client against a mocked KubeMQ connection, covering
// success, broker-error, timeout, and publish-error paths.
func Test_kubeMQEventsStore_Publish(t *testing.T) {
	tests := []struct {
		name        string
		req         *pubsub.PublishRequest
		timeout     time.Duration
		publishErr  error
		resultError error
		wantErr     bool
	}{
		{
			name: "publish with no error",
			req: &pubsub.PublishRequest{
				Data:  []byte("data"),
				Topic: "some-topic",
			},
			resultError: nil,
			wantErr:     false,
		},
		{
			name: "publish with error",
			req: &pubsub.PublishRequest{
				Data:  []byte("data"),
				Topic: "some-topic",
			},
			resultError: fmt.Errorf("some error"),
			wantErr:     true,
		},
		{
			name: "publish with timeout error",
			req: &pubsub.PublishRequest{
				Data:  []byte("data"),
				Topic: "some-topic",
			},
			resultError: nil,
			timeout:     3 * time.Second,
			wantErr:     true,
		},
		{
			name: "publish with publish error",
			req: &pubsub.PublishRequest{
				Data:  []byte("data"),
				Topic: "some-topic",
			},
			resultError: nil,
			publishErr:  fmt.Errorf("some error"),
			wantErr:     true,
		},
	}
	for _, tt := range tests {
		tt := tt
		// Run each case as a named subtest so failures are attributable
		// to a specific scenario (tt.name was previously unused).
		t.Run(tt.name, func(t *testing.T) {
			k := newKubeMQEventsStore(logger.NewLogger("kubemq-test"))
			k.ctx, k.ctxCancel = context.WithCancel(context.Background())
			client := newKubemqEventsStoreMock().
				setResultError(tt.resultError).
				setPublishError(tt.publishErr)
			k.isInitialized = true
			k.metadata = &metadata{
				host:      "",
				port:      0,
				clientID:  "some-client-id",
				authToken: "",
				group:     "",
				isStore:   true,
			}
			if tt.timeout > 0 {
				k.waitForResultTimeout = tt.timeout - 1*time.Second
				client.setPublishTimeout(tt.timeout)
			}
			k.client = client
			_ = k.setPublishStream()
			err := k.Publish(tt.req)
			if tt.wantErr {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)
			}
			_ = k.Features()
			_ = k.Close()
		})
	}
}
// Test_kubeMQEventsStore_Subscribe verifies subscribing through the
// events-store client against a mocked KubeMQ connection.
// (Renamed from Test_kubeMQkubeMQEventsStore_Subscribe — duplicated
// "kubeMQ" prefix was a typo.)
func Test_kubeMQEventsStore_Subscribe(t *testing.T) {
	tests := []struct {
		name             string
		reqMsg           *pubsub.NewMessage
		subscribeError   error
		subscribeHandler pubsub.Handler
		wantErr          bool
	}{
		{
			name: "subscribe with no error",
			reqMsg: &pubsub.NewMessage{
				Data:  []byte("data"),
				Topic: "some-topic",
			},
			subscribeHandler: func(ctx context.Context, msg *pubsub.NewMessage) error {
				return nil
			},
			subscribeError: nil,
			wantErr:        false,
		}, {
			name: "subscribe with error",
			reqMsg: &pubsub.NewMessage{
				Data:  []byte("data"),
				Topic: "some-topic",
			},
			subscribeHandler: func(ctx context.Context, msg *pubsub.NewMessage) error {
				return nil
			},
			subscribeError: fmt.Errorf("some error"),
			wantErr:        true,
		},
	}
	for _, tt := range tests {
		tt := tt
		// Run each case as a named subtest so failures are attributable
		// to a specific scenario (tt.name was previously unused).
		t.Run(tt.name, func(t *testing.T) {
			k := newKubeMQEventsStore(logger.NewLogger("kubemq-test"))
			k.ctx, k.ctxCancel = context.WithCancel(context.Background())
			k.client = newKubemqEventsStoreMock().
				setSubscribeError(tt.subscribeError)
			k.isInitialized = true
			k.metadata = &metadata{
				host:      "",
				port:      0,
				clientID:  "some-client-id",
				authToken: "",
				group:     "",
				isStore:   true,
			}
			err := k.Subscribe(k.ctx, pubsub.SubscribeRequest{Topic: "some-topic"}, tt.subscribeHandler)
			if tt.wantErr {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)
			}
			_ = k.Features()
			_ = k.Close()
		})
	}
}

View File

@ -0,0 +1,162 @@
package kubemq
import (
"context"
"fmt"
"testing"
"github.com/stretchr/testify/assert"
mdata "github.com/dapr/components-contrib/metadata"
"github.com/dapr/components-contrib/pubsub"
"github.com/dapr/kit/logger"
)
// getMockEventsClient builds an events client wired to the mock
// connection and flagged as already initialized; all other fields keep
// their zero values.
func getMockEventsClient() *kubeMQEvents {
	c := &kubeMQEvents{}
	c.client = newKubemqEventsMock()
	c.isInitialized = true
	return c
}
// getMockEventsStoreClient builds an events-store client wired to the
// mock connection and flagged as already initialized; all other fields
// keep their zero values.
func getMockEventsStoreClient() *kubeMQEventStore {
	c := &kubeMQEventStore{}
	c.client = newKubemqEventsStoreMock()
	c.isInitialized = true
	return c
}
// Test_kubeMQ_Init verifies component initialization for the store and
// non-store variants, and that a malformed address yields an error.
func Test_kubeMQ_Init(t *testing.T) {
	tests := []struct {
		name             string
		meta             pubsub.Metadata
		eventsClient     *kubeMQEvents
		eventStoreClient *kubeMQEventStore
		wantErr          bool
	}{
		{
			name: "init events store client",
			meta: pubsub.Metadata{
				Base: mdata.Base{
					Properties: map[string]string{
						"address":   "localhost:50000",
						"channel":   "test",
						"clientID":  "clientID",
						"authToken": "authToken",
						"group":     "group",
						"store":     "true",
						"useMock":   "true",
					},
				},
			},
			eventsClient:     nil,
			eventStoreClient: getMockEventsStoreClient(),
			wantErr:          false,
		},
		{
			name: "init events client",
			meta: pubsub.Metadata{
				Base: mdata.Base{
					Properties: map[string]string{
						"address":   "localhost:50000",
						"channel":   "test",
						"clientID":  "clientID",
						"authToken": "authToken",
						"group":     "group",
						"store":     "false",
						"useMock":   "true",
					},
				},
			},
			eventsClient:     getMockEventsClient(),
			eventStoreClient: nil,
			wantErr:          false,
		},
		{
			// "badaddress" has no port separator, so metadata parsing fails.
			name: "init error",
			meta: pubsub.Metadata{
				Base: mdata.Base{
					Properties: map[string]string{
						"address": "badaddress",
					},
				},
			},
			eventsClient:     nil,
			eventStoreClient: nil,
			wantErr:          true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			k := NewKubeMQ(logger.NewLogger("test"))
			err := k.Init(tt.meta)
			assert.Equal(t, tt.wantErr, err != nil)
		})
	}
}
// Test_kubeMQ_Close verifies that closing the component succeeds for
// both the events client and the events-store client variants.
func Test_kubeMQ_Close(t *testing.T) {
	type fields struct {
		metadata         *metadata
		logger           logger.Logger
		ctx              context.Context
		ctxCancel        context.CancelFunc
		eventsClient     *kubeMQEvents
		eventStoreClient *kubeMQEventStore
	}
	tests := []struct {
		name    string
		fields  fields
		wantErr assert.ErrorAssertionFunc
	}{
		{
			name: "close events client",
			fields: fields{
				metadata: &metadata{
					isStore: false,
				},
				eventsClient:     getMockEventsClient(),
				eventStoreClient: nil,
			},
			wantErr: assert.NoError,
		},
		{
			name: "close events store client",
			fields: fields{
				metadata: &metadata{
					isStore: true,
				},
				eventsClient:     nil,
				eventStoreClient: getMockEventsStoreClient(),
			},
			wantErr: assert.NoError,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			k := &kubeMQ{
				metadata:         tt.fields.metadata,
				logger:           tt.fields.logger,
				ctx:              tt.fields.ctx,
				ctxCancel:        tt.fields.ctxCancel,
				eventsClient:     tt.fields.eventsClient,
				eventStoreClient: tt.fields.eventStoreClient,
			}
			// fmt.Sprintf with no arguments trips go vet / staticcheck
			// S1039; format the subtest name into the assertion message.
			tt.wantErr(t, k.Close(), fmt.Sprintf("Close() %s", tt.name))
		})
	}
}

80
pubsub/kubemq/metadata.go Normal file
View File

@ -0,0 +1,80 @@
package kubemq
import (
"fmt"
"strconv"
"strings"
"github.com/dapr/components-contrib/pubsub"
)
// metadata holds the parsed KubeMQ pubsub component configuration.
type metadata struct {
	host              string // KubeMQ server host, parsed from "address"
	port              int    // KubeMQ server port, parsed from "address"
	clientID          string
	authToken         string
	group             string
	isStore           bool // true => events-store (persistent) channel; defaults to true
	disableReDelivery bool // when true, handler errors do not trigger a re-publish
}
// parseAddress splits a "host:port" KubeMQ address into its host and
// numeric port components.
// Returns an error when the format is not exactly host:port, the host
// is empty, or the port is not a valid number in [0, 65535].
func parseAddress(address string) (string, int, error) {
	var host string
	var port int
	var err error
	hostPort := strings.Split(address, ":")
	if len(hostPort) != 2 {
		return "", 0, fmt.Errorf("invalid kubeMQ address, address format is invalid")
	}
	host = hostPort[0]
	if len(host) == 0 {
		return "", 0, fmt.Errorf("invalid kubeMQ address, host is empty")
	}
	port, err = strconv.Atoi(hostPort[1])
	if err != nil {
		return "", 0, fmt.Errorf("invalid kubeMQ address, port is invalid")
	}
	// Reject numeric but out-of-range ports (e.g. -1 or 70000), which
	// a bare Atoi would otherwise accept.
	if port < 0 || port > 65535 {
		return "", 0, fmt.Errorf("invalid kubeMQ address, port is invalid")
	}
	return host, port, nil
}
// createMetadata builds the component metadata from the generic pubsub
// metadata properties. "address" is required; "store" defaults to true
// and must be "true" or "false" when present; "disableReDelivery" is
// enabled only by the literal value "true".
func createMetadata(pubSubMetadata pubsub.Metadata) (*metadata, error) {
	props := pubSubMetadata.Properties
	result := &metadata{}

	address := props["address"]
	if address == "" {
		return nil, fmt.Errorf("invalid kubeMQ address, address is empty")
	}
	var err error
	if result.host, result.port, err = parseAddress(address); err != nil {
		return nil, err
	}

	// Missing keys read as "" from the map, matching the original
	// found-and-non-empty checks.
	result.clientID = props["clientID"]
	result.authToken = props["authToken"]
	result.group = props["group"]

	switch props["store"] {
	case "", "true":
		result.isStore = true
	case "false":
		result.isStore = false
	default:
		return nil, fmt.Errorf("invalid kubeMQ store value, store can be true or false")
	}

	result.disableReDelivery = props["disableReDelivery"] == "true"

	return result, nil
}

View File

@ -0,0 +1,170 @@
package kubemq
import (
"testing"
"github.com/stretchr/testify/assert"
mdata "github.com/dapr/components-contrib/metadata"
"github.com/dapr/components-contrib/pubsub"
)
// Test_createMetadata exercises metadata parsing: full/partial valid
// property sets, and rejection of bad host, bad port, empty address,
// malformed address, and an invalid "store" value.
func Test_createMetadata(t *testing.T) {
	tests := []struct {
		name    string
		meta    pubsub.Metadata
		want    *metadata
		wantErr bool
	}{
		{
			name: "create valid metadata",
			meta: pubsub.Metadata{
				Base: mdata.Base{
					Properties: map[string]string{
						"address":           "localhost:50000",
						"channel":           "test",
						"clientID":          "clientID",
						"authToken":         "authToken",
						"group":             "group",
						"store":             "true",
						"useMock":           "true",
						"disableReDelivery": "true",
					},
				},
			},
			want: &metadata{
				host:              "localhost",
				port:              50000,
				clientID:          "clientID",
				authToken:         "authToken",
				group:             "group",
				isStore:           true,
				disableReDelivery: true,
			},
			wantErr: false,
		},
		{
			name: "create valid metadata with empty group",
			meta: pubsub.Metadata{
				Base: mdata.Base{
					Properties: map[string]string{
						"address":   "localhost:50000",
						"clientID":  "clientID",
						"authToken": "authToken",
						"store":     "false",
					},
				},
			},
			want: &metadata{
				host:      "localhost",
				port:      50000,
				clientID:  "clientID",
				authToken: "authToken",
				group:     "",
				isStore:   false,
			},
			wantErr: false,
		},
		{
			name: "create valid metadata with empty authToken",
			meta: pubsub.Metadata{
				Base: mdata.Base{
					Properties: map[string]string{
						"address":  "localhost:50000",
						"channel":  "test",
						"clientID": "clientID",
						"group":    "group",
						"store":    "true",
					},
				},
			},
			want: &metadata{
				host:      "localhost",
				port:      50000,
				clientID:  "clientID",
				authToken: "",
				group:     "group",
				isStore:   true,
			},
			wantErr: false,
		},
		{
			// Empty host before the colon must be rejected.
			name: "create invalid metadata with bad host",
			meta: pubsub.Metadata{
				Base: mdata.Base{
					Properties: map[string]string{
						"address":  ":50000",
						"clientID": "clientID",
					},
				},
			},
			want:    nil,
			wantErr: true,
		},
		{
			// Non-numeric port must be rejected.
			name: "create invalid metadata with bad port",
			meta: pubsub.Metadata{
				Base: mdata.Base{
					Properties: map[string]string{
						"address":  "localhost:badport",
						"clientID": "clientID",
					},
				},
			},
			want:    nil,
			wantErr: true,
		},
		{
			name: "create invalid metadata with empty address",
			meta: pubsub.Metadata{
				Base: mdata.Base{
					Properties: map[string]string{
						"address":  "",
						"clientID": "clientID",
					},
				},
			},
			want:    nil,
			wantErr: true,
		},
		{
			// More than one colon is not a valid host:port pair.
			name: "create invalid metadata with bad address format",
			meta: pubsub.Metadata{
				Base: mdata.Base{
					Properties: map[string]string{
						"address":  "localhost:50000:badport",
						"clientID": "clientID",
					},
				},
			},
			want:    nil,
			wantErr: true,
		},
		{
			// "store" accepts only "true" or "false".
			name: "create invalid metadata with bad store info",
			meta: pubsub.Metadata{
				Base: mdata.Base{
					Properties: map[string]string{
						"address":  "localhost:50000",
						"clientID": "clientID",
						"store":    "bad",
					},
				},
			},
			want:    nil,
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := createMetadata(tt.meta)
			if tt.wantErr {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)
				assert.Equal(t, tt.want, got)
			}
		})
	}
}

View File

@ -22,6 +22,7 @@ import (
"fmt" "fmt"
"net/url" "net/url"
"regexp" "regexp"
"strconv"
"strings" "strings"
"sync" "sync"
"time" "time"
@ -249,11 +250,6 @@ func (m *mqttPubSub) onMessage(ctx context.Context) func(client mqtt.Client, mqt
return func(client mqtt.Client, mqttMsg mqtt.Message) { return func(client mqtt.Client, mqttMsg mqtt.Message) {
ack := false ack := false
defer func() { defer func() {
// Do not send N/ACKs on retained messages
if mqttMsg.Retained() {
return
}
// MQTT does not support NACK's, so in case of error we need to re-enqueue the message and then send a positive ACK for this message // MQTT does not support NACK's, so in case of error we need to re-enqueue the message and then send a positive ACK for this message
// Note that if the connection drops before the message is explicitly ACK'd below, then it's automatically re-sent (assuming QoS is 1 or greater, which is the default). So we do not risk losing messages. // Note that if the connection drops before the message is explicitly ACK'd below, then it's automatically re-sent (assuming QoS is 1 or greater, which is the default). So we do not risk losing messages.
// Problem with this approach is that if the service crashes between the time the message is re-enqueued and when the ACK is sent, the message may be delivered twice // Problem with this approach is that if the service crashes between the time the message is re-enqueued and when the ACK is sent, the message may be delivered twice
@ -283,6 +279,7 @@ func (m *mqttPubSub) onMessage(ctx context.Context) func(client mqtt.Client, mqt
msg := pubsub.NewMessage{ msg := pubsub.NewMessage{
Topic: mqttMsg.Topic(), Topic: mqttMsg.Topic(),
Data: mqttMsg.Payload(), Data: mqttMsg.Payload(),
Metadata: map[string]string{"retained": strconv.FormatBool(mqttMsg.Retained())},
} }
topicHandler := m.handlerForTopic(msg.Topic) topicHandler := m.handlerForTopic(msg.Topic)

View File

@ -21,8 +21,10 @@ import (
amqp "github.com/rabbitmq/amqp091-go" amqp "github.com/rabbitmq/amqp091-go"
"github.com/dapr/components-contrib/pubsub"
"github.com/dapr/kit/logger" "github.com/dapr/kit/logger"
contribMetadata "github.com/dapr/components-contrib/metadata"
"github.com/dapr/components-contrib/pubsub"
) )
type metadata struct { type metadata struct {
@ -45,6 +47,7 @@ type metadata struct {
exchangeKind string exchangeKind string
publisherConfirm bool publisherConfirm bool
concurrency pubsub.ConcurrencyMode concurrency pubsub.ConcurrencyMode
defaultQueueTTL *time.Duration
} }
const ( const (
@ -191,6 +194,15 @@ func createMetadata(pubSubMetadata pubsub.Metadata, log logger.Logger) (*metadat
} }
} }
ttl, ok, err := contribMetadata.TryGetTTL(pubSubMetadata.Properties)
if err != nil {
return &result, fmt.Errorf("%s parse RabbitMQ ttl metadata with error: %s", errorMessagePrefix, err)
}
if ok {
result.defaultQueueTTL = &ttl
}
c, err := pubsub.Concurrency(pubSubMetadata.Properties) c, err := pubsub.Concurrency(pubSubMetadata.Properties)
if err != nil { if err != nil {
return &result, err return &result, err

View File

@ -24,8 +24,10 @@ import (
amqp "github.com/rabbitmq/amqp091-go" amqp "github.com/rabbitmq/amqp091-go"
"github.com/dapr/components-contrib/pubsub"
"github.com/dapr/kit/logger" "github.com/dapr/kit/logger"
contribMetadata "github.com/dapr/components-contrib/metadata"
"github.com/dapr/components-contrib/pubsub"
) )
const ( const (
@ -65,6 +67,8 @@ type rabbitMQ struct {
} }
// interface used to allow unit testing. // interface used to allow unit testing.
//
//nolint:interfacebloat
type rabbitMQChannelBroker interface { type rabbitMQChannelBroker interface {
PublishWithContext(ctx context.Context, exchange string, key string, mandatory bool, immediate bool, msg amqp.Publishing) error PublishWithContext(ctx context.Context, exchange string, key string, mandatory bool, immediate bool, msg amqp.Publishing) error
PublishWithDeferredConfirmWithContext(ctx context.Context, exchange string, key string, mandatory bool, immediate bool, msg amqp.Publishing) (*amqp.DeferredConfirmation, error) PublishWithDeferredConfirmWithContext(ctx context.Context, exchange string, key string, mandatory bool, immediate bool, msg amqp.Publishing) (*amqp.DeferredConfirmation, error)
@ -190,10 +194,23 @@ func (r *rabbitMQ) publishSync(req *pubsub.PublishRequest) (rabbitMQChannelBroke
routingKey = val routingKey = val
} }
ttl, ok, err := contribMetadata.TryGetTTL(req.Metadata)
if err != nil {
r.logger.Warnf("%s publishing to %s failed parse TryGetTTL: %v, it is ignored.", logMessagePrefix, req.Topic, err)
}
var expiration string
if ok {
// RabbitMQ expects the duration in ms
expiration = strconv.FormatInt(ttl.Milliseconds(), 10)
} else if r.metadata.defaultQueueTTL != nil {
expiration = strconv.FormatInt(r.metadata.defaultQueueTTL.Milliseconds(), 10)
}
confirm, err := r.channel.PublishWithDeferredConfirmWithContext(r.ctx, req.Topic, routingKey, false, false, amqp.Publishing{ confirm, err := r.channel.PublishWithDeferredConfirmWithContext(r.ctx, req.Topic, routingKey, false, false, amqp.Publishing{
ContentType: "text/plain", ContentType: "text/plain",
Body: req.Data, Body: req.Data,
DeliveryMode: r.metadata.deliveryMode, DeliveryMode: r.metadata.deliveryMode,
Expiration: expiration,
}) })
if err != nil { if err != nil {
r.logger.Errorf("%s publishing to %s failed in channel.Publish: %v", logMessagePrefix, req.Topic, err) r.logger.Errorf("%s publishing to %s failed in channel.Publish: %v", logMessagePrefix, req.Topic, err)
@ -545,7 +562,7 @@ func (r *rabbitMQ) Close() error {
} }
func (r *rabbitMQ) Features() []pubsub.Feature { func (r *rabbitMQ) Features() []pubsub.Feature {
return nil return []pubsub.Feature{pubsub.FeatureMessageTTL}
} }
func mustReconnect(channel rabbitMQChannelBroker, err error) bool { func mustReconnect(channel rabbitMQChannelBroker, err error) bool {

View File

@ -14,68 +14,171 @@ limitations under the License.
package rocketmq package rocketmq
import ( import (
"errors"
"fmt" "fmt"
"github.com/dapr/components-contrib/metadata" "github.com/dapr/components-contrib/metadata"
"github.com/dapr/components-contrib/pubsub" "github.com/dapr/components-contrib/pubsub"
"github.com/dapr/kit/logger"
)
var (
ErrRocketmqPublishMsg = errors.New("rocketmq publish msg error")
ErrRocketmqValidPublishMsgTyp = errors.New("rocketmq publish msg error, invalid msg type")
) )
const ( const (
metadataRocketmqTag = "rocketmq-tag" metadataRocketmqTag = "rocketmq-tag"
metadataRocketmqKey = "rocketmq-key" metadataRocketmqKey = "rocketmq-key"
metadataRocketmqShardingKey = "rocketmq-shardingkey" metadataRocketmqShardingKey = "rocketmq-shardingkey"
metadataRocketmqQueue = "rocketmq-queue"
metadataRocketmqConsumerGroup = "rocketmq-consumerGroup" metadataRocketmqConsumerGroup = "rocketmq-consumerGroup"
metadataRocketmqType = "rocketmq-sub-type" metadataRocketmqType = "rocketmq-sub-type"
metadataRocketmqExpression = "rocketmq-sub-expression" metadataRocketmqExpression = "rocketmq-sub-expression"
metadataRocketmqBrokerName = "rocketmq-broker-name" metadataRocketmqBrokerName = "rocketmq-broker-name"
metadataRocketmqQueueID = "rocketmq-queue-id"
) )
type QueueSelectorType string
const (
HashQueueSelector QueueSelectorType = "hash"
RandomQueueSelector QueueSelectorType = "random"
ManualQueueSelector QueueSelectorType = "manual"
RoundRobinQueueSelector QueueSelectorType = "roundRobin"
DaprQueueSelector QueueSelectorType = "dapr"
)
// RocketMQ Go Client Options
type rocketMQMetaData struct { type rocketMQMetaData struct {
// rocketmq instance name, it will be registered to the broker
InstanceName string `mapstructure:"instanceName"`
// Deprecated: consumer group name
GroupName string `mapstructure:"groupName"`
ConsumerGroup string `mapstructure:"consumerGroup"`
// producer group name
ProducerGroup string `mapstructure:"producerGroup"`
// rocketmq namespace
NameSpace string `mapstructure:"nameSpace"`
// rocketmq's name server domain
NameServerDomain string `mapstructure:"nameServerDomain"`
// rocketmq's name server
NameServer string `mapstructure:"nameServer"`
// rocketmq Credentials // rocketmq Credentials
AccessKey string `mapstructure:"accessKey"` AccessKey string `mapstructure:"accessKey"`
SecretKey string `mapstructure:"secretKey"` SecretKey string `mapstructure:"secretKey"`
NameServer string `mapstructure:"nameServer"` SecurityToken string `mapstructure:"securityToken"`
// Deprecated: use ProducerGroup instead. // retry times to send msg to broker
GroupName string `mapstructure:"groupName"`
ProducerGroup string `mapstructure:"producerGroup"`
NameSpace string `mapstructure:"nameSpace"`
// consumer group rocketmq's subscribers
ConsumerGroup string `mapstructure:"consumerGroup"`
ConsumerBatchSize int `mapstructure:"consumerBatchSize"`
// rocketmq's name server domain
NameServerDomain string `mapstructure:"nameServerDomain"`
// msg's content-type
ContentType string `mapstructure:"content-type"`
// retry times to connect rocketmq's broker
Retries int `mapstructure:"retries"` Retries int `mapstructure:"retries"`
// Producer Queue selector
// There are five implementations of queue selectorHash, Random, Manual, RoundRobin, Daprrespectively
//
// Dapr Queue selector is design by dapr developers
ProducerQueueSelector QueueSelectorType `mapstructure:"producerQueueSelector"`
// Message model defines the way how messages are delivered to each consumer clients
// RocketMQ supports two message models: clustering and broadcasting. If clustering is set, consumer clients with
// the same {@link #ConsumerGroup} would only consume shards of the messages subscribed, which achieves load
// balances; Conversely, if the broadcasting is set, each consumer client will consume all subscribed messages
// separately.
//
// This field defaults to clustering.
ConsumerModel string `mapstructure:"consumerModel"`
// Consuming point on consumer booting.
// There are three consuming points:
// - CONSUME_FROM_LAST_OFFSET: consumer clients pick up where it stopped previously. If it were a newly booting up
// consumer client, according aging of the consumer group, there are two cases.
// cases1:
// if the consumer group is created so recently that the earliest message being subscribed has yet
// expired, which means the consumer group represents a lately launched business, consuming will
// start from the very beginning.
// case2:
// if the earliest message being subscribed has expired, consuming will start from the latest messages,
// meaning messages born prior to the booting timestamp would be ignored.
// - CONSUME_FROM_FIRST_OFFSET: Consumer client will start from earliest messages available.
// - CONSUME_FROM_TIMESTAMP: Consumer client will start from specified timestamp, which means messages born
// prior to {@link #consumeTimestamp} will be ignored
FromWhere string `mapstructure:"fromWhere"`
/**
* Backtracking consumption time with second precision. Time format is
* 20131223171201<br>
* Implying Seventeen twelve and 01 seconds on December 23, 2013 year<br>
* Default backtracking consumption time Half an hour ago.
*
* RocketMQ Go Client does not support configuration in github.com/apache/rocketmq-client-go/v2 v2.1.1-rc2
*/
ConsumeTimestamp string `mapstructure:"consumeTimestamp"`
// Whether it is an ordered message using FIFO order
//
// This field defaults to false.
ConsumeOrderly string `mapstructure:"consumeOrderly"`
// Batch consumption size
ConsumeMessageBatchMaxSize int `mapstructure:"consumeMessageBatchMaxSize"`
// Concurrently max span offset.it has no effect on sequential consumption
ConsumeConcurrentlyMaxSpan int `mapstructure:"consumeConcurrentlyMaxSpan"`
// Max re-consume times. -1 means 16 times.
//
// If messages are re-consumed more than {@link #maxReconsumeTimes} before Success, it's be directed to a deletion
// queue waiting.
MaxReconsumeTimes int32 `mapstructure:"maxReconsumeTimes"`
AutoCommit string `mapstructure:"autoCommit"`
// Maximum amount of time a message may block the consuming thread.
//
// RocketMQ Go Client does not support configuration in github.com/apache/rocketmq-client-go/v2 v2.1.1-rc2
ConsumeTimeout int `mapstructure:"consumeTimeout"`
// The socket timeout in milliseconds
ConsumerPullTimeout int `mapstructure:"consumerPullTimeout"`
// Message pull Interval
PullInterval int `mapstructure:"pullInterval"`
// Deprecated: The number of messages pulled from the broker at a time
ConsumerBatchSize int `mapstructure:"consumerBatchSize"`
// The number of messages pulled from the broker at a time
PullBatchSize int32 `mapstructure:"pullBatchSize"`
// Flow control threshold on queue level, each message queue will cache at most 1000 messages by default,
// Consider the {PullBatchSize}, the instantaneous value may exceed the limit
//
// RocketMQ Go Client does not support configuration in github.com/apache/rocketmq-client-go/v2 v2.1.1-rc2
PullThresholdForQueue int64 `mapstructure:"pullThresholdForQueue"`
// Flow control threshold on topic level, default value is -1(Unlimited)
//
// The value of {@code pullThresholdForQueue} will be overwritten and calculated based on
// {@code pullThresholdForTopic} if it isn't unlimited
//
// For example, if the value of pullThresholdForTopic is 1000 and 10 message queues are assigned to this consumer,
// then pullThresholdForQueue will be set to 100
//
// RocketMQ Go Client does not support configuration in github.com/apache/rocketmq-client-go/v2 v2.1.1-rc2
PullThresholdForTopic int64 `mapstructure:"pullThresholdForTopic"`
// RocketMQ Go Client does not support configuration in github.com/apache/rocketmq-client-go/v2 v2.1.1-rc2
PullThresholdSizeForQueue int `mapstructure:"pullThresholdSizeForQueue"`
// Limit the cached message size on topic level, default value is -1 MiB(Unlimited)
//
// The value of {@code pullThresholdSizeForQueue} will be overwritten and calculated based on
// {@code pullThresholdSizeForTopic} if it isn't unlimited
//
// For example, if the value of pullThresholdSizeForTopic is 1000 MiB and 10 message queues are
// assigned to this consumer, then pullThresholdSizeForQueue will be set to 100 MiB
//
// RocketMQ Go Client does not support configuration in github.com/apache/rocketmq-client-go/v2 v2.1.1-rc2
PullThresholdSizeForTopic int `mapstructure:"pullThresholdSizeForTopic"`
ContentType string `mapstructure:"content-type"` // msg's content-type
// Deprecated: send msg timeout to connect rocketmq's broker, nanoseconds // Deprecated: send msg timeout to connect rocketmq's broker, nanoseconds
SendTimeOut int `mapstructure:"sendTimeOut"` SendTimeOut int `mapstructure:"sendTimeOut"`
// send msg timeout to connect rocketmq's broker, seconds // timeout for send msg to rocketmq broker, in seconds
SendTimeOutSec int `mapstructure:"sendTimeOutSec"` SendTimeOutSec int `mapstructure:"sendTimeOutSec"`
} LogLevel string `mapstructure:"logLevel"`
func getDefaultRocketMQMetaData() *rocketMQMetaData { // The RocketMQ message properties in this collection are passed to the APP in Data
return &rocketMQMetaData{ // Separate multiple properties with ","
AccessKey: "", MsgProperties string `mapstructure:"mspProperties"`
SecretKey: "",
NameServer: "",
GroupName: "",
ProducerGroup: "",
NameSpace: "",
ConsumerGroup: "",
ConsumerBatchSize: 0,
NameServerDomain: "",
ContentType: pubsub.DefaultCloudEventDataContentType,
Retries: 3,
SendTimeOutSec: 60,
}
} }
func (s *rocketMQMetaData) Decode(in interface{}) error { func (s *rocketMQMetaData) Decode(in interface{}) error {
@ -85,8 +188,18 @@ func (s *rocketMQMetaData) Decode(in interface{}) error {
return nil return nil
} }
func parseRocketMQMetaData(metadata pubsub.Metadata, logger logger.Logger) (*rocketMQMetaData, error) { const (
rMetaData := getDefaultRocketMQMetaData() KeyConsumeFromWhere string = "consumeFromWhere"
KeyQueueSelector string = "queueSelector"
)
func parseRocketMQMetaData(metadata pubsub.Metadata) (*rocketMQMetaData, error) {
rMetaData := &rocketMQMetaData{
Retries: 3,
LogLevel: "warn",
PullInterval: 100,
ConsumerPullTimeout: 30,
}
if metadata.Properties != nil { if metadata.Properties != nil {
err := rMetaData.Decode(metadata.Properties) err := rMetaData.Decode(metadata.Properties)
if err != nil { if err != nil {
@ -94,19 +207,16 @@ func parseRocketMQMetaData(metadata pubsub.Metadata, logger logger.Logger) (*roc
} }
} }
if rMetaData.GroupName != "" {
logger.Warn("pubsub.rocketmq: metadata property 'groupName' has been deprecated - use 'producerGroup' instead. See: https://docs.dapr.io/reference/components-reference/supported-pubsub/setup-rocketmq/")
}
if rMetaData.ProducerGroup == "" { if rMetaData.ProducerGroup == "" {
rMetaData.ProducerGroup = metadata.Properties[pubsub.RuntimeConsumerIDKey] rMetaData.ProducerGroup = metadata.Properties[pubsub.RuntimeConsumerIDKey]
} }
if rMetaData.SendTimeOut != 0 { if rMetaData.FromWhere == "" {
logger.Warn("pubsub.rocketmq: metadata property 'sendTimeOut' has been deprecated - use 'sendTimeOutSec' instead. See: https://docs.dapr.io/reference/components-reference/supported-pubsub/setup-rocketmq/") rMetaData.FromWhere = metadata.Properties[KeyConsumeFromWhere]
if rMetaData.SendTimeOutSec == 0 {
rMetaData.SendTimeOutSec = rMetaData.SendTimeOut / 1000000
} }
if rMetaData.ProducerQueueSelector == "" {
rMetaData.ProducerQueueSelector = QueueSelectorType(metadata.Properties[KeyQueueSelector])
} }
return rMetaData, nil return rMetaData, nil

View File

@ -21,21 +21,76 @@ import (
mdata "github.com/dapr/components-contrib/metadata" mdata "github.com/dapr/components-contrib/metadata"
"github.com/dapr/components-contrib/pubsub" "github.com/dapr/components-contrib/pubsub"
"github.com/dapr/kit/logger"
) )
func TestMetaDataDecode(t *testing.T) { func TestMetaDataDecode(t *testing.T) {
props := map[string]string{ props := map[string]string{
"accessKey": "**", "instanceName": "dapr-rocketmq-test",
"secretKey": "***", "producerGroup": "dapr-rocketmq-test-g-p",
"nameServer": "http://test.nameserver", "consumerGroup": "dapr-rocketmq-test-g-c",
"consumerGroup": "defaultGroup", "groupName": "dapr-rocketmq-test-g-c",
"nameSpace": "defaultNamespace", "nameSpace": "dapr-test",
"nameServerDomain": "www.baidu.com",
"nameServer": "test.nameserver",
"accessKey": "accessKey",
"secretKey": "secretKey",
"securityToken": "securityToken",
"retries": "5",
"consumerModel": "Clustering",
"fromWhere": "ConsumeFromLastOffset",
"consumeTimestamp": "20220817101902",
"consumeOrderly": "true",
"consumeMessageBatchMaxSize": "10",
"consumeConcurrentlyMaxSpan": "10",
"maxReconsumeTimes": "10000",
"autoCommit": "true",
"consumeTimeout": "10",
"consumerPullTimeout": "10",
"pullInterval": "10",
"consumerBatchSize": "10",
"pullBatchSize": "10",
"pullThresholdForQueue": "100",
"pullThresholdForTopic": "100",
"pullThresholdSizeForQueue": "10",
"pullThresholdSizeForTopic": "10",
"content-type": "json",
"sendTimeOutSec": "10",
"logLevel": "ERROR",
"mspProperties": "UNIQ_KEY",
} }
pubsubMeta := pubsub.Metadata{Base: mdata.Base{Properties: props}} pubsubMeta := pubsub.Metadata{Base: mdata.Base{Properties: props}}
metaData, err := parseRocketMQMetaData(pubsubMeta, logger.NewLogger("test")) metaData, err := parseRocketMQMetaData(pubsubMeta)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, "**", metaData.AccessKey) assert.Equal(t, "dapr-rocketmq-test", metaData.InstanceName)
assert.Equal(t, "***", metaData.SecretKey) assert.Equal(t, "dapr-rocketmq-test-g-p", metaData.ProducerGroup)
assert.Equal(t, "defaultGroup", metaData.ConsumerGroup) assert.Equal(t, "dapr-rocketmq-test-g-c", metaData.ConsumerGroup)
assert.Equal(t, "dapr-rocketmq-test-g-c", metaData.GroupName)
assert.Equal(t, "dapr-test", metaData.NameSpace)
assert.Equal(t, "www.baidu.com", metaData.NameServerDomain)
assert.Equal(t, "test.nameserver", metaData.NameServer)
assert.Equal(t, "accessKey", metaData.AccessKey)
assert.Equal(t, "secretKey", metaData.SecretKey)
assert.Equal(t, "securityToken", metaData.SecurityToken)
assert.Equal(t, 5, metaData.Retries)
assert.Equal(t, "Clustering", metaData.ConsumerModel)
assert.Equal(t, "ConsumeFromLastOffset", metaData.FromWhere)
assert.Equal(t, "20220817101902", metaData.ConsumeTimestamp)
assert.Equal(t, "true", metaData.ConsumeOrderly)
assert.Equal(t, 10, metaData.ConsumeMessageBatchMaxSize)
assert.Equal(t, 10, metaData.ConsumeConcurrentlyMaxSpan)
assert.Equal(t, int32(10000), metaData.MaxReconsumeTimes)
assert.Equal(t, "true", metaData.AutoCommit)
assert.Equal(t, 10, metaData.ConsumeTimeout)
assert.Equal(t, 10, metaData.ConsumerPullTimeout)
assert.Equal(t, 10, metaData.PullInterval)
assert.Equal(t, int32(10), metaData.PullBatchSize)
assert.Equal(t, int(10), metaData.ConsumerBatchSize)
assert.Equal(t, int64(100), metaData.PullThresholdForQueue)
assert.Equal(t, int64(100), metaData.PullThresholdForTopic)
assert.Equal(t, 10, metaData.PullThresholdSizeForQueue)
assert.Equal(t, 10, metaData.PullThresholdSizeForTopic)
assert.Equal(t, "json", metaData.ContentType)
assert.Equal(t, 10, metaData.SendTimeOutSec)
assert.Equal(t, "ERROR", metaData.LogLevel)
assert.Equal(t, "UNIQ_KEY", metaData.MsgProperties)
} }

View File

@ -16,7 +16,10 @@ package rocketmq
import ( import (
"context" "context"
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"strconv"
"strings"
"sync" "sync"
"time" "time"
@ -24,32 +27,53 @@ import (
mqc "github.com/apache/rocketmq-client-go/v2/consumer" mqc "github.com/apache/rocketmq-client-go/v2/consumer"
"github.com/apache/rocketmq-client-go/v2/primitive" "github.com/apache/rocketmq-client-go/v2/primitive"
mqp "github.com/apache/rocketmq-client-go/v2/producer" mqp "github.com/apache/rocketmq-client-go/v2/producer"
"github.com/cenkalti/backoff/v4" "github.com/apache/rocketmq-client-go/v2/rlog"
"github.com/dapr/components-contrib/internal/utils"
"github.com/dapr/components-contrib/pubsub" "github.com/dapr/components-contrib/pubsub"
"github.com/dapr/kit/logger" "github.com/dapr/kit/logger"
"github.com/dapr/kit/retry"
) )
type topicData struct { type daprQueueSelector struct {
selector mqc.MessageSelector hashQueueSelector mqp.QueueSelector
handler pubsub.Handler roundRobinQueueSelector mqp.QueueSelector
consumerGroup string }
mqType string
mqExpr string func NewDaprQueueSelector() *daprQueueSelector {
return &daprQueueSelector{
hashQueueSelector: mqp.NewHashQueueSelector(),
roundRobinQueueSelector: mqp.NewRoundRobinQueueSelector(),
}
}
func (p *daprQueueSelector) Select(msg *primitive.Message, queues []*primitive.MessageQueue) *primitive.MessageQueue {
if msg.Queue != nil {
return msg.Queue
}
if queue := msg.GetProperty(metadataRocketmqQueue); queue != "" {
for _, q := range queues {
if strconv.Itoa(q.QueueId) == queue {
return q
}
}
}
key := msg.GetShardingKey()
if len(key) == 0 {
return p.roundRobinQueueSelector.Select(msg, queues)
}
return p.hashQueueSelector.Select(msg, queues)
} }
type rocketMQ struct { type rocketMQ struct {
name string name string
metadata *rocketMQMetaData metadata *rocketMQMetaData
logger logger.Logger
topics map[string]topicData
producer mq.Producer producer mq.Producer
producerLock sync.RWMutex producerLock sync.Mutex
consumer mq.PushConsumer consumer mq.PushConsumer
consumerLock sync.RWMutex consumerLock sync.Mutex
topics map[string]mqc.MessageSelector
msgProperties map[string]bool
logger logger.Logger
ctx context.Context ctx context.Context
cancel context.CancelFunc cancel context.CancelFunc
} }
@ -58,78 +82,197 @@ func NewRocketMQ(l logger.Logger) pubsub.PubSub {
return &rocketMQ{ return &rocketMQ{
name: "rocketmq", name: "rocketmq",
logger: l, logger: l,
topics: make(map[string]topicData), producerLock: sync.Mutex{},
producerLock: sync.RWMutex{}, consumerLock: sync.Mutex{},
consumerLock: sync.RWMutex{},
} }
} }
func (r *rocketMQ) Init(metadata pubsub.Metadata) error { func (r *rocketMQ) Init(metadata pubsub.Metadata) error {
var err error var err error
r.metadata, err = parseRocketMQMetaData(metadata, r.logger) r.metadata, err = parseRocketMQMetaData(metadata)
if err != nil { if err != nil {
return err return err
} }
r.topics = make(map[string]mqc.MessageSelector)
r.msgProperties = make(map[string]bool)
rlog.SetLogLevel(r.metadata.LogLevel)
if r.metadata.MsgProperties != "" {
mps := strings.Split(r.metadata.MsgProperties, ",")
for _, mp := range mps {
r.msgProperties[mp] = true
}
}
r.ctx, r.cancel = context.WithCancel(context.Background()) r.ctx, r.cancel = context.WithCancel(context.Background())
return nil return nil
} }
func parseNameServer(nameServer string) []string {
if strings.Contains(nameServer, ",") {
return strings.Split(nameServer, ",")
} else if strings.Contains(nameServer, ";") {
return strings.Split(nameServer, ";")
} else {
return []string{nameServer}
}
}
func (r *rocketMQ) setUpConsumer() (mq.PushConsumer, error) { func (r *rocketMQ) setUpConsumer() (mq.PushConsumer, error) {
opts := make([]mqc.Option, 0) opts := make([]mqc.Option, 0)
if r.metadata.InstanceName != "" {
opts = append(opts, mqc.WithInstance(r.metadata.InstanceName))
}
if r.metadata.ConsumerGroup != "" { if r.metadata.ConsumerGroup != "" {
opts = append(opts, mqc.WithGroupName(r.metadata.ConsumerGroup)) opts = append(opts, mqc.WithGroupName(r.metadata.ConsumerGroup))
} else if r.metadata.GroupName != "" {
r.metadata.ConsumerGroup = r.metadata.GroupName
opts = append(opts, mqc.WithGroupName(r.metadata.ConsumerGroup))
r.logger.Warnf("set the consumer group name, please use the keyword consumerGroup")
} }
if r.metadata.ConsumerBatchSize != 0 { if r.metadata.NameServer != "" {
opts = append(opts, mqc.WithPullBatchSize(int32(r.metadata.ConsumerBatchSize))) opts = append(opts, mqc.WithNameServer(parseNameServer(r.metadata.NameServer)))
} }
if r.metadata.NameSpace != "" { if r.metadata.NameSpace != "" {
opts = append(opts, mqc.WithNamespace(r.metadata.NameSpace)) opts = append(opts, mqc.WithNamespace(r.metadata.NameSpace))
} }
if r.metadata.Retries != 0 {
opts = append(opts, mqc.WithRetry(r.metadata.Retries))
}
if r.metadata.NameServerDomain != "" { if r.metadata.NameServerDomain != "" {
opts = append(opts, mqc.WithNameServerDomain(r.metadata.NameServerDomain)) opts = append(opts, mqc.WithNameServerDomain(r.metadata.NameServerDomain))
} }
if r.metadata.NameServer != "" {
opts = append(opts, mqc.WithNameServer(primitive.NamesrvAddr{r.metadata.NameServer}))
}
if r.metadata.AccessKey != "" && r.metadata.SecretKey != "" { if r.metadata.AccessKey != "" && r.metadata.SecretKey != "" {
opts = append(opts, mqc.WithCredentials(primitive.Credentials{ opts = append(opts, mqc.WithCredentials(primitive.Credentials{
AccessKey: r.metadata.AccessKey, AccessKey: r.metadata.AccessKey,
SecretKey: r.metadata.SecretKey, SecretKey: r.metadata.SecretKey,
SecurityToken: r.metadata.SecurityToken,
})) }))
} }
return mq.NewPushConsumer(opts...) if r.metadata.Retries > 0 {
opts = append(opts, mqc.WithRetry(r.metadata.Retries))
}
if r.metadata.ConsumerModel != "" {
switch strings.ToLower(r.metadata.ConsumerModel) {
case "broadcasting":
opts = append(opts, mqc.WithConsumerModel(mqc.BroadCasting))
case "clustering":
opts = append(opts, mqc.WithConsumerModel(mqc.Clustering))
default:
r.metadata.ConsumerModel = "Clustering"
opts = append(opts, mqc.WithConsumerModel(mqc.Clustering))
r.logger.Warnf("%s Consumer Model[%s] is invalid: expected [broadcasting, clustering]; "+
"we will use default model [clustering]", r.name, r.metadata.ConsumerModel)
}
}
if r.metadata.FromWhere != "" {
switch strings.ToLower(r.metadata.FromWhere) {
case "consumefromlastoffset":
opts = append(opts, mqc.WithConsumeFromWhere(mqc.ConsumeFromLastOffset))
case "consumefromfirstoffset":
opts = append(opts, mqc.WithConsumeFromWhere(mqc.ConsumeFromFirstOffset))
case "consumefromtimestamp":
opts = append(opts, mqc.WithConsumeFromWhere(mqc.ConsumeFromTimestamp))
default:
r.metadata.FromWhere = "ConsumeFromLastOffset"
opts = append(opts, mqc.WithConsumeFromWhere(mqc.ConsumeFromLastOffset))
r.logger.Warnf("%s Consumer FromWhere[%s] is error, "+
"expected [ConsumeFromLastOffset, ConsumeFromFirstOffset, ConsumeFromTimestamp], "+
"we will use default value [ConsumeFromLastOffset]", r.name, r.metadata.FromWhere)
}
}
if r.metadata.ConsumeOrderly != "" {
if utils.IsTruthy(r.metadata.ConsumeOrderly) {
opts = append(opts, mqc.WithConsumerOrder(true))
// in orderly message mode, if no value is set of MessageBatchMaxSize, the recommended value [1] is used
if r.metadata.ConsumeMessageBatchMaxSize <= 0 {
r.metadata.ConsumeMessageBatchMaxSize = 1
}
} else {
opts = append(opts, mqc.WithConsumerOrder(false))
}
}
if r.metadata.ConsumeMessageBatchMaxSize > 0 {
opts = append(opts, mqc.WithConsumeMessageBatchMaxSize(r.metadata.ConsumeMessageBatchMaxSize))
}
if r.metadata.MaxReconsumeTimes > 0 {
opts = append(opts, mqc.WithMaxReconsumeTimes(r.metadata.MaxReconsumeTimes))
}
if r.metadata.AutoCommit != "" {
opts = append(opts, mqc.WithAutoCommit(utils.IsTruthy(r.metadata.AutoCommit)))
}
if r.metadata.PullInterval > 0 {
opts = append(opts, mqc.WithPullInterval(time.Duration(r.metadata.PullInterval)*time.Millisecond))
}
if r.metadata.PullBatchSize > 0 {
opts = append(opts, mqc.WithPullBatchSize(r.metadata.PullBatchSize))
} else if r.metadata.ConsumerBatchSize > 0 {
r.metadata.PullBatchSize = int32(r.metadata.ConsumerBatchSize)
opts = append(opts, mqc.WithPullBatchSize(r.metadata.PullBatchSize))
r.logger.Warn("set the number of msg pulled from the broker at a time, " +
"please use pullBatchSize instead of consumerBatchSize")
}
c, e := mqc.NewPushConsumer(opts...)
if e != nil {
return nil, e
}
return c, e
} }
func (r *rocketMQ) setUpProducer() (mq.Producer, error) { func (r *rocketMQ) setUpProducer() (mq.Producer, error) {
opts := make([]mqp.Option, 0) opts := make([]mqp.Option, 0)
if r.metadata.Retries != 0 { if r.metadata.InstanceName != "" {
opts = append(opts, mqp.WithRetry(r.metadata.Retries)) opts = append(opts, mqp.WithInstanceName(r.metadata.InstanceName))
}
if r.metadata.GroupName != "" {
opts = append(opts, mqp.WithGroupName(r.metadata.GroupName))
} }
if r.metadata.ProducerGroup != "" { if r.metadata.ProducerGroup != "" {
opts = append(opts, mqp.WithGroupName(r.metadata.ProducerGroup)) opts = append(opts, mqp.WithGroupName(r.metadata.ProducerGroup))
} else if r.metadata.GroupName != "" {
r.metadata.ProducerGroup = r.metadata.GroupName
opts = append(opts, mqp.WithGroupName(r.metadata.ProducerGroup))
r.logger.Warnf("set the producer group name, please use the keyword producerGroup")
} }
if r.metadata.NameServerDomain != "" { if r.metadata.NameServer != "" {
opts = append(opts, mqp.WithNameServerDomain(r.metadata.NameServerDomain)) opts = append(opts, mqp.WithNameServer(parseNameServer(r.metadata.NameServer)))
} }
if r.metadata.NameSpace != "" { if r.metadata.NameSpace != "" {
opts = append(opts, mqp.WithNamespace(r.metadata.NameSpace)) opts = append(opts, mqp.WithNamespace(r.metadata.NameSpace))
} }
if r.metadata.NameServer != "" { if r.metadata.NameServerDomain != "" {
opts = append(opts, mqp.WithNameServer(primitive.NamesrvAddr{r.metadata.NameServer})) opts = append(opts, mqp.WithNameServerDomain(r.metadata.NameServerDomain))
} }
if r.metadata.AccessKey != "" && r.metadata.SecretKey != "" { if r.metadata.AccessKey != "" && r.metadata.SecretKey != "" {
opts = append(opts, mqp.WithCredentials(primitive.Credentials{ opts = append(opts, mqp.WithCredentials(primitive.Credentials{
AccessKey: r.metadata.AccessKey, AccessKey: r.metadata.AccessKey,
SecretKey: r.metadata.SecretKey, SecretKey: r.metadata.SecretKey,
SecurityToken: r.metadata.SecurityToken,
})) }))
} }
if r.metadata.Retries > 0 {
opts = append(opts, mqp.WithRetry(r.metadata.Retries))
}
if r.metadata.SendTimeOutSec > 0 {
opts = append(opts, mqp.WithSendMsgTimeout(time.Duration(r.metadata.SendTimeOutSec)*time.Second))
} else if r.metadata.SendTimeOut > 0 {
r.metadata.SendTimeOutSec = r.metadata.SendTimeOut / int(time.Second.Nanoseconds())
opts = append(opts, mqp.WithSendMsgTimeout(time.Duration(r.metadata.SendTimeOutSec)*time.Second))
r.logger.Warn("set the timeout for send msg to rocketmq broker, please use the keyword sendTimeOutSec. " +
"SendTimeOutSec is in seconds, SendTimeOut is in nanoseconds")
} else {
opts = append(opts, mqp.WithSendMsgTimeout(30*time.Second))
r.logger.Warn("You have not set a timeout for send msg to rocketmq broker, " +
"The default value of 30 seconds will be used. ")
}
switch r.metadata.ProducerQueueSelector {
case HashQueueSelector:
opts = append(opts, mqp.WithQueueSelector(mqp.NewHashQueueSelector()))
case RandomQueueSelector:
opts = append(opts, mqp.WithQueueSelector(mqp.NewRandomQueueSelector()))
case RoundRobinQueueSelector:
opts = append(opts, mqp.WithQueueSelector(mqp.NewRoundRobinQueueSelector()))
case ManualQueueSelector:
opts = append(opts, mqp.WithQueueSelector(mqp.NewManualQueueSelector()))
case DaprQueueSelector:
opts = append(opts, mqp.WithQueueSelector(NewDaprQueueSelector()))
default:
opts = append(opts, mqp.WithQueueSelector(NewDaprQueueSelector()))
}
producer, err := mq.NewProducer(opts...) producer, err := mq.NewProducer(opts...)
if err != nil { if err != nil {
return nil, err return nil, err
@ -146,208 +289,198 @@ func (r *rocketMQ) Features() []pubsub.Feature {
return nil return nil
} }
func (r *rocketMQ) getProducer() (mq.Producer, error) {
if nil != r.producer {
return r.producer, nil
}
r.producerLock.Lock()
defer r.producerLock.Unlock()
if nil != r.producer {
return r.producer, nil
}
producer, e := r.setUpProducer()
if e != nil {
return nil, e
}
r.producer = producer
return r.producer, nil
}
func (r *rocketMQ) resetProducer() {
r.producerLock.Lock()
defer r.producerLock.Unlock()
r.producer = nil
}
func (r *rocketMQ) Publish(req *pubsub.PublishRequest) error { func (r *rocketMQ) Publish(req *pubsub.PublishRequest) error {
r.logger.Debugf("rocketmq publish topic:%s with data:%v", req.Topic, req.Data) r.logger.Debugf("rocketmq publish topic:%s with data:%v", req.Topic, req.Data)
msg := newRocketMQMessage(req) msg := primitive.NewMessage(req.Topic, req.Data)
for k, v := range req.Metadata {
publishBo := backoff.NewExponentialBackOff() switch strings.ToLower(k) {
publishBo.InitialInterval = 100 * time.Millisecond case metadataRocketmqTag:
bo := backoff.WithMaxRetries(publishBo, 3) msg.WithTag(v)
bo = backoff.WithContext(bo, r.ctx) case metadataRocketmqKey:
return retry.NotifyRecover( msg.WithKeys(strings.Split(v, ","))
func() (err error) { case metadataRocketmqShardingKey:
r.producerLock.RLock() msg.WithShardingKey(v)
producer := r.producer default:
r.producerLock.RUnlock() msg.WithProperty(k, v)
if producer == nil {
r.producerLock.Lock()
r.producer, err = r.setUpProducer()
if err != nil {
r.producer = nil
}
producer = r.producer
r.producerLock.Unlock()
if err != nil {
return err
} }
} }
producer, e := r.getProducer()
sendTimeOut := time.Duration(r.metadata.SendTimeOutSec) * time.Second if e != nil {
ctx, cancel := context.WithTimeout(r.ctx, sendTimeOut) return fmt.Errorf("rocketmq message send fail because producer failed to initialize: %v", e)
defer cancel()
result, err := producer.SendSync(ctx, msg)
if err != nil {
r.producerLock.Lock()
r.producer = nil
r.producerLock.Unlock()
r.logger.Errorf("error send message topic:%s : %v", req.Topic, err)
return ErrRocketmqPublishMsg
} }
r.logger.Debugf("rocketmq send result topic:%s tag:%s status:%v", req.Topic, msg.GetTags(), result.Status) result, e := producer.SendSync(r.ctx, msg)
if e != nil {
r.resetProducer()
m := fmt.Sprintf("rocketmq message send fail, topic[%s]: %v", req.Topic, e)
r.logger.Error(m)
return errors.New(m)
}
r.logger.Debugf("rocketmq message send result: topic[%s], tag[%s], status[%v]", req.Topic, msg.GetTags(), result.Status)
return nil return nil
},
bo,
func(err error, d time.Duration) {
r.logger.Errorf("rocketmq error: fail to send message. topic:%s. Retrying...", msg.Topic)
},
func() {
r.logger.Infof("rocketmq successfully sent message after it previously failed. topic:%s.", msg.Topic)
},
)
} }
func newRocketMQMessage(req *pubsub.PublishRequest) *primitive.Message { func (r *rocketMQ) Subscribe(ctx context.Context, req pubsub.SubscribeRequest, handler pubsub.Handler) error {
return primitive.NewMessage(req.Topic, req.Data). selector, e := buildMessageSelector(req)
WithTag(req.Metadata[metadataRocketmqTag]). if e != nil {
WithKeys([]string{req.Metadata[metadataRocketmqKey]}). r.logger.Warnf("rocketmq subscribe failed: %v", e)
WithShardingKey(req.Metadata[metadataRocketmqShardingKey]) return e
}
var cb func(ctx context.Context, msgs ...*primitive.MessageExt) (mqc.ConsumeResult, error)
if utils.IsTruthy(r.metadata.ConsumeOrderly) {
cb = r.consumeMessageOrderly(req.Topic, selector, handler)
} else {
cb = r.consumeMessageConcurrently(req.Topic, selector, handler)
}
r.consumerLock.Lock()
defer r.consumerLock.Unlock()
r.topics[req.Topic] = *selector
if nil == r.consumer {
// if consumer is not initialized, initialize it
if r.consumer, e = r.setUpConsumer(); e != nil {
return fmt.Errorf("consumer setup failed: %v", e)
}
// consumer will start after one second.
// Consumers who complete the subscription within 1 second, will begin the subscription immediately upon launch.
// Consumers who do not complete the subscription within 1 second, will start the subscription after 20 seconds.
// The 20-second time is the interval for RocketMQ to refresh the topic route.
go func() {
time.Sleep(time.Second)
if e = r.consumer.Start(); e == nil {
r.logger.Infof("consumer start success: Group[%s], Topics[%v]", r.metadata.ConsumerGroup, r.topics)
} else {
r.logger.Errorf("consumer start failed: %v", e)
}
}()
}
// subscribe topic
if e = r.consumer.Subscribe(req.Topic, *selector, cb); e != nil {
r.logger.Errorf("subscribe topic[%s] Group[%s] failed, error: %v", req.Topic, r.metadata.ConsumerGroup, e)
return e
}
r.logger.Infof("subscribe topic[%s] success, Group[%s], Topics[%v]", req.Topic, r.metadata.ConsumerGroup, r.topics)
return nil
} }
type mqSubscribeCallback func(ctx context.Context, msgs ...*primitive.MessageExt) (mqc.ConsumeResult, error) func buildMessageSelector(req pubsub.SubscribeRequest) (*mqc.MessageSelector, error) {
if req.Metadata == nil {
req.Metadata = make(map[string]string)
}
mqExpr := req.Metadata[metadataRocketmqExpression]
mqType := req.Metadata[metadataRocketmqType]
func (r *rocketMQ) adaptCallback(topic, consumerGroup, mqType, mqExpr string, handler pubsub.Handler) mqSubscribeCallback { var ExpressionType mqc.ExpressionType
return func(ctx context.Context, msgs ...*primitive.MessageExt) (mqc.ConsumeResult, error) { switch strings.ToUpper(mqType) {
success := true case "", string(mqc.TAG):
for _, msg := range msgs { ExpressionType = mqc.TAG
cloudEventsMap := pubsub.NewCloudEventsEnvelope(msg.MsgId, msg.StoreHost, r.name, msg.GetProperty(primitive.PropertyKeys), msg.Topic, r.name, r.metadata.ContentType, msg.Body, "", "") case string(mqc.SQL92):
ExpressionType = mqc.SQL92
default:
return nil, fmt.Errorf("rocketmq msg type invalid: %s, expected value is 'tag' or 'sql92' or ''", mqType)
}
return &mqc.MessageSelector{
Type: ExpressionType,
Expression: mqExpr,
}, nil
}
func (r *rocketMQ) buildPubsubMessage(topic, mqType, mqExpr string, msg *primitive.MessageExt) (*pubsub.NewMessage, error) {
cloudEventsMap := pubsub.NewCloudEventsEnvelope(msg.MsgId, msg.StoreHost, "", "", msg.Topic, r.name, r.metadata.ContentType, msg.Body, "", "")
cloudEventsMap[primitive.PropertyKeys] = msg.GetKeys()
cloudEventsMap[primitive.PropertyShardingKey] = msg.GetShardingKey()
cloudEventsMap[primitive.PropertyTags] = msg.GetTags()
cloudEventsMap[primitive.PropertyMsgRegion] = msg.GetRegionID()
for k, v := range msg.GetProperties() {
if _, ok := r.msgProperties[k]; ok {
cloudEventsMap[k] = v
}
if strings.EqualFold(k, pubsub.TraceIDField) {
cloudEventsMap[pubsub.TraceIDField] = v
}
}
dataBytes, err := json.Marshal(cloudEventsMap) dataBytes, err := json.Marshal(cloudEventsMap)
if err != nil { if err != nil {
r.logger.Warn("rocketmq fail to marshal cloudEventsMap message, topic:%s cloudEventsMap-length:%d err:%newMessage ", msg.Topic, len(msg.Body), err) return nil, err
success = false
continue
} }
metadata := map[string]string{ metadata := map[string]string{
metadataRocketmqType: mqType, metadataRocketmqType: mqType,
metadataRocketmqExpression: mqExpr, metadataRocketmqExpression: mqExpr,
metadataRocketmqConsumerGroup: consumerGroup, metadataRocketmqConsumerGroup: r.metadata.ProducerGroup,
} }
if msg.Queue != nil { if msg.Queue != nil {
metadata[metadataRocketmqBrokerName] = msg.Queue.BrokerName metadata[metadataRocketmqBrokerName] = msg.Queue.BrokerName
metadata[metadataRocketmqQueueID] = strconv.Itoa(msg.Queue.QueueId)
} }
newMessage := &pubsub.NewMessage{ return &pubsub.NewMessage{
Topic: topic, Topic: topic,
Data: dataBytes, Data: dataBytes,
Metadata: metadata, Metadata: metadata,
}, nil
}
func (r *rocketMQ) consumeMessageOrderly(topic string, selector *mqc.MessageSelector, handler pubsub.Handler) func(ctx context.Context, msgs ...*primitive.MessageExt) (mqc.ConsumeResult, error) {
return func(ctx context.Context, msgs ...*primitive.MessageExt) (mqc.ConsumeResult, error) {
for _, msg := range msgs {
newMessage, e := r.buildPubsubMessage(topic, string(selector.Type), selector.Expression, msg)
if e != nil {
r.logger.Errorf("rocketmq message consume fail, topic: %s, msgId: %s, error: %v", newMessage.Topic, msg.MsgId, e)
return mqc.SuspendCurrentQueueAMoment, nil
} }
err = handler(ctx, newMessage) e = handler(ctx, newMessage)
if err != nil { if e != nil {
r.logger.Errorf("rocketmq error: fail to process message. topic:%s cloudEventsMap-length:%d err:%v.", newMessage.Topic, len(msg.Body), err) r.logger.Errorf("rocketmq message consume fail, topic: %s, msgId: %s, error: %v", newMessage.Topic, msg.MsgId, e)
success = false return mqc.SuspendCurrentQueueAMoment, nil
} }
} }
if !success {
return mqc.ConsumeRetryLater, nil
}
return mqc.ConsumeSuccess, nil return mqc.ConsumeSuccess, nil
} }
} }
func (r *rocketMQ) Subscribe(ctx context.Context, req pubsub.SubscribeRequest, handler pubsub.Handler) error { func (r *rocketMQ) consumeMessageConcurrently(topic string, selector *mqc.MessageSelector, handler pubsub.Handler) func(ctx context.Context, msgs ...*primitive.MessageExt) (mqc.ConsumeResult, error) {
if req.Metadata == nil { return func(ctx context.Context, msgs ...*primitive.MessageExt) (mqc.ConsumeResult, error) {
req.Metadata = make(map[string]string) for _, msg := range msgs {
newMessage, e := r.buildPubsubMessage(topic, string(selector.Type), selector.Expression, msg)
if e != nil {
r.logger.Errorf("rocketmq message consume fail, topic: %s, msgId: %s, error: %v", newMessage.Topic, msg.MsgId, e)
return mqc.ConsumeRetryLater, nil
} }
var ( e = handler(ctx, newMessage)
mqExpr = req.Metadata[metadataRocketmqExpression] if e != nil {
mqType = req.Metadata[metadataRocketmqType] r.logger.Errorf("rocketmq message consume fail, topic: %s, msgId: %s, error: %v", newMessage.Topic, msg.MsgId, e)
) return mqc.ConsumeRetryLater, nil
if !r.validMqTypeParams(mqType) {
return ErrRocketmqValidPublishMsgTyp
}
consumerGroup := r.metadata.ConsumerGroup
if group, ok := req.Metadata[metadataRocketmqConsumerGroup]; ok {
consumerGroup = group
}
r.consumerLock.Lock()
defer r.consumerLock.Unlock()
// Start the subscription
// When the connection is ready, add the topic
// Use the global context here to maintain the connection
r.startSubscription(ctx, func() {
r.topics[req.Topic] = topicData{
handler: handler,
selector: mqc.MessageSelector{
Type: mqc.ExpressionType(mqType),
Expression: mqExpr,
},
consumerGroup: consumerGroup,
mqExpr: mqExpr,
mqType: mqType,
}
})
// Listen for context cancelation to remove the subscription
go func() {
select {
case <-ctx.Done():
case <-r.ctx.Done():
}
r.consumerLock.Lock()
defer r.consumerLock.Unlock()
// If this is the last subscription or if the global context is done, close the connection entirely
if len(r.topics) <= 1 || r.ctx.Err() != nil {
_ = r.consumer.Shutdown()
r.consumer = nil
delete(r.topics, req.Topic)
return
}
// Reconnect with one less topic
r.startSubscription(r.ctx, func() {
delete(r.topics, req.Topic)
})
}()
return nil
}
// Should be wrapped around r.consumerLock lock
func (r *rocketMQ) startSubscription(ctx context.Context, onConnRready func()) (err error) {
// reset synchronization
if r.consumer != nil {
r.logger.Infof("re-initializing the consumer")
_ = r.consumer.Shutdown()
r.consumer = nil
} else {
r.logger.Infof("initializing the consumer")
}
r.consumer, err = r.setUpConsumer()
if err != nil {
r.consumer = nil
return err
}
// Invoke onConnReady so changes to the topics can be made safely
onConnRready()
for topic, data := range r.topics {
cb := r.adaptCallback(topic, r.metadata.ConsumerGroup, string(data.selector.Type), data.selector.Expression, data.handler)
err = r.consumer.Subscribe(topic, data.selector, cb)
if err != nil {
r.logger.Errorf("subscribe topic:%v failed,error:%v", topic, err)
continue
} }
} }
return mqc.ConsumeSuccess, nil
err = r.consumer.Start()
if err != nil {
return fmt.Errorf("consumer start failed. %w", err)
} }
return nil
}
func (r *rocketMQ) validMqTypeParams(mqType string) bool {
if len(mqType) != 0 && (mqType != string(mqc.SQL92) && mqType != string(mqc.TAG)) {
r.logger.Warnf("rocketmq subscribe failed because some illegal type(%s).", mqType)
return false
}
return true
} }
func (r *rocketMQ) Close() error { func (r *rocketMQ) Close() error {
@ -360,7 +493,7 @@ func (r *rocketMQ) Close() error {
r.producer = nil r.producer = nil
if r.consumer != nil { if nil != r.consumer {
_ = r.consumer.Shutdown() _ = r.consumer.Shutdown()
r.consumer = nil r.consumer = nil
} }

View File

@ -16,6 +16,7 @@ package rocketmq
import ( import (
"context" "context"
"testing" "testing"
"time"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
@ -30,56 +31,192 @@ func getTestMetadata() map[string]string {
"consumerGroup": "dapr.rocketmq.producer", "consumerGroup": "dapr.rocketmq.producer",
"accessKey": "RocketMQ", "accessKey": "RocketMQ",
"secretKey": "12345", "secretKey": "12345",
"consumerBatchSize": "1",
"consumerThreadNums": "2",
"retries": "2", "retries": "2",
"sendMsgTimeout": "30",
} }
} }
func TestParseRocketMQMetadata(t *testing.T) { func TestParseRocketMQMetadata(t *testing.T) {
t.Run("correct metadata", func(t *testing.T) {
meta := getTestMetadata() meta := getTestMetadata()
_, err := parseRocketMQMetaData(pubsub.Metadata{Base: mdata.Base{Properties: meta}}, logger.NewLogger("test")) _, err := parseRocketMQMetaData(pubsub.Metadata{Base: mdata.Base{Properties: meta}})
assert.Nil(t, err) assert.Nil(t, err)
}) }
t.Run("correct init", func(t *testing.T) { func TestRocketMQ_Init(t *testing.T) {
meta := getTestMetadata() meta := getTestMetadata()
r := NewRocketMQ(logger.NewLogger("test")) r := NewRocketMQ(logger.NewLogger("test"))
err := r.Init(pubsub.Metadata{Base: mdata.Base{Properties: meta}}) err := r.Init(pubsub.Metadata{Base: mdata.Base{Properties: meta}})
assert.Nil(t, err) assert.Nil(t, err)
}) }
func TestRocketMQ_Publish_Currently(t *testing.T) {
l, r, e := BuildRocketMQ()
assert.Nil(t, e)
t.Run("setup producer missing nameserver", func(t *testing.T) {
meta := getTestMetadata()
delete(meta, "nameServer")
r := NewRocketMQ(logger.NewLogger("test"))
err := r.Init(pubsub.Metadata{Base: mdata.Base{Properties: meta}})
assert.Nil(t, err)
req := &pubsub.PublishRequest{ req := &pubsub.PublishRequest{
Data: []byte("hello"), Data: []byte("{\"key\": 1, \"value\": \"1\"}"),
PubsubName: "rocketmq", PubsubName: "rocketmq",
Topic: "test", Topic: "ZCY_ZHIXING_TEST_test",
Metadata: map[string]string{}, Metadata: map[string]string{},
} }
err = r.Publish(req) e = r.Publish(req)
assert.NotNil(t, err) if e != nil {
}) l.Error(e)
return
}
assert.Nil(t, e)
t.Run("subscribe illegal type", func(t *testing.T) { req = &pubsub.PublishRequest{
meta := getTestMetadata() Data: []byte("{\"key\": 2, \"value\": \"2\"}"),
r := NewRocketMQ(logger.NewLogger("test")) PubsubName: "rocketmq",
err := r.Init(pubsub.Metadata{Base: mdata.Base{Properties: meta}}) Topic: "ZCY_ZHIXING_TEST_test",
assert.Nil(t, err)
req := pubsub.SubscribeRequest{
Topic: "test",
Metadata: map[string]string{ Metadata: map[string]string{
metadataRocketmqType: "incorrect type", "rocketmq-tag": "tag",
"rocketmq-key": "2",
"rocketmq-shardingkey": "key",
"traceId": "4a09073987b148348ae0420435cddf5e",
}, },
} }
e = r.Publish(req)
assert.Nil(t, e)
req = &pubsub.PublishRequest{
Data: []byte("{\"key\": 3, \"value\": \"3\"}"),
PubsubName: "rocketmq",
Topic: "ZCY_ZHIXING_TEST_test",
Metadata: map[string]string{
"rocketmq-tag": "tag",
"rocketmq-key": "3",
"rocketmq-shardingkey": "key",
},
}
e = r.Publish(req)
assert.Nil(t, e)
req = &pubsub.PublishRequest{
Data: []byte("{\"key\": 4, \"value\": \"4\"}"),
PubsubName: "rocketmq",
Topic: "ZCY_ZHIXING_TEST_test",
Metadata: map[string]string{
"rocketmq-tag": "tag",
"rocketmq-key": "4",
"rocketmq-shardingkey": "key",
},
}
e = r.Publish(req)
assert.Nil(t, e)
}
func TestRocketMQ_Publish_Orderly(t *testing.T) {
l, r, e := BuildRocketMQ()
assert.Nil(t, e)
req := &pubsub.PublishRequest{
Data: []byte("{\"key\": 1, \"value\": \"1\", \"sKey\": \"sKeyHello\"}"),
PubsubName: "rocketmq",
Topic: "ZCY_ZHIXING_TEST_ORDER_test",
Metadata: map[string]string{
"rocketmq-tag": "tag",
"rocketmq-key": "1",
"rocketmq-shardingkey": "sKey",
"rocketmq-queue": "2",
},
}
e = r.Publish(req)
if e != nil {
l.Error(e)
return
}
assert.Nil(t, e)
req = &pubsub.PublishRequest{
Data: []byte("{\"key\": 2, \"value\": \"2\", \"sKey\": \"sKeyHello\"}"),
PubsubName: "rocketmq",
Topic: "ZCY_ZHIXING_TEST_ORDER_test",
Metadata: map[string]string{
"rocketmq-tag": "tag",
"rocketmq-key": "2",
"rocketmq-shardingkey": "sKey",
"rocketmq-queue": "3",
},
}
e = r.Publish(req)
assert.Nil(t, e)
req = &pubsub.PublishRequest{
Data: []byte("{\"key\": 3, \"value\": \"3\", \"sKey\": \"sKeyHello\"}"),
PubsubName: "rocketmq",
Topic: "ZCY_ZHIXING_TEST_ORDER_test",
Metadata: map[string]string{
"rocketmq-tag": "tag",
"rocketmq-key": "3",
"rocketmq-shardingkey": "sKey",
},
}
e = r.Publish(req)
assert.Nil(t, e)
}
func TestRocketMQ_Subscribe_Currently(t *testing.T) {
l, r, e := BuildRocketMQ()
assert.Nil(t, e)
req := pubsub.SubscribeRequest{
Topic: "ZCY_ZHIXING_TEST_test",
}
handler := func(ctx context.Context, msg *pubsub.NewMessage) error { handler := func(ctx context.Context, msg *pubsub.NewMessage) error {
l.Info(string(msg.Data))
return nil return nil
} }
err = r.Subscribe(context.Background(), req, handler) e = r.Subscribe(context.Background(), req, handler)
assert.NotNil(t, err) if e != nil {
}) l.Error(e)
return
}
assert.Nil(t, e)
time.Sleep(20 * time.Second)
}
func TestRocketMQ_Subscribe_Orderly(t *testing.T) {
l, r, e := BuildRocketMQ()
assert.Nil(t, e)
handler := func(ctx context.Context, msg *pubsub.NewMessage) error {
l.Info(msg.Topic, string(msg.Data))
return nil
}
req := pubsub.SubscribeRequest{
Topic: "ZCY_ZHIXING_TEST_ORDER_test",
Metadata: map[string]string{
metadataRocketmqType: "tag",
metadataRocketmqExpression: "*",
},
}
e = r.Subscribe(context.Background(), req, handler)
if e != nil {
l.Error(e)
return
}
assert.Nil(t, e)
req = pubsub.SubscribeRequest{
Topic: "ZCY_ZHIXING_TEST_test",
Metadata: map[string]string{
metadataRocketmqType: "tag",
metadataRocketmqExpression: "*",
},
}
e = r.Subscribe(context.Background(), req, handler)
assert.Nil(t, e)
time.Sleep(20 * time.Second)
}
func BuildRocketMQ() (logger.Logger, pubsub.PubSub, error) {
meta := getTestMetadata()
l := logger.NewLogger("test")
r := NewRocketMQ(l)
err := r.Init(pubsub.Metadata{Base: mdata.Base{Properties: meta}})
return l, r, err
} }

View File

@ -4,4 +4,4 @@ Secret Stores provide a common way to interact with different secret stores, clo
## Implementing a new Secret Store ## Implementing a new Secret Store
A compliant secret store needs to implement the `SecretStore` inteface included in the [`secret_store.go`](secret_store.go) file. A compliant secret store needs to implement the `SecretStore` interface included in the [`secret_store.go`](secret_store.go) file.

View File

@ -15,8 +15,8 @@ package parameterstore
import ( import (
"context" "context"
"encoding/json"
"fmt" "fmt"
"reflect"
"strconv" "strconv"
"time" "time"
@ -25,6 +25,7 @@ import (
util "github.com/alibabacloud-go/tea-utils/service" util "github.com/alibabacloud-go/tea-utils/service"
"github.com/alibabacloud-go/tea/tea" "github.com/alibabacloud-go/tea/tea"
"github.com/dapr/components-contrib/metadata"
"github.com/dapr/components-contrib/secretstores" "github.com/dapr/components-contrib/secretstores"
"github.com/dapr/kit/logger" "github.com/dapr/kit/logger"
) )
@ -42,7 +43,7 @@ func NewParameterStore(logger logger.Logger) secretstores.SecretStore {
return &oosSecretStore{logger: logger} return &oosSecretStore{logger: logger}
} }
type parameterStoreMetaData struct { type ParameterStoreMetaData struct {
RegionID *string `json:"regionId"` RegionID *string `json:"regionId"`
AccessKeyID *string `json:"accessKeyId"` AccessKeyID *string `json:"accessKeyId"`
AccessKeySecret *string `json:"accessKeySecret"` AccessKeySecret *string `json:"accessKeySecret"`
@ -151,7 +152,7 @@ func (o *oosSecretStore) BulkGetSecret(ctx context.Context, req secretstores.Bul
return response, nil return response, nil
} }
func (o *oosSecretStore) getClient(metadata *parameterStoreMetaData) (*oos.Client, error) { func (o *oosSecretStore) getClient(metadata *ParameterStoreMetaData) (*oos.Client, error) {
config := &client.Config{ config := &client.Config{
RegionId: metadata.RegionID, RegionId: metadata.RegionID,
AccessKeyId: metadata.AccessKeyID, AccessKeyId: metadata.AccessKeyID,
@ -161,18 +162,9 @@ func (o *oosSecretStore) getClient(metadata *parameterStoreMetaData) (*oos.Clien
return oos.NewClient(config) return oos.NewClient(config)
} }
func (o *oosSecretStore) getParameterStoreMetadata(spec secretstores.Metadata) (*parameterStoreMetaData, error) { func (o *oosSecretStore) getParameterStoreMetadata(spec secretstores.Metadata) (*ParameterStoreMetaData, error) {
b, err := json.Marshal(spec.Properties) meta := ParameterStoreMetaData{}
if err != nil { metadata.DecodeMetadata(spec.Properties, &meta)
return nil, err
}
var meta parameterStoreMetaData
err = json.Unmarshal(b, &meta)
if err != nil {
return nil, err
}
return &meta, nil return &meta, nil
} }
@ -204,3 +196,10 @@ func (o *oosSecretStore) getPathFromMetadata(metadata map[string]string) *string
func (o *oosSecretStore) Features() []secretstores.Feature { func (o *oosSecretStore) Features() []secretstores.Feature {
return []secretstores.Feature{} // No Feature supported. return []secretstores.Feature{} // No Feature supported.
} }
func (o *oosSecretStore) GetComponentMetadata() map[string]string {
metadataStruct := ParameterStoreMetaData{}
metadataInfo := map[string]string{}
metadata.GetMetadataInfoFromStructType(reflect.TypeOf(metadataStruct), &metadataInfo)
return metadataInfo
}

Some files were not shown because too many files have changed in this diff Show More