Merge branch 'master' into state-store-ttl-mongodb

Bernd Verst 2023-02-17 16:34:25 -08:00 committed by GitHub
commit 1b6e16fe41
42 changed files with 1255 additions and 902 deletions

View File

@@ -40,4 +40,4 @@
"state/oci",
"state/utils"
]
}
}

View File

@@ -1,76 +1,76 @@
{
"name": "Dapr Components Contributor Environment",
"image": "ghcr.io/dapr/dapr-dev:latest",
"containerEnv": {
// Uncomment to overwrite devcontainer .kube/config and .minikube certs with the localhost versions
// each time the devcontainer starts, if the respective .kube-localhost/config and .minikube-localhost
// folders respectively are bind mounted to the devcontainer.
// "SYNC_LOCALHOST_KUBECONFIG": "true",
"name": "Dapr Components Contributor Environment",
"image": "ghcr.io/dapr/dapr-dev:latest",
"containerEnv": {
// Uncomment to overwrite devcontainer .kube/config and .minikube certs with the localhost versions
// each time the devcontainer starts, if the respective .kube-localhost/config and .minikube-localhost
// folders respectively are bind mounted to the devcontainer.
// "SYNC_LOCALHOST_KUBECONFIG": "true",
// Uncomment to disable docker-in-docker and automatically proxy default /var/run/docker.sock to
// the localhost bind-mount /var/run/docker-host.sock.
// "BIND_LOCALHOST_DOCKER": "true",
// Uncomment to disable docker-in-docker and automatically proxy default /var/run/docker.sock to
// the localhost bind-mount /var/run/docker-host.sock.
// "BIND_LOCALHOST_DOCKER": "true",
// Necessary for components-contrib's certification tests
"GOLANG_PROTOBUF_REGISTRATION_CONFLICT": "true"
},
"extensions": [
"davidanson.vscode-markdownlint",
"golang.go",
"ms-azuretools.vscode-dapr",
"ms-azuretools.vscode-docker",
"ms-kubernetes-tools.vscode-kubernetes-tools"
],
"features": {
"ghcr.io/devcontainers/features/sshd:1": {},
"ghcr.io/devcontainers/features/github-cli:1": {},
"ghcr.io/devcontainers/features/azure-cli:1": {}
},
"mounts": [
// Mount docker-in-docker library volume
"type=volume,source=dind-var-lib-docker,target=/var/lib/docker",
// Necessary for components-contrib's certification tests
"GOLANG_PROTOBUF_REGISTRATION_CONFLICT": "true"
},
"extensions": [
"davidanson.vscode-markdownlint",
"golang.go",
"ms-azuretools.vscode-dapr",
"ms-azuretools.vscode-docker",
"ms-kubernetes-tools.vscode-kubernetes-tools"
],
"features": {
"ghcr.io/devcontainers/features/sshd:1": {},
"ghcr.io/devcontainers/features/github-cli:1": {},
"ghcr.io/devcontainers/features/azure-cli:1": {}
},
"mounts": [
// Mount docker-in-docker library volume
"type=volume,source=dind-var-lib-docker,target=/var/lib/docker",
// Bind mount docker socket under an alias to support docker-from-docker
"type=bind,source=/var/run/docker.sock,target=/var/run/docker-host.sock",
// Bind mount docker socket under an alias to support docker-from-docker
"type=bind,source=/var/run/docker.sock,target=/var/run/docker-host.sock"
// Uncomment to clone local .kube/config into devcontainer
// "type=bind,source=${env:HOME}${env:USERPROFILE}/.kube,target=/home/dapr/.kube-localhost",
// Uncomment to clone local .kube/config into devcontainer
// "type=bind,source=${env:HOME}${env:USERPROFILE}/.kube,target=/home/dapr/.kube-localhost",
// Uncomment to additionally clone minikube certs into devcontainer for use with .kube/config
// "type=bind,source=${env:HOME}${env:USERPROFILE}/.minikube,target=/home/dapr/.minikube-localhost"
],
// Always run image-defined default command
"overrideCommand": false,
// On Linux, this will prevent new files getting created as root, but you
// may need to update the USER_UID and USER_GID in docker/Dockerfile-dev
// to match your user if not 1000.
"remoteUser": "dapr",
"runArgs": [
// Enable ptrace-based debugging for go
"--cap-add=SYS_PTRACE",
"--security-opt",
"seccomp=unconfined",
// Uncomment to additionally clone minikube certs into devcontainer for use with .kube/config
// "type=bind,source=${env:HOME}${env:USERPROFILE}/.minikube,target=/home/dapr/.minikube-localhost"
],
// Always run image-defined default command
"overrideCommand": false,
// On Linux, this will prevent new files getting created as root, but you
// may need to update the USER_UID and USER_GID in docker/Dockerfile-dev
// to match your user if not 1000.
"remoteUser": "dapr",
"runArgs": [
// Enable ptrace-based debugging for go
"--cap-add=SYS_PTRACE",
"--security-opt",
"seccomp=unconfined",
// Uncomment to bind to host network for local devcontainer; this is necessary if using the
// bind-mounted /var/run/docker-host.sock directly.
// "--net=host",
// Uncomment to bind to host network for local devcontainer; this is necessary if using the
// bind-mounted /var/run/docker-host.sock directly.
// "--net=host",
// Enable docker-in-docker configuration. Comment out if not using for better security.
"--privileged",
// Enable docker-in-docker configuration. Comment out if not using for better security.
"--privileged",
// Run the entrypoint defined in container image.
"--init"
],
"settings": {
"go.toolsManagement.checkForUpdates": "local",
"go.useLanguageServer": true,
"go.gopath": "/go",
"go.buildTags": "e2e,perf,conftests,unit,integration_test,certtests",
"git.alwaysSignOff": true,
"terminal.integrated.env.linux": {
"GOLANG_PROTOBUF_REGISTRATION_CONFLICT": "ignore"
}
},
"workspaceFolder": "/workspaces/components-contrib",
"workspaceMount": "type=bind,source=${localWorkspaceFolder},target=/workspaces/components-contrib",
// Run the entrypoint defined in container image.
"--init"
],
"settings": {
"go.toolsManagement.checkForUpdates": "local",
"go.useLanguageServer": true,
"go.gopath": "/go",
"go.buildTags": "e2e,perf,conftests,unit,integration_test,certtests",
"git.alwaysSignOff": true,
"terminal.integrated.env.linux": {
"GOLANG_PROTOBUF_REGISTRATION_CONFLICT": "ignore"
}
},
"workspaceFolder": "/workspaces/components-contrib",
"workspaceMount": "type=bind,source=${localWorkspaceFolder},target=/workspaces/components-contrib"
}
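
As a quick sanity check after this devcontainer starts, the sketch below (not part of this commit) verifies that the docker-in-docker daemon enabled by the "--privileged" run argument is reachable, then runs one certification suite, assuming the certification suites build with the certtests tag listed in go.buildTags above. The component path state/mongodb is only an illustration.
#!/bin/sh
# Sketch only: sanity checks inside the devcontainer described above.
docker info > /dev/null && echo "docker-in-docker OK"
# Run one certification suite using the build tags configured in go.buildTags;
# tests/certification/state/mongodb is an illustrative path.
cd /workspaces/components-contrib/tests/certification/state/mongodb
go test -v -count=1 -tags certtests .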

View File

@@ -0,0 +1,9 @@
#!/bin/sh
set +e
# Navigate to the Terraform directory
cd ".github/infrastructure/terraform/certification/pubsub/aws/snssqs"
# Run Terraform
terraform destroy -auto-approve -var="UNIQUE_ID=$UNIQUE_ID" -var="TIMESTAMP=$CURRENT_TIME"

View File

@@ -0,0 +1,29 @@
#!/bin/sh
set -e
# Set variables for GitHub Actions
echo "AWS_REGION=us-east-1" >> $GITHUB_ENV
echo "PUBSUB_AWS_SNSSQS_QUEUE_1=sqssnscerttest-q1-$UNIQUE_ID" >> $GITHUB_ENV
echo "PUBSUB_AWS_SNSSQS_QUEUE_2=sqssnscerttest-q2-$UNIQUE_ID" >> $GITHUB_ENV
echo "PUBSUB_AWS_SNSSQS_QUEUE_3=sqssnscerttest-q3-$UNIQUE_ID" >> $GITHUB_ENV
echo "PUBSUB_AWS_SNSSQS_TOPIC_3=sqssnscerttest-t3-$UNIQUE_ID" >> $GITHUB_ENV
echo "PUBSUB_AWS_SNSSQS_QUEUE_MVT=sqssnscerttest-q-mvt-$UNIQUE_ID" >> $GITHUB_ENV
echo "PUBSUB_AWS_SNSSQS_TOPIC_MVT=sqssnscerttest-tp-mvt-$UNIQUE_ID" >> $GITHUB_ENV
echo "PUBSUB_AWS_SNSSQS_QUEUE_DLIN=sqssnscerttest-dlq-in-$UNIQUE_ID" >> $GITHUB_ENV
echo "PUBSUB_AWS_SNSSQS_QUEUE_DLOUT=sqssnscerttest-dlq-out-$UNIQUE_ID" >> $GITHUB_ENV
echo "PUBSUB_AWS_SNSSQS_TOPIC_DLIN=sqssnscerttest-dlt-in-$UNIQUE_ID" >> $GITHUB_ENV
echo "PUBSUB_AWS_SNSSQS_QUEUE_FIFO=sqssnscerttest-q-fifo-$UNIQUE_ID.fifo" >> $GITHUB_ENV
echo "PUBSUB_AWS_SNSSQS_TOPIC_FIFO=sqssnscerttest-t-fifo-$UNIQUE_ID.fifo" >> $GITHUB_ENV
echo "PUBSUB_AWS_SNSSQS_FIFO_GROUP_ID=sqssnscerttest-q-fifo-$UNIQUE_ID" >> $GITHUB_ENV
echo "PUBSUB_AWS_SNSSQS_QUEUE_NODRT=sqssnscerttest-q-nodrt-$UNIQUE_ID" >> $GITHUB_ENV
echo "PUBSUB_AWS_SNSSQS_TOPIC_NODRT=sqssnscerttest-t-nodrt-$UNIQUE_ID" >> $GITHUB_ENV
# Navigate to the Terraform directory
cd ".github/infrastructure/terraform/certification/pubsub/aws/snssqs"
# Run Terraform
terraform init
terraform validate -no-color
terraform plan -no-color -var="UNIQUE_ID=$UNIQUE_ID" -var="TIMESTAMP=$CURRENT_TIME"
terraform apply -auto-approve -var="UNIQUE_ID=$UNIQUE_ID" -var="TIMESTAMP=$CURRENT_TIME"
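
Outside GitHub Actions, this setup script can be exercised locally by supplying the variables the workflow normally provides; a minimal sketch, assuming AWS credentials for Terraform are already configured and using a scratch file in place of $GITHUB_ENV:
#!/bin/sh
# Sketch only: run the SNS/SQS certification setup outside GitHub Actions.
export UNIQUE_ID="local-$(date +%s)"          # stands in for the run id/attempt
export CURRENT_TIME="$(date --rfc-3339=date)" # same format the workflow uses
export GITHUB_ENV="$(mktemp)"                 # the script appends KEY=value lines here
.github/scripts/components-scripts/certification-pubsub.aws.snssqs-setup.sh
cat "$GITHUB_ENV"                             # inspect the generated queue/topic names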

View File

@@ -0,0 +1,9 @@
#!/bin/sh
set +e
# Navigate to the Terraform directory
cd ".github/infrastructure/terraform/certification/state/aws/dynamodb"
# Run Terraform
terraform destroy -auto-approve -var="UNIQUE_ID=$UNIQUE_ID" -var="TIMESTAMP=$CURRENT_TIME"

View File

@@ -0,0 +1,17 @@
#!/bin/sh
set -e
# Set variables for GitHub Actions
echo "AWS_REGION=us-east-1" >> $GITHUB_ENV
echo "STATE_AWS_DYNAMODB_TABLE_1=certification-test-terraform-basic-$UNIQUE_ID" >> $GITHUB_ENV
echo "STATE_AWS_DYNAMODB_TABLE_2=certification-test-terraform-partition-key-$UNIQUE_ID" >> $GITHUB_ENV
# Navigate to the Terraform directory
cd ".github/infrastructure/terraform/certification/state/aws/dynamodb"
# Run Terraform
terraform init
terraform validate -no-color
terraform plan -no-color -var="UNIQUE_ID=$UNIQUE_ID" -var="TIMESTAMP=$CURRENT_TIME"
terraform apply -auto-approve -var="UNIQUE_ID=$UNIQUE_ID" -var="TIMESTAMP=$CURRENT_TIME"

View File

@@ -0,0 +1,11 @@
#!/bin/sh
set +e
# Stop ngrok
echo "GET ngrok tunnels:"
curl http://localhost:4040/api/tunnels
echo "GET ngrok http requests:"
curl http://localhost:4040/api/requests/http
pkill ngrok
cat /tmp/ngrok.log

View File

@@ -0,0 +1,18 @@
#!/bin/sh
set -e
# Start ngrok
wget https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-amd64.zip
unzip -qq ngrok-stable-linux-amd64.zip
./ngrok authtoken ${AzureEventGridNgrokToken}
./ngrok http -log=stdout --log-level debug -host-header=localhost 9000 > /tmp/ngrok.log &
sleep 10
NGROK_ENDPOINT=`cat /tmp/ngrok.log | grep -Eom1 'https://.*' | sed 's/\s.*//'`
echo "Ngrok endpoint: ${NGROK_ENDPOINT}"
echo "AzureEventGridSubscriberEndpoint=${NGROK_ENDPOINT}/api/events" >> $GITHUB_ENV
cat /tmp/ngrok.log
# Schedule trigger to kill ngrok
bash -c "sleep 600 && pkill ngrok" &
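
The public endpoint can also be read from ngrok's local API, the same http://localhost:4040 endpoints the teardown script earlier queries; a small sketch (not part of this commit), assuming jq is available:
#!/bin/sh
# Sketch only: read the public HTTPS tunnel URL from ngrok's local API
# instead of grepping /tmp/ngrok.log.
NGROK_ENDPOINT=$(curl -s http://localhost:4040/api/tunnels \
    | jq -r '.tunnels[] | select(.proto == "https") | .public_url')
echo "Ngrok endpoint: ${NGROK_ENDPOINT}"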

View File

@@ -0,0 +1,7 @@
#!/bin/sh
set -e
export INFLUX_TOKEN=$(openssl rand -base64 32)
echo "INFLUX_TOKEN=$INFLUX_TOKEN" >> $GITHUB_ENV
docker-compose -f .github/infrastructure/docker-compose-influxdb.yml -p influxdb up -d
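
A quick way to confirm the InfluxDB container came up is sketched below; it assumes the compose file (not shown in this diff) runs InfluxDB 2.x and publishes its default port 8086 on the host.
#!/bin/sh
# Sketch only: confirm the InfluxDB container from the compose project is running.
docker ps --filter "name=influxdb"
# Assumes the compose file maps InfluxDB's default port 8086 to the host
# and that the image is InfluxDB 2.x, which serves a /health endpoint.
curl -fsS http://localhost:8086/health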

View File

@@ -0,0 +1,9 @@
#!/bin/sh
set +e
# Navigate to the Terraform directory
cd ".github/infrastructure/terraform/conformance/pubsub/aws/snssqs"
# Run Terraform
terraform destroy -auto-approve -var="UNIQUE_ID=$UNIQUE_ID" -var="TIMESTAMP=$CURRENT_TIME"

View File

@@ -0,0 +1,18 @@
#!/bin/sh
set -e
# Set variables for GitHub Actions
echo "PUBSUB_AWS_SNSSQS_QUEUE=testQueue-$UNIQUE_ID" >> $GITHUB_ENV
echo "PUBSUB_AWS_SNSSQS_TOPIC=testTopic-$UNIQUE_ID" >> $GITHUB_ENV
echo "PUBSUB_AWS_SNSSQS_TOPIC_MULTI_1=multiTopic1-$UNIQUE_ID" >> $GITHUB_ENV
echo "PUBSUB_AWS_SNSSQS_TOPIC_MULTI_2=multiTopic2-$UNIQUE_ID" >> $GITHUB_ENV
# Navigate to the Terraform directory
cd ".github/infrastructure/terraform/conformance/pubsub/aws/snssqs"
# Run Terraform
terraform init
terraform validate -no-color
terraform plan -no-color -var="UNIQUE_ID=$UNIQUE_ID" -var="TIMESTAMP=$CURRENT_TIME"
terraform apply -auto-approve -var="UNIQUE_ID=$UNIQUE_ID" -var="TIMESTAMP=$CURRENT_TIME"

View File

@@ -0,0 +1,6 @@
#!/bin/sh
set -e
kubectl apply -f tests/config/kind-data.yaml
echo "NAMESPACE=default" >> $GITHUB_ENV

View File

@@ -0,0 +1,9 @@
#!/bin/sh
set +e
# Navigate to the Terraform directory
cd ".github/infrastructure/terraform/conformance/state/aws/dynamodb"
# Run Terraform
terraform destroy -auto-approve -var="UNIQUE_ID=$UNIQUE_ID" -var="TIMESTAMP=$CURRENT_TIME"

View File

@@ -0,0 +1,16 @@
#!/bin/sh
set -e
# Set variables for GitHub Actions
echo "STATE_AWS_DYNAMODB_TABLE_1=conformance-test-terraform-basic-${UNIQUE_ID}" >> $GITHUB_ENV
echo "STATE_AWS_DYNAMODB_TABLE_2=conformance-test-terraform-partition-key-${UNIQUE_ID}" >> $GITHUB_ENV
# Navigate to the Terraform directory
cd ".github/infrastructure/terraform/conformance/state/aws/dynamodb"
# Run Terraform
terraform init
terraform validate -no-color
terraform plan -no-color -var="UNIQUE_ID=$UNIQUE_ID" -var="TIMESTAMP=$CURRENT_TIME"
terraform apply -auto-approve -var="UNIQUE_ID=$UNIQUE_ID" -var="TIMESTAMP=$CURRENT_TIME"

View File

@@ -0,0 +1,10 @@
#!/bin/sh
set +e
# Wait for the creation of the DB by the test to propagate to ARM, otherwise deletion succeeds as no-op.
# The wait should be under 30s, but is capped at 1m as flakiness here results in an accumulation of expensive DB instances over time.
# Also note that the deletion call only blocks until the request is processed; do not rely on it as a mutex on the same DB,
# as deletion may still be ongoing in sequential runs.
sleep 1m
az sql db delete --resource-group "$AzureResourceGroupName" --server "$AzureSqlServerName" -n "$AzureSqlServerDbName" --yes

View File

@@ -0,0 +1,7 @@
#!/bin/sh
set -e
# Use UUID with `-` stripped out for DB names to prevent collisions between workflows
AzureSqlServerDbName=$(cat /proc/sys/kernel/random/uuid | sed -E 's/-//g')
echo "AzureSqlServerDbName=$AzureSqlServerDbName" >> $GITHUB_ENV

View File

@@ -0,0 +1,11 @@
#!/bin/sh
set +e
# Delete the Worker
curl -X DELETE "https://api.cloudflare.com/client/v4/accounts/${CLOUDFLARE_ACCOUNT_ID}/workers/scripts/${CloudflareWorkerName}" \
-H "Authorization: Bearer ${CLOUDFLARE_API_TOKEN}"
# Delete the KV namespace
curl -X DELETE "https://api.cloudflare.com/client/v4/accounts/${CLOUDFLARE_ACCOUNT_ID}/storage/kv/namespaces/${CloudflareKVNamespaceID}" \
-H "Authorization: Bearer ${CLOUDFLARE_API_TOKEN}"

View File

@@ -0,0 +1,30 @@
#!/bin/sh
set -e
# Rebuild the Worker
(
cd internal/component/cloudflare/worker-src;
npm ci;
npm run build;
)
# Check that the code of the worker is correct
git diff --exit-code ./internal/component/cloudflare/workers/code \
|| (echo "The source code of the Cloudflare Worker has changed, but the Worker has not been recompiled. Please re-compile the Worker by running 'npm ci && npm run build' in 'internal/component/cloudflare/worker-src'" && exit 1)
# Remove dashes from UNIQUE_ID
Suffix=$(echo "$UNIQUE_ID" | sed -E 's/-//g')
# Ensure the Workers KV namespace exists
CloudflareWorkerName="daprconfkv${Suffix}"
CloudflareKVNamespaceID=$( curl -s -X POST "https://api.cloudflare.com/client/v4/accounts/${CLOUDFLARE_ACCOUNT_ID}/storage/kv/namespaces" \
-H "Authorization: Bearer ${CLOUDFLARE_API_TOKEN}" \
-H "Content-Type: application/json" \
--data "{\"title\":\"${CloudflareWorkerName}\"}" \
| jq -r ".result.id" )
echo "CloudflareWorkerName=${CloudflareWorkerName}" >> $GITHUB_ENV
echo "CloudflareAPIToken=${CLOUDFLARE_API_TOKEN}" >> $GITHUB_ENV
echo "CloudflareAccountID=${CLOUDFLARE_ACCOUNT_ID}" >> $GITHUB_ENV
echo "CloudflareKVNamespaceID=${CloudflareKVNamespaceID}" >> $GITHUB_ENV
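
To confirm the namespace was actually created, the same Cloudflare API can be queried back; a hedged sketch (not part of this commit) reusing the credentials exported above and jq, which the setup script already relies on:
#!/bin/sh
# Sketch only: verify the Workers KV namespace created by the setup script exists.
curl -s "https://api.cloudflare.com/client/v4/accounts/${CLOUDFLARE_ACCOUNT_ID}/storage/kv/namespaces" \
    -H "Authorization: Bearer ${CLOUDFLARE_API_TOKEN}" \
    | jq -r ".result[] | select(.id == \"${CloudflareKVNamespaceID}\") | .title"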

View File

@@ -0,0 +1,8 @@
#!/bin/bash
set -e
FILE="$1"
PROJECT="${2:-$FILE}"
docker-compose -f .github/infrastructure/docker-compose-${FILE}.yml -p ${PROJECT} up -d
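
This helper is what conformanceSetup entries such as 'docker-compose.sh redis7 redis' in .github/scripts/test-info.mjs refer to: the first argument selects .github/infrastructure/docker-compose-<FILE>.yml and the optional second argument overrides the compose project name. For example:
#!/bin/sh
# Start docker-compose-kafka.yml under the default project name "kafka"
.github/scripts/components-scripts/docker-compose.sh kafka
# Start docker-compose-redis7.yml but group it under the "redis" project
.github/scripts/components-scripts/docker-compose.sh redis7 redis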

View File

@@ -1,49 +1,59 @@
// list of owners who can control the dapr-bot workflow
// TODO: Read owners from OWNERS file.
const owners = [
"yaron2",
"berndverst",
"artursouza",
"mukundansundar",
"halspang",
"tanvigour",
"pkedy",
"amuluyavarote",
"daixiang0",
"ItalyPaleAle",
"jjcollinge",
"pravinpushkar",
"shivamkm07",
"shubham1172",
"skyao",
"msfussell",
"Taction",
"RyanLettieri",
"DeepanshuA",
"yash-nisar",
"addjuarez",
"tmacam",
];
'addjuarez',
'amuluyavarote',
'artursouza',
'berndverst',
'daixiang0',
'DeepanshuA',
'halspang',
'ItalyPaleAle',
'jjcollinge',
'msfussell',
'mukundansundar',
'pkedy',
'pravinpushkar',
'RyanLettieri',
'shivamkm07',
'shubham1172',
'skyao',
'Taction',
'tmacam',
'yaron2',
'yash-nisar',
]
const docsIssueBodyTpl = (issueNumber) => `This issue was automatically created by \
const docsIssueBodyTpl = (
issueNumber
) => `This issue was automatically created by \
[Dapr Bot](https://github.com/dapr/dapr/blob/master/.github/workflows/dapr-bot.yml) because a \"documentation required\" label \
was added to dapr/components-contrib#${issueNumber}. \n\n\
TODO: Add more details as per [this template](.github/ISSUE_TEMPLATE/new-content-needed.md).`;
TODO: Add more details as per [this template](.github/ISSUE_TEMPLATE/new-content-needed.md).`
const newComponentBodyTpl = (issueNumber) => `This issue was automatically created by \
const newComponentBodyTpl = (
issueNumber
) => `This issue was automatically created by \
[Dapr Bot](https://github.com/dapr/dapr/blob/master/.github/workflows/dapr-bot.yml) because a \"new component\" label \
was added to dapr/components-contrib#${issueNumber}. \n\n\
Please register the component in [cmd/daprd/components](https://github.com/dapr/dapr/tree/master/cmd/daprd/components), \
similar to the ones in the folder (one file per component).`;
similar to the ones in the folder (one file per component).`
module.exports = async ({ github, context }) => {
if (context.eventName == "issue_comment" && context.payload.action == "created") {
await handleIssueCommentCreate({ github, context });
} else if ((context.eventName == "issues" || context.eventName == "pull_request") && context.payload.action == "labeled") {
await handleIssueOrPrLabeled({ github, context });
if (
context.eventName == 'issue_comment' &&
context.payload.action == 'created'
) {
await handleIssueCommentCreate({ github, context })
} else if (
(context.eventName == 'issues' ||
context.eventName == 'pull_request') &&
context.payload.action == 'labeled'
) {
await handleIssueOrPrLabeled({ github, context })
} else {
console.log(`[main] event ${context.eventName} not supported, exiting.`);
console.log(`[main] event ${context.eventName} not supported, exiting.`)
}
}
@@ -51,78 +61,86 @@ module.exports = async ({ github, context }) => {
* Handle issue comment create event.
*/
async function handleIssueCommentCreate({ github, context }) {
const payload = context.payload;
const issue = context.issue;
const username = (context.actor || "").toLowerCase();
const isFromPulls = !!payload.issue.pull_request;
const commentBody = payload.comment.body;
const payload = context.payload
const issue = context.issue
const username = (context.actor || '').toLowerCase()
const isFromPulls = !!payload.issue.pull_request
const commentBody = payload.comment.body
if (!commentBody) {
console.log("[handleIssueCommentCreate] comment body not found, exiting.");
return;
console.log(
'[handleIssueCommentCreate] comment body not found, exiting.'
)
return
}
const command = commentBody.split(" ")[0];
const command = commentBody.split(' ')[0]
// Commands that can be executed by anyone.
if (command === "/assign") {
await cmdAssign(github, issue, username, isFromPulls);
return;
if (command === '/assign') {
await cmdAssign(github, issue, username, isFromPulls)
return
}
// Commands that can only be executed by owners.
if (owners.map((v) => v.toLowerCase()).indexOf(username) < 0) {
console.log(`[handleIssueCommentCreate] user ${username} is not an owner, exiting.`);
return;
console.log(
`[handleIssueCommentCreate] user ${username} is not an owner, exiting.`
)
return
}
switch (command) {
case "/ok-to-test":
await cmdOkToTest(github, issue, isFromPulls);
break;
case '/ok-to-test':
await cmdOkToTest(github, issue, isFromPulls)
break
default:
console.log(`[handleIssueCommentCreate] command ${command} not found, exiting.`);
break;
console.log(
`[handleIssueCommentCreate] command ${command} not found, exiting.`
)
break
}
}
/**
* Handle issue or PR labeled event.
*/
async function handleIssueOrPrLabeled({ github, context }) {
const payload = context.payload;
const label = payload.label.name;
const issueNumber = payload.issue.number;
const payload = context.payload
const label = payload.label.name
const issueNumber = payload.issue.number
// This should not run in forks.
if (context.repo.owner !== "dapr") {
console.log("[handleIssueOrPrLabeled] not running in dapr repo, exiting.");
return;
if (context.repo.owner !== 'dapr') {
console.log(
'[handleIssueOrPrLabeled] not running in dapr repo, exiting.'
)
return
}
// Authorization is not required here because it's triggered by an issue label event.
// Only authorized users can add labels to issues.
if (label == "documentation required") {
if (label == 'documentation required') {
// Open a new docs issue
await github.rest.issues.create({
owner: "dapr",
repo: "docs",
owner: 'dapr',
repo: 'docs',
title: `New content needed for dapr/components-contrib#${issueNumber}`,
labels: ["content/missing-information", "created-by/dapr-bot"],
labels: ['content/missing-information', 'created-by/dapr-bot'],
body: docsIssueBodyTpl(issueNumber),
});
} else if (label == "new component") {
})
} else if (label == 'new component') {
// Open a new dapr issue
await github.rest.issues.create({
owner: "dapr",
repo: "dapr",
owner: 'dapr',
repo: 'dapr',
title: `Component registration for dapr/components-contrib#${issueNumber}`,
labels: ["area/components", "created-by/dapr-bot"],
labels: ['area/components', 'created-by/dapr-bot'],
body: newComponentBodyTpl(issueNumber),
});
})
} else {
console.log(`[handleIssueOrPrLabeled] label ${label} not supported, exiting.`);
console.log(
`[handleIssueOrPrLabeled] label ${label} not supported, exiting.`
)
}
}
@@ -135,11 +153,15 @@ async function handleIssueOrPrLabeled({ github, context }) {
*/
async function cmdAssign(github, issue, username, isFromPulls) {
if (isFromPulls) {
console.log("[cmdAssign] pull requests unsupported, skipping command execution.");
return;
console.log(
'[cmdAssign] pull requests unsupported, skipping command execution.'
)
return
} else if (issue.assignees && issue.assignees.length !== 0) {
console.log("[cmdAssign] issue already has assignees, skipping command execution.");
return;
console.log(
'[cmdAssign] issue already has assignees, skipping command execution.'
)
return
}
await github.rest.issues.addAssignees({
@@ -147,10 +169,9 @@ async function cmdAssign(github, issue, username, isFromPulls) {
repo: issue.repo,
issue_number: issue.number,
assignees: [username],
});
})
}
/**
* Trigger e2e test for the pull request.
* @param {*} github GitHub object reference
@@ -159,50 +180,56 @@ async function cmdAssign(github, issue, username, isFromPulls) {
*/
async function cmdOkToTest(github, issue, isFromPulls) {
if (!isFromPulls) {
console.log("[cmdOkToTest] only pull requests supported, skipping command execution.");
return;
console.log(
'[cmdOkToTest] only pull requests supported, skipping command execution.'
)
return
}
// Get pull request
const pull = await github.rest.pulls.get({
owner: issue.owner,
repo: issue.repo,
pull_number: issue.number
});
pull_number: issue.number,
})
if (pull && pull.data) {
// Get commit id and repo from pull head
const testPayload = {
pull_head_ref: pull.data.head.sha,
pull_head_repo: pull.data.head.repo.full_name,
command: "ok-to-test",
command: 'ok-to-test',
issue: issue,
};
}
// Fire repository_dispatch event to trigger certification test
await github.rest.repos.createDispatchEvent({
owner: issue.owner,
repo: issue.repo,
event_type: "certification-test",
event_type: 'certification-test',
client_payload: testPayload,
});
})
// Fire repository_dispatch event to trigger conformance test
await github.rest.repos.createDispatchEvent({
owner: issue.owner,
repo: issue.repo,
event_type: "conformance-test",
event_type: 'conformance-test',
client_payload: testPayload,
});
})
// Fire repository_dispatch event to trigger unit tests for other architectures and OS
await github.rest.repos.createDispatchEvent({
owner: issue.owner,
repo: issue.repo,
event_type: "build-all",
event_type: 'build-all',
client_payload: testPayload,
});
})
console.log(`[cmdOkToTest] triggered certification and conformance tests for ${JSON.stringify(testPayload)}`);
console.log(
`[cmdOkToTest] triggered certification and conformance tests for ${JSON.stringify(
testPayload
)}`
)
}
}
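
For reference, the repository_dispatch events that cmdOkToTest fires via createDispatchEvent can be expressed as a plain REST call; a sketch only, in which GITHUB_TOKEN, the head SHA, the fork name, and the issue number are placeholders rather than values from this commit:
#!/bin/sh
# Sketch only: the kind of dispatch createDispatchEvent sends for "/ok-to-test".
curl -s -X POST "https://api.github.com/repos/dapr/components-contrib/dispatches" \
    -H "Authorization: Bearer ${GITHUB_TOKEN}" \
    -H "Accept: application/vnd.github+json" \
    --data '{
        "event_type": "certification-test",
        "client_payload": {
            "pull_head_ref": "<head-sha>",
            "pull_head_repo": "<fork>/components-contrib",
            "command": "ok-to-test",
            "issue": { "owner": "dapr", "repo": "components-contrib", "number": 0 }
        }
    }'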

.github/scripts/test-info.mjs (vendored, new file, 580 lines)
View File

@@ -0,0 +1,580 @@
import { argv, env, exit } from 'node:process'
import { writeFileSync } from 'node:fs'
/**
* List of all components
* @type {Record<string,ComponentTestProperties>}
*/
const components = {
'bindings.azure.blobstorage': {
conformance: true,
certification: true,
requiredSecrets: [
'AzureBlobStorageAccount',
'AzureBlobStorageAccessKey',
'AzureBlobStorageContainer',
'AzureCertificationTenantId',
'AzureCertificationServicePrincipalClientId',
'AzureCertificationServicePrincipalClientSecret',
],
},
'bindings.azure.cosmosdb': {
conformance: true,
certification: true,
requiredSecrets: [
'AzureCosmosDB',
'AzureCosmosDBMasterKey',
'AzureCosmosDBUrl',
'AzureCosmosDB',
'AzureCosmosDBCollection',
'AzureCertificationTenantId',
'AzureCertificationServicePrincipalClientId',
'AzureCertificationServicePrincipalClientSecret',
],
},
'bindings.azure.eventgrid': {
conformance: true,
requiredSecrets: [
'AzureEventGridNgrokToken',
'AzureEventGridAccessKey',
'AzureEventGridTopicEndpoint',
'AzureEventGridScope',
'AzureEventGridClientSecret',
'AzureEventGridClientId',
'AzureEventGridTenantId',
'AzureEventGridSubscriptionId',
],
conformanceSetup: 'conformance-bindings.azure.eventgrid-setup.sh',
conformanceDestroy: 'conformance-bindings.azure.eventgrid-destroy.sh',
},
'bindings.azure.eventhubs': {
conformance: true,
certification: true,
requiredSecrets: [
'AzureEventHubsBindingsConnectionString',
'AzureBlobStorageAccount',
'AzureBlobStorageAccessKey',
'AzureEventHubsBindingsHub',
'AzureEventHubsBindingsNamespace',
'AzureEventHubsBindingsConsumerGroup',
'AzureCertificationServicePrincipalClientId',
'AzureCertificationTenantId',
'AzureCertificationServicePrincipalClientSecret',
'AzureResourceGroupName',
'AzureCertificationSubscriptionId',
'AzureEventHubsBindingsContainer',
'AzureIotHubEventHubConnectionString',
'AzureIotHubName',
'AzureIotHubBindingsConsumerGroup',
],
},
'bindings.azure.servicebusqueues': {
conformance: true,
certification: true,
requiredSecrets: ['AzureServiceBusConnectionString'],
},
'bindings.azure.storagequeues': {
conformance: true,
certification: true,
requiredSecrets: [
'AzureBlobStorageAccessKey',
'AzureBlobStorageAccount',
'AzureBlobStorageQueue',
],
},
'bindings.cron': {
conformance: true,
certification: true,
},
'bindings.dubbo': {
certification: true,
},
'bindings.http': {
conformance: true,
},
'bindings.influx': {
conformance: true,
conformanceSetup: 'conformance-bindings.influx-setup.sh',
},
'bindings.kafka': {
certification: true,
},
'bindings.kafka-confluent': {
conformance: true,
conformanceSetup: 'docker-compose.sh confluent',
},
'bindings.kafka-wurstmeister': {
conformance: true,
conformanceSetup: 'docker-compose.sh kafka',
},
'bindings.kubemq': {
conformance: true,
conformanceSetup: 'docker-compose.sh kubemq',
},
'bindings.localstorage': {
certification: true,
},
'bindings.mqtt3-emqx': {
conformance: true,
conformanceSetup: 'docker-compose.sh emqx',
},
'bindings.mqtt3-mosquitto': {
conformance: true,
conformanceSetup: 'docker-compose.sh mosquitto',
},
'bindings.mqtt3-vernemq': {
conformance: true,
conformanceSetup: 'docker-compose.sh vernemq',
},
'bindings.postgres': {
conformance: true,
certification: true,
conformanceSetup: 'docker-compose.sh postgresql',
},
'bindings.rabbitmq': {
conformance: true,
certification: true,
conformanceSetup: 'docker-compose.sh rabbitmq',
},
'bindings.redis': {
certification: true,
},
'bindings.redis.v6': {
conformance: true,
conformanceSetup: 'docker-compose.sh redisjson redis',
},
'bindings.redis.v7': {
conformance: true,
conformanceSetup: 'docker-compose.sh redis7 redis',
},
'configuration.redis.v6': {
conformance: true,
conformanceSetup: 'docker-compose.sh redisjson redis',
},
'configuration.redis.v7': {
conformance: true,
conformanceSetup: 'docker-compose.sh redis7 redis',
},
'pubsub.aws.snssqs': {
certification: true,
requireAWSCredentials: true,
requireTerraform: true,
certificationSetup: 'certification-pubsub.aws.snssqs-setup.sh',
certificationDestroy: 'certification-pubsub.aws.snssqs-destroy.sh',
},
'pubsub.aws.snssqs.docker': {
conformance: true,
conformanceSetup: 'docker-compose.sh snssqs',
},
'pubsub.aws.snssqs.terraform': {
conformance: true,
requireAWSCredentials: true,
requireTerraform: true,
conformanceSetup: 'conformance-pubsub.aws.snssqs.terraform-setup.sh',
conformanceDestroy: 'conformance-pubsub.aws.snssqs.terraform-destroy.sh',
},
'pubsub.azure.eventhubs': {
conformance: true,
certification: true,
requiredSecrets: [
'AzureEventHubsPubsubTopicActiveConnectionString',
'AzureEventHubsPubsubNamespace',
'AzureEventHubsPubsubConsumerGroup',
'AzureEventHubsPubsubNamespaceConnectionString',
'AzureBlobStorageAccount',
'AzureBlobStorageAccessKey',
'AzureEventHubsPubsubContainer',
'AzureIotHubName',
'AzureIotHubEventHubConnectionString',
'AzureCertificationTenantId',
'AzureCertificationServicePrincipalClientId',
'AzureCertificationServicePrincipalClientSecret',
'AzureResourceGroupName',
'AzureCertificationSubscriptionId',
],
},
'pubsub.azure.servicebus.queues': {
conformance: true,
requiredSecrets: ['AzureServiceBusConnectionString'],
},
'pubsub.azure.servicebus.topics': {
conformance: true,
certification: true,
requiredSecrets: [
'AzureServiceBusConnectionString',
'AzureServiceBusNamespace',
'AzureCertificationTenantId',
'AzureCertificationServicePrincipalClientId',
'AzureCertificationServicePrincipalClientSecret',
],
},
'pubsub.hazelcast': {
conformance: true,
conformanceSetup: 'docker-compose.sh hazelcast',
},
'pubsub.in-memory': {
conformance: true,
},
'pubsub.jetstream': {
conformance: true,
conformanceSetup: 'docker-compose.sh jetstream',
},
'pubsub.kafka': {
certification: true,
},
'pubsub.kafka-confluent': {
conformance: true,
conformanceSetup: 'docker-compose.sh confluent',
},
'pubsub.kafka-wurstmeister': {
conformance: true,
conformanceSetup: 'docker-compose.sh kafka',
},
'pubsub.kubemq': {
conformance: true,
conformanceSetup: 'docker-compose.sh kubemq',
},
'pubsub.mqtt3': {
certification: true,
},
'pubsub.mqtt3-emqx': {
conformance: true,
conformanceSetup: 'docker-compose.sh emqx',
},
'pubsub.mqtt3-vernemq': {
conformance: true,
conformanceSetup: 'docker-compose.sh vernemq',
},
'pubsub.natsstreaming': {
conformance: true,
conformanceSetup: 'docker-compose.sh natsstreaming',
},
'pubsub.pulsar': {
conformance: true,
certification: true,
conformanceSetup: 'docker-compose.sh pulsar',
},
'pubsub.rabbitmq': {
conformance: true,
certification: true,
conformanceSetup: 'docker-compose.sh rabbitmq',
},
'pubsub.redis.v6': {
conformance: true,
conformanceSetup: 'docker-compose.sh redisjson redis',
},
// This test is currently disabled due to issues with Redis v7
/*'pubsub.redis.v7': {
conformance: true,
conformanceSetup: 'docker-compose.sh redis7 redis',
},*/
'pubsub.solace': {
conformance: true,
conformanceSetup: 'docker-compose.sh solace',
},
'secretstores.azure.keyvault': {
certification: true,
requiredSecrets: [
'AzureKeyVaultName',
'AzureKeyVaultSecretStoreTenantId',
'AzureKeyVaultSecretStoreClientId',
'AzureKeyVaultSecretStoreServicePrincipalClientId',
'AzureKeyVaultSecretStoreServicePrincipalClientSecret',
'AzureContainerRegistryName',
'AzureResourceGroupName',
],
requiredCerts: ['AzureKeyVaultSecretStoreCert'],
},
'secretstores.azure.keyvault.certificate': {
conformance: true,
requiredSecrets: [
'AzureKeyVaultName',
'AzureKeyVaultSecretStoreTenantId',
'AzureKeyVaultSecretStoreClientId',
],
requiredCerts: ['AzureKeyVaultSecretStoreCert'],
},
'secretstores.azure.keyvault.serviceprincipal': {
conformance: true,
requiredSecrets: [
'AzureKeyVaultName',
'AzureKeyVaultSecretStoreTenantId',
'AzureKeyVaultSecretStoreServicePrincipalClientId',
'AzureKeyVaultSecretStoreServicePrincipalClientSecret',
],
},
'secretstores.hashicorp.vault': {
conformance: true,
certification: true,
conformanceSetup: 'docker-compose.sh hashicorp-vault vault',
},
'secretstores.kubernetes': {
conformance: true,
requireKind: true,
conformanceSetup: 'conformance-secretstores.kubernetes-setup.sh',
},
'secretstores.local.env': {
conformance: true,
certification: true,
},
'secretstores.local.file': {
conformance: true,
certification: true,
},
'state.aws.dynamodb': {
certification: true,
requireAWSCredentials: true,
requireTerraform: true,
certificationSetup: 'certification-state.aws.dynamodb-setup.sh',
certificationDestroy: 'certification-state.aws.dynamodb-destroy.sh',
},
'state.aws.dynamodb.terraform': {
conformance: true,
requireAWSCredentials: true,
requireTerraform: true,
conformanceSetup: 'conformance-state.aws.dynamodb-setup.sh',
conformanceDestroy: 'conformance-state.aws.dynamodb-destroy.sh',
},
'state.azure.blobstorage': {
conformance: true,
certification: true,
requiredSecrets: [
'AzureBlobStorageAccount',
'AzureBlobStorageAccessKey',
'AzureCertificationTenantId',
'AzureCertificationServicePrincipalClientId',
'AzureCertificationServicePrincipalClientSecret',
'AzureBlobStorageContainer',
],
},
'state.azure.cosmosdb': {
conformance: true,
certification: true,
requiredSecrets: [
'AzureCosmosDBMasterKey',
'AzureCosmosDBUrl',
'AzureCosmosDB',
'AzureCosmosDBCollection',
'AzureCertificationTenantId',
'AzureCertificationServicePrincipalClientId',
'AzureCertificationServicePrincipalClientSecret',
],
},
'state.azure.sql': {
conformance: true,
requiredSecrets: [
'AzureResourceGroupName',
'AzureSqlServerName',
'AzureSqlServerConnectionString',
],
conformanceSetup: 'conformance-state.azure.sql-setup.sh',
conformanceDestroy: 'conformance-state.azure.sql-destroy.sh',
},
'state.azure.tablestorage': {
certification: true,
requiredSecrets: [
'AzureBlobStorageAccount',
'AzureBlobStorageAccessKey',
'AzureCertificationTenantId',
'AzureCertificationServicePrincipalClientId',
'AzureCertificationServicePrincipalClientSecret',
],
},
'state.azure.tablestorage.cosmosdb': {
conformance: true,
requiredSecrets: [
'AzureCosmosDBTableAPI',
'AzureCosmosDBTableAPIMasterKey',
],
},
'state.azure.tablestorage.storage': {
conformance: true,
requiredSecrets: [
'AzureBlobStorageAccessKey',
'AzureBlobStorageAccount',
],
},
'state.cassandra': {
conformance: true,
certification: true,
conformanceSetup: 'docker-compose.sh cassandra',
},
'state.cloudflare.workerskv': {
conformance: true,
requireCloudflareCredentials: true,
nodeJsVersion: '18.x',
conformanceSetup: 'conformance-state.cloudflare.workerskv-setup.sh',
conformanceDestroy: 'conformance-state.cloudflare.workerskv-destroy.sh',
},
'state.cockroachdb': {
conformance: true,
certification: true,
conformanceSetup: 'docker-compose.sh cockroachdb',
},
'state.in-memory': {
conformance: true,
},
'state.memcached': {
conformance: true,
certification: true,
conformanceSetup: 'docker-compose.sh memcached',
},
'state.mongodb': {
conformance: true,
certification: true,
mongoDbVersion: '4.2',
},
'state.mysql': {
certification: true,
},
'state.mysql.mariadb': {
conformance: true,
conformanceSetup: 'docker-compose.sh mariadb',
},
'state.mysql.mysql': {
conformance: true,
conformanceSetup: 'docker-compose.sh mysql',
},
'state.postgresql': {
conformance: true,
certification: true,
conformanceSetup: 'docker-compose.sh postgresql',
},
'state.redis': {
certification: true,
},
'state.redis.v6': {
conformance: true,
conformanceSetup: 'docker-compose.sh redisjson redis',
},
'state.redis.v7': {
conformance: true,
conformanceSetup: 'docker-compose.sh redis7 redis',
},
'state.rethinkdb': {
conformance: true,
conformanceSetup: 'docker-compose.sh rethinkdb',
},
'state.sqlite': {
conformance: true,
certification: true,
},
'state.sqlserver': {
conformance: true,
certification: true,
conformanceSetup: 'docker-compose.sh sqlserver',
requiredSecrets: ['AzureSqlServerConnectionString'],
},
'workflows.temporal': {
conformance: true,
conformanceSetup: 'docker-compose.sh temporal',
},
}
/**
* Type for the objects in the components dictionary
* @typedef {Object} ComponentTestProperties
* @property {boolean?} conformance If true, enables for conformance tests
* @property {boolean?} certification If true, enables for certification tests
* @property {string[]?} requiredSecrets Required secrets (if not empty, test becomes "cloud-only")
* @property {string[]?} requiredCerts Required certs (if not empty, test becomes "cloud-only")
* @property {boolean?} requireAWSCredentials If true, requires AWS credentials and makes the test "cloud-only"
* @property {boolean?} requireCloudflareCredentials If true, requires Cloudflare credentials and makes the test "cloud-only"
* @property {boolean?} requireTerraform If true, requires Terraform
* @property {boolean?} requireKind If true, requires KinD
* @property {string?} conformanceSetup Setup script for conformance tests
* @property {string?} conformanceDestroy Destroy script for conformance tests
* @property {string?} certificationSetup Setup script for certification tests
* @property {string?} certificationDestroy Destroy script for certification tests
* @property {string?} nodeJsVersion If set, installs the specified Node.js version
* @property {string?} mongoDbVersion If set, installs the specified MongoDB version
*/
/**
* Test matrix object
* @typedef {Object} TestMatrixElement
* @property {string} component Component name
* @property {string?} required-secrets Required secrets
* @property {string?} required-certs Required certs
* @property {boolean?} require-aws-credentials Requires AWS credentials
* @property {boolean?} require-cloudflare-credentials Requires Cloudflare credentials
* @property {boolean?} require-terraform Requires Terraform
* @property {boolean?} require-kind Requires KinD
* @property {string?} setup-script Setup script
* @property {string?} destroy-script Destroy script
* @property {string?} nodejs-version Install the specified Node.js version if set
* @property {string?} mongodb-version Install the specified MongoDB version if set
*/
/**
* Returns the list of components for the matrix.
* @param {'conformance'|'certification'} testKind Kind of test
* @param {boolean} enableCloudTests If true, returns components that require secrets or credentials too (which can't be used as part of the regular CI in a PR)
* @returns {TestMatrixElement[]} Test matrix object
*/
function GenerateMatrix(testKind, enableCloudTests) {
/** @type {TestMatrixElement[]} */
const res = []
for (const name in components) {
const comp = components[name]
if (!comp[testKind]) {
continue
}
// Skip cloud-only tests if enableCloudTests is false
if (!enableCloudTests) {
if (
comp.requiredSecrets?.length ||
comp.requiredCerts?.length ||
comp.requireAWSCredentials ||
comp.requireCloudflareCredentials
) {
continue
}
}
// Add the component to the array
res.push({
component: name,
'required-secrets': comp.requiredSecrets?.length
? comp.requiredSecrets.join(',')
: undefined,
'required-certs': comp.requiredCerts?.length
? comp.requiredCerts.join(',')
: undefined,
'require-aws-credentials': comp.requireAWSCredentials
? 'true'
: undefined,
'require-cloudflare-credentials': comp.requireCloudflareCredentials
? 'true'
: undefined,
'require-terraform': comp.requireTerraform ? 'true' : undefined,
'require-kind': comp.requireKind ? 'true' : undefined,
'setup-script': comp[testKind + 'Setup'] || undefined,
'destroy-script': comp[testKind + 'Destroy'] || undefined,
'nodejs-version': comp.nodeJsVersion || undefined,
'mongodb-version': comp.mongoDbVersion || undefined,
})
}
return res
}
// Upon invocation, writes the matrix to the $GITHUB_OUTPUT file
if (!env.GITHUB_OUTPUT) {
console.error('Missing environment variable GITHUB_OUTPUT')
exit(1)
}
if (argv.length < 3 || !['conformance', 'certification'].includes(argv[2])) {
console.error("First parameter must be 'conformance' or 'certification'")
exit(1)
}
if (argv.length < 4 || !['true', 'false'].includes(argv[3])) {
console.error("Second parameter must be 'true' or 'false'")
exit(1)
}
const matrixObj = GenerateMatrix(argv[2], argv[3] == 'true')
console.log('Generated matrix:\n\n' + JSON.stringify(matrixObj, null, ' '))
writeFileSync(env.GITHUB_OUTPUT, 'test-matrix=' + JSON.stringify(matrixObj))
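
Locally, the generator can be exercised the same way the workflows below invoke it, pointing GITHUB_OUTPUT at a scratch file; a minimal sketch (not part of this commit). It emits a single test-matrix=<JSON array> line in which undefined fields are omitted, so state.mongodb, for example, contributes {"component":"state.mongodb","mongodb-version":"4.2"}.
#!/bin/sh
# Sketch only: run the matrix generator outside GitHub Actions.
export GITHUB_OUTPUT="$(mktemp)"
node .github/scripts/test-info.mjs conformance false   # non-cloud conformance matrix
cat "$GITHUB_OUTPUT"
# Emits one line such as:
# test-matrix=[{"component":"bindings.cron"},...,{"component":"state.mongodb","mongodb-version":"4.2"},...]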

View File

@@ -21,17 +21,23 @@ on:
- cron: '25 */8 * * *'
push:
branches:
- release-*
- 'release-*'
pull_request:
branches:
- release-*
# TODO: REMOVE "master" BEFORE MERGING
- 'master'
- 'release-*'
env:
# Only specify a major version, such as 1.20
GO_VERSION: '1.19'
jobs:
# Based on whether this is a PR or a scheduled run, we will run a different
# subset of the certification tests. This allows all the tests not requiring
# secrets to be executed on pull requests.
generate-matrix:
runs-on: ubuntu-latest
runs-on: ubuntu-22.04
steps:
- name: Parse repository_dispatch payload
if: github.event_name == 'repository_dispatch'
@@ -42,98 +48,24 @@ jobs:
echo "PR_NUMBER=${{ github.event.client_payload.issue.number }}" >> $GITHUB_ENV
fi
- name: Install yq
run: |
sudo snap install yq
- name: Check out code
uses: actions/checkout@v3
with:
repository: ${{ env.CHECKOUT_REPO }}
ref: ${{ env.CHECKOUT_REF }}
- name: Specify components that can be run on every PR
id: pr-components
- name: Generate test matrix
id: generate-matrix
env:
VAULT_NAME: ${{ secrets.AZURE_KEYVAULT }}
run: |
PR_COMPONENTS=$(yq -I0 --tojson eval - << EOF
- pubsub.kafka
- pubsub.rabbitmq
- pubsub.pulsar
- pubsub.mqtt3
- state.mongodb
- state.redis
- state.cockroachdb
- state.postgresql
- state.cassandra
- state.memcached
- state.mysql
- state.sqlite
- bindings.dubbo
- bindings.kafka
- bindings.redis
- bindings.cron
- secretstores.local.env
- secretstores.local.file
- secretstores.hashicorp.vault
- bindings.rabbitmq
- bindings.localstorage
- bindings.postgres
EOF
)
echo "pr-components=$PR_COMPONENTS" >> $GITHUB_OUTPUT
- name: Specify components requiring cloud resources to run
id: cloud-components
run: |
# Skip cloud-components on PRs; they require the scheduled run trigger
# or an approver to trigger them via repository-dispatch on /ok-to-test
if [ "${{ github.event_name }}" = "pull_request" ]; then
echo "cloud-components=[]" >> $GITHUB_OUTPUT
exit
if [ -z "$VAULT_NAME" ]; then
# Do not include cloud tests when credentials are not available
node .github/scripts/test-info.mjs certification false
else
# Include cloud tests
node .github/scripts/test-info.mjs certification true
fi
# Reuse the same cloud infrastructure as conformance.yml
#
# Unfortunately, Azure secrets can't have underscores in
# names, while environment variables with hyphens ('-') are
# troublesome.
#
# We work around this by leveraging the fact that
# environment variable names are case sensitive, so
# CamelCase would still work.
#
# That is slightly better than something like
# AZURECOSMOSDBMASTERKEY, which is extremely hard to read
# and error-prone.
#
# Only list the secrets you need for the component.
CRON_COMPONENTS=$(yq -I0 --tojson eval - << EOF
- component: secretstores.azure.keyvault
required-secrets: AzureKeyVaultName,AzureKeyVaultSecretStoreTenantId,AzureKeyVaultSecretStoreClientId,AzureKeyVaultSecretStoreServicePrincipalClientId,AzureKeyVaultSecretStoreServicePrincipalClientSecret,AzureContainerRegistryName,AzureResourceGroupName
required-certs: AzureKeyVaultSecretStoreCert
- component: state.sqlserver
required-secrets: AzureSqlServerConnectionString
- component: bindings.azure.servicebusqueues
required-secrets: AzureServiceBusConnectionString
- component: bindings.azure.cosmosdb
required-secrets: AzureCosmosDBUrl,AzureCosmosDB,AzureCosmosDBCollection,AzureCosmosDBMasterKey,AzureCertificationTenantId,AzureCertificationServicePrincipalClientId,AzureCertificationServicePrincipalClientSecret
- component: bindings.azure.eventhubs
required-secrets: AzureEventHubsBindingsConnectionString,AzureBlobStorageAccount,AzureBlobStorageAccessKey,AzureEventHubsBindingsHub,AzureEventHubsBindingsNamespace,AzureEventHubsBindingsConsumerGroup,AzureCertificationServicePrincipalClientId,AzureCertificationTenantId,AzureCertificationServicePrincipalClientSecret,AzureResourceGroupName,AzureCertificationSubscriptionId,AzureEventHubsBindingsContainer,AzureIotHubEventHubConnectionString,AzureIotHubName,AzureIotHubBindingsConsumerGroup
- component: pubsub.azure.eventhubs
required-secrets: AzureEventHubsPubsubTopicActiveConnectionString,AzureEventHubsPubsubNamespace,AzureEventHubsPubsubNamespaceConnectionString,AzureBlobStorageAccount,AzureBlobStorageAccessKey,AzureEventHubsPubsubContainer,AzureIotHubName,AzureIotHubEventHubConnectionString,AzureCertificationTenantId,AzureCertificationServicePrincipalClientId,AzureCertificationServicePrincipalClientSecret,AzureResourceGroupName,AzureCertificationSubscriptionId
- component: pubsub.azure.servicebus.topics
required-secrets: AzureServiceBusConnectionString,AzureServiceBusNamespace, AzureCertificationTenantId,AzureCertificationServicePrincipalClientId,AzureCertificationServicePrincipalClientSecret
- component: bindings.azure.blobstorage
required-secrets: AzureBlobStorageAccount,AzureBlobStorageAccessKey,AzureBlobStorageContainer,AzureCertificationTenantId,AzureCertificationServicePrincipalClientId,AzureCertificationServicePrincipalClientSecret
- component: bindings.azure.storagequeues
required-secrets: AzureBlobStorageAccount, AzureBlobStorageAccessKey
- component: state.azure.tablestorage
required-secrets: AzureBlobStorageAccount, AzureBlobStorageAccessKey, AzureCertificationTenantId, AzureCertificationServicePrincipalClientId, AzureCertificationServicePrincipalClientSecret
- component: state.azure.blobstorage
required-secrets: AzureBlobStorageContainer,AzureBlobStorageAccount, AzureBlobStorageAccessKey, AzureCertificationTenantId, AzureCertificationServicePrincipalClientId, AzureCertificationServicePrincipalClientSecret
- component: state.azure.cosmosdb
required-secrets: AzureCosmosDBMasterKey, AzureCosmosDBUrl, AzureCosmosDB, AzureCosmosDBCollection, AzureCertificationTenantId, AzureCertificationServicePrincipalClientId, AzureCertificationServicePrincipalClientSecret
- component: pubsub.aws.snssqs
terraform-dir: pubsub/aws/snssqs
- component: state.aws.dynamodb
terraform-dir: state/aws/dynamodb
EOF
)
echo "cloud-components=$CRON_COMPONENTS" >> $GITHUB_OUTPUT
- name: Create PR comment
if: env.PR_NUMBER != ''
@@ -150,24 +82,24 @@ jobs:
Commit ref: ${{ env.CHECKOUT_REF }}
outputs:
pr-components: ${{ steps.pr-components.outputs.pr-components }}
cloud-components: ${{ steps.cloud-components.outputs.cloud-components }}
test-matrix: ${{ steps.generate-matrix.outputs.test-matrix }}
certification:
name: ${{ matrix.component }} certification
runs-on: ubuntu-latest
runs-on: ubuntu-22.04
env:
UNIQUE_ID: ${{github.run_id}}-${{github.run_attempt}}
defaults:
run:
shell: bash
needs: generate-matrix
env:
UNIQUE_ID: ${{github.run_id}}-${{github.run_attempt}}
needs:
- generate-matrix
strategy:
fail-fast: false # Keep running even if one component fails
matrix:
component: ${{ fromJson(needs.generate-matrix.outputs.pr-components) }}
include: ${{ fromJson(needs.generate-matrix.outputs.cloud-components) }}
matrix:
include: ${{ fromJson(needs.generate-matrix.outputs.test-matrix) }}
steps:
- name: Set default payload repo and ref
@@ -189,22 +121,23 @@ jobs:
repository: ${{ env.CHECKOUT_REPO }}
ref: ${{ env.CHECKOUT_REF }}
- name: Setup test output
- name: Configure environment
run: |
export TEST_OUTPUT_FILE_PREFIX=$GITHUB_WORKSPACE/test_report
echo "TEST_OUTPUT_FILE_PREFIX=$TEST_OUTPUT_FILE_PREFIX" >> $GITHUB_ENV
# Output file
echo "TEST_OUTPUT_FILE_PREFIX=$GITHUB_WORKSPACE/test_report" >> $GITHUB_ENV
- name: Configure certification test and source path
run: |
TEST_COMPONENT=$(echo ${{ matrix.component }} | sed -E 's/\./\//g')
export TEST_PATH="tests/certification/${TEST_COMPONENT}"
echo "TEST_PATH=$TEST_PATH" >> $GITHUB_ENV
export SOURCE_PATH="github.com/dapr/components-contrib/${TEST_COMPONENT}"
# Certification test and source path
TEST_COMPONENT=$(echo "${{ matrix.component }}" | sed -E 's/\./\//g')
echo "TEST_PATH=tests/certification/${TEST_COMPONENT}" >> $GITHUB_ENV
SOURCE_PATH="github.com/dapr/components-contrib/${TEST_COMPONENT}"
echo "SOURCE_PATH=$SOURCE_PATH" >> $GITHUB_ENV
# Convert slashes to dots in this string so that they are not treated as sub-folders
export SOURCE_PATH_LINEAR=$(echo "$SOURCE_PATH" |sed 's#/#\.#g')
SOURCE_PATH_LINEAR=$(echo "$SOURCE_PATH" |sed 's#/#\.#g')
echo "SOURCE_PATH_LINEAR=$SOURCE_PATH_LINEAR" >> $GITHUB_ENV
# Current time (used by Terraform)
echo "CURRENT_TIME=$(date --rfc-3339=date)" >> ${GITHUB_ENV}
- uses: Azure/login@v1
with:
creds: ${{ secrets.AZURE_CREDENTIALS }}
@@ -214,7 +147,6 @@ jobs:
# az keyvault set-policy -n $AZURE_KEYVAULT --secret-permissions get list --spn $SPN_CLIENT_ID
# Using az cli to query keyvault as Azure/get-keyvault-secrets@v1 is deprecated
- name: Setup secrets
id: get-azure-secrets
if: matrix.required-secrets != ''
env:
VAULT_NAME: ${{ secrets.AZURE_KEYVAULT }}
@@ -245,110 +177,37 @@ jobs:
echo "$CERT_NAME=$CERT_FILE" >> $GITHUB_ENV
done
- name: Get current time
run: |
echo "CURRENT_TIME=$(date --rfc-3339=date)" >> ${GITHUB_ENV}
- name: Setup Terraform
uses: hashicorp/setup-terraform@v2
if: matrix.terraform-dir != ''
uses: hashicorp/setup-terraform@v2.0.3
if: matrix.require-terraform == 'true'
- name: Set AWS Region
if: contains(matrix.component, 'aws')
- name: Set Cloudflare env vars
if: matrix.require-cloudflare-credentials == 'true'
run: |
AWS_REGION="us-west-1"
echo "AWS_REGION=$AWS_REGION" >> $GITHUB_ENV
echo "CLOUDFLARE_ACCOUNT_ID=${{ secrets.CLOUDFLARE_ACCOUNT_ID }}" >> $GITHUB_ENV
echo "CLOUDFLARE_API_TOKEN=${{ secrets.CLOUDFLARE_API_TOKEN }}" >> $GITHUB_ENV
- name: Set AWS env vars
if: matrix.require-aws-credentials == 'true'
run: |
echo "AWS_REGION=us-west-1" >> $GITHUB_ENV
echo "AWS_ACCESS_KEY=${{ secrets.AWS_ACCESS_KEY }}" >> $GITHUB_ENV
echo "AWS_SECRET_KEY=${{ secrets.AWS_SECRET_KEY }}" >> $GITHUB_ENV
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v1
# TODO: Remove "v1-node16" when v2 is released
# See: https://github.com/aws-actions/configure-aws-credentials/issues/489
uses: aws-actions/configure-aws-credentials@v1-node16
if: matrix.require-aws-credentials == 'true'
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_KEY }}
aws-access-key-id: "${{ secrets.AWS_ACCESS_KEY }}"
aws-secret-access-key: "${{ secrets.AWS_SECRET_KEY }}"
aws-region: "${{ env.AWS_REGION }}"
if: matrix.terraform-dir != ''
- name: Terraform Init
id: init
run: terraform init
working-directory: "./.github/infrastructure/terraform/certification/${{ matrix.terraform-dir }}"
if: matrix.terraform-dir != ''
- name: Terraform Validate
id: validate
run: terraform validate -no-color
working-directory: "./.github/infrastructure/terraform/certification/${{ matrix.terraform-dir }}"
if: matrix.terraform-dir != ''
- name: Terraform Plan
id: plan
run: terraform plan -no-color -var="UNIQUE_ID=${{env.UNIQUE_ID}}" -var="TIMESTAMP=${{env.CURRENT_TIME}}"
working-directory: "./.github/infrastructure/terraform/certification/${{ matrix.terraform-dir }}"
if: matrix.terraform-dir != ''
- name: Terraform Apply
run: terraform apply -auto-approve -var="UNIQUE_ID=${{env.UNIQUE_ID}}" -var="TIMESTAMP=${{env.CURRENT_TIME}}"
working-directory: "./.github/infrastructure/terraform/certification/${{ matrix.terraform-dir }}"
if: matrix.terraform-dir != ''
continue-on-error: true
- name: Create aws.snssqs specific variables
if: contains(matrix.component, 'snssqs')
working-directory: "./.github/infrastructure/terraform/certification/${{ matrix.terraform-dir }}"
run: |
PUBSUB_AWS_SNSSQS_QUEUE_1="sqssnscerttest-q1-${{env.UNIQUE_ID}}"
echo "PUBSUB_AWS_SNSSQS_QUEUE_1=$PUBSUB_AWS_SNSSQS_QUEUE_1" >> $GITHUB_ENV
PUBSUB_AWS_SNSSQS_QUEUE_2="sqssnscerttest-q2-${{env.UNIQUE_ID}}"
echo "PUBSUB_AWS_SNSSQS_QUEUE_2=$PUBSUB_AWS_SNSSQS_QUEUE_2" >> $GITHUB_ENV
PUBSUB_AWS_SNSSQS_QUEUE_3="sqssnscerttest-q3-${{env.UNIQUE_ID}}"
echo "PUBSUB_AWS_SNSSQS_QUEUE_3=$PUBSUB_AWS_SNSSQS_QUEUE_3" >> $GITHUB_ENV
PUBSUB_AWS_SNSSQS_TOPIC_3="sqssnscerttest-t3-${{env.UNIQUE_ID}}"
echo "PUBSUB_AWS_SNSSQS_TOPIC_3=$PUBSUB_AWS_SNSSQS_TOPIC_3" >> $GITHUB_ENV
PUBSUB_AWS_SNSSQS_QUEUE_MVT="sqssnscerttest-q-mvt-${{env.UNIQUE_ID}}"
echo "PUBSUB_AWS_SNSSQS_QUEUE_MVT=$PUBSUB_AWS_SNSSQS_QUEUE_MVT" >> $GITHUB_ENV
PUBSUB_AWS_SNSSQS_TOPIC_MVT="sqssnscerttest-tp-mvt-${{env.UNIQUE_ID}}"
echo "PUBSUB_AWS_SNSSQS_TOPIC_MVT=$PUBSUB_AWS_SNSSQS_TOPIC_MVT" >> $GITHUB_ENV
PUBSUB_AWS_SNSSQS_QUEUE_DLIN="sqssnscerttest-dlq-in-${{env.UNIQUE_ID}}"
echo "PUBSUB_AWS_SNSSQS_QUEUE_DLIN=$PUBSUB_AWS_SNSSQS_QUEUE_DLIN" >> $GITHUB_ENV
PUBSUB_AWS_SNSSQS_QUEUE_DLOUT="sqssnscerttest-dlq-out-${{env.UNIQUE_ID}}"
echo "PUBSUB_AWS_SNSSQS_QUEUE_DLOUT=$PUBSUB_AWS_SNSSQS_QUEUE_DLOUT" >> $GITHUB_ENV
PUBSUB_AWS_SNSSQS_TOPIC_DLIN="sqssnscerttest-dlt-in-${{env.UNIQUE_ID}}"
echo "PUBSUB_AWS_SNSSQS_TOPIC_DLIN=$PUBSUB_AWS_SNSSQS_TOPIC_DLIN" >> $GITHUB_ENV
PUBSUB_AWS_SNSSQS_QUEUE_FIFO="sqssnscerttest-q-fifo-${{env.UNIQUE_ID}}.fifo"
echo "PUBSUB_AWS_SNSSQS_QUEUE_FIFO=$PUBSUB_AWS_SNSSQS_QUEUE_FIFO" >> $GITHUB_ENV
PUBSUB_AWS_SNSSQS_TOPIC_FIFO="sqssnscerttest-t-fifo-${{env.UNIQUE_ID}}.fifo"
echo "PUBSUB_AWS_SNSSQS_TOPIC_FIFO=$PUBSUB_AWS_SNSSQS_TOPIC_FIFO" >> $GITHUB_ENV
PUBSUB_AWS_SNSSQS_FIFO_GROUP_ID="sqssnscerttest-q-fifo-${{env.UNIQUE_ID}}"
echo "PUBSUB_AWS_SNSSQS_FIFO_GROUP_ID=$PUBSUB_AWS_SNSSQS_FIFO_GROUP_ID" >> $GITHUB_ENV
PUBSUB_AWS_SNSSQS_QUEUE_NODRT="sqssnscerttest-q-nodrt-${{env.UNIQUE_ID}}"
echo "PUBSUB_AWS_SNSSQS_QUEUE_NODRT=$PUBSUB_AWS_SNSSQS_QUEUE_NODRT" >> $GITHUB_ENV
PUBSUB_AWS_SNSSQS_TOPIC_NODRT="sqssnscerttest-t-nodrt-${{env.UNIQUE_ID}}"
echo "PUBSUB_AWS_SNSSQS_TOPIC_NODRT=$PUBSUB_AWS_SNSSQS_TOPIC_NODRT" >> $GITHUB_ENV
AWS_REGION="us-east-1"
echo "AWS_REGION=$AWS_REGION" >> $GITHUB_ENV
- name: Create state aws.dynamodb specific variables
if: contains(matrix.component, 'dynamodb')
working-directory: "./.github/infrastructure/terraform/certification/${{ matrix.terraform-dir }}"
run: |
STATE_AWS_DYNAMODB_TABLE_1="certification-test-terraform-basic-${{ env.UNIQUE_ID }}"
echo "STATE_AWS_DYNAMODB_TABLE_1=$STATE_AWS_DYNAMODB_TABLE_1" >> $GITHUB_ENV
STATE_AWS_DYNAMODB_TABLE_2="certification-test-terraform-partition-key-${{ env.UNIQUE_ID }}"
echo "STATE_AWS_DYNAMODB_TABLE_2=$STATE_AWS_DYNAMODB_TABLE_2" >> $GITHUB_ENV
AWS_REGION="us-east-1"
echo "AWS_REGION=$AWS_REGION" >> $GITHUB_ENV
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: '^1.19'
go-version: '^${{ env.GO_VERSION }}'
- name: Download Go dependencies
working-directory: ${{ env.TEST_PATH }}
@@ -360,10 +219,19 @@ jobs:
- name: Check that go mod tidy is up-to-date
working-directory: ${{ env.TEST_PATH }}
run: |
go mod tidy -compat=1.19
go mod tidy -compat=${{ env.GO_VERSION }}
git diff --exit-code ./go.mod
git diff --exit-code ./go.sum
- name: Run setup script
if: matrix.setup-script != ''
run: .github/scripts/components-scripts/${{ matrix.setup-script }}
- name: Catch setup failures
if: failure()
run: |
echo "CERTIFICATION_FAILURE=true" >> $GITHUB_ENV
- name: Run tests
continue-on-error: false
working-directory: ${{ env.TEST_PATH }}
@@ -402,7 +270,7 @@ jobs:
CERT_FILE=$(printenv $CERT_NAME)
echo "Cleaning up the certificate file $CERT_FILE..."
rm $CERT_FILE
rm $CERT_FILE || true
done
if [[ -v CERTIFICATION_FAILURE ]]; then
@@ -462,15 +330,13 @@ jobs:
name: ${{ matrix.component }}_certification_test
path: ${{ env.TEST_OUTPUT_FILE_PREFIX }}_certification.*
- name: Terraform Destroy
continue-on-error: true
run: terraform destroy -auto-approve -var="UNIQUE_ID=${{env.UNIQUE_ID}}" -var="TIMESTAMP=${{env.CURRENT_TIME}}"
working-directory: "./.github/infrastructure/terraform/certification/${{ matrix.terraform-dir }}"
if: matrix.terraform-dir != ''
- name: Run destroy script
if: always() && matrix.destroy-script != ''
run: .github/scripts/components-scripts/${{ matrix.destroy-script }}
post_job:
name: Post-completion
runs-on: ubuntu-latest
runs-on: ubuntu-22.04
if: always()
needs:
- certification
@@ -500,9 +366,7 @@ jobs:
uses: actions/github-script@v6
with:
script: |
const prComponents = ('${{ needs.generate-matrix.outputs.pr-components }}' && JSON.parse('${{ needs.generate-matrix.outputs.pr-components }}')) || []
const cloudComponents = ('${{ needs.generate-matrix.outputs.cloud-components }}' && JSON.parse('${{ needs.generate-matrix.outputs.cloud-components }}')) || []
const allComponents = [...prComponents, ...cloudComponents]
const allComponents = JSON.parse('${{ needs.generate-matrix.outputs.test-matrix }}')
const basePath = '${{ steps.testresults.outputs.download-path }}'
const testType = 'certification'
@@ -530,7 +394,7 @@ jobs:
let found = false
let success = false
try {
let read =fs.readFileSync(path.join(basePath, component + '.txt'), 'utf8')
let read = fs.readFileSync(path.join(basePath, component + '.txt'), 'utf8')
read = read.split('\n')[0]
switch (read) {
case '1':

View File

@@ -22,18 +22,22 @@ on:
- cron: '0 */8 * * *'
push:
branches:
- 'release-*'
- 'release-*'
pull_request:
branches:
- master
- 'master'
- 'release-*'
env:
# Only specify a major version, such as 1.20
GO_VERSION: '1.19'
jobs:
# Based on whether this is a PR or a scheduled run, we will run a different
# subset of the conformance tests. This allows all the tests not requiring
# secrets to be executed on pull requests.
generate-matrix:
runs-on: ubuntu-latest
runs-on: ubuntu-22.04
steps:
- name: Parse repository_dispatch payload
if: github.event_name == 'repository_dispatch'
@@ -44,126 +48,24 @@ jobs:
echo "PR_NUMBER=${{ github.event.client_payload.issue.number }}" >> $GITHUB_ENV
fi
- name: Install yq
run: |
sudo snap install yq
- name: Check out code
uses: actions/checkout@v3
with:
repository: ${{ env.CHECKOUT_REPO }}
ref: ${{ env.CHECKOUT_REF }}
- name: Specify components not requiring secrets nor certs
id: pr-components
- name: Generate test matrix
id: generate-matrix
env:
VAULT_NAME: ${{ secrets.AZURE_KEYVAULT }}
run: |
PR_COMPONENTS=$(yq -I0 --tojson eval - << EOF
- bindings.cron
- bindings.http
- bindings.influx
- bindings.kafka-wurstmeister
- bindings.kafka-confluent
- bindings.mqtt3-emqx
- bindings.mqtt3-mosquitto
- bindings.mqtt3-vernemq
- bindings.postgres
- bindings.redis.v6
- bindings.redis.v7
- bindings.kubemq
- bindings.rabbitmq
- pubsub.aws.snssqs.docker
- configuration.redis.v6
- configuration.redis.v7
- pubsub.hazelcast
- pubsub.in-memory
- pubsub.mqtt3-emqx
- pubsub.mqtt3-vernemq
- pubsub.natsstreaming
- pubsub.pulsar
- pubsub.rabbitmq
- pubsub.redis.v6
- pubsub.kafka-wurstmeister
- pubsub.kafka-confluent
- pubsub.kubemq
- pubsub.solace
- secretstores.kubernetes
- secretstores.localenv
- secretstores.localfile
- secretstores.hashicorp.vault
- state.cassandra
- state.memcached
- state.mongodb
- state.mysql.mysql
- state.mysql.mariadb
- state.postgresql
- state.redis.v6
- state.redis.v7
- state.sqlite
- state.sqlserver
- state.in-memory
- state.cockroachdb
- workflows.temporal
- state.rethinkdb
EOF
)
echo "pr-components=$PR_COMPONENTS" >> $GITHUB_OUTPUT
- name: Specify components requiring secrets or certs
id: cron-components
run: |
if [ "${{ github.event_name }}" = "pull_request" ]; then
echo "cron-components=[]" >> $GITHUB_OUTPUT
exit
if [ -z "$VAULT_NAME" ]; then
# Do not include cloud tests when credentials are not available
node .github/scripts/test-info.mjs conformance false
else
# Include cloud tests
node .github/scripts/test-info.mjs conformance true
fi
# Unfortunately, Azure secrets can't have underscores in
# names, while environment variables with hyphens ('-') are
# troublesome.
#
# We work around here by leveraging the fact that
# environment variable names are case sensitive, so
# CamelCase would still work.
#
# That is slightly better than something like
# AZURECOSMOSDBMASTERKEY, which is extremely hard to read
# and errorprone.
#
# Only list the secrets you need for the component.
CRON_COMPONENTS=$(yq -I0 --tojson eval - << EOF
- component: state.azure.blobstorage
required-secrets: AzureBlobStorageAccessKey,AzureBlobStorageAccount
- component: state.azure.cosmosdb
required-secrets: AzureCosmosDBMasterKey,AzureCosmosDBUrl,AzureCosmosDB,AzureCosmosDBCollection
- component: state.azure.sql
required-secrets: AzureResourceGroupName, AzureSqlServerName, AzureSqlServerConnectionString
- component: state.azure.tablestorage.storage
required-secrets: AzureBlobStorageAccessKey,AzureBlobStorageAccount
- component: state.azure.tablestorage.cosmosdb
required-secrets: AzureCosmosDBTableAPI,AzureCosmosDBTableAPIMasterKey
- component: pubsub.azure.eventhubs
required-secrets: AzureEventHubsPubsubNamespaceConnectionString,AzureEventHubsPubsubConsumerGroup,AzureBlobStorageAccessKey,AzureBlobStorageAccount,AzureEventHubsPubsubContainer
- component: pubsub.azure.servicebus.topics
required-secrets: AzureServiceBusConnectionString
- component: pubsub.azure.servicebus.queues
required-secrets: AzureServiceBusConnectionString
- component: bindings.azure.blobstorage
required-secrets: AzureBlobStorageAccessKey,AzureBlobStorageAccount
- component: bindings.azure.eventgrid
required-secrets: AzureEventGridNgrokToken,AzureEventGridAccessKey,AzureEventGridTopicEndpoint,AzureEventGridScope,AzureEventGridClientSecret,AzureEventGridClientId,AzureEventGridTenantId,AzureEventGridSubscriptionId
- component: bindings.azure.eventhubs
required-secrets: AzureEventHubsBindingsConnectionString,AzureEventHubsBindingsConsumerGroup,AzureBlobStorageAccessKey,AzureBlobStorageAccount,AzureEventHubsBindingsContainer
- component: bindings.azure.servicebusqueues
required-secrets: AzureServiceBusConnectionString
- component: bindings.azure.storagequeues
required-secrets: AzureBlobStorageAccessKey,AzureBlobStorageAccount,AzureBlobStorageQueue
- component: secretstores.azure.keyvault.certificate
required-secrets: AzureKeyVaultName,AzureKeyVaultSecretStoreTenantId,AzureKeyVaultSecretStoreClientId
required-certs: AzureKeyVaultSecretStoreCert
- component: secretstores.azure.keyvault.serviceprincipal
required-secrets: AzureKeyVaultName,AzureKeyVaultSecretStoreTenantId,AzureKeyVaultSecretStoreServicePrincipalClientId,AzureKeyVaultSecretStoreServicePrincipalClientSecret
- component: bindings.azure.cosmosdb
required-secrets: AzureCosmosDBMasterKey,AzureCosmosDBUrl,AzureCosmosDB,AzureCosmosDBCollection
- component: pubsub.aws.snssqs.terraform
terraform-dir: pubsub/aws/snssqs
- component: state.aws.dynamodb.terraform
terraform-dir: state/aws/dynamodb
- component: state.cloudflare.workerskv
EOF
)
echo "cron-components=$CRON_COMPONENTS" >> $GITHUB_OUTPUT
- name: Create PR comment
if: env.PR_NUMBER != ''
@@ -180,27 +82,24 @@ jobs:
Commit ref: ${{ env.CHECKOUT_REF }}
outputs:
pr-components: ${{ steps.pr-components.outputs.pr-components }}
cron-components: ${{ steps.cron-components.outputs.cron-components }}
test-matrix: ${{ steps.generate-matrix.outputs.test-matrix }}
conformance:
name: ${{ matrix.component }} conformance
runs-on: ubuntu-latest
runs-on: ubuntu-22.04
env:
# Version of Node.js to use
# Currently used by the Cloudflare components
NODE_VERSION: 18.x
UNIQUE_ID: ${{github.run_id}}-${{github.run_attempt}}
defaults:
run:
shell: bash
needs: generate-matrix
needs:
- generate-matrix
strategy:
fail-fast: false # Keep running even if one component fails
matrix:
component: ${{ fromJson(needs.generate-matrix.outputs.pr-components) }}
include: ${{ fromJson(needs.generate-matrix.outputs.cron-components) }}
matrix:
include: ${{ fromJson(needs.generate-matrix.outputs.test-matrix) }}
steps:
- name: Set default payload repo and ref
@@ -219,27 +118,29 @@ jobs:
echo "PR_NUMBER=${{ github.event.client_payload.issue.number }}" >> $GITHUB_ENV
fi
- name: Check out code onto GOPATH
- name: Check out code
uses: actions/checkout@v3
with:
repository: ${{ env.CHECKOUT_REPO }}
ref: ${{ env.CHECKOUT_REF }}
- name: Setup test output
- name: Setup test environment
run: |
export TEST_OUTPUT_FILE_PREFIX=$GITHUB_WORKSPACE/test_report
echo "TEST_OUTPUT_FILE_PREFIX=$TEST_OUTPUT_FILE_PREFIX" >> $GITHUB_ENV
# Output file
echo "TEST_OUTPUT_FILE_PREFIX=$GITHUB_WORKSPACE/test_report" >> $GITHUB_ENV
# Current time (used by Terraform)
echo "CURRENT_TIME=$(date --rfc-3339=date)" >> ${GITHUB_ENV}
- uses: Azure/login@v1
if: matrix.required-secrets != ''
with:
creds: ${{ secrets.AZURE_CREDENTIALS }}
if: matrix.required-secrets != ''
# Set this GitHub secret to your KeyVault, and grant the KeyVault policy to your Service Principal:
# az keyvault set-policy -n $AZURE_KEYVAULT --secret-permissions get list --spn $SPN_CLIENT_ID
# Using az cli to query keyvault as Azure/get-keyvault-secrets@v1 is deprecated
- name: Setup secrets
id: get-azure-secrets
if: matrix.required-secrets != ''
env:
VAULT_NAME: ${{ secrets.AZURE_KEYVAULT }}
@@ -256,21 +157,6 @@ jobs:
echo "$secretName=$value" >> $GITHUB_ENV
done
- name: Start ngrok
if: contains(matrix.component, 'azure.eventgrid')
run: |
wget https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-amd64.zip
unzip -qq ngrok-stable-linux-amd64.zip
./ngrok authtoken ${{ env.AzureEventGridNgrokToken }}
./ngrok http -log=stdout --log-level debug -host-header=localhost 9000 > /tmp/ngrok.log &
sleep 10
export NGROK_ENDPOINT=`cat /tmp/ngrok.log | grep -Eom1 'https://.*' | sed 's/\s.*//'`
echo "Ngrok's endpoint: ${NGROK_ENDPOINT}"
echo "AzureEventGridSubscriberEndpoint=${NGROK_ENDPOINT}/api/events" >> $GITHUB_ENV
cat /tmp/ngrok.log
# Schedule trigger to kill ngrok
bash -c "sleep 500 && pkill ngrok" &
# Download the required certificates into files, and set env var pointing to their names
- name: Setup certs
if: matrix.required-certs != ''
@@ -284,257 +170,70 @@ jobs:
echo "$CERT_NAME=$CERT_FILE" >> $GITHUB_ENV
done
- name: Get current time
run: |
echo "CURRENT_TIME=$(date --rfc-3339=date)" >> ${GITHUB_ENV}
- name: Setup Terraform
uses: hashicorp/setup-terraform@v2
if: matrix.terraform-dir != ''
if: matrix.require-terraform == 'true'
uses: hashicorp/setup-terraform@v2.0.3
- name: Set Cloudflare env vars
if: matrix.require-cloudflare-credentials == 'true'
run: |
echo "CLOUDFLARE_ACCOUNT_ID=${{ secrets.CLOUDFLARE_ACCOUNT_ID }}" >> $GITHUB_ENV
echo "CLOUDFLARE_API_TOKEN=${{ secrets.CLOUDFLARE_API_TOKEN }}" >> $GITHUB_ENV
- name: Set AWS env vars
if: matrix.require-aws-credentials == 'true'
run: |
echo "AWS_ACCESS_KEY=${{ secrets.AWS_ACCESS_KEY }}" >> $GITHUB_ENV
echo "AWS_SECRET_KEY=${{ secrets.AWS_SECRET_KEY }}" >> $GITHUB_ENV
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v1
if: matrix.require-aws-credentials == 'true'
# TODO: Remove "v1-node16" when v2 is released
# See: https://github.com/aws-actions/configure-aws-credentials/issues/489
uses: aws-actions/configure-aws-credentials@v1-node16
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_KEY }}
aws-region: us-west-1
if: matrix.terraform-dir != ''
- name: Terraform Init
id: init
run: terraform init
working-directory: "./.github/infrastructure/terraform/conformance/${{ matrix.terraform-dir }}"
if: matrix.terraform-dir != ''
- name: Terraform Validate
id: validate
run: terraform validate -no-color
working-directory: "./.github/infrastructure/terraform/conformance/${{ matrix.terraform-dir }}"
if: matrix.terraform-dir != ''
- name: Terraform Plan
id: plan
run: terraform plan -no-color -var="UNIQUE_ID=${{env.UNIQUE_ID}}" -var="TIMESTAMP=${{env.CURRENT_TIME}}"
working-directory: "./.github/infrastructure/terraform/conformance/${{ matrix.terraform-dir }}"
if: matrix.terraform-dir != ''
- name: Terraform Apply
run: terraform apply -auto-approve -var="UNIQUE_ID=${{env.UNIQUE_ID}}" -var="TIMESTAMP=${{env.CURRENT_TIME}}"
working-directory: "./.github/infrastructure/terraform/conformance/${{ matrix.terraform-dir }}"
if: matrix.terraform-dir != ''
continue-on-error: true
- name: Create aws.snssqs variables
run: |
PUBSUB_AWS_SNSSQS_QUEUE="testQueue-${{ env.UNIQUE_ID }}"
echo "PUBSUB_AWS_SNSSQS_QUEUE=$PUBSUB_AWS_SNSSQS_QUEUE" >> $GITHUB_ENV
PUBSUB_AWS_SNSSQS_TOPIC="testTopic-${{ env.UNIQUE_ID }}"
echo "PUBSUB_AWS_SNSSQS_TOPIC=$PUBSUB_AWS_SNSSQS_TOPIC" >> $GITHUB_ENV
PUBSUB_AWS_SNSSQS_TOPIC_MULTI_1="multiTopic1-${{ env.UNIQUE_ID }}"
echo "PUBSUB_AWS_SNSSQS_TOPIC_MULTI_1=$PUBSUB_AWS_SNSSQS_TOPIC_MULTI_1" >> $GITHUB_ENV
PUBSUB_AWS_SNSSQS_TOPIC_MULTI_2="multiTopic2-${{ env.UNIQUE_ID }}"
echo "PUBSUB_AWS_SNSSQS_TOPIC_MULTI_2=$PUBSUB_AWS_SNSSQS_TOPIC_MULTI_2" >> $GITHUB_ENV
if: contains(matrix.component, 'snssqs')
- name: Create aws.dynamodb variables
run: |
STATE_AWS_DYNAMODB_TABLE_1="conformance-test-terraform-basic-${{ env.UNIQUE_ID }}"
echo "STATE_AWS_DYNAMODB_TABLE_1=$STATE_AWS_DYNAMODB_TABLE_1" >> $GITHUB_ENV
STATE_AWS_DYNAMODB_TABLE_2="conformance-test-terraform-partition-key-${{ env.UNIQUE_ID }}"
echo "STATE_AWS_DYNAMODB_TABLE_2=$STATE_AWS_DYNAMODB_TABLE_2" >> $GITHUB_ENV
if: contains(matrix.component, 'dynamodb')
- name: Start Redis 6 with Redis JSON
run: docker-compose -f ./.github/infrastructure/docker-compose-redisjson.yml -p redis up -d
if: contains(matrix.component, 'redis.v6')
- name: Start Redis 7
run: docker-compose -f ./.github/infrastructure/docker-compose-redis7.yml -p redis up -d
if: contains(matrix.component, 'redis.v7')
- name: Start Temporal
run: docker-compose -f ./.github/infrastructure/docker-compose-temporal.yml -p temporal up -d
if: contains(matrix.component, 'temporal')
- name: Start MongoDB
uses: supercharge/mongodb-github-action@1.3.0
if: matrix.mongodb-version != ''
uses: supercharge/mongodb-github-action@1.8.0
with:
mongodb-version: 4.2
mongodb-version: ${{ matrix.mongodb-version }}
mongodb-replica-set: test-rs
if: contains(matrix.component, 'mongodb')
- name: Start sqlserver
run: docker-compose -f ./.github/infrastructure/docker-compose-sqlserver.yml -p sqlserver up -d
if: contains(matrix.component, 'sqlserver')
- name: Start kafka
run: docker-compose -f ./.github/infrastructure/docker-compose-kafka.yml -p kafka up -d
if: contains(matrix.component, 'wurstmeister')
- name: Start kafka Confluent
run: docker-compose -f ./.github/infrastructure/docker-compose-confluent.yml -p confluent up -d
if: contains(matrix.component, 'confluent')
- name: Start memcached
run: docker-compose -f ./.github/infrastructure/docker-compose-memcached.yml -p memcached up -d
if: contains(matrix.component, 'memcached')
- name: Start natsstreaming
run: docker-compose -f ./.github/infrastructure/docker-compose-natsstreaming.yml -p natsstreaming up -d
if: contains(matrix.component, 'natsstreaming')
- name: Start pulsar
run: docker-compose -f ./.github/infrastructure/docker-compose-pulsar.yml -p pulsar up -d
if: contains(matrix.component, 'pulsar')
- name: Start Eclipse Mosquitto (MQTT3)
run: docker-compose -f ./.github/infrastructure/docker-compose-mosquitto.yml -p mosquitto up -d
if: contains(matrix.component, 'mqtt3-mosquitto')
- name: Start EMQ X (MQTT3)
run: docker-compose -f ./.github/infrastructure/docker-compose-emqx.yml -p emqx up -d
if: contains(matrix.component, 'mqtt3-emqx')
- name: Start VerneMQ (MQTT3)
run: docker-compose -f ./.github/infrastructure/docker-compose-vernemq.yml -p vernemq up -d
if: contains(matrix.component, 'mqtt3-vernemq')
- name: Start hazelcast
run: docker-compose -f ./.github/infrastructure/docker-compose-hazelcast.yml -p hazelcast up -d
if: contains(matrix.component, 'hazelcast')
- name: Start rabbitmq
run: docker-compose -f ./.github/infrastructure/docker-compose-rabbitmq.yml -p rabbitmq up -d
if: contains(matrix.component, 'rabbitmq')
- name: Install Node.js ${{ env.NODE_VERSION }}
if: contains(matrix.component, 'cloudflare')
uses: actions/setup-node@v3
with:
node-version: ${{ env.NODE_VERSION }}
- name: Check Cloudflare Workers code
if: contains(matrix.component, 'cloudflare')
run: |
# Build the Worker
(
cd internal/component/cloudflare/worker-src;
npm ci;
npm run build;
)
# Check no changes
git diff --exit-code ./internal/component/cloudflare/workers/code \
|| (echo "The source code of the Cloudflare Worker has changed, but the Worker has not been recompiled. Please re-compile the Worker by running 'npm ci && npm run build' in 'internal/component/cloudflare/worker-src'" && exit 1)
- name: Setup Cloudflare KV
if: matrix.component == 'state.cloudflare.workerskv'
env:
CLOUDFLARE_ACCOUNT_ID: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
CLOUDFLARE_API_TOKEN: ${{ secrets.CLOUDFLARE_API_TOKEN }}
run: |
CloudflareWorkerName="daprconfkv${{ github.run_id }}${{ github.run_attempt }}"
CloudflareKVNamespaceID=$( curl -s -X POST "https://api.cloudflare.com/client/v4/accounts/${CLOUDFLARE_ACCOUNT_ID}/storage/kv/namespaces" \
-H "Authorization: Bearer ${CLOUDFLARE_API_TOKEN}" \
-H "Content-Type: application/json" \
--data "{\"title\":\"${CloudflareWorkerName}\"}" \
| jq -r ".result.id" )
echo "CloudflareWorkerName=${CloudflareWorkerName}" >> $GITHUB_ENV
echo "CloudflareAPIToken=${CLOUDFLARE_API_TOKEN}" >> $GITHUB_ENV
echo "CloudflareAccountID=${CLOUDFLARE_ACCOUNT_ID}" >> $GITHUB_ENV
echo "CloudflareKVNamespaceID=${CloudflareKVNamespaceID}" >> $GITHUB_ENV
- name: Start aws snssqs
run: docker-compose -f ./.github/infrastructure/docker-compose-snssqs.yml -p snssqs up -d
if: contains(matrix.component, 'aws.snssqs.docker')
- name: Start influxdb
run: |
export INFLUX_TOKEN=$(openssl rand -base64 32)
echo "INFLUX_TOKEN=$INFLUX_TOKEN" >> $GITHUB_ENV
docker-compose -f ./.github/infrastructure/docker-compose-influxdb.yml -p influxdb up -d
if: contains(matrix.component, 'influx')
- name: Start mysql
run: |
docker-compose -f ./.github/infrastructure/docker-compose-mysql.yml -p mysql up -d
if: contains(matrix.component, 'mysql.mysql')
- name: Start mariadb
run: |
docker-compose -f ./.github/infrastructure/docker-compose-mariadb.yml -p mariadb up -d
if: contains(matrix.component, 'mysql.mariadb')
- name: Start KinD
uses: helm/kind-action@v1.4.0
if: contains(matrix.component, 'kubernetes')
- name: Start postgresql
run: |
docker-compose -f ./.github/infrastructure/docker-compose-postgresql.yml -p postgresql up -d
if: contains(matrix.component, 'postgres')
- name: Start cassandra
run: |
docker-compose -f ./.github/infrastructure/docker-compose-cassandra.yml -p cassandra up -d
if: contains(matrix.component, 'cassandra')
- name: Start cockroachdb
run: |
docker-compose -f ./.github/infrastructure/docker-compose-cockroachdb.yml -p cockroachdb up -d
if: contains(matrix.component, 'cockroachdb')
- name: Start vault
run: |
docker-compose -f ./.github/infrastructure/docker-compose-hashicorp-vault.yml -p vault up -d
if: contains(matrix.component, 'hashicorp.vault')
- name: Start rethinkdb
run: |
docker-compose -f ./.github/infrastructure/docker-compose-rethinkdb.yml -p rethinkdb up -d
if: contains(matrix.component, 'rethinkdb')
- name: Start kubemq
run: docker-compose -f ./.github/infrastructure/docker-compose-kubemq.yml -p kubemq up -d
if: contains(matrix.component, 'kubemq')
- name: Start solace
run: docker-compose -f ./.github/infrastructure/docker-compose-solace.yml -p solace up -d
if: contains(matrix.component, 'solace')
- name: Start nats with JetStream
run: |
docker-compose -f ./.github/infrastructure/docker-compose-jetstream.yml up -p jetstream -d
if: contains(matrix.component, 'jetstream')
- name: Setup KinD test data
if: contains(matrix.component, 'kubernetes')
run: |
kubectl apply -f tests/config/kind-data.yaml
echo "NAMESPACE=default" >> $GITHUB_ENV
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: '^1.19'
go-version: '^${{ env.GO_VERSION }}'
- name: Install Node.js ${{ matrix.nodejs-version }}
if: matrix.nodejs-version != ''
uses: actions/setup-node@v3
with:
node-version: ${{ matrix.nodejs-version }}
- name: Start KinD
uses: helm/kind-action@v1.5.0
if: matrix.require-kind == 'true'
- name: Download Go dependencies
run: |
go mod download
go install gotest.tools/gotestsum@latest
- name: Generate Azure SQL DB name
- name: Run setup script
if: matrix.setup-script != ''
run: .github/scripts/components-scripts/${{ matrix.setup-script }}
- name: Catch setup failures
if: failure()
run: |
# Use UUID with `-` stripped out for DB names to prevent collisions between workflows
export AzureSqlServerDbName=$(cat /proc/sys/kernel/random/uuid | sed -E 's/-//g')
echo "AzureSqlServerDbName=$AzureSqlServerDbName" >> $GITHUB_ENV
if: contains(matrix.component, 'azure.sql')
echo "CONFORMANCE_FAILURE=true" >> $GITHUB_ENV
- name: Run tests
continue-on-error: true
env:
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY }}
run: |
set -e
KIND=$(echo ${{ matrix.component }} | cut -d. -f1)
@@ -566,60 +265,16 @@ jobs:
exit -1
fi
- name: Stop ngrok
if: contains(matrix.component, 'azure.eventgrid')
continue-on-error: true
run: |
set +e
echo "GET ngrok tunnels:"
curl http://localhost:4040/api/tunnels
echo "GET ngrok http requests:"
curl http://localhost:4040/api/requests/http
pkill ngrok
cat /tmp/ngrok.log
- name: Cleanup Azure SQL test DB instance
if: contains(matrix.component, 'azure.sql')
continue-on-error: true
run: |
# Wait for the creation of the DB by the test to propagate to ARM, otherwise deletion succeeds as no-op.
# The wait should be under 30s, but is capped at 1m as flakiness here results in an accumulation of expensive DB instances over time.
# Also note that the deletion call only blocks until the request is process, do not rely on it for mutex on the same DB,
# deletion may be ongoing in sequential runs.
sleep 1m
az sql db delete --resource-group ${{ env.AzureResourceGroupName }} --server ${{ env.AzureSqlServerName }} -n ${{ env.AzureSqlServerDbName }} --yes
- name: Cleanup Cloudflare KV
if: always() && matrix.component == 'state.cloudflare.workerskv'
continue-on-error: true
env:
CLOUDFLARE_ACCOUNT_ID: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
CLOUDFLARE_API_TOKEN: ${{ secrets.CLOUDFLARE_API_TOKEN }}
run: |
# Delete the Worker
curl -X DELETE "https://api.cloudflare.com/client/v4/accounts/${CLOUDFLARE_ACCOUNT_ID}/workers/scripts/${{ env.CloudflareWorkerName }}" \
-H "Authorization: Bearer ${CLOUDFLARE_API_TOKEN}"
# Delete the KV namespace
curl -X DELETE "https://api.cloudflare.com/client/v4/accounts/${CLOUDFLARE_ACCOUNT_ID}/storage/kv/namespaces/${{ env.CloudflareKVNamespaceID }}" \
-H "Authorization: Bearer ${CLOUDFLARE_API_TOKEN}"
# Download the required certificates into files, and set env var pointing to their names
- name: Clean up certs
if: matrix.required-certs != ''
- name: Delete downloaded certs
if: always() && matrix.required-certs != ''
run: |
for CERT_NAME in $(echo "${{ matrix.required-certs }}" | sed 's/,/ /g'); do
CERT_FILE=$(printenv $CERT_NAME)
echo "Cleaning up the certificate file $CERT_FILE..."
rm $CERT_FILE
rm $CERT_FILE || true
done
- name: Terraform Destroy
continue-on-error: true
run: terraform destroy -auto-approve -var="UNIQUE_ID=${{env.UNIQUE_ID}}" -var="TIMESTAMP=${{env.CURRENT_TIME}}"
working-directory: "./.github/infrastructure/terraform/conformance/${{ matrix.terraform-dir }}"
if: matrix.terraform-dir != ''
- name: Check conformance test passed
continue-on-error: false
run: |
@@ -655,9 +310,13 @@ jobs:
name: ${{ matrix.component }}_conformance_test
path: ${{ env.TEST_OUTPUT_FILE_PREFIX }}_conformance.*
- name: Run destroy script
if: always() && matrix.destroy-script != ''
run: .github/scripts/components-scripts/${{ matrix.destroy-script }}
post_job:
name: Post-completion
runs-on: ubuntu-latest
runs-on: ubuntu-22.04
if: always()
needs:
- conformance
@@ -687,9 +346,7 @@ jobs:
uses: actions/github-script@v6
with:
script: |
const prComponents = ('${{ needs.generate-matrix.outputs.pr-components }}' && JSON.parse('${{ needs.generate-matrix.outputs.pr-components }}')) || []
const cronComponents = ('${{ needs.generate-matrix.outputs.cron-components }}' && JSON.parse('${{ needs.generate-matrix.outputs.cron-components }}')) || []
const allComponents = [...prComponents, ...cronComponents]
const allComponents = JSON.parse('${{ needs.generate-matrix.outputs.test-matrix }}')
const basePath = '${{ steps.testresults.outputs.download-path }}'
const testType = 'conformance'
@@ -717,7 +374,7 @@ jobs:
let found = false
let success = false
try {
let read =fs.readFileSync(path.join(basePath, component + '.txt'), 'utf8')
let read = fs.readFileSync(path.join(basePath, component + '.txt'), 'utf8')
read = read.split('\n')[0]
switch (read) {
case '1':

.prettierignore Normal file
View File

@@ -0,0 +1 @@
internal/component/cloudflare/workers/code/

.prettierrc.json Normal file
View File

@@ -0,0 +1,6 @@
{
"trailingComma": "es5",
"tabWidth": 4,
"semi": false,
"singleQuote": true
}

View File

@@ -216,6 +216,19 @@ check-component-metadata-schema-diff: component-metadata-schema
bundle-component-metadata:
$(RUN_BUILD_TOOLS) bundle-component-metadata > ../component-metadata-bundle.json
################################################################################
# Prettier #
################################################################################
.PHONY: prettier-install prettier-check prettier-format
prettier-install:
npm install --global prettier
prettier-check:
npx prettier --check "*/**/*.{ts,js,mjs,json}"
prettier-format:
npx prettier --write "*/**/*.{ts,js,mjs,json}"
################################################################################
# Target: conf-tests #
################################################################################

View File

@@ -106,13 +106,13 @@ const router = Router()
return errorRes
}
let expirationTtl: number|undefined = undefined
let expirationTtl: number | undefined = undefined
const reqUrl = new URL(req.url)
const ttlParam = parseInt(reqUrl.searchParams.get('ttl') ||'', 10)
const ttlParam = parseInt(reqUrl.searchParams.get('ttl') || '', 10)
if (ttlParam > 0) {
expirationTtl = ttlParam
}
await namespace!.put(key!, req.body!, {expirationTtl})
await namespace!.put(key!, req.body!, { expirationTtl })
return new Response('', { status: 201 })
}
@@ -172,7 +172,10 @@ async function setupKVRequest(
return { errorRes: new Response('Bad request', { status: 400 }) }
}
const namespace = env[req.params.namespace] as KVNamespace<string>
if (typeof namespace != 'object' || !['KVNamespace', 'KvNamespace'].includes(namespace?.constructor?.name)) {
if (
typeof namespace != 'object' ||
!['KVNamespace', 'KvNamespace'].includes(namespace?.constructor?.name)
) {
return {
errorRes: new Response(
`Worker is not bound to KV '${req.params.kv}'`,

File diff suppressed because one or more lines are too long

View File

@@ -3,4 +3,4 @@
"nestedsecret": {
"secret": "efgh"
}
}
}

View File

@@ -1,9 +1,9 @@
# Supported operations: get, bulkget
componentType: secretstores
components:
- component: localenv
operations: ["get"]
- component: localfile
- component: local.env
allOperations: true
- component: local.file
allOperations: true
- component: azure.keyvault.certificate
allOperations: true

View File

@@ -63,17 +63,16 @@
1. Test setup is independent of the test run.
2. Run the service that needs to be conformance tested locally or in your own cloud account.
- For cloud-agnostic components such as Kafka, MQTT etc., there are `docker-compose` definitions under the [/.github/infrastructure](https://github.com/dapr/components-contrib/tree/master/.github/infrastructure) folder you can use to quickly create an instance of the service. For example, to setup Kafka for conformance tests:
- For cloud-agnostic components such as Kafka, MQTT, etc., there are `docker-compose` definitions under the [/.github/infrastructure](../../.github/infrastructure/) folder you can use to quickly create an instance of the service. For example, to set up Kafka for conformance tests (a teardown sketch follows the command below):
```bash
docker-compose -f ./.github/infrastructure/docker-compose-kafka.yml -p kafka up -d
```
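The same compose file can be used to tear the environment down again once you are done. A minimal sketch (the `-v` flag, which also removes the volumes created by the compose file, is an optional assumption):

```bash
# Stop and remove the Kafka containers (and their volumes) started for the conformance tests
docker-compose -f ./.github/infrastructure/docker-compose-kafka.yml -p kafka down -v
```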
- For Azure components such as Blob Storage, Key Vault etc., there is an automation script that can help you create the resources under your subscription, and extract the environment variables needed to run the conformance tests. See [/.github/infrastructure/conformance/azure/README.md](../../.github/infrastructure/conformance/azure/README.md) for more details.
- Some components require additional setup or teardown scripts, which are placed in [/.github/scripts/components-scripts/](../../.github/scripts/components-scripts/)
> Given the variability in components and how they need to be set up for the conformance tests, you may need to refer to the [GitHub workflow for conformance tests](../../.github/workflows/conformance.yml) for any extra setup required by some components. E.g. Azure Event Grid bindings require setting up an Ngrok instance or similar endpoint for the test.
3. Some conformance tests require credentials in the form of environment variables. For examples Azure CosmosDB conformance tests will need to have Azure CosmosDB credentials. You will need to supply them to make these tests pass.
3. Some conformance tests require credentials in the form of environment variables. For example, Azure Cosmos DB conformance tests need Azure Cosmos DB credentials, which you will need to supply to make these tests pass (see the sketch below).
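A minimal sketch for the Cosmos DB case, assuming the conformance component reads the same variable names the CI workflow exports from Key Vault; the placeholder values are yours to fill in:

```bash
# Hypothetical local values; in CI these come from the Azure Key Vault secrets of the same names
export AzureCosmosDBUrl="https://<your-account>.documents.azure.com:443/"
export AzureCosmosDBMasterKey="<your-master-key>"
export AzureCosmosDB="<your-database>"
export AzureCosmosDBCollection="<your-collection>"
```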
4. To run specific tests, run:
```bash
@@ -121,22 +120,22 @@ If you want to combine VS Code & dlv for debugging so you can set breakpoints in
}
```
## Using terraform for conformance tests
## Using Terraform for conformance tests
If you are writing new conformance tests and they require cloud resources, you should use the
terraform framework we have in place. To enable your component test to use terraform there are a few changes in the normal steps you must do.
If you are writing new conformance tests and they require cloud resources, you should use the Terraform framework we have in place. To enable your component test to use Terraform, there are a few changes to the normal steps that you must make.
1. In the `conformance.yml` you should create a new step in a workflow for your component that creates new env variables. You will need a variable for each specific resource your tests will use. If you require 3 different topics and 2 different tables for your tests you should have 5 different env variables set. The only convention you must follow for the variables is the value must use `env.UNIQUE_ID` to ensure there are no conflicts with the resource names.
1. Create a setup and teardown script in [/.github/scripts/components-scripts/](../../.github/scripts/components-scripts/) for your component. You should also define new env variables, one for each specific resource your tests will use: if you require 3 different topics and 2 different tables, you should have 5 different env variables set. The only convention you must follow is that each value must use `$UNIQUE_ID` to ensure there are no conflicts with the resource names.
```bash
PUBSUB_AWS_SNSSQS_QUEUE="testQueue-${{ env.UNIQUE_ID }}"
echo "PUBSUB_AWS_SNSSQS_QUEUE=$PUBSUB_AWS_SNSSQS_QUEUE" >> $GITHUB_ENV
echo "PUBSUB_AWS_SNSSQS_QUEUE=testQueue-${UNIQUE_ID}" >> $GITHUB_ENV
```
Take a look at the AWS DynamoDB [setup](../../.github/scripts/components-scripts/conformance-state.aws.dynamodb-setup.sh) and [teardown](../../.github/scripts/components-scripts/conformance-state.aws.dynamodb-destroy.sh) scripts as an example; a simplified sketch follows.
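A minimal sketch of such a setup script, using illustrative resource names (the linked DynamoDB scripts are the authoritative reference):

```bash
#!/bin/sh
set -e

# Resource names embed $UNIQUE_ID so that parallel workflow runs do not collide
echo "STATE_AWS_DYNAMODB_TABLE_1=conformance-test-terraform-basic-${UNIQUE_ID}" >> $GITHUB_ENV
echo "STATE_AWS_DYNAMODB_TABLE_2=conformance-test-terraform-partition-key-${UNIQUE_ID}" >> $GITHUB_ENV
```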
2. When updating the `tests.yml` defined inside the `tests/config/<COMPONENT-TYPE>/` folder, you should override the default names of any resources the conformance tests use. These values should reference env variables, which should be defined in `conformance.yml`.
```yaml
- component: aws.snssqs.terraform
- component: aws.snssqs.terraform
operations: ["publish", "subscribe", "multiplehandlers"]
config:
pubsubName: aws-snssqs
@@ -148,22 +147,22 @@ terraform framework we have in place. To enable your component test to use terra
3. When writing your `component.yml`, you should reference your credentials using env variables, and any resources specified in the YAML should use env variables as well, just as you did in the `tests.yml`. Also, if your component has an option that controls resource creation, such as `disableEntityManagement`, you will need to set it so that it prohibits new resource creation: we want only Terraform to provision resources for these tests, not Dapr itself.
```yaml
metadata:
metadata:
- name: accessKey
value: ${{AWS_ACCESS_KEY_ID}}
value: ${{AWS_ACCESS_KEY_ID}}
- name: secretKey
value: ${{AWS_SECRET_ACCESS_KEY}}
value: ${{AWS_SECRET_ACCESS_KEY}}
- name: region
value: "us-east-1"
value: "us-east-1"
- name: consumerID
value: ${{PUBSUB_AWS_SNSSQS_QUEUE}}
value: ${{PUBSUB_AWS_SNSSQS_QUEUE}}
- name: disableEntityManagement
value: "true"
value: "true"
```
4. You will need to create a new Terraform file `component.tf` to provision your resources. The file should be placed in its own folder in the `.github/infrastructure/terraform/conformance` directory, such as
`.github/infrastructure/terraform/conformance/pubsub/aws/snsqsq`. The terraform file should use a UNIQUE_ID variables and use this variables when naming its resources so they matched the names defined earlier. Make sure any resources your tests will use are defined in terraform.
`.github/infrastructure/terraform/conformance/pubsub/aws/snsqsq`. The Terraform file should use a `UNIQUE_ID` variable and use this variable when naming its resources so they match the names defined earlier. Make sure any resources your tests will use are defined in Terraform (see the apply sketch after the snippet below).
```
variable "UNIQUE_ID" {
@@ -172,23 +171,18 @@ terraform framework we have in place. To enable your component test to use terra
}
```
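The workflow (via the component's setup script) passes these variables through to Terraform. A sketch of the calls, mirroring the `init`/`validate`/`apply` commands used elsewhere in this workflow; the directory shown is the snssqs example from the workflow's Terraform tree:

```bash
cd .github/infrastructure/terraform/conformance/pubsub/aws/snssqs
terraform init
terraform validate -no-color
terraform apply -auto-approve \
  -var="UNIQUE_ID=${UNIQUE_ID}" \
  -var="TIMESTAMP=$(date --rfc-3339=date)"
```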
5. The component should be added to the `cron-components` step in conformance test workflow `.github/conformance.yml`. The component should have a variable named `terraform-dir` and the value should be the relative path from `.github/infrastructure/terraform/conformance` to the folder which the tests personal terraform files are located such as `pubsub/aws/snsqsq`.
5. Register your test in the [/.github/scripts/test-info.mjs](../../.github/scripts/test-info.mjs) file, making sure to set `requiresTerraform: true` (a quick way to check the generated matrix is sketched below).
```
- component: pubsub.aws.snssqs.terraform
terraform-dir: pubsub/aws/snssqs
```
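To sanity-check the registration, you can run the same command the workflow's matrix-generation step uses (running it locally and reading its output is an assumption, and it requires Node.js):

```bash
# "conformance" selects the test type; "true" also includes the cloud components, as in scheduled runs
node .github/scripts/test-info.mjs conformance true
```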
## Adding new AWS component in github actions
## Adding a new AWS component in GitHub Actions
1. For tests involving AWS components, we use a service account to provision the resources needed. If you are contributing a brand-new component, you will need to make sure our account has sufficient permissions to provision the resources and handle the component. A Dapr STC member will have to update the service account, so contact them for assistance.
2. In your component YAML for your tests, you should set the component metadata properties `accesskey` and `secretkey` to the values of `${{AWS_ACCESS_KEY_ID}}` and `${{AWS_SECRET_ACCESS_KEY}}`. These env values will contain the credentials for the testing service account (a local-credentials sketch follows the example below).
```yaml
metadata:
metadata:
- name: accessKey
value: ${{AWS_ACCESS_KEY_ID}}
value: ${{AWS_ACCESS_KEY_ID}}
- name: secretKey
value: ${{AWS_SECRET_ACCESS_KEY}}
```
value: ${{AWS_SECRET_ACCESS_KEY}}
```
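For local runs of these tests, a minimal sketch of providing the credentials yourself; the variable names match what the workflow exports, while the values and region are placeholders:

```bash
# Hypothetical local credentials; in CI these are injected from the repository secrets
export AWS_ACCESS_KEY_ID="<access-key-id>"
export AWS_SECRET_ACCESS_KEY="<secret-access-key>"
export AWS_REGION="us-west-1"   # region used by the conformance workflow; adjust as needed
```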

View File

@@ -510,9 +510,9 @@ func loadSecretStore(tc TestComponent) secretstores.SecretStore {
store = ss_azure.NewAzureKeyvaultSecretStore(testLogger)
case "kubernetes":
store = ss_kubernetes.NewKubernetesSecretStore(testLogger)
case "localenv":
case "local.env":
store = ss_local_env.NewEnvSecretStore(testLogger)
case "localfile":
case "local.file":
store = ss_local_file.NewLocalSecretStore(testLogger)
case "hashicorp.vault":
store = ss_hashicorp_vault.NewHashiCorpVaultSecretStore(testLogger)

View File

@@ -1,4 +1,4 @@
{
"conftestsecret": "abcd",
"secondsecret": "efgh"
}
}

View File

@@ -15,7 +15,6 @@ package secretstores
import (
"context"
"os"
"testing"
"github.com/stretchr/testify/assert"
@@ -46,8 +45,8 @@ func NewTestConfig(name string, allOperations bool, operations []string) TestCon
func ConformanceTests(t *testing.T, props map[string]string, store secretstores.SecretStore, config TestConfig) {
// TODO add support for metadata
// For local env var based component test
os.Setenv("conftestsecret", "abcd")
defer os.Unsetenv("conftestsecret")
t.Setenv("conftestsecret", "abcd")
t.Setenv("secondsecret", "efgh")
// Init
t.Run("init", func(t *testing.T) {

View File

@@ -2,21 +2,21 @@
"name": "Dapr Components E2E Environment - Zeebe",
"dockerComposeFile": ["./docker-compose.yml"],
"service": "dapr",
"extensions": [
"golang.go",
"ms-azuretools.vscode-dapr",
"ms-azuretools.vscode-docker",
"ms-kubernetes-tools.vscode-kubernetes-tools"
],
// As a workaround for Codespaces not supporting workspaceFolder/workspace mount, create
// a symlink from /workspaces/components-contrib to /go/src/github.com/dapr/components-contrib
"postCreateCommand": "bash /usr/local/share/setup-gopath.sh components-contrib true",
// On Linux, this will prevent new files getting created as root, but you
// may need to update the USER_UID and USER_GID in docker/Dockerfile-dev
// to match your user if not 1000.
"remoteUser": "dapr",
"extensions": [
"golang.go",
"ms-azuretools.vscode-dapr",
"ms-azuretools.vscode-docker",
"ms-kubernetes-tools.vscode-kubernetes-tools"
],
// As a workaround for Codespaces not supporting workspaceFolder/workspace mount, create
// a symlink from /workspaces/components-contrib to /go/src/github.com/dapr/components-contrib
"postCreateCommand": "bash /usr/local/share/setup-gopath.sh components-contrib true",
// On Linux, this will prevent new files getting created as root, but you
// may need to update the USER_UID and USER_GID in docker/Dockerfile-dev
// to match your user if not 1000.
"remoteUser": "dapr",
"settings": {
"terminal.integrated.defaultProfile.linux": "bash",
},
"workspaceFolder": "/go/src/github.com/dapr/components-contrib",
"terminal.integrated.defaultProfile.linux": "bash"
},
"workspaceFolder": "/go/src/github.com/dapr/components-contrib"
}

View File

@@ -2,43 +2,38 @@
// https://github.com/microsoft/vscode-dev-containers/tree/v0.209.6/containers/docker-existing-docker-compose
// If you want to run as a non-root user in the container, see .devcontainer/docker-compose.yml.
{
"name": "Existing Docker Compose (Extend)",
"name": "Existing Docker Compose (Extend)",
// Update the 'dockerComposeFile' list if you have more compose files or use different names.
// The .devcontainer/docker-compose.yml file contains any overrides you need/want to make.
"dockerComposeFile": [
"../docker-compose.yaml",
"docker-compose.yml"
],
// Update the 'dockerComposeFile' list if you have more compose files or use different names.
// The .devcontainer/docker-compose.yml file contains any overrides you need/want to make.
"dockerComposeFile": ["../docker-compose.yaml", "docker-compose.yml"],
// The 'service' property is the name of the service for the container that VS Code should
// use. Update this value and .devcontainer/docker-compose.yml to the real service name.
"service": "dev",
// The 'service' property is the name of the service for the container that VS Code should
// use. Update this value and .devcontainer/docker-compose.yml to the real service name.
"service": "dev",
// The optional 'workspaceFolder' property is the path VS Code should open by default when
// connected. This is typically a file mount in .devcontainer/docker-compose.yml
"workspaceFolder": "/workspace",
// The optional 'workspaceFolder' property is the path VS Code should open by default when
// connected. This is typically a file mount in .devcontainer/docker-compose.yml
"workspaceFolder": "/workspace",
// Set *default* container specific settings.json values on container create.
"settings": {},
// Set *default* container specific settings.json values on container create.
"settings": {},
// Add the IDs of extensions you want installed when the container is created.
"extensions": [
"golang.go"
]
// Add the IDs of extensions you want installed when the container is created.
"extensions": ["golang.go"]
// Use 'forwardPorts' to make a list of ports inside the container available locally.
// "forwardPorts": [],
// Use 'forwardPorts' to make a list of ports inside the container available locally.
// "forwardPorts": [],
// Uncomment the next line if you want start specific services in your Docker Compose config.
// "runServices": [],
// Uncomment the next line if you want start specific services in your Docker Compose config.
// "runServices": [],
// Uncomment the next line if you want to keep your containers running after VS Code shuts down.
// "shutdownAction": "none",
// Uncomment the next line if you want to keep your containers running after VS Code shuts down.
// "shutdownAction": "none",
// Uncomment the next line to run commands after the container is created - for example installing curl.
// "postCreateCommand": "apt-get update && apt-get install -y curl",
// Uncomment the next line to run commands after the container is created - for example installing curl.
// "postCreateCommand": "apt-get update && apt-get install -y curl",
// Uncomment to connect as a non-root user if you've added one. See https://aka.ms/vscode-remote/containers/non-root.
// "remoteUser": "vscode"
// Uncomment to connect as a non-root user if you've added one. See https://aka.ms/vscode-remote/containers/non-root.
// "remoteUser": "vscode"
}

View File

@@ -1,4 +1,3 @@
{
"page": {
"limit": 2

View File

@@ -1,4 +1,3 @@
{
"filter": {
"EQ": {

View File

@@ -1,4 +1,3 @@
{
"filter": {
"EQ": {

View File

@@ -8,7 +8,7 @@
},
{
"IN": {
"state":["CA", "WA"]
"state": ["CA", "WA"]
}
}
]