Merge branch 'v1.13' into patch-2

Hannah Hunter 2024-01-31 14:48:07 -05:00 committed by GitHub
commit 93d2bf60bc
11 changed files with 279 additions and 126 deletions

View File

@@ -1,109 +0,0 @@
name: Azure Static Web App Root
on:
  workflow_dispatch:
  push:
    branches:
      - v1.12
  pull_request:
    types: [opened, synchronize, reopened, closed]
    branches:
      - v1.12
concurrency:
  # Cancel the previously triggered build for only PR build.
  group: website-${{ github.event.pull_request.number || github.sha }}
  cancel-in-progress: true
jobs:
  build_and_deploy_job:
    name: Build Hugo Website
    if: github.event.action != 'closed'
    runs-on: ubuntu-latest
    env:
      SWA_BASE: 'proud-bay-0e9e0e81e'
      HUGO_ENV: production
    steps:
      - name: Checkout docs repo
        uses: actions/checkout@v3
        with:
          submodules: true
      - name: Setup Node
        uses: actions/setup-node@v2
        with:
          node-version: '14'
      - name: Setup Hugo
        uses: peaceiris/actions-hugo@v2.5.0
        with:
          hugo-version: 0.102.3
          extended: true
      - name: Setup Docsy
        run: |
          cd daprdocs
          git submodule update --init --recursive
          sudo npm install -D --save autoprefixer
          sudo npm install -D --save postcss-cli
      - name: Build Hugo Website
        run: |
          cd daprdocs
          git config --global --add safe.directory /github/workspace
          if [ $GITHUB_EVENT_NAME == 'pull_request' ]; then
            STAGING_URL="https://${SWA_BASE}-${{github.event.number}}.westus2.azurestaticapps.net/"
          fi
          hugo ${STAGING_URL+-b "$STAGING_URL"}
      - name: Deploy docs site
        uses: Azure/static-web-apps-deploy@v1
        with:
          azure_static_web_apps_api_token: ${{ secrets.AZURE_STATIC_WEB_APPS_API_TOKEN_PROUD_BAY_0E9E0E81E }}
          repo_token: ${{ secrets.GITHUB_TOKEN }}
          action: "upload"
          app_location: "daprdocs/public/"
          api_location: "daprdocs/public/"
          output_location: ""
          skip_app_build: true
          skip_deploy_on_missing_secrets: true
      - name: Upload Hugo artifacts
        uses: actions/upload-artifact@v3
        with:
          name: hugo_build
          path: ./daprdocs/public/
          if-no-files-found: error
  close_staging_site:
    if: github.event_name == 'pull_request' && github.event.action == 'closed'
    runs-on: ubuntu-latest
    name: Close Pull Request Job
    steps:
      - name: Close Pull Request
        id: closepullrequest
        uses: Azure/static-web-apps-deploy@v1
        with:
          azure_static_web_apps_api_token: ${{ secrets.AZURE_STATIC_WEB_APPS_API_TOKEN_PROUD_BAY_0E9E0E81E }}
          action: "close"
          skip_deploy_on_missing_secrets: true
  algolia_index:
    name: Index site for Algolia
    if: github.event_name == 'push'
    needs: ['build_and_deploy_job']
    runs-on: ubuntu-latest
    env:
      ALGOLIA_APP_ID: ${{ secrets.ALGOLIA_APP_ID }}
      ALGOLIA_API_WRITE_KEY: ${{ secrets.ALGOLIA_API_WRITE_KEY }}
      ALGOLIA_INDEX_NAME: daprdocs
    steps:
      - name: Checkout docs repo
        uses: actions/checkout@v2
        with:
          submodules: false
      - name: Download Hugo artifacts
        uses: actions/download-artifact@v3
        with:
          name: hugo_build
          path: site/
      - name: Install Python packages
        run: |
          pip install --upgrade bs4
          pip install --upgrade 'algoliasearch>=2.0,<3.0'
      - name: Index site
        run: python ./.github/scripts/algolia.py ./site

View File

@@ -1,13 +1,13 @@
-name: Azure Static Web App v1.12
+name: Azure Static Web App v1.13
on:
  push:
    branches:
-      - v1.12
+      - v1.13
  pull_request:
    types: [opened, synchronize, reopened, closed]
    branches:
-      - v1.12
+      - v1.13
jobs:
  build_and_deploy_job:
@@ -28,7 +28,7 @@ jobs:
        HUGO_ENV: production
        HUGO_VERSION: "0.100.2"
      with:
-        azure_static_web_apps_api_token: ${{ secrets.AZURE_STATIC_WEB_APPS_V1_12 }}
+        azure_static_web_apps_api_token: ${{ secrets.AZURE_STATIC_WEB_APPS_V1_13 }}
        repo_token: ${{ secrets.GITHUB_TOKEN }} # Used for Github integrations (i.e. PR comments)
        skip_deploy_on_missing_secrets: true
        action: "upload"
@@ -49,6 +49,6 @@ jobs:
      id: closepullrequest
      uses: Azure/static-web-apps-deploy@v1
      with:
-        azure_static_web_apps_api_token: ${{ secrets.AZURE_STATIC_WEB_APPS_V1_12 }}
+        azure_static_web_apps_api_token: ${{ secrets.AZURE_STATIC_WEB_APPS_V1_13 }}
        skip_deploy_on_missing_secrets: true
        action: "close"

View File

@@ -1,5 +1,5 @@
# Site Configuration
-baseURL = "https://docs.dapr.io"
+baseURL = "https://v1-13.docs.dapr.io"
title = "Dapr Docs"
theme = "docsy"
disableFastRender = true
@@ -183,17 +183,17 @@ github_subdir = "daprdocs"
github_branch = "v1.12"
# Versioning
-version_menu = "v1.12 (latest)"
-version = "v1.12"
+version_menu = "v1.13 (preview)"
+version = "v1.13"
archived_version = false
url_latest_version = "https://docs.dapr.io"
[[params.versions]]
  version = "v1.13 (preview)"
-  url = "https://v1-13.docs.dapr.io"
+  url = "#"
[[params.versions]]
  version = "v1.12 (latest)"
-  url = "#"
+  url = "https://docs.dapr.io"
[[params.versions]]
  version = "v1.11"
  url = "https://v1-11.docs.dapr.io"

View File

@@ -52,6 +52,14 @@ For example:
For more information read [Pluggable components overview]({{< ref "pluggable-components-overview" >}})
+## Hot Reloading
+With the [`HotReload` feature enabled]({{< ref "support-preview-features.md" >}}), components can be "hot reloaded" at runtime.
+This means that you can update component configuration without restarting the Dapr runtime.
+Component reloading occurs when a component resource is created, updated, or deleted, either in the Kubernetes API or, in self-hosted mode, when a file is changed in the `resources` directory.
+When a component is updated, the component is first closed and then reinitialized using the new configuration.
+The component is unavailable for a short period of time during reload and reinitialization.
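Hot reloading is gated behind the `HotReload` preview feature. A minimal sketch of a Dapr Configuration resource that enables it (the resource name `featureconfig` is illustrative):

```yaml
apiVersion: dapr.io/v1alpha1
kind: Configuration
metadata:
  name: featureconfig
spec:
  features:
    - name: HotReload
      enabled: true
```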
## Available component types
The following are the component types provided by Dapr:

View File

@@ -6,18 +6,27 @@ weight: 300
description: "Updating deployed components used by applications"
---
-When making an update to an existing deployed component used by an application, Dapr does not update the component automatically. The Dapr sidecar needs to be restarted in order to pick up the latest version of the component. How this done depends on the hosting environment.
+When making an update to an existing deployed component used by an application, Dapr does not update the component automatically unless the `HotReload` feature gate is enabled.
+The Dapr sidecar needs to be restarted in order to pick up the latest version of the component.
+How this is done depends on the hosting environment.
+{{% alert title="Note" color="primary" %}}
+Dapr can be made to "hot reload" components, where updates are picked up automatically without needing a restart.
+This is enabled via the [`HotReload` feature gate]({{< ref "support-preview-features.md" >}}).
+All component types are supported for hot reloading.
+This feature is currently in preview.
+{{% /alert %}}
## Kubernetes
When running in Kubernetes, the process of updating a component involves two steps:
-1. Applying the new component YAML to the desired namespace
-2. Performing a [rollout restart operation](https://kubernetes.io/docs/reference/kubectl/cheatsheet/#updating-resources) on your deployments to pick up the latest component
+1. Apply the new component YAML to the desired namespace
+1. Unless the [`HotReload` feature gate is enabled]({{< ref "support-preview-features.md" >}}), perform a [rollout restart operation](https://kubernetes.io/docs/reference/kubectl/cheatsheet/#updating-resources) on your deployments to pick up the latest component (see the example below)
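For example, a minimal sketch of those two steps with `kubectl`, assuming a component manifest named `my-component.yaml` and a deployment named `myapp` (both names are illustrative):

```bash
# 1. Apply the updated component manifest to the target namespace
kubectl apply -f my-component.yaml -n my-namespace

# 2. Restart the deployment so its Dapr sidecars pick up the new component
#    (not needed when the HotReload preview feature is enabled)
kubectl rollout restart deployment/myapp -n my-namespace
```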
## Self Hosted
-When running in Self Hosted mode, the process of updating a component involves a single step of stopping the `daprd` process and starting it again to pick up the latest component.
+Unless the [`HotReload` feature gate is enabled]({{< ref "support-preview-features.md" >}}), the process of updating a component involves a single step of stopping and restarting the `daprd` process to pick up the latest component.
## Further reading
- [Components concept]({{< ref components-concept.md >}})

View File

@@ -22,4 +22,4 @@ For CLI there is no explicit opt-in, just the version that this was first made a
| **Service invocation for non-Dapr endpoints** | Allow the invocation of non-Dapr endpoints by Dapr using the [Service invocation API]({{< ref service_invocation_api.md >}}). Read ["How-To: Invoke Non-Dapr Endpoints using HTTP"]({{< ref howto-invoke-non-dapr-endpoints.md >}}) for more information. | N/A | [Service invocation API]({{< ref service_invocation_api.md >}}) | v1.11 |
| **Actor State TTL** | Allow actors to save records to state stores with Time To Live (TTL) set to automatically clean up old data. In its current implementation, actor state with TTL may not be reflected correctly by clients, read [Actor State Transactions]({{< ref actors_api.md >}}) for more information. | `ActorStateTTL` | [Actor State Transactions]({{< ref actors_api.md >}}) | v1.11 |
| **Transactional Outbox** | Allows state operations for inserts and updates to be published to a configured pub/sub topic using a single transaction across the state store and the pub/sub | N/A | [Transactional Outbox Feature]({{< ref howto-outbox.md >}}) | v1.12 |
+| **Component Hot Reloading** | Allows for Dapr-loaded components to be "hot reloaded". A component spec is reloaded when it is created/updated/deleted in Kubernetes or on file when running in self-hosted mode. | `HotReload` | [Hot Reloading]({{< ref components-concept.md >}}) | v1.13 |

View File

@@ -46,6 +46,7 @@ This table is meant to help users understand the equivalent options for running
| `--sentry-address` | `--sentry-address` | | not supported | Address for the [Sentry CA service]({{< ref sentry >}}) |
| `--version` | `--version` | `-v` | not supported | Prints the runtime version |
| `--dapr-graceful-shutdown-seconds` | not supported | | `dapr.io/graceful-shutdown-seconds` | Graceful shutdown duration in seconds for Dapr, the maximum duration before forced shutdown when waiting for all in-progress requests to complete. Defaults to `5`. If you are running in Kubernetes mode, this value should not be larger than the Kubernetes termination grace period, whose default value is `30`. |
+| `--dapr-block-shutdown-duration` | not supported | | `dapr.io/block-shutdown-duration` | Block shutdown duration, if set, blocks the graceful shutdown procedure (as described above) from starting until the given duration has elapsed or the application becomes unhealthy as configured through application health options. This is useful for applications that need to execute Dapr APIs during their own termination procedure. Any new invocations of any Dapr APIs are not available to the application once the block has expired. Accepts a [Go duration](https://pkg.go.dev/time#ParseDuration) string. |
| not supported | not supported | | `dapr.io/enabled` | Setting this parameter to true injects the Dapr sidecar into the pod |
| not supported | not supported | | `dapr.io/api-token-secret` | Tells Dapr which Kubernetes secret to use for [token-based API authentication]({{< ref api-token >}}). By default this is not set |
| not supported | not supported | | `dapr.io/app-token-secret` | Tells Dapr which Kubernetes secret to use for [token-based application authentication]({{< ref app-api-token >}}). By default, this is not set |

View File

@@ -49,6 +49,16 @@ spec:
  value: "2.0.0"
- name: direction
  value: "input, output"
+- name: schemaRegistryURL # Optional. When using Schema Registry Avro serialization/deserialization. The Schema Registry URL.
+  value: http://localhost:8081
+- name: schemaRegistryAPIKey # Optional. When using Schema Registry Avro serialization/deserialization. The Schema Registry API Key.
+  value: XYAXXAZ
+- name: schemaRegistryAPISecret # Optional. When using Schema Registry Avro serialization/deserialization. The Schema Registry credentials API Secret.
+  value: "ABCDEFGMEADFF"
+- name: schemaCachingEnabled # Optional. When using Schema Registry Avro serialization/deserialization. Enables caching for schemas.
+  value: true
+- name: schemaLatestVersionCacheTTL # Optional. When using Schema Registry Avro serialization/deserialization. The TTL for schema caching when publishing a message with latest schema available.
+  value: 5m
```
## Spec metadata fields
@@ -75,6 +85,11 @@ spec:
| `version` | N | Input/Output | Kafka cluster version. Defaults to 2.0.0. Please note that this needs to be mandatorily set to `1.0.0` for EventHubs with Kafka. | `"1.0.0"` |
| `direction` | N | Input/Output | The direction of the binding. | `"input"`, `"output"`, `"input, output"` |
| `oidcExtensions` | N | Input/Output | String containing a JSON-encoded dictionary of OAuth2/OIDC extensions to request with the access token | `{"cluster":"kafka","poolid":"kafkapool"}` |
+| `schemaRegistryURL` | N | Required when using Schema Registry Avro serialization/deserialization. The Schema Registry URL. | `http://localhost:8081` |
+| `schemaRegistryAPIKey` | N | When using Schema Registry Avro serialization/deserialization. The Schema Registry credentials API Key. | `XYAXXAZ` |
+| `schemaRegistryAPISecret` | N | When using Schema Registry Avro serialization/deserialization. The Schema Registry credentials API Secret. | `ABCDEFGMEADFF` |
+| `schemaCachingEnabled` | N | When using Schema Registry Avro serialization/deserialization. Enables caching for schemas. Default is `true` | `true` |
+| `schemaLatestVersionCacheTTL` | N | When using Schema Registry Avro serialization/deserialization. The TTL for schema caching when publishing a message with latest schema available. Default is 5 min | `5m` |
#### Note
The metadata `version` must be set to `1.0.0` when using Azure EventHubs with Kafka.

View File

@@ -49,6 +49,17 @@ spec:
  value: 2.0.0
- name: disableTls # Optional. Disable TLS. This is not safe for production!! You should read the `Mutual TLS` section for how to use TLS.
  value: "true"
+- name: schemaRegistryURL # Optional. When using Schema Registry Avro serialization/deserialization. The Schema Registry URL.
+  value: http://localhost:8081
+- name: schemaRegistryAPIKey # Optional. When using Schema Registry Avro serialization/deserialization. The Schema Registry API Key.
+  value: XYAXXAZ
+- name: schemaRegistryAPISecret # Optional. When using Schema Registry Avro serialization/deserialization. The Schema Registry credentials API Secret.
+  value: "ABCDEFGMEADFF"
+- name: schemaCachingEnabled # Optional. When using Schema Registry Avro serialization/deserialization. Enables caching for schemas.
+  value: true
+- name: schemaLatestVersionCacheTTL # Optional. When using Schema Registry Avro serialization/deserialization. The TTL for schema caching when publishing a message with latest schema available.
+  value: 5m
```
> For details on using `secretKeyRef`, see the guide on [how to reference secrets in components]({{< ref component-secrets.md >}}).
@@ -81,6 +92,11 @@ spec:
| oidcClientSecret | N | The OAuth2 client secret that has been provisioned in the identity provider: Required when `authType` is set to `oidc` | `"KeFg23!"` |
| oidcScopes | N | Comma-delimited list of OAuth2/OIDC scopes to request with the access token. Recommended when `authType` is set to `oidc`. Defaults to `"openid"` | `"openid,kafka-prod"` |
| oidcExtensions | N | String containing a JSON-encoded dictionary of OAuth2/OIDC extensions to request with the access token | `{"cluster":"kafka","poolid":"kafkapool"}` |
+| schemaRegistryURL | N | Required when using Schema Registry Avro serialization/deserialization. The Schema Registry URL. | `http://localhost:8081` |
+| schemaRegistryAPIKey | N | When using Schema Registry Avro serialization/deserialization. The Schema Registry credentials API Key. | `XYAXXAZ` |
+| schemaRegistryAPISecret | N | When using Schema Registry Avro serialization/deserialization. The Schema Registry credentials API Secret. | `ABCDEFGMEADFF` |
+| schemaCachingEnabled | N | When using Schema Registry Avro serialization/deserialization. Enables caching for schemas. Default is `true` | `true` |
+| schemaLatestVersionCacheTTL | N | When using Schema Registry Avro serialization/deserialization. The TTL for schema caching when publishing a message with latest schema available. Default is 5 min | `5m` |
The `secretKeyRef` above is referencing a [kubernetes secrets store]({{< ref kubernetes-secret-store.md >}}) to access the tls information. Visit [here]({{< ref setup-secret-store.md >}}) to learn more about how to configure a secret store component.
@@ -386,6 +402,103 @@ curl -X POST http://localhost:3500/v1.0/publish/myKafka/myTopic?metadata.correla
}'
```
## Avro Schema Registry serialization/deserialization
You can configure pub/sub to publish or consume data encoded using [Avro binary serialization](https://avro.apache.org/docs/), leveraging an [Apache Schema Registry](https://developer.confluent.io/courses/apache-kafka/schema-registry/) (for example, [Confluent Schema Registry](https://developer.confluent.io/courses/apache-kafka/schema-registry/), [Apicurio](https://www.apicur.io/registry/)).
### Configuration
{{% alert title="Important" color="warning" %}}
Currently, only message value serialization/deserialization is supported. Since cloud events are not supported, the `rawPayload=true` metadata must be passed.
{{% /alert %}}
When configuring the Kafka pub/sub component metadata, you must define:
- The schema registry URL
- The API key/secret, if applicable
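For example, a minimal sketch of a Kafka pub/sub component with the Schema Registry settings in place (broker address, registry URL, and credentials are placeholders):

```yaml
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: pubsub
spec:
  type: pubsub.kafka
  version: v1
  metadata:
    - name: brokers
      value: "localhost:9092"
    - name: authType
      value: "none"
    # Schema Registry connection details (placeholder values)
    - name: schemaRegistryURL
      value: "http://localhost:8081"
    - name: schemaRegistryAPIKey
      value: "XYAXXAZ"
    - name: schemaRegistryAPISecret
      value: "ABCDEFGMEADFF"
```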
Schema subjects are automatically derived from topic names, using the standard naming convention. For example, for a topic named `my-topic`, the schema subject will be `my-topic-value`.
When interacting with the message payload within the service, it is in JSON format. The payload is transparently serialized/deserialized within the Dapr component.
Date/Datetime fields must be passed as their [Epoch Unix timestamp](https://en.wikipedia.org/wiki/Unix_time) equivalent (rather than the typical ISO 8601 representation). For example:
- `2024-01-10T04:36:05.986Z` should be passed as `1704861365986` (the number of milliseconds since Jan 1st, 1970)
- `2024-01-10` should be passed as `19732` (the number of days since Jan 1st, 1970)
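The following Python sketch (names are illustrative) shows one way to derive the epoch-based values used in these examples:

```python
from datetime import date, datetime, timedelta, timezone

epoch = datetime(1970, 1, 1, tzinfo=timezone.utc)

# Datetime -> milliseconds since the Unix epoch
created = datetime(2024, 1, 10, 4, 36, 5, 986000, tzinfo=timezone.utc)
created_millis = (created - epoch) // timedelta(milliseconds=1)   # 1704861365986

# Date -> days since the Unix epoch
day = date(2024, 1, 10)
days_since_epoch = (day - date(1970, 1, 1)).days                  # 19732
```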
### Publishing Avro messages
To indicate to the Kafka pub/sub component that a message should be published using Avro serialization, set the `valueSchemaType` metadata to `Avro`.
{{< tabs curl "Python SDK">}}
{{% codetab %}}
```bash
curl -X "POST" "http://localhost:3500/v1.0/publish/pubsub/my-topic?metadata.rawPayload=true&metadata.valueSchemaType=Avro" -H "Content-Type: application/json" -d '{"order_number": "345", "created_date": 1704861365986}'
```
{{% /codetab %}}
{{% codetab %}}
```python
from dapr.clients import DaprClient
import json

with DaprClient() as d:
    req_data = {
        'order_number': '345',
        'created_date': 1704861365986
    }

    # Create a typed message with content type and body
    resp = d.publish_event(
        pubsub_name='pubsub',
        topic_name='my-topic',
        data=json.dumps(req_data),
        publish_metadata={'rawPayload': 'true', 'valueSchemaType': 'Avro'}
    )

    # Print the request
    print(req_data, flush=True)
```
{{% /codetab %}}
{{< /tabs >}}
### Subscribing to Avro topics
To indicate to the Kafka pub/sub component that incoming messages should be deserialized using Avro, set the `valueSchemaType` metadata to `Avro` in the subscription metadata.
{{< tabs "Python (FastAPI)" >}}
{{% codetab %}}
```python
from fastapi import FastAPI, APIRouter, Body, Response, status
import json
import sys

app = FastAPI()
router = APIRouter()


@router.get('/dapr/subscribe')
def subscribe():
    subscriptions = [{'pubsubname': 'pubsub',
                      'topic': 'my-topic',
                      'route': 'my_topic_subscriber',
                      'metadata': {
                          'rawPayload': 'true',
                          'valueSchemaType': 'Avro',
                      }}]
    return subscriptions


@router.post('/my_topic_subscriber')
def my_topic_subscriber(event_data=Body()):
    print(event_data, flush=True)
    return Response(status_code=status.HTTP_200_OK)


app.include_router(router)
```
{{% /codetab %}}
{{< /tabs >}}
## Create a Kafka instance
{{< tabs "Self-Hosted" "Kubernetes">}}

View File

@@ -9,15 +9,16 @@ aliases:
## Component format
To set up AWS SNS/SQS pub/sub, create a component of type `pubsub.aws.snssqs`.
By default, the AWS SNS/SQS component:
- Generates the SNS topics
- Provisions the SQS queues
- Configures a subscription of the queues to the topics
{{% alert title="Note" color="primary" %}}
If you only have a publisher and no subscriber, only the SNS topics are created.
However, if you have a subscriber, SNS, SQS, and the dynamic or static subscription thereof are generated.
{{% /alert %}}
@@ -133,6 +134,22 @@ When configuring the PubSub component with SQS dead-letter queues, the metadata
When running the Dapr sidecar (`daprd`) with your application on EKS (AWS Kubernetes) node/pod already attached to an IAM policy defining access to AWS resources, you **must not** provide AWS access-key, secret-key, and tokens in the definition of the component spec.
{{% /alert %}}
#### SNS/SQS Contention with Dapr
Fundamentally, SNS aggregates messages from multiple publisher topics into a single SQS queue by creating SQS subscriptions to those topics. As a subscriber, the SNS/SQS pub/sub component consumes messages from that sole SQS queue.
However, like any SQS consumer, the component cannot selectively retrieve the messages published to the SNS topics to which it is specifically subscribed. This can result in the component receiving messages originating from topics without associated handlers. Typically, this occurs during:
- **Component initialization:** If infrastructure subscriptions are ready before component subscription handlers, or
- **Shutdown:** If component handlers are removed before infrastructure subscriptions.
Since this issue affects any SQS consumer of multiple SNS topics, the component cannot prevent consuming messages from topics lacking handlers. When this happens, the component logs an error indicating such messages were erroneously retrieved.
In these situations, the unhandled messages reappear in SQS, and each additional pull increments their [receive count](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html#sqs-receive-count). As a result, an unhandled message can exceed its `messageReceiveLimit` and be lost.
{{% alert title="Important" color="warning" %}}
Consider potential contention scenarios when using SNS/SQS with Dapr, and configure `messageReceiveLimit` appropriately. It is highly recommended to use SQS dead-letter queues by setting `sqsDeadLettersQueueName` to prevent losing messages.
{{% /alert %}}
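A minimal sketch of the relevant pub/sub component metadata (names and values are illustrative; AWS credentials and other settings are omitted):

```yaml
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: snssqs-pubsub
spec:
  type: pubsub.aws.snssqs
  version: v1
  metadata:
    - name: region
      value: "us-east-1"
    # Allow a few redeliveries before giving up on a message
    - name: messageReceiveLimit
      value: "10"
    # Route messages that exceed the receive limit to a dead-letter queue
    # instead of losing them
    - name: sqsDeadLettersQueueName
      value: "myapp-dlq"
```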
## Create an SNS/SQS instance

View File

@@ -0,0 +1,99 @@
---
type: docs
title: Dapr errors
linkTitle: "Dapr errors"
weight: 700
description: "Information on Dapr errors and how to handle them"
---
## Error handling: Understanding the error model and reporting
Initially, errors followed the [Standard gRPC error model](https://grpc.io/docs/guides/error/#standard-error-model). However, to provide more detailed and informative error messages, an enhanced error model has been defined which aligns with the gRPC [Richer error model](https://grpc.io/docs/guides/error/#richer-error-model).
{{% alert title="Note" color="primary" %}}
Not all Dapr errors have been converted to the richer gRPC error model.
{{% /alert %}}
### Standard gRPC Error Model
The [Standard gRPC error model](https://grpc.io/docs/guides/error/#standard-error-model) is an approach to error reporting in gRPC. Each error response includes an error code and an error message. The error codes are standardized and reflect common error conditions.
**Example of a Standard gRPC Error Response:**
```
ERROR:
Code: InvalidArgument
Message: input key/keyPrefix 'bad||keyname' can't contain '||'
```
### Richer gRPC Error Model
The [Richer gRPC error model](https://grpc.io/docs/guides/error/#richer-error-model) extends the standard error model by providing additional context and details about the error. This model includes the standard error `code` and `message`, along with a `details` section that can contain various types of information, such as `ErrorInfo`, `ResourceInfo`, and `BadRequest` details.
**Example of a Richer gRPC Error Response:**
```
ERROR:
Code: InvalidArgument
Message: input key/keyPrefix 'bad||keyname' can't contain '||'
Details:
1) {
     "@type": "type.googleapis.com/google.rpc.ErrorInfo",
     "domain": "dapr.io",
     "reason": "DAPR_STATE_ILLEGAL_KEY"
   }
2) {
     "@type": "type.googleapis.com/google.rpc.ResourceInfo",
     "resourceName": "statestore",
     "resourceType": "state"
   }
3) {
     "@type": "type.googleapis.com/google.rpc.BadRequest",
     "fieldViolations": [
       {
         "field": "bad||keyname",
         "description": "input key/keyPrefix 'bad||keyname' can't contain '||'"
       }
     ]
   }
```
For HTTP clients, Dapr translates the gRPC error model to a similar structure in JSON format. The response includes an `errorCode`, a `message`, and a `details` array that mirrors the structure found in the richer gRPC model.
**Example of an HTTP error response:**
```json
{
  "errorCode": "ERR_MALFORMED_REQUEST",
  "message": "api error: code = InvalidArgument desc = input key/keyPrefix 'bad||keyname' can't contain '||'",
  "details": [
    {
      "@type": "type.googleapis.com/google.rpc.ErrorInfo",
      "domain": "dapr.io",
      "metadata": null,
      "reason": "DAPR_STATE_ILLEGAL_KEY"
    },
    {
      "@type": "type.googleapis.com/google.rpc.ResourceInfo",
      "description": "",
      "owner": "",
      "resource_name": "statestore",
      "resource_type": "state"
    },
    {
      "@type": "type.googleapis.com/google.rpc.BadRequest",
      "field_violations": [
        {
          "field": "bad||keyname",
          "description": "api error: code = InvalidArgument desc = input key/keyPrefix 'bad||keyname' can't contain '||'"
        }
      ]
    }
  ]
}
```
You can find the specification of all the possible status details [here](https://github.com/googleapis/googleapis/blob/master/google/rpc/error_details.proto).
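As an illustration, a small Python sketch (using the `requests` library; the state store name and key are placeholders) that triggers this error and inspects the returned details:

```python
import requests

# Attempt to save state with an illegal key to provoke the error above
resp = requests.post(
    "http://localhost:3500/v1.0/state/statestore",
    json=[{"key": "bad||keyname", "value": "1"}],
)

if resp.status_code >= 400:
    body = resp.json()
    print(body.get("errorCode"), "-", body.get("message"))
    for detail in body.get("details", []):
        if detail.get("@type", "").endswith("google.rpc.ErrorInfo"):
            print("reason:", detail.get("reason"))
```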
## Related Links
- [Authoring error codes](https://github.com/dapr/dapr/tree/master/pkg/api/errors)
- [Using error codes in the Go SDK](https://docs.dapr.io/developing-applications/sdks/go/go-client/#error-handling)