Merge branch 'v1.11' into issue_3423

This commit is contained in:
Hannah Hunter 2023-06-21 16:56:05 -04:00 committed by GitHub
commit 46e32f5c70
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
44 changed files with 723 additions and 295 deletions

118
.github/scripts/algolia.py vendored Normal file
View File

@ -0,0 +1,118 @@
import os
import sys

from bs4 import BeautifulSoup
from algoliasearch.search_client import SearchClient

# Base hostname used to derive each page's site-relative path.
url = "docs.dapr.io"

# Directory to index: first CLI argument, or the current working directory.
if len(sys.argv) > 1:
    starting_directory = os.path.join(os.getcwd(), str(sys.argv[1]))
else:
    starting_directory = os.getcwd()

ALGOLIA_APP_ID = os.getenv('ALGOLIA_APP_ID')
ALGOLIA_API_KEY = os.getenv('ALGOLIA_API_WRITE_KEY')
ALGOLIA_INDEX_NAME = os.getenv('ALGOLIA_INDEX_NAME')

client = SearchClient.create(ALGOLIA_APP_ID, ALGOLIA_API_KEY)
index = client.init_index(ALGOLIA_INDEX_NAME)

excluded_files = [
    "404.html",
]

excluded_directories = [
    "zh-hans",
]

# Section name -> base rank; lower ranks surface first in search results.
rankings = {
    "Getting started": 0,
    "Concepts": 100,
    "Developing applications": 200,
    "Operations": 300,
    "Reference": 400,
    "Contributing": 500,
    "Home": 600
}


def scan_directory(directory: str, pages: list):
    """Recursively collect indexable HTML pages under `directory`."""
    if os.path.basename(directory) in excluded_directories:
        print(f'Skipping directory: {directory}')
        return
    for file in os.listdir(directory):
        path = os.path.join(directory, file)
        if os.path.isfile(path):
            if file.endswith(".html") and file not in excluded_files:
                # Pages can opt out of indexing with a marker comment.
                if '<!-- DISABLE_ALGOLIA -->' not in open(path, encoding="utf8").read():
                    print(f'Indexing: {path}')
                    pages.append(path)
                else:
                    print(f'Skipping hidden page: {path}')
        else:
            scan_directory(path, pages)


def parse_file(path: str):
    """Build one Algolia record from a rendered HTML page."""
    data = {}
    data["hierarchy"] = {}
    data["rank"] = 999
    data["subrank"] = 99
    data["type"] = "lvl2"
    data["lvl0"] = ""
    data["lvl1"] = ""
    data["lvl2"] = ""
    data["lvl3"] = ""
    text = ""
    subrank = 0
    with open(path, "r", errors='ignore') as file:
        content = file.read()
        soup = BeautifulSoup(content, "html.parser")
        for meta in soup.find_all("meta"):
            if meta.get("name") == "description":
                data["lvl2"] = meta.get("content")
                data["hierarchy"]["lvl1"] = meta.get("content")
            elif meta.get("property") == "og:title":
                data["lvl0"] = meta.get("content")
                data["hierarchy"]["lvl0"] = meta.get("content")
                data["hierarchy"]["lvl2"] = meta.get("content")
            elif meta.get("property") == "og:url":
                data["url"] = meta.get("content")
                data["path"] = meta.get("content").split(url)[1]
                data["objectID"] = meta.get("content").split(url)[1]
        # Deeper pages (more breadcrumbs) rank below their section landing page.
        breadcrumbs = soup.find_all("li", class_="breadcrumb-item")
        try:
            subrank = len(breadcrumbs)
            data["subrank"] = subrank
        except Exception:
            subrank = 99
            data["subrank"] = 99
        for bc in breadcrumbs:
            section = bc.text.strip()
            data["lvl1"] = section
            data["hierarchy"]["lvl0"] = section
            try:
                data["rank"] = rankings[section] + subrank
            except KeyError:
                print(f"Rank not found for section {section}")
                data["rank"] = 998
            break  # Only the top-level breadcrumb determines the rank.
        for p in soup.find_all("p"):
            if p.text != "":
                text = text + p.text
        data["text"] = text
    return data


def index_payload(payload):
    # Atomically replace the index contents with the freshly built records.
    res = index.replace_all_objects(payload)
    res.wait()


if __name__ == "__main__":
    pages = []
    payload = []
    scan_directory(starting_directory, pages)
    for page in pages:
        data = parse_file(page)
        if "objectID" in data:
            payload.append(data)
    index_payload(payload)
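
For reference, here is a sketch of the record `parse_file` builds for a single page (the page values are hypothetical; the field names and rank arithmetic follow the script above):

```yaml
objectID: /getting-started/install-dapr-cli/   # og:url with "docs.dapr.io" stripped; also stored as "path"
url: https://docs.dapr.io/getting-started/install-dapr-cli/
path: /getting-started/install-dapr-cli/
type: lvl2
rank: 2          # rankings["Getting started"] (0) + subrank
subrank: 2       # number of breadcrumb items on the page
lvl0: Install the Dapr CLI                     # og:title
lvl1: Getting started                          # top-level breadcrumb section
lvl2: Install the Dapr CLI as your main tool   # meta description
lvl3: ""
hierarchy:
  lvl0: Getting started
  lvl1: Install the Dapr CLI as your main tool
  lvl2: Install the Dapr CLI
text: Concatenated text of every <p> element on the page
```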

View File

@ -1,6 +1,7 @@
name: Azure Static Web App Root
on:
  workflow_dispatch:
  push:
    branches:
      - v1.11
@ -9,35 +10,66 @@ on:
    branches:
      - v1.11
concurrency:
  # Cancel the previously triggered build only for PR builds.
  group: website-${{ github.event.pull_request.number || github.sha }}
  cancel-in-progress: true
jobs:
  build_and_deploy_job:
    if: github.event_name == 'push' || (github.event_name == 'pull_request' && github.event.action != 'closed')
    name: Build Hugo Website
    if: github.event.action != 'closed'
    runs-on: ubuntu-latest
    name: Build and Deploy Job
    env:
      SWA_BASE: 'proud-bay-0e9e0e81e'
      HUGO_ENV: production
    steps:
      - uses: actions/checkout@v3
      - name: Checkout docs repo
        uses: actions/checkout@v3
        with:
          submodules: recursive
          fetch-depth: 0
          submodules: true
      - name: Setup Node
        uses: actions/setup-node@v2
        with:
          node-version: '14'
      - name: Setup Hugo
        uses: peaceiris/actions-hugo@v2.5.0
        with:
          hugo-version: 0.102.3
          extended: true
      - name: Setup Docsy
        run: cd daprdocs && git submodule update --init --recursive && sudo npm install -D --save autoprefixer && sudo npm install -D --save postcss-cli
      - name: Build And Deploy
        id: builddeploy
        run: |
          cd daprdocs
          git submodule update --init --recursive
          sudo npm install -D --save autoprefixer
          sudo npm install -D --save postcss-cli
      - name: Build Hugo Website
        run: |
          cd daprdocs
          git config --global --add safe.directory /github/workspace
          if [ $GITHUB_EVENT_NAME == 'pull_request' ]; then
            STAGING_URL="https://${SWA_BASE}-${{github.event.number}}.westus2.azurestaticapps.net/"
          fi
          hugo ${STAGING_URL+-b "$STAGING_URL"}
      - name: Deploy docs site
        uses: Azure/static-web-apps-deploy@v1
        env:
          HUGO_ENV: production
          HUGO_VERSION: "0.100.2"
        with:
          azure_static_web_apps_api_token: ${{ secrets.AZURE_STATIC_WEB_APPS_API_TOKEN_PROUD_BAY_0E9E0E81E }}
          skip_deploy_on_missing_secrets: true
          repo_token: ${{ secrets.GITHUB_TOKEN }} # Used for GitHub integrations (i.e. PR comments)
          repo_token: ${{ secrets.GITHUB_TOKEN }}
          action: "upload"
          app_location: "/daprdocs"
          app_build_command: "git config --global --add safe.directory /github/workspace && hugo"
          output_location: "public"
          skip_api_build: true
          app_location: "daprdocs/public/"
          api_location: "daprdocs/public/"
          output_location: ""
          skip_app_build: true
          skip_deploy_on_missing_secrets: true
      - name: Upload Hugo artifacts
        uses: actions/upload-artifact@v3
        with:
          name: hugo_build
          path: ./daprdocs/public/
          if-no-files-found: error
  close_pull_request_job:
  close_staging_site:
    if: github.event_name == 'pull_request' && github.event.action == 'closed'
    runs-on: ubuntu-latest
    name: Close Pull Request Job
@ -48,3 +80,30 @@ jobs:
        with:
          azure_static_web_apps_api_token: ${{ secrets.AZURE_STATIC_WEB_APPS_API_TOKEN_PROUD_BAY_0E9E0E81E }}
          action: "close"
          skip_deploy_on_missing_secrets: true
  algolia_index:
    name: Index site for Algolia
    if: github.event_name == 'push'
    needs: ['build_and_deploy_job']
    runs-on: ubuntu-latest
    env:
      ALGOLIA_APP_ID: ${{ secrets.ALGOLIA_APP_ID }}
      ALGOLIA_API_WRITE_KEY: ${{ secrets.ALGOLIA_API_WRITE_KEY }}
      ALGOLIA_INDEX_NAME: daprdocs
    steps:
      - name: Checkout docs repo
        uses: actions/checkout@v2
        with:
          submodules: false
      - name: Download Hugo artifacts
        uses: actions/download-artifact@v3
        with:
          name: hugo_build
          path: site/
      - name: Install Python packages
        run: |
          pip install --upgrade bs4
          pip install --upgrade 'algoliasearch>=2.0,<3.0'
      - name: Index site
        run: python ./.github/scripts/algolia.py ./site
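
The `hugo ${STAGING_URL+-b "$STAGING_URL"}` line in the build step above uses POSIX alternate-value expansion: `${VAR+word}` expands to `word` only when `VAR` is set, so pull request builds pass a per-PR staging base URL while pushes build with the site default. A quick sketch of that behavior (plain shell; the PR number `1234` is hypothetical):

```sh
unset STAGING_URL
echo hugo ${STAGING_URL+-b "$STAGING_URL"}
# prints: hugo

STAGING_URL="https://proud-bay-0e9e0e81e-1234.westus2.azurestaticapps.net/"
echo hugo ${STAGING_URL+-b "$STAGING_URL"}
# prints: hugo -b https://proud-bay-0e9e0e81e-1234.westus2.azurestaticapps.net/
```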

View File

@ -1,38 +1,12 @@
// Code formatting.
.copy-code-button {
  color: #272822;
  background-color: #FFF;
  border-color: #0D2192;
  border: 2px solid;
  border-radius: 3px 3px 0px 0px;
  /* right-align */
  display: block;
  margin-left: auto;
  margin-right: 0;
  margin-bottom: -2px;
  padding: 3px 8px;
  font-size: 0.8em;
.highlight .copy-icon {
  position: absolute;
  right: 20px;
  top: 18px;
  opacity: 0.7;
}
.copy-code-button:hover {
  cursor: pointer;
  background-color: #F2F2F2;
}
.copy-code-button:focus {
  /* Avoid an ugly focus outline on click in Chrome,
     but darken the button for accessibility.
     See https://stackoverflow.com/a/25298082/1481479 */
  background-color: #E6E6E6;
  outline: 0;
}
.copy-code-button:active {
  background-color: #D9D9D9;
}
.highlight pre {
  /* Avoid pushing up the copy buttons. */
@ -40,25 +14,31 @@
}
.td-content {
  // Highlighted code.
  // Highlighted code.
  .highlight {
    @extend .card;
    margin: 0rem 0;
    padding: 0rem;
    margin-bottom: 2rem;
    max-width: 100%;
    border: none;
    pre {
      margin: 0;
      padding: 1rem;
      border-radius: 10px;
    }
  }
  // Inline code
  p code, li > code, table code {
  p code,
  li>code,
  table code {
    color: inherit;
    padding: 0.2em 0.4em;
    margin: 0;
@ -78,11 +58,11 @@
    word-wrap: normal;
    background-color: $gray-100;
    padding: $spacer;
    max-width: 100%;
    > code {
      background-color: inherit !important;
    >code {
      background-color: inherit !important;
      padding: 0;
      margin: 0;
      font-size: 100%;

View File

@ -39,11 +39,11 @@ Style and tone conventions should be followed throughout all Dapr documentation
## Diagrams and images
Diagrams and images are invaluable visual aids for documentation pages. Diagrams are kept in a [Dapr Diagrams Deck](https://github.com/dapr/docs/tree/v1.10/daprdocs/static/presentations), which includes guidance on style and icons.
Diagrams and images are invaluable visual aids for documentation pages. Diagrams are kept in a [Dapr Diagrams Deck](https://github.com/dapr/docs/tree/v1.11/daprdocs/static/presentations), which includes guidance on style and icons.
As you create diagrams for your documentation:
- Save them as high-res PNG files into the [images folder](https://github.com/dapr/docs/tree/v1.10/daprdocs/static/images).
- Save them as high-res PNG files into the [images folder](https://github.com/dapr/docs/tree/v1.11/daprdocs/static/images).
- Name your PNG files using the convention of a concept or building block so that they are grouped.
- For example: `service-invocation-overview.png`.
- For more information on calling out images using shortcode, see the [Images guidance](#images) section below.
@ -458,4 +458,4 @@ Steps to add a language:
## Next steps
Get started by copying and working from one of [the Dapr doc templates]({{< ref docs-templates >}}).
Get started by copying and working from one of [the Dapr doc templates]({{< ref docs-templates >}}).

View File

@ -130,7 +130,7 @@ You can add additional fields to a custom CloudEvent that are not part of the of
Publish a CloudEvent to the `orders` topic:
```bash
dapr publish --publish-app-id orderprocessing --pubsub order-pub-sub --topic orders --data '{"specversion" : "1.0", "type" : "com.dapr.cloudevent.sent", "source" : "testcloudeventspubsub", "subject" : "Cloud Events Test", "id" : "someCloudEventId", "time" : "2021-08-02T09:00:00Z", "datacontenttype" : "application/cloudevents+json", "data" : {"orderId": "100"}}'
dapr publish --publish-app-id orderprocessing --pubsub order-pub-sub --topic orders --data '{\"orderId\": \"100\"}'
```
{{% /codetab %}}

View File

@ -25,13 +25,14 @@ The diagram below is an example of how dead letter topics work. First a message
The following YAML shows how to configure a subscription with a dead letter topic named `poisonMessages` for messages consumed from the `orders` topic. This subscription is scoped to an app with a `checkout` ID.
```yaml
apiVersion: dapr.io/v1alpha1
apiVersion: dapr.io/v2alpha1
kind: Subscription
metadata:
  name: order
spec:
  topic: orders
  route: /checkout
  routes:
    default: /checkout
  pubsubname: pubsub
  deadLetterTopic: poisonMessages
scopes:
@ -86,13 +87,16 @@ spec:
Remember to now configure a subscription to handle the dead letter topics. For example, you can create another declarative subscription to receive these on the same or a different application. The example below shows the checkout application subscribing to the `poisonMessages` topic with another subscription and sending these to be handled by the `/failedmessages` endpoint.
```yaml
apiVersion: dapr.io/v1alpha1
apiVersion: dapr.io/v2alpha1
kind: Subscription
metadata:
  name: deadlettertopics
spec:
  topic: poisonMessages
  route: /failedMessages
  routes:
    rules:
      - match:
        path: /failedMessages
  pubsubname: pubsub
scopes:
- checkout

View File

@ -141,13 +141,14 @@ $app->start();
Similarly, you can subscribe to raw events declaratively by adding the `rawPayload` metadata entry to your subscription specification.
```yaml
apiVersion: dapr.io/v1alpha1
apiVersion: dapr.io/v2alpha1
kind: Subscription
metadata:
  name: myevent-subscription
spec:
  topic: deathStarStatus
  route: /dsstatus
  routes:
    default: /dsstatus
  pubsubname: pubsub
  metadata:
    rawPayload: "true"

View File

@ -22,13 +22,14 @@ The examples below demonstrate pub/sub messaging between a `checkout` app and an
You can subscribe declaratively to a topic using an external component file. This example uses a YAML component file named `subscription.yaml`:
```yaml
apiVersion: dapr.io/v1alpha1
apiVersion: dapr.io/v2alpha1
kind: Subscription
metadata:
  name: order
spec:
  topic: orders
  route: /checkout
  routes:
    default: /checkout
  pubsubname: pubsub
scopes:
- orderprocessing

View File

@ -47,7 +47,7 @@ The diagram below is an overview of how Dapr's service invocation works when inv
## Using an HTTPEndpoint resource or FQDN URL for non-Dapr endpoints
There are two ways to invoke a non-Dapr endpoint when communicating either to Dapr applications or non-Dapr applications. A Dapr application can invoke a non-Dapr endpoint by providing one of the following:
- A named `HTTPEndpoint` resource, including defining an `HTTPEndpoint` resource type. See the [HTTPEndpoint reference]({{< ref httpendpoints-reference.md >}}) guide for an example.
- A named `HTTPEndpoint` resource, including defining an `HTTPEndpoint` resource type. See the [HTTPEndpoint reference]({{< ref httpendpoints-schema.md >}}) guide for an example.
```sh
localhost:3500/v1.0/invoke/<HTTPEndpoint-name>/method/<my-method>
@ -81,7 +81,7 @@ curl http://localhost:3602/v1.0/invoke/orderprocessor/method/checkout
## Related Links
- [HTTPEndpoint reference]({{< ref httpendpoints-reference.md >}})
- [HTTPEndpoint reference]({{< ref httpendpoints-schema.md >}})
- [Service invocation overview]({{< ref service-invocation-overview.md >}})
- [Service invocation API specification]({{< ref service_invocation_api.md >}})

View File

@ -42,6 +42,9 @@ Even though metadata values can contain secrets in plain text, it is recommended
Depending on the pub/sub message bus you are using and how it is configured, topics may be created automatically. Even if the message bus supports automatic topic creation, it is a common governance practice to disable it in production environments. You may still need to use a CLI, admin console, or request form to manually create the topics required by your application.
{{% /alert %}}
While all pub/sub components support `consumerID` metadata, the runtime creates a consumer ID if you do not supply one. All component metadata field values can carry [templated metadata values]({{< ref "component-schema.md#templated-metadata-values" >}}), which are resolved on Dapr sidecar startup.
For example, you can choose to use `{namespace}` as the `consumerGroup` to enable using the same `appId` in different namespaces using the same topics as described in [this article]({{< ref "howto-namespace.md#with-namespace-consumer-groups">}}).
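As a concrete sketch (the component name and broker address are hypothetical), a Kafka pub/sub component using the templated value could look like:

```yaml
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: pubsub
spec:
  type: pubsub.kafka
  version: v1
  metadata:
  - name: brokers
    value: "dapr-kafka.myapp.svc.cluster.local:9092"
  - name: consumerGroup
    value: "{namespace}"   # resolved to the sidecar's namespace on startup
```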
Visit [this guide]({{< ref "howto-publish-subscribe.md#step-3-publish-a-topic" >}}) for instructions on configuring and using pub/sub components.
## Related links

View File

@ -15,118 +15,120 @@ description: "Enable Dapr metrics and logs with Azure Monitor for Azure Kubernet
## Enable Prometheus metric scrape using config map
1. Make sure that omsagents are running
1. Make sure that Azure Monitor Agents (AMA) are running.
```bash
$ kubectl get pods -n kube-system
NAME                           READY   STATUS    RESTARTS   AGE
...
omsagent-75qjs                 1/1     Running   1          44h
omsagent-c7c4t                 1/1     Running   0          44h
omsagent-rs-74f488997c-dshpx   1/1     Running   1          44h
omsagent-smtk7                 1/1     Running   1          44h
...
```
   ```bash
   $ kubectl get pods -n kube-system
   NAME                          READY   STATUS    RESTARTS   AGE
   ...
   ama-logs-48kpv                2/2     Running   0          2d13h
   ama-logs-mx24c                2/2     Running   0          2d13h
   ama-logs-rs-f9bbb9898-vbt6k   1/1     Running   0          30h
   ama-logs-sm2mz                2/2     Running   0          2d13h
   ama-logs-z7p4c                2/2     Running   0          2d13h
   ...
   ```
2. Apply config map to enable Prometheus metrics endpoint scrape.
1. Apply config map to enable Prometheus metrics endpoint scrape.
You can use [azm-config-map.yaml](/docs/azm-config-map.yaml) to enable prometheus metrics endpoint scrape.
   You can use [azm-config-map.yaml](/docs/azm-config-map.yaml) to enable Prometheus metrics endpoint scrape.
If you installed Dapr to the different namespace, you need to change the `monitor_kubernetes_pod_namespaces` array values. For example:
   If you installed Dapr to a different namespace, you need to change the `monitor_kubernetes_pod_namespaces` array values. For example:
```yaml
...
prometheus-data-collection-settings: |-
  [prometheus_data_collection_settings.cluster]
    interval = "1m"
    monitor_kubernetes_pods = true
    monitor_kubernetes_pods_namespaces = ["dapr-system", "default"]
  [prometheus_data_collection_settings.node]
    interval = "1m"
...
```
   ```yaml
   ...
   prometheus-data-collection-settings: |-
     [prometheus_data_collection_settings.cluster]
       interval = "1m"
       monitor_kubernetes_pods = true
       monitor_kubernetes_pods_namespaces = ["dapr-system", "default"]
     [prometheus_data_collection_settings.node]
       interval = "1m"
   ...
   ```
Apply config map:
   Apply config map:
```bash
kubectl apply -f ./azm-config.map.yaml
```
   ```bash
   kubectl apply -f ./azm-config.map.yaml
   ```
## Install Dapr with JSON formatted logs
1. Install Dapr with enabling JSON-formatted logs
1. Install Dapr with JSON-formatted logs enabled.
```bash
helm install dapr dapr/dapr --namespace dapr-system --set global.logAsJson=true
```
   ```bash
   helm install dapr dapr/dapr --namespace dapr-system --set global.logAsJson=true
   ```
2. Enable JSON formatted log in Dapr sidecar and add Prometheus annotations.
1. Enable JSON-formatted logs in the Dapr sidecar and add Prometheus annotations.
> Note: OMS Agent scrapes the metrics only if replicaset has Prometheus annotations.
   > Note: The Azure Monitor Agents (AMA) only send the metrics if the Prometheus annotations are set.
Add `dapr.io/log-as-json: "true"` annotation to your deployment yaml.
   Add the `dapr.io/log-as-json: "true"` annotation to your deployment yaml.
Example:
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: pythonapp
  namespace: default
  labels:
    app: python
spec:
  replicas: 1
  selector:
    matchLabels:
      app: python
  template:
    metadata:
      labels:
        app: python
      annotations:
        dapr.io/enabled: "true"
        dapr.io/app-id: "pythonapp"
        dapr.io/log-as-json: "true"
        prometheus.io/scrape: "true"
        prometheus.io/port: "9090"
        prometheus.io/path: "/"
   Example:
...
```
   ```yaml
   apiVersion: apps/v1
   kind: Deployment
   metadata:
     name: pythonapp
     namespace: default
     labels:
       app: python
   spec:
     replicas: 1
     selector:
       matchLabels:
         app: python
     template:
       metadata:
         labels:
           app: python
         annotations:
           dapr.io/enabled: "true"
           dapr.io/app-id: "pythonapp"
           dapr.io/log-as-json: "true"
           prometheus.io/scrape: "true"
           prometheus.io/port: "9090"
           prometheus.io/path: "/"
   ...
   ```
## Search metrics and logs with Azure Monitor
1. Go to Azure Monitor
1. Go to Azure Monitor in the Azure portal.
2. Search Dapr logs
1. Search Dapr **Logs**.
Here is an example query, to parse JSON formatted logs and query logs from dapr system processes.
   Here is an example query to parse JSON-formatted logs and query logs from Dapr system processes.
```
ContainerLog
| extend parsed=parse_json(LogEntry)
| project Time=todatetime(parsed['time']), app_id=parsed['app_id'], scope=parsed['scope'],level=parsed['level'], msg=parsed['msg'], type=parsed['type'], ver=parsed['ver'], instance=parsed['instance']
| where level != ""
| sort by Time
```
   ```
   ContainerLog
   | extend parsed=parse_json(LogEntry)
   | project Time=todatetime(parsed['time']), app_id=parsed['app_id'], scope=parsed['scope'],level=parsed['level'], msg=parsed['msg'], type=parsed['type'], ver=parsed['ver'], instance=parsed['instance']
   | where level != ""
   | sort by Time
   ```
3. Search metrics
1. Search **Metrics**.
This query, queries process_resident_memory_bytes Prometheus metrics for Dapr system processes and renders timecharts
   This query queries `process_resident_memory_bytes` Prometheus metrics for Dapr system processes and renders timecharts.
```
InsightsMetrics
| where Namespace == "prometheus" and Name == "process_resident_memory_bytes"
| extend tags=parse_json(Tags)
| project TimeGenerated, Name, Val, app=tostring(tags['app'])
| summarize memInBytes=percentile(Val, 99) by bin(TimeGenerated, 1m), app
| where app startswith "dapr-"
| render timechart
```
   ```
   InsightsMetrics
   | where Namespace == "prometheus" and Name == "process_resident_memory_bytes"
   | extend tags=parse_json(Tags)
   | project TimeGenerated, Name, Val, app=tostring(tags['app'])
   | summarize memInBytes=percentile(Val, 99) by bin(TimeGenerated, 1m), app
   | where app startswith "dapr-"
   | render timechart
   ```
# References
## References
* [Configure scraping of Prometheus metrics with Azure Monitor for containers](https://docs.microsoft.com/azure/azure-monitor/insights/container-insights-prometheus-integration)
* [Configure agent data collection for Azure Monitor for containers](https://docs.microsoft.com/azure/azure-monitor/insights/container-insights-agent-config)
* [Azure Monitor Query](https://docs.microsoft.com/azure/azure-monitor/log-query/query-language)
- [Configure scraping of Prometheus metrics with Azure Monitor for containers](https://docs.microsoft.com/azure/azure-monitor/insights/container-insights-prometheus-integration)
- [Configure agent data collection for Azure Monitor for containers](https://docs.microsoft.com/azure/azure-monitor/insights/container-insights-agent-config)
- [Azure Monitor Query](https://docs.microsoft.com/azure/azure-monitor/log-query/query-language)

View File

@ -179,7 +179,7 @@ Example:
"topic": "newOrder",
"route": "/orders",
"metadata": {
"rawPayload": "true",
"rawPayload": "true"
}
}
]

View File

@ -39,6 +39,8 @@ spec:
    secretKeyRef:
      name: kafka-secrets
      key: saslPasswordSecret
  - name: saslMechanism
    value: "SHA-512"
  - name: initialOffset # Optional. Used for input bindings.
    value: "newest"
  - name: maxMessageBytes # Optional.
@ -61,6 +63,7 @@ spec:
| authType | Y | Input/Output | Configure or disable authentication. Supported values: `none`, `password`, `mtls`, or `oidc` | `"password"`, `"none"` |
| saslUsername | N | Input/Output | The SASL username used for authentication. Only required if `authRequired` is set to `"true"`. | `"adminuser"` |
| saslPassword | N | Input/Output | The SASL password used for authentication. Can be `secretKeyRef` to use a [secret reference]({{< ref component-secrets.md >}}). Only required if `authRequired` is set to `"true"`. | `""`, `"KeFg23!"` |
| saslMechanism | N | Input/Output | The SASL authentication mechanism to use. Only required if `authType` is set to `"password"`. If not provided, it defaults to `PLAINTEXT`, which could break some services, like Amazon Managed Service for Kafka. | `"SHA-512", "SHA-256", "PLAINTEXT"` |
| initialOffset | N | Input | The initial offset to use if no offset was previously committed. Should be "newest" or "oldest". Defaults to "newest". | `"oldest"` |
| maxMessageBytes | N | Input/Output | The maximum size in bytes allowed for a single Kafka message. Defaults to 1024. | `2048` |
| oidcTokenEndpoint | N | Input/Output | Full URL to an OAuth2 identity provider access token endpoint. Required when `authType` is set to `oidc` | "https://identity.example.com/v1/token" |

View File

@ -96,6 +96,8 @@ An HTTP 204 (No Content) and empty body is returned if successful.
You can get a record in Redis using the `get` operation. This gets a key that was previously set.
This takes an optional parameter `delete`, which defaults to `false`. When it is set to `true`, this operation uses the `GETDEL` operation of Redis: it returns the `value` that was previously set and then deletes it.
#### Request
```json
@ -120,6 +122,20 @@ You can get a record in Redis using the `get` operation. This gets a key that wa
}
```
#### Request with delete flag
```json
{
  "operation": "get",
  "metadata": {
    "key": "key1",
    "delete": "true"
  },
  "data": {
  }
}
```
### delete
You can delete a record in Redis using the `delete` operation. Returns success whether the key exists or not.
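A minimal request sketch for this operation, mirroring the `get` examples above (`key1` is a placeholder):

```json
{
  "operation": "delete",
  "metadata": {
    "key": "key1"
  }
}
```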

View File

@ -27,6 +27,8 @@ spec:
value: "***********"
- name: queueName
value: "myqueue"
# - name: pollingInterval
# value: "30s"
# - name: ttlInSeconds
# value: "60"
# - name: decodeBase64
@ -50,6 +52,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr
| `accountName` | Y | Input/Output | The name of the Azure Storage account | `"account1"` |
| `accountKey` | Y* | Input/Output | The access key of the Azure Storage account. Only required when not using Azure AD authentication. | `"access-key"` |
| `queueName` | Y | Input/Output | The name of the Azure Storage queue | `"myqueue"` |
| `pollingInterval` | N | Output | Set the interval to poll Azure Storage Queues for new messages, as a Go duration value. Default: `"10s"` | `"30s"` |
| `ttlInSeconds` | N | Output | Parameter to set the default message time to live. If this parameter is omitted, messages will expire after 10 minutes. See [also](#specifying-a-ttl-per-message) | `"60"` |
| `decodeBase64` | N | Output | Configuration to decode base64 file content before saving to Storage Queues. (In case of saving a file with binary content). Defaults to `false` | `true`, `false` |
| `encodeBase64` | N | Output | If enabled base64 encodes the data payload before uploading to Azure storage queues. Default `false`. | `true`, `false` |

View File

@ -9,7 +9,7 @@ aliases:
## Component format
To setup Apache Kafka pubsub create a component of type `pubsub.kafka`. See [this guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pubsub configuration. For details on using `secretKeyRef`, see the guide on [how to reference secrets in components]({{< ref component-secrets.md >}}).
To set up Apache Kafka pub/sub, create a component of type `pubsub.kafka`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration.
All component metadata field values can carry [templated metadata values]({{< ref "component-schema.md#templated-metadata-values" >}}), which are resolved on Dapr sidecar startup.
For example, you can choose to use `{namespace}` as the `consumerGroup` to enable using the same `appId` in different namespaces using the same topics as described in [this article]({{< ref "howto-namespace.md#with-namespace-consumer-groups">}}).
@ -27,6 +27,8 @@ spec:
value: "dapr-kafka.myapp.svc.cluster.local:9092"
- name: consumerGroup # Optional. Used for input bindings.
value: "{namespace}"
- name: consumerID # Optional. If not supplied, runtime will create one.
value: "channel1"
- name: clientID # Optional. Used as client tracing ID by Kafka brokers.
value: "my-dapr-app-id"
- name: authType # Required.
@ -49,12 +51,15 @@ spec:
value: "true"
```
> For details on using `secretKeyRef`, see the guide on [how to reference secrets in components]({{< ref component-secrets.md >}}).
## Spec metadata fields
| Field | Required | Details | Example |
|--------------------|:--------:|---------|---------|
| brokers | Y | A comma-separated list of Kafka brokers. | `"localhost:9092,dapr-kafka.myapp.svc.cluster.local:9093"`
| consumerGroup | N | A kafka consumer group to listen on. Each record published to a topic is delivered to one consumer within each consumer group subscribed to the topic. | `"group1"`
| consumerID | N | Consumer ID (consumer tag) organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; for example, a message is processed only once by one of the consumers in the group. If the `consumerID` is not provided, the Dapr runtime sets it to the Dapr application ID (`appID`) value. | `"channel1"`
| clientID | N | A user-provided string sent with every request to the Kafka brokers for logging, debugging, and auditing purposes. Defaults to `"sarama"`. | `"my-dapr-app"`
| authRequired | N | *Deprecated* Enable [SASL](https://en.wikipedia.org/wiki/Simple_Authentication_and_Security_Layer) authentication with the Kafka brokers. | `"true"`, `"false"`
| authType | Y | Configure or disable authentication. Supported values: `none`, `password`, `mtls`, or `oidc` | `"password"`, `"none"`

View File

@ -9,7 +9,8 @@ aliases:
## Component format
To setup AWS SNS/SQS for pub/sub, create a component of type `pubsub.snssqs`. [Learn more on how to create and apply a pubsub configuration]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}).
To set up AWS SNS/SQS pub/sub, create a component of type `pubsub.snssqs`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration.
```yaml
apiVersion: dapr.io/v1alpha1
@ -26,9 +27,11 @@ spec:
value: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
- name: region
value: "us-east-1"
# - name: consumerID # Optional. If not supplied, runtime will create one.
# value: "channel1"
# - name: endpoint # Optional.
# value: "http://localhost:4566"
# - name: sessionToken # Optional (mandatory if using AssignedRole, i.e. temporary accessKey and secretKey)
# - name: sessionToken # Optional (mandatory if using AssignedRole; for example, temporary accessKey and secretKey)
# value: "TOKEN"
# - name: messageVisibilityTimeout # Optional
# value: 10
@ -59,7 +62,7 @@ spec:
```
{{% alert title="Warning" color="warning" %}}
The above example uses secrets as plain strings. It is recommended to use [a secret store for the secrets]]({{< ref component-secrets.md >}}).
The above example uses secrets as plain strings. It is recommended to use [a secret store for the secrets]({{< ref component-secrets.md >}}).
{{% /alert %}}
## Spec metadata fields
@ -69,6 +72,7 @@ The above example uses secrets as plain strings. It is recommended to use [a sec
| accessKey | Y | ID of the AWS account/role with appropriate permissions to SNS and SQS (see below) | `"AKIAIOSFODNN7EXAMPLE"`
| secretKey | Y | Secret for the AWS user/role. If using an `AssumeRole` access, you will also need to provide a `sessionToken` |`"wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"`
| region | Y | The AWS region where the SNS/SQS assets are located or be created in. See [this page](https://aws.amazon.com/about-aws/global-infrastructure/regional-product-services/?p=ugi&l=na) for valid regions. Ensure that SNS and SQS are available in that region | `"us-east-1"`
| consumerID | N | Consumer ID (consumer tag) organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; for example, a message is processed only once by one of the consumers in the group. If the `consumerID` is not provided, the Dapr runtime sets it to the Dapr application ID (`appID`) value. | `"channel1"`
| endpoint | N | AWS endpoint for the component to use. Only used for local development with, for example, [localstack](https://github.com/localstack/localstack). The `endpoint` is unnecessary when running against production AWS | `"http://localhost:4566"`
| sessionToken | N | AWS session token to use. A session token is only required if you are using temporary security credentials | `"TOKEN"`
| messageReceiveLimit | N | Number of times a message can be received, after processing of that message fails, before it is removed from the queue. If `sqsDeadLettersQueueName` is specified, `messageReceiveLimit` is instead the number of failed receives before the message is moved to the SQS dead-letters queue. Default: `10` | `10`
@ -242,7 +246,7 @@ In order to run in AWS, create or assign an IAM user with permissions to the SNS
Plug the `AWS account ID` and `AWS account secret` into the `accessKey` and `secretKey` in the component metadata, using Kubernetes secrets and `secretKeyRef`.
Alternatively, let's say you want to provision the SNS and SQS assets using your own tool of choice (e.g. Terraform) while preventing Dapr from doing so dynamically. You need to enable `disableEntityManagement` and assign your Dapr-using application with an IAM Role, with a policy like:
Alternatively, let's say you want to provision the SNS and SQS assets using your own tool of choice (for example, Terraform) while preventing Dapr from doing so dynamically. You need to enable `disableEntityManagement` and assign your Dapr-using application with an IAM Role, with a policy like:
```json
{

View File

@ -9,7 +9,8 @@ aliases:
## Component format
To setup an Azure Event Hubs pub/sub, create a component of type `pubsub.azure.eventhubs`. See [this guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration.
To set up an Azure Event Hubs pub/sub, create a component of type `pubsub.azure.eventhubs`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration.
Apart from the configuration metadata fields shown below, Azure Event Hubs also supports [Azure Authentication]({{< ref "authenticating-azure.md" >}}) mechanisms.
```yaml
@ -28,6 +29,8 @@ spec:
  # Use eventHubNamespace when using Azure AD
  - name: eventHubNamespace
    value: "namespace"
  - name: consumerID # Optional. If not supplied, the runtime will create one.
    value: "channel1"
  - name: enableEntityManagement
    value: "false"
  # The following four properties are needed only if enableEntityManagement is set to true
@ -61,6 +64,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr
|--------------------|:--------:|---------|---------|
| `connectionString` | Y* | Connection string for the Event Hub or the Event Hub namespace.<br>* Mutually exclusive with `eventHubNamespace` field.<br>* Required when not using [Azure AD Authentication]({{< ref "authenticating-azure.md" >}}) | `"Endpoint=sb://{EventHubNamespace}.servicebus.windows.net/;SharedAccessKeyName={PolicyName};SharedAccessKey={Key};EntityPath={EventHub}"` or `"Endpoint=sb://{EventHubNamespace}.servicebus.windows.net/;SharedAccessKeyName={PolicyName};SharedAccessKey={Key}"`
| `eventHubNamespace` | Y* | The Event Hub Namespace name.<br>* Mutually exclusive with `connectionString` field.<br>* Required when using [Azure AD Authentication]({{< ref "authenticating-azure.md" >}}) | `"namespace"`
| `consumerID` | N | Consumer ID (consumer tag) organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; for example, a message is processed only once by one of the consumers in the group. If the `consumerID` is not provided, the Dapr runtime sets it to the Dapr application ID (`appID`) value. | `"channel1"`
| `storageAccountName` | Y | Storage account name to use for the checkpoint store. |`"myeventhubstorage"`
| `storageAccountKey` | Y* | Storage account key for the checkpoint store account.<br>* When using Azure AD, it's possible to omit this if the service principal has access to the storage account too. | `"112233445566778899"`
| `storageConnectionString` | Y* | Connection string for the checkpoint store, alternative to specifying `storageAccountKey` | `"DefaultEndpointsProtocol=https;AccountName=myeventhubstorage;AccountKey=<account-key>"`

View File

@ -9,7 +9,7 @@ aliases:
## Component format
To setup Azure Service Bus Queues pubsub create a component of type `pubsub.azure.servicebus.queues`. See [this guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pubsub configuration.
To set up Azure Service Bus Queues pub/sub, create a component of type `pubsub.azure.servicebus.queues`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration.
> This component uses queues on Azure Service Bus; see the official documentation for the differences between [topics and queues](https://learn.microsoft.com/azure/service-bus-messaging/service-bus-queues-topics-subscriptions).
> For using topics, see the [Azure Service Bus Topics pubsub component]({{< ref "setup-azure-servicebus-topics" >}}).
@ -28,6 +28,8 @@ spec:
  # Required when not using Azure AD Authentication
  - name: connectionString
    value: "Endpoint=sb://{ServiceBusNamespace}.servicebus.windows.net/;SharedAccessKeyName={PolicyName};SharedAccessKey={Key};EntityPath={ServiceBus}"
  # - name: consumerID # Optional
  #   value: channel1
  # - name: timeoutInSec # Optional
  #   value: 60
  # - name: handlerTimeoutInSec # Optional
@ -69,6 +71,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr
| Field | Required | Details | Example |
|--------------------|:--------:|---------|---------|
| `connectionString` | Y | Shared access policy connection string for the Service Bus. Required unless using Azure AD authentication. | See example above
| `consumerID` | N | Consumer ID (consumer tag) organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; for example, a message is processed only once by one of the consumers in the group. If the `consumerID` is not provided, the Dapr runtime sets it to the Dapr application ID (`appID`) value. | `"channel1"`
| `namespaceName`| N | Parameter to set the address of the Service Bus namespace, as a fully-qualified domain name. Required if using Azure AD authentication. | `"namespace.servicebus.windows.net"` |
| `timeoutInSec` | N | Timeout for sending messages and for management operations. Default: `60` |`30`
| `handlerTimeoutInSec`| N | Timeout for invoking the app's handler. Default: `60` | `30`

View File

@ -10,7 +10,7 @@ aliases:
## Component format
To setup Azure Service Bus Topics pubsub create a component of type `pubsub.azure.servicebus.topics`. See [this guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pubsub configuration.
To set up Azure Service Bus Topics pub/sub, create a component of type `pubsub.azure.servicebus.topics`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration.
> This component uses topics on Azure Service Bus; see the official documentation for the differences between [topics and queues](https://learn.microsoft.com/azure/service-bus-messaging/service-bus-queues-topics-subscriptions).
> For using queues, see the [Azure Service Bus Queues pubsub component]({{< ref "setup-azure-servicebus-queues" >}}).
@ -75,7 +75,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr
|--------------------|:--------:|---------|---------|
| `connectionString` | Y | Shared access policy connection string for the Service Bus. Required unless using Azure AD authentication. | See example above
| `namespaceName`| N | Parameter to set the address of the Service Bus namespace, as a fully-qualified domain name. Required if using Azure AD authentication. | `"namespace.servicebus.windows.net"` |
| `consumerID` | N | Consumer ID (a.k.a consumer tag) organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer, i.e. a message is processed only once by one of the consumers in the group. If the consumer ID is not set, the dapr runtime will set it to the dapr application ID. |
| `consumerID` | N | Consumer ID (consumer tag) organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; for example, a message is processed only once by one of the consumers in the group. If the `consumerID` is not provided, the Dapr runtime sets it to the Dapr application ID (`appID`) value. |
| `timeoutInSec` | N | Timeout for sending messages and for management operations. Default: `60` |`30`
| `handlerTimeoutInSec`| N | Timeout for invoking the app's handler. Default: `60` | `30`
| `lockRenewalInSec` | N | Defines the frequency at which buffered message locks will be renewed. Default: `20`. | `20`

View File

@ -1,7 +1,7 @@
---
type: docs
title: "GCP Pub/Sub"
linkTitle: "GCP Pub/Sub"
title: "GCP"
linkTitle: "GCP"
description: "Detailed documentation on the GCP Pub/Sub component"
aliases:
- "/operations/components/setup-pubsub/supported-pubsub/setup-gcp/"
@ -10,7 +10,7 @@ aliases:
## Create a Dapr component
To setup GCP pubsub create a component of type `pubsub.gcp.pubsub`. See [this guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pubsub configuration
To set up GCP pub/sub, create a component of type `pubsub.gcp.pubsub`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration.
```yaml
apiVersion: dapr.io/v1alpha1
@ -72,7 +72,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr
|--------------------|:--------:|---------|---------|
| projectId | Y | GCP project id| `myproject-123`
| endpoint | N | GCP endpoint for the component to use. Only used for local development, for example, with the [GCP Pub/Sub Emulator](https://cloud.google.com/pubsub/docs/emulator). The `endpoint` is unnecessary when running against the GCP production API. | `"http://localhost:8085"`
| `consumerID` | N | The Consumer ID organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; for example, a message is processed only once by one of the consumers in the group. If the consumer ID is not set, the Dapr runtime will set it to the Dapr application ID. The `consumerID`, along with the `topic` provided as part of the request, are used to build the Pub/Sub subscription ID |
| `consumerID` | N | The Consumer ID organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; for example, a message is processed only once by one of the consumers in the group. If the `consumerID` is not provided, the Dapr runtime sets it to the Dapr application ID (`appID`) value. The `consumerID`, along with the `topic` provided as part of the request, are used to build the Pub/Sub subscription ID |
| identityProjectId | N | If the GCP pubsub project is different from the identity project, specify the identity project using this attribute | `"myproject-123"`
| privateKeyId | N | If using explicit credentials, this field should contain the `private_key_id` field from the service account json document | `"my-private-key"`
| privateKey | N | If using explicit credentials, this field should contain the `private_key` field from the service account json | `-----BEGIN PRIVATE KEY-----MIIBVgIBADANBgkqhkiG9w0B`

View File

@ -8,10 +8,7 @@ aliases:
---
## Component format
To setup JetStream pubsub create a component of type `pubsub.jetstream`. See
[this guide]({{< ref
"howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to
create and apply a pubsub configuration.
To set up JetStream pub/sub, create a component of type `pubsub.jetstream`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration.
```yaml
apiVersion: dapr.io/v1alpha1
@ -34,6 +31,8 @@ spec:
value: "/path/to/tls.key"
- name: token # Optional. Used for token based authentication.
value: "my-token"
- name: consumerID
value: "channel1"
- name: name
value: "my-conn-name"
- name: streamName
@ -84,6 +83,7 @@ spec:
| tls_client_cert | N | NATS TLS Client Authentication Certificate | `"/path/to/tls.crt"` |
| tls_client_key | N | NATS TLS Client Authentication Key | `"/path/to/tls.key"` |
| token | N | [NATS token based authentication] | `"my-token"` |
| consumerID | N | Consumer ID (consumer tag) organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; for example, a message is processed only once by one of the consumers in the group. If the `consumerID` is not provided, the Dapr runtime sets it to the Dapr application ID (`appID`) value. | `"channel1"`
| name | N | NATS connection name | `"my-conn-name"` |
| streamName | N | Name of the JetStream Stream to bind to | `"my-stream"` |
| durableName | N | [Durable name] | `"my-durable"` |

View File

@ -9,7 +9,7 @@ aliases:
## Component format
To setup KubeMQ pub/sub, create a component of type `pubsub.kubemq`. See [this guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration.
To set up KubeMQ pub/sub, create a component of type `pubsub.kubemq`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration.
```yaml
apiVersion: dapr.io/v1alpha1
@ -24,6 +24,8 @@ spec:
    value: localhost:50000
  - name: store
    value: false
  - name: consumerID
    value: channel1
```
## Spec metadata fields
@ -32,6 +34,7 @@ spec:
|-------------------|:--------:|-----------------------------------------------------------------------------------------------------------------------------|----------------------------------------|
| address | Y | Address of the KubeMQ server | `"localhost:50000"` |
| store | N | type of pubsub, true: pubsub persisted (EventsStore), false: pubsub in-memory (Events) | `true` or `false` (default is `false`) |
| consumerID | N | Consumer ID (consumer tag) organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; for example, a message is processed only once by one of the consumers in the group. If the `consumerID` is not provided, the Dapr runtime sets it to the Dapr application ID (`appID`) value. | `"channel1"`
| clientID | N | Name for the client ID connection | `sub-client-12345` |
| authToken | N | Auth JWT token for the connection. Check out [KubeMQ Authentication](https://docs.kubemq.io/learn/access-control/authentication) | `ew...` |
| group | N | Subscriber group for load balancing | `g1` |

View File

@ -9,7 +9,7 @@ aliases:
## Component format
To setup MQTT pubsub create a component of type `pubsub.mqtt`. See [this guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pubsub configuration
To set up MQTT pub/sub, create a component of type `pubsub.mqtt`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration.
```yaml
apiVersion: dapr.io/v1alpha1
@ -28,6 +28,8 @@ spec:
value: "false"
- name: cleanSession
value: "false"
- name: consumerID
value: "channel1"
```
{{% alert title="Warning" color="warning" %}}
@ -62,7 +64,7 @@ There is a crucial difference between the two ways of retries:
### Communication using TLS
To configure communication using TLS, ensure that the MQTT broker (e.g. mosquitto) is configured to support certificates and provide the `caCert`, `clientCert`, `clientKey` metadata in the component configuration. For example:
To configure communication using TLS, ensure that the MQTT broker (for example, mosquitto) is configured to support certificates and provide the `caCert`, `clientCert`, `clientKey` metadata in the component configuration. For example:
```yaml
apiVersion: dapr.io/v1alpha1

View File

@ -10,7 +10,7 @@ aliases:
## Component format
To setup a MQTT3 pubsub create a component of type `pubsub.mqtt3`. See [this guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pubsub configuration
To set up a MQTT3 pub/sub, create a component of type `pubsub.mqtt3`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration.
```yaml
apiVersion: dapr.io/v1alpha1
@ -30,6 +30,8 @@ spec:
value: "false"
- name: qos
value: "1"
- name: consumerID
value: "channel1"
```
{{% alert title="Warning" color="warning" %}}
@ -51,7 +53,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr
### Communication using TLS
To configure communication using TLS, ensure that the MQTT broker (e.g. emqx) is configured to support certificates and provide the `caCert`, `clientCert`, `clientKey` metadata in the component configuration. For example:
To configure communication using TLS, ensure that the MQTT broker (for example, emqx) is configured to support certificates and provide the `caCert`, `clientCert`, `clientKey` metadata in the component configuration. For example:
```yaml
apiVersion: dapr.io/v1alpha1

View File

@ -8,7 +8,7 @@ aliases:
---
## Component format
To set up NATS Streaming pub/sub, create a component of type `pubsub.natsstreaming`. See [the how-to guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pubsub configuration.
To set up NATS Streaming pub/sub, create a component of type `pubsub.natsstreaming`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration.
```yaml
apiVersion: dapr.io/v1alpha1
@ -25,6 +25,8 @@ spec:
value: "clusterId"
- name: concurrencyMode
value: parallel
- name: consumerID # Optional. If not supplied, runtime will create one.
value: "channel1"
# below are subscription configuration.
- name: subscriptionType
value: <REPLACE-WITH-SUBSCRIPTION-TYPE> # Required. Allowed values: topic, queue.
@ -66,6 +68,7 @@ NATS Streaming has been [deprecated](https://github.com/nats-io/nats-streaming-s
| natsURL | Y | NATS server address URL | "`nats://localhost:4222`"|
| natsStreamingClusterID | Y | NATS cluster ID |`"clusterId"`|
| subscriptionType | Y | Subscription type. Allowed values `"topic"`, `"queue"` | `"topic"` |
| consumerID | N | Consumer ID (consumer tag) organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; for example, a message is processed only once by one of the consumers in the group. If the `consumerID` is not provided, the Dapr runtime sets it to the Dapr application ID (`appID`) value. | `"channel1"`
| ackWaitTime | N | See [here](https://docs.nats.io/developing-with-nats-streaming/acks#acknowledgements) | `"300ms"`|
| maxInFlight | N | See [here](https://docs.nats.io/developing-with-nats-streaming/acks#acknowledgements) | `"25"` |
| durableSubscriptionName | N | [Durable subscriptions](https://docs.nats.io/developing-with-nats-streaming/durables) identification name. | `"my-durable"`|

View File

@ -9,7 +9,9 @@ aliases:
## Component format
To setup Apache Pulsar pubsub create a component of type `pubsub.pulsar`. See [this guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pubsub configuration. For more information on Apache Pulsar [read the docs](https://pulsar.apache.org/docs/en/concepts-overview/)
To set up Apache Pulsar pub/sub, create a component of type `pubsub.pulsar`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration.
For more information on Apache Pulsar, [read the official docs](https://pulsar.apache.org/docs/en/concepts-overview/).
```yaml
apiVersion: dapr.io/v1alpha1
@ -29,7 +31,7 @@ spec:
  - name: token
    value: "eyJrZXlJZCI6InB1bHNhci1wajU0cXd3ZHB6NGIiLCJhbGciOiJIUzI1NiJ9.eyJzd"
  - name: consumerID
    value: "topic1"
    value: "channel1"
  - name: namespace
    value: "default"
  - name: persistent
@ -60,6 +62,11 @@ spec:
}
```
{{% alert title="Warning" color="warning" %}}
The above example uses secrets as plain strings. It is recommended to use a [secret store for the secrets]({{< ref component-secrets.md >}}). This component supports storing the `token` parameter and any other sensitive parameter and data as Kubernetes Secrets.
{{% /alert %}}
## Spec metadata fields
| Field | Required | Details | Example |
@ -68,7 +75,7 @@ spec:
| enableTLS | N | Enable TLS. Default: `"false"` | `"true"`, `"false"` |
| token | N | Enable Authentication. | [How to create pulsar token](https://pulsar.apache.org/docs/en/security-jwt/#generate-tokens)|
| tenant | N | The topic tenant within the instance. Tenants are essential to multi-tenancy in Pulsar, and spread across clusters. Default: `"public"` | `"public"` |
| consumerID | N | Used to set the subscription name or consumer ID. | `"topic1"`
| consumerID | N | Used to set the subscription name or consumer ID. | `"channel1"`
| namespace | N | The administrative unit of the topic, which acts as a grouping mechanism for related topics. Default: `"default"` | `"default"`
| persistent | N | Pulsar supports two kinds of topics: [persistent](https://pulsar.apache.org/docs/en/concepts-architecture-overview#persistent-storage) and [non-persistent](https://pulsar.apache.org/docs/en/concepts-messaging/#non-persistent-topics). With persistent topics, all messages are durably persisted on disks (if the broker is not standalone, messages are durably persisted on multiple disks), whereas data for non-persistent topics is not persisted to storage disks.
| disableBatching | N | Disable batching. When batching is enabled, the default batch delay is 10 ms and the default batch size is 1000 messages. Setting `disableBatching: true` makes the producer send messages individually. Default: `"false"` | `"true"`, `"false"`|
@ -94,8 +101,8 @@ When invoking the Pulsar pub/sub, it's possible to provide an optional delay que
These optional parameter names are `metadata.deliverAt` or `metadata.deliverAfter`:
- `deliverAt`: Delay message to deliver at a specified time (RFC3339 format), e.g. `"2021-09-01T10:00:00Z"`
- `deliverAfter`: Delay message to deliver after a specified amount of time, e.g.`"4h5m3s"`
- `deliverAt`: Delay message to deliver at a specified time (RFC3339 format); for example, `"2021-09-01T10:00:00Z"`
- `deliverAfter`: Delay message to deliver after a specified amount of time; for example, `"4h5m3s"`
Examples:

View File

@ -9,6 +9,8 @@ aliases:
## Component format
To set up RabbitMQ pub/sub, create a component of type `pubsub.rabbitmq`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration.
```yaml
apiVersion: dapr.io/v1alpha1
kind: Component
@ -73,7 +75,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr
| hostname | N* | The RabbitMQ hostname. *Mutually exclusive with connectionString field | `localhost` |
| username | N* | The RabbitMQ username. *Mutually exclusive with connectionString field | `username` |
| password | N* | The RabbitMQ password. *Mutually exclusive with connectionString field | `password` |
| consumerID | N | Consumer ID a.k.a consumer tag organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer, i.e. a message is processed only once by one of the consumers in the group. If the consumer ID is not set, the dapr runtime will set it to the dapr application ID. |
| consumerID | N | Consumer ID (consumer tag) organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; for example, a message is processed only once by one of the consumers in the group. If the `consumerID` is not provided, the Dapr runtime sets it to the Dapr application ID (`appID`) value. |
| durable | N | Whether or not to use [durable](https://www.rabbitmq.com/queues.html#durability) queues. Defaults to `"false"` | `"true"`, `"false"`
| deletedWhenUnused | N | Whether or not the queue should be configured to [auto-delete](https://www.rabbitmq.com/queues.html). Defaults to `"true"` | `"true"`, `"false"`
| autoAck | N | Whether or not the queue consumer should [auto-ack](https://www.rabbitmq.com/confirms.html) messages. Defaults to `"false"` | `"true"`, `"false"`
@ -87,7 +89,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr
| maxLen | N | The maximum number of messages of a queue and its dead letter queue (if dead letter enabled). If both `maxLen` and `maxLenBytes` are set then both will apply; whichever limit is hit first will be enforced. Defaults to no limit. | `"1000"` |
| maxLenBytes | N | Maximum length in bytes of a queue and its dead letter queue (if dead letter enabled). If both `maxLen` and `maxLenBytes` are set then both will apply; whichever limit is hit first will be enforced. Defaults to no limit. | `"1048576"` |
| exchangeKind | N | Exchange kind of the RabbitMQ exchange. Defaults to `"fanout"`. | `"fanout"`,`"topic"` |
| saslExternal | N | With TLS, should the username be taken from an additional field (e.g. CN.) See [RabbitMQ Authentication Mechanisms](https://www.rabbitmq.com/access-control.html#mechanisms). Defaults to `"false"`. | `"true"`, `"false"` |
| saslExternal | N | With TLS, should the username be taken from an additional field (for example, CN). See [RabbitMQ Authentication Mechanisms](https://www.rabbitmq.com/access-control.html#mechanisms). Defaults to `"false"`. | `"true"`, `"false"` |
| caCert | Required for using TLS | Certificate Authority (CA) certificate in PEM format for verifying server TLS certificates. | `"-----BEGIN CERTIFICATE-----\n<base64-encoded DER>\n-----END CERTIFICATE-----"`
| clientCert | Required for using TLS | TLS client certificate in PEM format. Must be used with `clientKey`. | `"-----BEGIN CERTIFICATE-----\n<base64-encoded DER>\n-----END CERTIFICATE-----"`
| clientKey | Required for using TLS | TLS client key in PEM format. Must be used with `clientCert`. Can be `secretKeyRef` to use a secret reference. | `"-----BEGIN RSA PRIVATE KEY-----\n<base64-encoded PKCS8>\n-----END RSA PRIVATE KEY-----"`
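To make the TLS wiring concrete, here is a minimal sketch of a `pubsub.rabbitmq` component combining a connection string with the three TLS fields. The URL, component name, and secret reference are placeholder assumptions:

```yaml
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: rabbitmq-pubsub   # hypothetical name
spec:
  type: pubsub.rabbitmq
  version: v1
  metadata:
    - name: connectionString
      value: "amqps://user:password@localhost:5671"   # assumed TLS endpoint
    - name: consumerID
      value: "channel1"
    - name: caCert
      value: "-----BEGIN CERTIFICATE-----\n<base64-encoded DER>\n-----END CERTIFICATE-----"
    - name: clientCert
      value: "-----BEGIN CERTIFICATE-----\n<base64-encoded DER>\n-----END CERTIFICATE-----"
    - name: clientKey
      secretKeyRef:
        name: rabbitmq-tls   # hypothetical secret
        key: clientKey
```

Keeping the client key behind a `secretKeyRef` follows the warning above about secrets as plain strings.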
View File
@ -9,7 +9,7 @@ aliases:
## Component format
To setup Redis Streams pubsub create a component of type `pubsub.redis`. See [this guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pubsub configuration.
To set up Redis Streams pub/sub, create a component of type `pubsub.redis`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration.
```yaml
apiVersion: dapr.io/v1alpha1
View File
@ -8,7 +8,7 @@ aliases:
---
## Component format
To setup RocketMQ pubsub, create a component of type `pubsub.rocketmq`. See [this guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pubsub configuration.
To set up RocketMQ pub/sub, create a component of type `pubsub.rocketmq`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration.
```yaml
apiVersion: dapr.io/v1alpha1
@ -25,6 +25,8 @@ spec:
value: dapr-rocketmq-test-g-c
- name: producerGroup
value: dapr-rocketmq-test-g-p
- name: consumerID
value: topic
- name: nameSpace
value: dapr-test
- name: nameServer
@ -47,6 +49,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr
| instanceName | N | Instance name | `time.Now().String()` | `dapr-rocketmq-test` |
| consumerGroup | N | Consumer group name. Recommended. If `consumerGroup` is `null`, `groupName` is used. | | `dapr-rocketmq-test-g-c` |
| producerGroup (consumerID) | N | Producer group name. Recommended. If `producerGroup` is `null`, `consumerID` is used. If `consumerID` is also null, `groupName` is used. | | `dapr-rocketmq-test-g-p` |
| consumerID | N | Consumer ID (consumer tag) organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; that is, a message is processed only once by one of the consumers in the group. If the `consumerID` is not provided, the Dapr runtime sets it to the Dapr application ID (`appID`) value. | | `"channel1"`
| groupName | N | Consumer/Producer group name. **Deprecated**. | | `dapr-rocketmq-test-g` |
| nameSpace | N | RocketMQ namespace | | `dapr-rocketmq` |
| nameServerDomain | N | RocketMQ name server domain | | `https://my-app.net:8080/nsaddr` |
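Tying the group-related fields together, a minimal sketch of the component; the name server address and group names are illustrative:

```yaml
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: rocketmq-pubsub   # hypothetical name
spec:
  type: pubsub.rocketmq
  version: v1
  metadata:
    - name: nameServer
      value: "127.0.0.1:9876"   # assumed local name server
    - name: consumerGroup
      value: dapr-rocketmq-test-g-c
    - name: producerGroup
      value: dapr-rocketmq-test-g-p
    - name: consumerID
      value: channel1
    - name: nameSpace
      value: dapr-test
```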
View File
@ -9,7 +9,7 @@ aliases:
## Component format
To setup Solace-AMQP pub/sub, create a component of type `pubsub.solace.amqp`. See [this guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration.
To set up Solace-AMQP pub/sub, create a component of type `pubsub.solace.amqp`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration.
```yaml
apiVersion: dapr.io/v1alpha1
@ -26,6 +26,8 @@ spec:
value: 'default'
- name: password
value: 'default'
- name: consumerID
value: 'channel1'
```
{{% alert title="Warning" color="warning" %}}
@ -39,6 +41,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr
| url | Y | Address of the AMQP broker. Can be `secretKeyRef` to use a secret reference. <br> Use the **`amqp://`** URI scheme for non-TLS communication. <br> Use the **`amqps://`** URI scheme for TLS communication. | `"amqp://host.domain[:port]"`
| username | Y | The username to connect to the broker. Only required if anonymous is not specified or set to `false`. | `default`
| password | Y | The password to connect to the broker. Only required if anonymous is not specified or set to `false`. | `default`
| consumerID | N | Consumer ID (consumer tag) organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; that is, a message is processed only once by one of the consumers in the group. If the `consumerID` is not provided, the Dapr runtime sets it to the Dapr application ID (`appID`) value. | `"channel1"`
| anonymous | N | To connect to the broker without credential validation. Only works if enabled on the broker. A username and password would not be required if this is set to `true`. | `true`
| caCert | Required for using TLS | Certificate Authority (CA) certificate in PEM format for verifying server TLS certificates. | `"-----BEGIN CERTIFICATE-----\n<base64-encoded DER>\n-----END CERTIFICATE-----"`
| clientCert | Required for using TLS | TLS client certificate in PEM format. Must be used with `clientKey`. | `"-----BEGIN CERTIFICATE-----\n<base64-encoded DER>\n-----END CERTIFICATE-----"`
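As a sketch, a TLS-enabled `pubsub.solace.amqp` component pulling these fields together; the broker URL and credentials are placeholders:

```yaml
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: solace-pubsub   # hypothetical name
spec:
  type: pubsub.solace.amqp
  version: v1
  metadata:
    - name: url
      value: "amqps://host.domain:5671"   # amqps:// scheme enables TLS
    - name: username
      value: 'default'
    - name: password
      value: 'default'
    - name: consumerID
      value: 'channel1'
    - name: caCert
      value: "-----BEGIN CERTIFICATE-----\n<base64-encoded DER>\n-----END CERTIFICATE-----"
    - name: clientCert
      value: "-----BEGIN CERTIFICATE-----\n<base64-encoded DER>\n-----END CERTIFICATE-----"
    - name: clientKey
      value: "-----BEGIN RSA PRIVATE KEY-----\n<base64-encoded PKCS8>\n-----END RSA PRIVATE KEY-----"
```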
View File
@ -1,15 +1,17 @@
---
type: docs
title: "SQL Server"
linkTitle: "SQL Server"
description: Detailed information on the SQL Server state store component
title: "Microsoft SQL Server & Azure SQL"
linkTitle: "Microsoft SQL Server & Azure SQL"
description: Detailed information on the Microsoft SQL Server state store component
aliases:
- "/operations/components/setup-state-store/supported-state-stores/setup-sqlserver/"
---
## Component format
To setup SQL Server state store create a component of type `state.sqlserver`. See [this guide]({{< ref "howto-get-save-state.md#step-1-setup-a-state-store" >}}) on how to create and apply a state store configuration.
This state store component can be used with both [Microsoft SQL Server](https://learn.microsoft.com/sql/) and [Azure SQL](https://learn.microsoft.com/azure/azure-sql/).
To set up this state store, create a component of type `state.sqlserver`. See [this guide]({{< ref "howto-get-save-state.md#step-1-setup-a-state-store" >}}) on how to create and apply a state store configuration.
```yaml
@ -21,30 +23,42 @@ spec:
  type: state.sqlserver
  version: v1
  metadata:
  - name: connectionString
    value: <REPLACE-WITH-CONNECTION-STRING> # Required.
  - name: tableName
    value: <REPLACE-WITH-TABLE-NAME> # Optional. defaults to "state"
  - name: keyType
    value: <REPLACE-WITH-KEY-TYPE> # Optional. defaults to "string"
  - name: keyLength
    value: <KEY-LENGTH> # Optional. defaults to 200. Can be used with "string" keyType
  - name: schema
    value: <SCHEMA> # Optional. defaults to "dbo"
  - name: indexedProperties
    value: <INDEXED-PROPERTIES> # Optional. List of IndexedProperties.
  - name: metadataTableName # Optional. Name of the table where to store metadata used by Dapr
    value: "dapr_metadata"
  - name: cleanupIntervalInSeconds # Optional. Cleanup interval in seconds, to remove expired rows
    value: 300
  # Authenticate using SQL Server credentials
  - name: connectionString
    value: |
      Server=myServerName\myInstanceName;Database=myDataBase;User Id=myUsername;Password=myPassword;

  # Authenticate with Azure AD (Azure SQL only)
  # "useAzureAD" must be set to "true"
  - name: useAzureAD
    value: true
  # Connection string or URL of the Azure SQL database, optionally containing the database
  - name: connectionString
    value: |
      sqlserver://myServerName.database.windows.net:1433?database=myDataBase

  # Other optional fields (listing default values)
  - name: tableName
    value: "state"
  - name: metadataTableName
    value: "dapr_metadata"
  - name: schema
    value: "dbo"
  - name: keyType
    value: "string"
  - name: keyLength
    value: "200"
  - name: indexedProperties
    value: ""
  - name: cleanupIntervalInSeconds
    value: "3600"
```
{{% alert title="Warning" color="warning" %}}
The above example uses secrets as plain strings. It is recommended to use a secret store for the secrets as described [here]({{< ref component-secrets.md >}}).
{{% /alert %}}
If you wish to use SQL server as an [actor state store]({{< ref "state_api.md#configuring-state-store-for-actors" >}}), append the following to the yaml.
If you wish to use SQL Server as an [actor state store]({{< ref "state_api.md#configuring-state-store-for-actors" >}}), append the following to the metadata:
```yaml
- name: actorStateStore
@ -53,24 +67,43 @@ If you wish to use SQL server as an [actor state store]({{< ref "state_api.md#co
## Spec metadata fields
### Authenticate using SQL Server credentials
The following metadata options are **required** to authenticate using SQL Server credentials. This is supported on both SQL Server and Azure SQL.
| Field | Required | Details | Example |
|--------|:--------:|---------|---------|
| `connectionString` | Y | The connection string used to connect.<br>If the connection string contains the database, it must already exist. Otherwise, if the database is omitted, a default database named "Dapr" is created. | `"Server=myServerName\myInstanceName;Database=myDataBase;User Id=myUsername;Password=myPassword;"` |
### Authenticate using Azure AD
Authenticating with Azure AD is supported with Azure SQL only. All authentication methods supported by Dapr can be used, including client credentials ("service principal") and Managed Identity.
| Field | Required | Details | Example |
|--------|:--------:|---------|---------|
| `useAzureAD` | Y | Must be set to `true` to enable the component to retrieve access tokens from Azure AD. | `"true"` |
| `connectionString` | Y | The connection string or URL of the Azure SQL database, **without credentials**.<br>If the connection string contains the database, it must already exist. Otherwise, if the database is omitted, a default database named "Dapr" is created. | `"sqlserver://myServerName.database.windows.net:1433?database=myDataBase"` |
| `azureTenantId` | N | ID of the Azure AD tenant | `"cd4b2887-304c-47e1-b4d5-65447fdd542b"` |
| `azureClientId` | N | Client ID (application ID) | `"c7dd251f-811f-4ba2-a905-acd4d3f8f08b"` |
| `azureClientSecret` | N | Client secret (application password) | `"Ecy3XG7zVZK3/vl/a2NSB+a1zXLa8RnMum/IgD0E"` |
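A minimal sketch of the Azure AD path using client credentials; the IDs are placeholders, and the secret reference is a hypothetical example in line with the warning above about plain-string secrets:

```yaml
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: sqlserver-state   # hypothetical name
spec:
  type: state.sqlserver
  version: v1
  metadata:
    - name: useAzureAD
      value: "true"
    - name: connectionString
      value: "sqlserver://myServerName.database.windows.net:1433?database=myDataBase"
    # Omit the three fields below to authenticate with a Managed Identity instead
    - name: azureTenantId
      value: "<REPLACE-WITH-TENANT-ID>"
    - name: azureClientId
      value: "<REPLACE-WITH-CLIENT-ID>"
    - name: azureClientSecret
      secretKeyRef:
        name: sql-azure-ad   # hypothetical secret
        key: clientSecret
```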
### Other metadata options
| Field | Required | Details | Example |
|--------------------|:--------:|---------|---------|
| connectionString | Y | The connection string used to connect. If the connection string contains the database it must already exist. If the database is omitted a default database named `"Dapr"` is created. | `"Server=myServerName\myInstanceName;Database=myDataBase;User Id=myUsername;Password=myPassword;"`
| tableName | N | The name of the table to use. Alpha-numeric with underscores. Defaults to `"state"` | `"table_name"`
| keyType | N | The type of key used. Defaults to `"string"` | `"string"`
| keyLength | N | The max length of key. Used along with `"string"` keytype. Defaults to `"200"` | `"200"`
| schema | N | The schema to use. Defaults to `"dbo"` | `"dapr"`,`"dbo"`
| indexedProperties | N | List of IndexedProperties. | `'[{"column": "transactionid", "property": "id", "type": "int"}, {"column": "customerid", "property": "customer", "type": "nvarchar(100)"}]'`
| actorStateStore | N | Indicates that Dapr should configure this component for the actor state store ([more information]({{< ref "state_api.md#configuring-state-store-for-actors" >}})). | `"true"`
| metadataTableName | N | Name of the table Dapr uses to store a few metadata properties. Defaults to `dapr_metadata`. | `"dapr_metadata"`
| cleanupIntervalInSeconds | N | Interval, in seconds, to clean up rows with an expired TTL. Default: `3600` (i.e. 1 hour). Setting this to values <=0 disables the periodic cleanup. | `1800`, `-1`
| `tableName` | N | The name of the table to use. Alpha-numeric with underscores. Defaults to `"state"` | `"table_name"`
| `metadataTableName` | N | Name of the table Dapr uses to store a few metadata properties. Defaults to `dapr_metadata`. | `"dapr_metadata"`
| `keyType` | N | The type of key used. Supported values: `"string"` (default), `"uuid"`, `"integer"`.| `"string"`
| `keyLength` | N | The max length of key. Ignored if "keyType" is not `string`. Defaults to `"200"` | `"200"`
| `schema` | N | The schema to use. Defaults to `"dbo"` | `"dapr"`,`"dbo"`
| `indexedProperties` | N | List of indexed properties, as a string containing a JSON document. | `'[{"column": "transactionid", "property": "id", "type": "int"}, {"column": "customerid", "property": "customer", "type": "nvarchar(100)"}]'`
| `actorStateStore` | N | Indicates that Dapr should configure this component for the actor state store ([more information]({{< ref "state_api.md#configuring-state-store-for-actors" >}})). | `"true"`
| `cleanupIntervalInSeconds` | N | Interval, in seconds, to clean up rows with an expired TTL. Default: `"3600"` (i.e. 1 hour). Setting this to values <=0 disables the periodic cleanup. | `"1800"`, `"-1"`
## Create Azure SQL instance
## Create a Microsoft SQL Server/Azure SQL instance
**Note: SQL Server state store also supports SQL Server running on VMs and in Docker.**
[Follow the instructions](https://docs.microsoft.com/azure/azure-sql/database/single-database-create-quickstart?view=azuresql&tabs=azure-portal) from the Azure documentation on how to create a SQL database. The database must be created before Dapr consumes it.
To set up SQL Server as a state store, you need the following properties:
@ -104,6 +137,7 @@ CREATE CLUSTERED INDEX expiredate_idx ON state(ExpireDate ASC)
```
## Related links
- [Basic schema for a Dapr component]({{< ref component-schema >}})
- Read [this guide]({{< ref "howto-get-save-state.md#step-2-save-and-retrieve-a-single-state" >}}) for instructions on configuring state store components
- [State management building block]({{< ref state-management >}})
View File
@ -12,7 +12,7 @@ The following table lists the environment variables used by the Dapr runtime, CL
| -------------------- | ---------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| APP_ID | Your application | The id for your application, used for service discovery |
| APP_PORT | Dapr sidecar | The port your application is listening on |
| APP_API_TOKEN | Your application | The token used by the application to authenticate requests from Dapr API. Read [authenticate requests from Dapr using token authentication]({{< ref app-api-token >}}) for more information. |
| DAPR_HTTP_PORT | Your application | The HTTP port that the Dapr sidecar is listening on. Your application should use this variable to connect to Dapr sidecar instead of hardcoding the port value. Set by the Dapr CLI run command for self-hosted or injected by the `dapr-sidecar-injector` into all the containers in the pod. |
| DAPR_GRPC_PORT | Your application | The gRPC port that the Dapr sidecar is listening on. Your application should use this variable to connect to Dapr sidecar instead of hardcoding the port value. Set by the Dapr CLI run command for self-hosted or injected by the `dapr-sidecar-injector` into all the containers in the pod. |
| DAPR_API_TOKEN | Dapr sidecar | The token used for Dapr API authentication for requests from the application. [Enable API token authentication in Dapr]({{< ref api-token >}}). |
@ -24,4 +24,6 @@ The following table lists the environment variables used by the Dapr runtime, CL
| DAPR_HELM_REPO_PASSWORD | A password for a private Helm chart |The password required to access the private Dapr helm chart. If it can be accessed publicly, this env variable does not need to be set|
| OTEL_EXPORTER_OTLP_ENDPOINT | OpenTelemetry Tracing | Sets the Open Telemetry (OTEL) server address, turns on tracing. (Example: `http://localhost:4318`) |
| OTEL_EXPORTER_OTLP_INSECURE | OpenTelemetry Tracing | Sets the connection to the endpoint as unencrypted. (`true`, `false`) |
| OTEL_EXPORTER_OTLP_PROTOCOL | OpenTelemetry Tracing | The OTLP transport protocol to use. (`grpc`, `http/protobuf`, `http/json`) |
| DAPR_COMPONENTS_SOCKETS_FOLDER | Dapr runtime and the .NET, Go, and Java pluggable component SDKs | The location or path where Dapr looks for Pluggable Components Unix Domain Socket files. If unset, this location defaults to `/tmp/dapr-components-sockets` |
| DAPR_COMPONENTS_SOCKETS_EXTENSION | .NET and Java pluggable component SDKs | A per-SDK configuration that indicates the default file extension applied to socket files created by the SDKs. Not a Dapr-enforced behavior. |
View File
@ -1,9 +1,9 @@
---
type: docs
title: "Component schema"
linkTitle: "Component schema"
weight: 100
description: "The basic schema for a Dapr component"
title: "Component spec"
linkTitle: "Component"
weight: 1000
description: "The basic spec for a Dapr component"
---
Dapr defines and registers components using a [CustomResourceDefinition](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/). All components are defined as a CRD and can be applied to any hosting environment where Dapr is running, not just Kubernetes.
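In outline, every component follows the same basic shape; the placeholders below stand in for a specific component's type and metadata:

```yaml
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: <REPLACE-WITH-COMPONENT-NAME>
spec:
  type: <REPLACE-WITH-COMPONENT-TYPE>   # for example, state.redis
  version: v1
  metadata:
    - name: <REPLACE-WITH-METADATA-NAME>
      value: <REPLACE-WITH-METADATA-VALUE>
```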
View File
@ -1,9 +1,9 @@
---
type: docs
title: "HTTPEndpoint spec"
linkTitle: "HTTPEndpoint spec"
description: "The HTTPEndpoint resource spec"
weight: 300
linkTitle: "HTTPEndpoint"
description: "The basic spec for a Dapr HTTPEndpoint resource"
weight: 4000
aliases:
- "/operations/httpEndpoints/"
---
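For orientation, a minimal `HTTPEndpoint` resource looks roughly like this; `baseUrl` and `headers` are the core fields, and all values are placeholders:

```yaml
apiVersion: dapr.io/v1alpha1
kind: HTTPEndpoint
metadata:
  name: <REPLACE-WITH-RESOURCE-NAME>
spec:
  baseUrl: <REPLACE-WITH-BASE-URL>   # for example, https://api.example.com
  headers: # Optional
    - name: <REPLACE-WITH-A-HEADER-NAME>
      value: <REPLACE-WITH-A-HEADER-VALUE>
```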
View File
@ -0,0 +1,63 @@
---
type: docs
title: "Resiliency spec"
linkTitle: "Resiliency"
weight: 3000
description: "The basic spec for a Dapr resiliency resource"
---
The `Resiliency` Dapr resource allows you to define and apply fault tolerance resiliency policies. Resiliency specs are applied when the Dapr sidecar starts.
```yml
apiVersion: dapr.io/v1alpha1
kind: Resiliency
metadata:
  name: <REPLACE-WITH-RESOURCE-NAME>
version: v1alpha1
scopes:
  - <REPLACE-WITH-SCOPED-APPIDS>
spec:
  policies: # Required
    timeouts:
      timeoutName: <REPLACE-WITH-TIME-VALUE> # Replace with any unique name
    retries:
      retryName: # Replace with any unique name
        policy: <REPLACE-WITH-VALUE>
        duration: <REPLACE-WITH-VALUE>
        maxInterval: <REPLACE-WITH-VALUE>
        maxRetries: <REPLACE-WITH-VALUE>
    circuitBreakers:
      circuitBreakerName: # Replace with any unique name
        maxRequests: <REPLACE-WITH-VALUE>
        timeout: <REPLACE-WITH-VALUE>
        trip: <REPLACE-WITH-CONSECUTIVE-FAILURE-VALUE>
  targets: # Required
    apps:
      appID: # Replace with scoped app ID
        timeout: <REPLACE-WITH-TIMEOUT-NAME>
        retry: <REPLACE-WITH-RETRY-NAME>
        circuitBreaker: <REPLACE-WITH-CIRCUIT-BREAKER-NAME>
    actors:
      myActorType:
        timeout: <REPLACE-WITH-TIMEOUT-NAME>
        retry: <REPLACE-WITH-RETRY-NAME>
        circuitBreaker: <REPLACE-WITH-CIRCUIT-BREAKER-NAME>
        circuitBreakerCacheSize: <REPLACE-WITH-VALUE>
    components:
      componentName: # Replace with your component name
        outbound:
          timeout: <REPLACE-WITH-TIMEOUT-NAME>
          retry: <REPLACE-WITH-RETRY-NAME>
          circuitBreaker: <REPLACE-WITH-CIRCUIT-BREAKER-NAME>
```
## Spec fields
| Field | Required | Details | Example |
|--------------------|:--------:|---------|---------|
| policies | Y | The configuration of resiliency policies, including: <br><ul><li>`timeouts`</li><li>`retries`</li><li>`circuitBreakers`</li></ul> <br> [See more examples with all of the built-in policies]({{< ref policies.md >}}) | timeout: `general`<br>retry: `retryForever`<br>circuit breaker: `simpleCB` |
| targets | Y | The configuration for the applications, actors, or components that use the resiliency policies. <br>[See more examples in the resiliency targets guide]({{< ref targets.md >}}) | `apps` <br>`components`<br>`actors` |
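As a filled-in sketch reusing the sample policy names from the table (`general`, `retryForever`, `simpleCB`); the durations and thresholds are illustrative, not defaults:

```yml
apiVersion: dapr.io/v1alpha1
kind: Resiliency
metadata:
  name: myresiliency   # hypothetical name
spec:
  policies:
    timeouts:
      general: 5s
    retries:
      retryForever:
        policy: constant
        duration: 5s
        maxRetries: -1   # retry indefinitely
    circuitBreakers:
      simpleCB:
        maxRequests: 1
        timeout: 60s
        trip: consecutiveFailures > 5
  targets:
    apps:
      appB:   # hypothetical scoped app ID
        timeout: general
        retry: retryForever
        circuitBreaker: simpleCB
```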
## Related links
[Learn more about resiliency policies and targets]({{< ref resiliency-overview.md >}})
View File
@ -0,0 +1,88 @@
---
type: docs
title: "Subscription spec"
linkTitle: "Subscription"
weight: 2000
description: "The basic spec for a Dapr subscription"
---
The `Subscription` Dapr resource allows you to subscribe declaratively to a topic using an external component YAML file. This guide demonstrates two subscription API versions:
- `v2alpha1` (default spec)
- `v1alpha1` (deprecated)
## `v2alpha1`
The following is the basic `v2alpha1` spec for a `Subscription` resource. `v2alpha1` is the default spec for the subscription API.
```yml
apiVersion: dapr.io/v2alpha1
kind: Subscription
metadata:
  name: <REPLACE-WITH-NAME>
spec:
  version: v2alpha1
  topic: <REPLACE-WITH-TOPIC-NAME> # Required
  routes: # Required
    - rules:
        - match: <REPLACE-WITH-EVENT-TYPE>
          path: <REPLACE-WITH-PATH>
  pubsubname: <REPLACE-WITH-PUBSUB-NAME> # Required
  deadlettertopic: <REPLACE-WITH-TOPIC-NAME> # Optional
  bulksubscribe: # Optional
    - enabled: <REPLACE-WITH-BOOLEAN-VALUE>
    - maxmessages: <REPLACE-WITH-VALUE>
    - maxawaitduration: <REPLACE-WITH-VALUE>
scopes:
  - <REPLACE-WITH-SCOPED-APPIDS>
```
### Spec fields
| Field | Required | Details | Example |
|--------------------|:--------:|---------|---------|
| topic | Y | The name of the topic to which your component subscribes. | `orders` |
| routes | Y | The routes configuration for this topic, including the conditions for sending a message to a specific path. Includes the following fields: <br><ul><li>match: _Optional._ The CEL expression used to match the event. If not specified, the route is considered the default. </li><li>path: The path for events that match this rule; the endpoint to which all matching topic messages are sent. </li></ul> | `match: event.type == "widget"` <br>`path: /widgets` |
| pubsubname | Y | The name of your pub/sub component. | `pubsub` |
| deadlettertopic | N | The name of the dead letter topic that forwards undeliverable messages. | `poisonMessages` |
| bulksubscribe | N | Enable bulk subscribe properties. | `true`, `false` |
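As a concrete illustration of the fields above; the topic, path, and scope names are invented for the example:

```yml
apiVersion: dapr.io/v2alpha1
kind: Subscription
metadata:
  name: order-subscription   # hypothetical name
spec:
  topic: orders
  routes:
    - rules:
        - match: event.type == "widget"
          path: /widgets
  pubsubname: pubsub
  deadlettertopic: poisonMessages
scopes:
  - checkout   # hypothetical scoped app ID
```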
## `v1alpha1`
The following is the basic version `v1alpha1` spec for a `Subscription` resource. `v1alpha1` is now deprecated.
```yml
apiVersion: dapr.io/v1alpha1
kind: Subscription
metadata:
  name: <REPLACE-WITH-RESOURCE-NAME>
spec:
  version: v1alpha1
  topic: <REPLACE-WITH-TOPIC-NAME> # Required
  route: <REPLACE-WITH-ROUTE-NAME> # Required
  pubsubname: <REPLACE-WITH-PUBSUB-NAME> # Required
  deadLetterTopic: <REPLACE-WITH-DEAD-LETTER-TOPIC-NAME> # Optional
  bulkSubscribe: # Optional
    - enabled: <REPLACE-WITH-BOOLEAN-VALUE>
    - maxmessages: <REPLACE-WITH-VALUE>
    - maxawaitduration: <REPLACE-WITH-VALUE>
scopes:
  - <REPLACE-WITH-SCOPED-APPIDS>
```
### Spec fields
| Field | Required | Details | Example |
|--------------------|:--------:|---------|---------|
| topic | Y | The name of the topic to which your component subscribes. | `orders` |
| route | Y | The endpoint to which all topic messages are sent. | `/checkout` |
| pubsubname | Y | The name of your pub/sub component. | `pubsub` |
| deadlettertopic | N | The name of the dead letter topic that forwards undeliverable messages. | `poisonMessages` |
| bulksubscribe | N | Enable bulk subscribe properties. | `true`, `false` |
## Related links
- [Learn more about the declarative subscription method]({{< ref "subscription-methods.md#declarative-subscriptions" >}})
- [Learn more about dead letter topics]({{< ref pubsub-deadletter.md >}})
- [Learn more about routing messages]({{< ref "howto-route-messages.md#declarative-subscription" >}})
- [Learn more about bulk subscribing]({{< ref pubsub-bulk.md >}})
View File
@ -20,7 +20,7 @@
etag: true
ttl: true
query: true
- component: Azure SQL Server
- component: Microsoft SQL Server
link: setup-sqlserver
state: Stable
version: v1
View File
@ -1,19 +1,13 @@
<script src="/js/copy-code-button.js"></script>
{{ with .Site.Params.algolia_docsearch }}
<script src="https://cdn.jsdelivr.net/npm/docsearch.js@2.6.3/dist/cdn/docsearch.min.js"></script>
<script>
    docsearch({
        // Your apiKey and indexName will be given to you once
        // we create your config
        apiKey: '54ae43aa28ce8f00c54c8d5f544d29b9',
        indexName: 'crawler_dapr',
        appId: 'O0QLQGNF38',
        // Replace inputSelector with a CSS selector
        // matching your search input
        inputSelector: '.td-search-input',
        // Set debug to true to inspect the dropdown
        debug: false,
    });
</script>
{{ end }}

{{ with .Site.Params.algolia_docsearch }}
<script src="https://cdn.jsdelivr.net/npm/@docsearch/js@3"></script>
<script type="text/javascript">
    docsearch({
        container: '#docsearch',
        appId: 'O0QLQGNF38',
        apiKey: '54ae43aa28ce8f00c54c8d5f544d29b9',
        indexName: 'daprdocs',
    });
</script>
{{ end }}
<script src="/js/copy-code-button.js"></script>
View File
@ -1,3 +1,3 @@
{{ with .Site.Params.algolia_docsearch }}
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/docsearch.js@2/dist/cdn/docsearch.min.css" />
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/@docsearch/css@3" />
{{ end }}
View File
@ -0,0 +1,30 @@
{{ if .Site.Params.gcs_engine_id -}}
<input type="search" class="form-control td-search-input" placeholder="&#xf002; {{ T "ui_search" }}" aria-label="{{ T "ui_search" }}" autocomplete="off">
{{ else if .Site.Params.algolia_docsearch -}}
<div id="docsearch"></div>
{{ else if .Site.Params.offlineSearch -}}
{{ $offlineSearchIndex := resources.Get "json/offline-search-index.json" | resources.ExecuteAsTemplate "offline-search-index.json" . -}}
{{ if hugo.IsProduction -}}
{{/* Use `md5` as fingerprint hash function to shorten the file name and avoid a `file name too long` error. */ -}}
{{ $offlineSearchIndex = $offlineSearchIndex | fingerprint "md5" -}}
{{ end -}}
{{ $offlineSearchLink := $offlineSearchIndex.RelPermalink -}}
<input
type="search"
class="form-control td-search-input"
placeholder="&#xf002; {{ T "ui_search" }}"
aria-label="{{ T "ui_search" }}"
autocomplete="off"
{{/*
The data attribute name of the json file URL must end with `src` since
Hugo's absurlreplacer requires `src`, `href`, `action` or `srcset` suffix for the attribute name.
If the absurlreplacer is not applied, the URL will start with `/`.
It causes the JSON file loading error when relativeURLs is enabled.
https://github.com/google/docsy/issues/181
*/}}
data-offline-search-index-json-src="{{ $offlineSearchLink }}"
data-offline-search-base-href="/"
data-offline-search-max-results="{{ .Site.Params.offlineSearchMaxResults | default 10 }}"
>
{{ end -}}
View File
@ -71,7 +71,7 @@ spec:
spec:
containers:
- name: otel-collector
image: otel/opentelemetry-collector-contrib:0.50.0
image: otel/opentelemetry-collector-contrib:0.77.0
command:
- "/otelcol-contrib"
- "--config=/conf/otel-collector-config.yaml"
View File
@ -1,49 +1,35 @@
function addCopyButtons(clipboard) {
    document.querySelectorAll('pre > code').forEach(function(codeBlock) {
        var button = document.createElement('button');
        button.className = 'copy-code-button';
        button.type = 'button';
        button.innerText = 'Copy';

        button.addEventListener('click', function() {
            clipboard.writeText(codeBlock.textContent).then(
                function() {
                    button.blur();
                    button.innerText = 'Copied!';
                    setTimeout(function() {
                        button.innerText = 'Copy';
                    }, 2000);
                },
                function(error) {
                    button.innerText = 'Error';
                    console.error(error);
                }
            );
        });

        var pre = codeBlock.parentNode;
        if (pre.parentNode.classList.contains('highlight')) {
            var highlight = pre.parentNode;
            highlight.parentNode.insertBefore(button, highlight);
        } else {
            pre.parentNode.insertBefore(button, pre);
        }
    });
}

if (navigator && navigator.clipboard) {
    addCopyButtons(navigator.clipboard);
} else {
    var script = document.createElement('script');
    script.src = 'https://cdnjs.cloudflare.com/ajax/libs/clipboard-polyfill/2.7.0/clipboard-polyfill.promise.js';
    script.integrity = 'sha256-waClS2re9NUbXRsryKoof+F9qc1gjjIhc2eT7ZbIv94=';
    script.crossOrigin = 'anonymous';
    script.onload = function() {
        addCopyButtons(clipboard);
    };
    document.body.appendChild(script);
}

const highlightClass = document.querySelectorAll('.highlight');

highlightClass.forEach(element => {
    const copyIcon = document.createElement('i');
    copyIcon.classList.add('fas', 'fa-copy', 'copy-icon');
    copyIcon.style.color = 'white';
    copyIcon.style.display = 'none';
    element.appendChild(copyIcon);

    element.addEventListener('mouseenter', () => {
        copyIcon.style.display = 'inline';
    });

    element.addEventListener('mouseleave', () => {
        copyIcon.style.display = 'none';
        copyIcon.classList.replace('fa-check', 'fa-copy');
    });

    copyIcon.addEventListener('click', async () => {
        const selection = window.getSelection();
        const range = document.createRange();
        range.selectNodeContents(element);
        selection.removeAllRanges();
        selection.addRange(range);

        try {
            await navigator.clipboard.writeText(selection.toString());
            console.log('Text copied to clipboard');
            copyIcon.classList.replace('fa-copy', 'fa-check');
            selection.removeAllRanges();
        } catch (error) {
            console.error('Failed to copy: ', error);
        }
    });
});