Merge branch 'issue_3435' of https://github.com/hhunter-ms/docs into issue_3435

This commit is contained in:
Hannah Hunter 2023-07-26 15:42:28 -04:00
commit 6a008a91da
103 changed files with 1729 additions and 682 deletions

.github/scripts/algolia.py vendored Normal file
View File

@ -0,0 +1,118 @@
import os
import sys
import json
from bs4 import BeautifulSoup
from algoliasearch.search_client import SearchClient

url = "docs.dapr.io"
if len(sys.argv) > 1:
    starting_directory = os.path.join(os.getcwd(), str(sys.argv[1]))
else:
    starting_directory = os.getcwd()

ALGOLIA_APP_ID = os.getenv('ALGOLIA_APP_ID')
ALGOLIA_API_KEY = os.getenv('ALGOLIA_API_WRITE_KEY')
ALGOLIA_INDEX_NAME = os.getenv('ALGOLIA_INDEX_NAME')

client = SearchClient.create(ALGOLIA_APP_ID, ALGOLIA_API_KEY)
index = client.init_index(ALGOLIA_INDEX_NAME)

excluded_files = [
    "404.html",
]

excluded_directories = [
    "zh-hans",
]

# Sections are ranked in the order they should appear in search results;
# a page's breadcrumb depth (subrank) breaks ties within a section.
rankings = {
    "Getting started": 0,
    "Concepts": 100,
    "Developing applications": 200,
    "Operations": 300,
    "Reference": 400,
    "Contributing": 500,
    "Home": 600
}

def scan_directory(directory: str, pages: list):
    """Recursively collect the paths of all indexable HTML pages."""
    if os.path.basename(directory) in excluded_directories:
        print(f'Skipping directory: {directory}')
        return
    for file in os.listdir(directory):
        path = os.path.join(directory, file)
        if os.path.isfile(path):
            if file.endswith(".html") and file not in excluded_files:
                with open(path, encoding="utf8") as f:
                    content = f.read()
                if '<!-- DISABLE_ALGOLIA -->' not in content:
                    print(f'Indexing: {path}')
                    pages.append(path)
                else:
                    print(f'Skipping hidden page: {path}')
        else:
            scan_directory(path, pages)

def parse_file(path: str):
    """Build an Algolia record (hierarchy, rank, URL, and text) for one page."""
    data = {}
    data["hierarchy"] = {}
    data["rank"] = 999
    data["subrank"] = 99
    data["type"] = "lvl2"
    data["lvl0"] = ""
    data["lvl1"] = ""
    data["lvl2"] = ""
    data["lvl3"] = ""
    text = ""
    subrank = 0
    with open(path, "r", errors='ignore') as file:
        content = file.read()
    soup = BeautifulSoup(content, "html.parser")
    for meta in soup.find_all("meta"):
        if meta.get("name") == "description":
            data["lvl2"] = meta.get("content")
            data["hierarchy"]["lvl1"] = meta.get("content")
        elif meta.get("property") == "og:title":
            data["lvl0"] = meta.get("content")
            data["hierarchy"]["lvl0"] = meta.get("content")
            data["hierarchy"]["lvl2"] = meta.get("content")
        elif meta.get("property") == "og:url":
            data["url"] = meta.get("content")
            data["path"] = meta.get("content").split(url)[1]
            data["objectID"] = meta.get("content").split(url)[1]
    breadcrumbs = soup.find_all("li", class_="breadcrumb-item")
    subrank = len(breadcrumbs)
    data["subrank"] = subrank
    # Only the first breadcrumb (the top-level section) determines the rank.
    for bc in breadcrumbs:
        section = bc.text.strip()
        data["lvl1"] = section
        data["hierarchy"]["lvl0"] = section
        try:
            data["rank"] = rankings[section] + subrank
        except KeyError:
            print(f"Rank not found for section {section}")
            data["rank"] = 998
        break
    for p in soup.find_all("p"):
        if p.text != "":
            text = text + p.text
    data["text"] = text
    return data

def index_payload(payload):
    """Replace the entire Algolia index with the freshly built records."""
    res = index.replace_all_objects(payload)
    res.wait()

if __name__ == "__main__":
    pages = []
    payload = []
    scan_directory(starting_directory, pages)
    for page in pages:
        data = parse_file(page)
        if "objectID" in data:
            payload.append(data)
    index_payload(payload)
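For local testing, the script can be run against a built copy of the site. A sketch, assuming the three Algolia environment variables above are exported and the site has been built into `daprdocs/public/` (this mirrors the workflow step shown below; the credential values are placeholders):

```bash
export ALGOLIA_APP_ID=<app-id>            # hypothetical placeholder values
export ALGOLIA_API_WRITE_KEY=<write-key>
export ALGOLIA_INDEX_NAME=daprdocs
python ./.github/scripts/algolia.py ./daprdocs/public
```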

View File

@ -1,6 +1,7 @@
name: Azure Static Web App Root
on:
workflow_dispatch:
push:
branches:
- v1.11
@ -9,35 +10,66 @@ on:
branches:
- v1.11
concurrency:
# Cancel the previously triggered build for only PR build.
group: website-${{ github.event.pull_request.number || github.sha }}
cancel-in-progress: true
jobs:
build_and_deploy_job:
if: github.event_name == 'push' || (github.event_name == 'pull_request' && github.event.action != 'closed')
name: Build Hugo Website
if: github.event.action != 'closed'
runs-on: ubuntu-latest
name: Build and Deploy Job
env:
SWA_BASE: 'proud-bay-0e9e0e81e'
HUGO_ENV: production
steps:
- uses: actions/checkout@v3
- name: Checkout docs repo
uses: actions/checkout@v3
with:
submodules: recursive
fetch-depth: 0
submodules: true
- name: Setup Node
uses: actions/setup-node@v2
with:
node-version: '14'
- name: Setup Hugo
uses: peaceiris/actions-hugo@v2.5.0
with:
hugo-version: 0.102.3
extended: true
- name: Setup Docsy
run: cd daprdocs && git submodule update --init --recursive && sudo npm install -D --save autoprefixer && sudo npm install -D --save postcss-cli
- name: Build And Deploy
id: builddeploy
run: |
cd daprdocs
git submodule update --init --recursive
sudo npm install -D --save autoprefixer
sudo npm install -D --save postcss-cli
- name: Build Hugo Website
run: |
cd daprdocs
git config --global --add safe.directory /github/workspace
if [ $GITHUB_EVENT_NAME == 'pull_request' ]; then
STAGING_URL="https://${SWA_BASE}-${{github.event.number}}.westus2.azurestaticapps.net/"
fi
hugo ${STAGING_URL+-b "$STAGING_URL"}
- name: Deploy docs site
uses: Azure/static-web-apps-deploy@v1
env:
HUGO_ENV: production
HUGO_VERSION: "0.100.2"
with:
azure_static_web_apps_api_token: ${{ secrets.AZURE_STATIC_WEB_APPS_API_TOKEN_PROUD_BAY_0E9E0E81E }}
skip_deploy_on_missing_secrets: true
repo_token: ${{ secrets.GITHUB_TOKEN }} # Used for Github integrations (i.e. PR comments)
repo_token: ${{ secrets.GITHUB_TOKEN }}
action: "upload"
app_location: "/daprdocs"
app_build_command: "git config --global --add safe.directory /github/workspace && hugo"
output_location: "public"
skip_api_build: true
app_location: "daprdocs/public/"
api_location: "daprdocs/public/"
output_location: ""
skip_app_build: true
skip_deploy_on_missing_secrets: true
- name: Upload Hugo artifacts
uses: actions/upload-artifact@v3
with:
name: hugo_build
path: ./daprdocs/public/
if-no-files-found: error
close_pull_request_job:
close_staging_site:
if: github.event_name == 'pull_request' && github.event.action == 'closed'
runs-on: ubuntu-latest
name: Close Pull Request Job
@ -48,3 +80,30 @@ jobs:
with:
azure_static_web_apps_api_token: ${{ secrets.AZURE_STATIC_WEB_APPS_API_TOKEN_PROUD_BAY_0E9E0E81E }}
action: "close"
skip_deploy_on_missing_secrets: true
algolia_index:
name: Index site for Algolia
if: github.event_name == 'push'
needs: ['build_and_deploy_job']
runs-on: ubuntu-latest
env:
ALGOLIA_APP_ID: ${{ secrets.ALGOLIA_APP_ID }}
ALGOLIA_API_WRITE_KEY: ${{ secrets.ALGOLIA_API_WRITE_KEY }}
ALGOLIA_INDEX_NAME: daprdocs
steps:
- name: Checkout docs repo
uses: actions/checkout@v2
with:
submodules: false
- name: Download Hugo artifacts
uses: actions/download-artifact@v3
with:
name: hugo_build
path: site/
- name: Install Python packages
run: |
pip install --upgrade bs4
pip install --upgrade 'algoliasearch>=2.0,<3.0'
- name: Index site
run: python ./.github/scripts/algolia.py ./site

View File

@ -1,38 +1,12 @@
// Code formatting.
.copy-code-button {
color: #272822;
background-color: #FFF;
border-color: #0D2192;
border: 2px solid;
border-radius: 3px 3px 0px 0px;
/* right-align */
display: block;
margin-left: auto;
margin-right: 0;
margin-bottom: -2px;
padding: 3px 8px;
font-size: 0.8em;
.highlight .copy-icon {
position: absolute;
right: 20px;
top: 18px;
opacity: 0.7;
}
.copy-code-button:hover {
cursor: pointer;
background-color: #F2F2F2;
}
.copy-code-button:focus {
/* Avoid an ugly focus outline on click in Chrome,
but darken the button for accessibility.
See https://stackoverflow.com/a/25298082/1481479 */
background-color: #E6E6E6;
outline: 0;
}
.copy-code-button:active {
background-color: #D9D9D9;
}
.highlight pre {
/* Avoid pushing up the copy buttons. */
@ -40,25 +14,31 @@
}
.td-content {
// Highlighted code.
// Highlighted code.
.highlight {
@extend .card;
margin: 0rem 0;
padding: 0rem;
margin-bottom: 2rem;
max-width: 100%;
border: none;
pre {
margin: 0;
padding: 1rem;
border-radius: 10px;
}
}
// Inline code
p code, li > code, table code {
p code,
li>code,
table code {
color: inherit;
padding: 0.2em 0.4em;
margin: 0;
@ -78,11 +58,11 @@
word-wrap: normal;
background-color: $gray-100;
padding: $spacer;
max-width: 100%;
> code {
background-color: inherit !important;
>code {
background-color: inherit !important;
padding: 0;
margin: 0;
font-size: 100%;

View File

@ -7,42 +7,68 @@ description: >
Observe applications through tracing, metrics, logs and health
---
When building an application, understanding how the system is behaving is an important part of operating it - this includes having the ability to observe the internal calls of an application, gauging its performance and becoming aware of problems as soon as they occur. This is challenging for any system, but even more so for a distributed system comprised of multiple microservices where a flow, made of several calls, may start in one microservice but continue in another. Observability is critical in production environments, but also useful during development to understand bottlenecks, improve performance and perform basic debugging across the span of microservices.
When building an application, understanding the system behavior is an important, yet challenging part of operating it, such as:
- Observing the internal calls of an application
- Gauging its performance
- Becoming aware of problems as soon as they occur
While some data points about an application can be gathered from the underlying infrastructure (for example memory consumption, CPU usage), other meaningful information must be collected from an "application-aware" layer, one that can show how an important series of calls is executed across microservices. This usually means a developer must add some code to instrument an application for this purpose. Often, instrumentation code is simply meant to send collected data such as traces and metrics to observability tools or services that can help store, visualize and analyze all this information.
This can be particularly challenging for a distributed system comprised of multiple microservices, where a flow made of several calls may start in one microservice and continue in another.
Having to maintain this code, which is not part of the core logic of the application, is a burden on the developer, sometimes requiring understanding the observability tools' APIs, using additional SDKs etc. This instrumentation may also add to the portability challenges of an application, which may require different instrumentation depending on where the application is deployed. For example, different cloud providers offer different observability tools and an on-premises deployment might require a self-hosted solution.
Observability into your application is critical in production environments, and can be useful during development to:
- Understand bottlenecks
- Improve performance
- Perform basic debugging across the span of microservices
While some data points about an application can be gathered from the underlying infrastructure (memory consumption, CPU usage), other meaningful information must be collected from an "application-aware" layer, one that can show how an important series of calls is executed across microservices. Typically, you'd add some code to instrument an application, which simply sends collected data (such as traces and metrics) to observability tools or services that can help store, visualize, and analyze all this information.
Maintaining this instrumentation code, which is not part of the core logic of the application, requires understanding the observability tools' APIs, using additional SDKs, etc. This instrumentation may also present portability challenges for your application, requiring different instrumentation depending on where the application is deployed. For example:
- Different cloud providers offer different observability tools
- An on-premises deployment might require a self-hosted solution
## Observability for your application with Dapr
When building an application which leverages Dapr API building blocks to perform service-to-service calls and pub/sub messaging, Dapr offers an advantage with respect to [distributed tracing]({{<ref tracing>}}). Because this inter-service communication flows through the Dapr runtime (or "sidecar"), Dapr is in a unique position to offload the burden of application-level instrumentation.
When you leverage Dapr API building blocks to perform service-to-service calls and pub/sub messaging, Dapr offers an advantage with respect to [distributed tracing]({{< ref develop-tracing >}}). Since this inter-service communication flows through the Dapr runtime (or "sidecar"), Dapr is in a unique position to offload the burden of application-level instrumentation.
### Distributed tracing
Dapr can be [configured to emit tracing data]({{<ref setup-tracing.md>}}), and because Dapr does so using the widely adopted protocols of [Open Telemetry (OTEL)](https://opentelemetry.io/) and [Zipkin](https://zipkin.io), it can be easily integrated with multiple observability tools.
Dapr can be [configured to emit tracing data]({{< ref setup-tracing.md >}}) using the widely adopted protocols of [Open Telemetry (OTEL)](https://opentelemetry.io/) and [Zipkin](https://zipkin.io). This makes it easy to integrate Dapr with multiple observability tools.
<img src="/images/observability-tracing.png" width=1000 alt="Distributed tracing with Dapr">
### Automatic tracing context generation
Dapr uses the [W3C tracing]({{<ref w3c-tracing-overview>}}) specification for tracing context, included as part of Open Telemetry (OTEL), to generate and propagate the context header for the application or propagate user-provided context headers. This means that you get tracing by default with Dapr.
Dapr uses the [W3C tracing]({{< ref w3c-tracing-overview >}}) specification for tracing context, included as part of Open Telemetry (OTEL), to generate and propagate the context header for the application or propagate user-provided context headers. This means that you get tracing by default with Dapr.
## Observability for the Dapr sidecar and control plane
You also want to be able to observe Dapr itself, by collecting metrics on performance, throughput and latency and logs emitted by the Dapr sidecar, as well as the Dapr control plane services. Dapr sidecars have a health endpoint that can be probed to indicate their health status.
You can also observe Dapr itself, by:
- Generating logs emitted by the Dapr sidecar and the Dapr control plane services
- Collecting metrics on performance, throughput, and latency
- Using health endpoint probes to indicate the Dapr sidecar health status
<img src="/images/observability-sidecar.png" width=1000 alt="Dapr sidecar metrics, logs and health checks">
### Logging
Dapr generates [logs]({{<ref "logs.md">}}) to provide visibility into sidecar operation and to help users identify issues and perform debugging. Log events contain warning, error, info, and debug messages produced by Dapr system services. Dapr can also be configured to send logs to collectors such as [Fluentd]({{< ref fluentd.md >}}), [Azure Monitor]({{< ref azure-monitor.md >}}), and other observability tools, so that logs can be searched and analyzed to provide insights.
Dapr generates [logs]({{< ref logs.md >}}) to:
- Provide visibility into sidecar operation
- Help users identify issues and perform debugging
Log events contain warning, error, info, and debug messages produced by Dapr system services. You can also configure Dapr to send logs to collectors, such as Open Telemetry Collector, [Fluentd]({{< ref fluentd.md >}}), [New Relic]({{< ref "operations/monitoring/logging/newrelic.md" >}}), [Azure Monitor]({{< ref azure-monitor.md >}}), and other observability tools, so that logs can be searched and analyzed to provide insights.
### Metrics
Metrics are the series of measured values and counts that are collected and stored over time. [Dapr metrics]({{<ref "metrics">}}) provide monitoring capabilities to understand the behavior of the Dapr sidecar and control plane. For example, the metrics between a Dapr sidecar and the user application show call latency, traffic failures, error rates of requests, etc. Dapr [control plane metrics](https://github.com/dapr/dapr/blob/master/docs/development/dapr-metrics.md) show sidecar injection failures and the health of control plane services, including CPU usage, number of actor placements made, etc.
Metrics are a series of measured values and counts collected and stored over time. [Dapr metrics]({{< ref metrics >}}) provide monitoring capabilities to understand the behavior of the Dapr sidecar and control plane. For example, the metrics between a Dapr sidecar and the user application show call latency, traffic failures, error rates of requests, etc.
Dapr [control plane metrics](https://github.com/dapr/dapr/blob/master/docs/development/dapr-metrics.md) show sidecar injection failures and the health of control plane services, including CPU usage, number of actor placements made, etc.
### Health checks
The Dapr sidecar exposes an HTTP endpoint for [health checks]({{<ref sidecar-health.md>}}). With this API, user code or hosting environments can probe the Dapr sidecar to determine its status and identify issues with sidecar readiness.
The Dapr sidecar exposes an HTTP endpoint for [health checks]({{< ref sidecar-health.md >}}). With this API, user code or hosting environments can probe the Dapr sidecar to determine its status and identify issues with sidecar readiness.
Conversely, Dapr can be configured to probe for the [health of your application]({{<ref app-health.md >}}), and react to changes in the app's health, including stopping pub/sub subscriptions and short-circuiting service invocation calls.
Conversely, Dapr can be configured to probe for the [health of your application]({{< ref app-health.md >}}), and react to changes in the app's health, including stopping pub/sub subscriptions and short-circuiting service invocation calls.
## Next steps
- [Learn more about observability in developing with Dapr]({{< ref develop-tracing >}})
- [Learn more about observability in operating with Dapr]({{< ref tracing >}})

View File

@ -211,6 +211,21 @@ The Dapr threat model is below.
## Security audit
### June 2023
In June 2023, Dapr completed a fuzzing audit done by Ada Logics.
The audit achieved the following:
- OSS-Fuzz integration
- 39 new fuzzers for Dapr
- Fuzz test coverage for Dapr Runtime, Kit and Components-contrib
- All fuzzers running continuously after the audit completed
You can find the full report [here](/docs/Dapr-june-2023-fuzzing-audit-report.pdf).
Three issues were found during the audit.
### February 2021
In February 2021, Dapr went through a 2nd security audit targeting its 1.0 release by Cure53.
@ -255,4 +270,4 @@ Visit [this page]({{< ref support-security-issues.md >}}) to report a security i
## Related links
[Operational Security]({{< ref "security.md" >}})
[Operational Security]({{< ref "security.md" >}})

View File

@ -39,11 +39,11 @@ Style and tone conventions should be followed throughout all Dapr documentation
## Diagrams and images
Diagrams and images are invaluable visual aids for documentation pages. Diagrams are kept in a [Dapr Diagrams Deck](https://github.com/dapr/docs/tree/v1.10/daprdocs/static/presentations), which includes guidance on style and icons.
Diagrams and images are invaluable visual aids for documentation pages. Diagrams are kept in a [Dapr Diagrams Deck](https://github.com/dapr/docs/tree/v1.11/daprdocs/static/presentations), which includes guidance on style and icons.
As you create diagrams for your documentation:
- Save them as high-res PNG files into the [images folder](https://github.com/dapr/docs/tree/v1.10/daprdocs/static/images).
- Save them as high-res PNG files into the [images folder](https://github.com/dapr/docs/tree/v1.11/daprdocs/static/images).
- Name your PNG files using the convention of a concept or building block so that they are grouped.
- For example: `service-invocation-overview.png`.
- For more information on calling out images using shortcode, see the [Images guidance](#images) section below.
@ -458,4 +458,4 @@ Steps to add a language:
## Next steps
Get started by copying and working from one of [the Dapr doc templates]({{< ref docs-templates >}}).
Get started by copying and working from one of [the Dapr doc templates]({{< ref docs-templates >}}).

View File

@ -8,4 +8,5 @@ description: "Dapr capabilities that solve common development challenges for dis
Get a high-level [overview of Dapr building blocks]({{< ref building-blocks-concept >}}) in the **Concepts** section.
<img src="/images/buildingblocks-overview.png" alt="Diagram showing the different Dapr API building blocks" width=1000>
<img src="/images/buildingblocks-overview.png" alt="Diagram showing the different Dapr API building blocks" width=1000>

View File

@ -5,3 +5,10 @@ linkTitle: "Actors"
weight: 50
description: Encapsulate code and data in reusable actor objects as a common microservices design pattern
---
{{% alert title="More about Dapr Actors" color="primary" %}}
Learn more about how to use Dapr Actors:
- Try the [Actors quickstart]({{< ref actors-quickstart.md >}}).
- Explore actors via any of the [Dapr SDKs]({{< ref sdks >}}).
- Review the [Actors API reference documentation]({{< ref actors_api.md >}}).
{{% /alert %}}

View File

@ -5,3 +5,12 @@ linkTitle: "Bindings"
weight: 40
description: Interface with or be triggered from external systems
---
{{% alert title="More about Dapr Bindings" color="primary" %}}
Learn more about how to use Dapr Bindings:
- Try the [Bindings quickstart]({{< ref bindings-quickstart.md >}}).
- Explore input and output bindings via any of the supporting [Dapr SDKs]({{< ref sdks >}}).
- Review the [Bindings API reference documentation]({{< ref bindings_api.md >}}).
- Browse the supported [input and output bindings component specs]({{< ref supported-bindings >}}).
{{% /alert %}}

View File

@ -5,3 +5,11 @@ linkTitle: "Configuration"
weight: 80
description: Manage and be notified of application configuration changes
---
{{% alert title="More about Dapr Configuration" color="primary" %}}
Learn more about how to use Dapr Configuration:
- Try the [Configuration quickstart]({{< ref configuration-quickstart.md >}}).
- Explore configuration via any of the supporting [Dapr SDKs]({{< ref sdks >}}).
- Review the [Configuration API reference documentation]({{< ref configuration_api.md >}}).
- Browse the supported [configuration component specs]({{< ref supported-configuration-stores >}}).
{{% /alert %}}

View File

@ -4,4 +4,11 @@ title: "Cryptography"
linkTitle: "Cryptography"
weight: 110
description: "Perform cryptographic operations without exposing keys to your application"
---
---
{{% alert title="More about Dapr Cryptography" color="primary" %}}
Learn more about how to use Dapr Cryptography:
- Try the [Cryptography quickstart]({{< ref cryptography-quickstart.md >}}).
- Explore cryptography via any of the supporting [Dapr SDKs]({{< ref sdks >}}).
- Browse the supported [cryptography component specs]({{< ref supported-cryptography >}}).
{{% /alert %}}

View File

@ -5,3 +5,10 @@ linkTitle: "Distributed lock"
weight: 90
description: Distributed locks provide mutually exclusive access to shared resources from an application.
---
{{% alert title="More about Dapr Distributed Lock" color="primary" %}}
Learn more about how to use Dapr Distributed Lock:
- Explore distributed locks via any of the supporting [Dapr SDKs]({{< ref sdks >}}).
- Review the [Distributed Lock API reference documentation]({{< ref distributed_lock_api.md >}}).
- Browse the supported [distributed locks component specs]({{< ref supported-locks >}}).
{{% /alert %}}

View File

@ -6,4 +6,10 @@ weight: 60
description: See and measure the message calls to components and between networked services
---
This section includes guides for developers in the context of observability. See other sections for a [general overview of the observability concept]({{< ref observability-concept >}}) in Dapr and for [operations guidance on monitoring]({{< ref monitoring >}}).
{{% alert title="More about Dapr Observability" color="primary" %}}
Learn more about how to use Dapr Observability:
- Explore observability via any of the supporting [Dapr SDKs]({{< ref sdks >}}).
- Review the [Observability API reference documentation]({{< ref health_api.md >}}).
- Read the [general overview of the observability concept]({{< ref observability-concept >}}) in Dapr.
- Learn the [operations perspective and guidance on monitoring]({{< ref monitoring >}}).
{{% /alert %}}

View File

@ -2,17 +2,22 @@
type: docs
title: "App health checks"
linkTitle: "App health checks"
weight: 300
weight: 100
description: Reacting to apps' health status changes
---
App health checks is a feature that allows probing for the health of your application and reacting to status changes.
The app health checks feature allows probing for the health of your application and reacting to status changes.
Applications can become unresponsive for a variety of reasons: for example, they could be too busy to accept new work, could have crashed, or be in a deadlock state. Sometimes the condition can be transitory, for example if the app is just busy (and will eventually be able to resume accepting new work), or if the application is being restarted for whatever reason and is in its initialization phase.
Applications can become unresponsive for a variety of reasons. For example, your application:
- Could be too busy to accept new work;
- Could have crashed; or
- Could be in a deadlock state.
When app health checks are enabled, the Dapr *runtime* (sidecar) periodically polls your application via HTTP or gRPC calls.
Sometimes the condition can be transitory, for example:
- If the app is just busy and will resume accepting new work eventually
- If the application is being restarted for whatever reason and is in its initialization phase
When it detects a failure in the app's health, Dapr stops accepting new work on behalf of the application by:
App health checks are disabled by default. Once you enable app health checks, the Dapr runtime (sidecar) periodically polls your application via HTTP or gRPC calls. When it detects a failure in the app's health, Dapr stops accepting new work on behalf of the application by:
- Unsubscribing from all pub/sub subscriptions
- Stopping all input bindings
@ -20,15 +25,14 @@ When it detects a failure in the app's health, Dapr stops accepting new work on
These changes are meant to be temporary, and Dapr resumes normal operations once it detects that the application is responsive again.
App health checks are disabled by default.
<img src="/images/observability-app-health.webp" width="800" alt="Diagram showing the app health feature. Running Dapr with app health enabled causes Dapr to periodically probe the app for its health.">
### App health checks vs platform-level health checks
## App health checks vs platform-level health checks
App health checks in Dapr are meant to be complementary to, and not replace, any platform-level health checks, like [liveness probes](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) when running on Kubernetes.
Platform-level health checks (or liveness probes) generally ensure that the application is running, and cause the platform to restart the application in case of failures.
Unlike platform-level health checks, Dapr's app health checks focus on pausing work to an application that is currently unable to accept it, but is expected to be able to resume accepting work *eventually*. Goals include:
- Not bringing more load to an application that is already overloaded.
@ -36,7 +40,9 @@ Unlike platform-level health checks, Dapr's app health checks focus on pausing w
In this regard, Dapr's app health checks are "softer", waiting for an application to be able to process work, rather than terminating the running process in a "hard" way.
> Note that for Kubernetes, a failing App Health check won't remove a pod from service discovery: this remains the responsibility of the Kubernetes liveness probe, _not_ Dapr.
{{% alert title="Note" color="primary" %}}
For Kubernetes, a failing app health check won't remove a pod from service discovery: this remains the responsibility of the Kubernetes liveness probe, _not_ Dapr.
{{% /alert %}}
## Configuring app health checks
@ -52,34 +58,46 @@ The full list of options are listed in this table:
| CLI flags | Kubernetes deployment annotation | Description | Default value |
| ----------------------------- | ----------------------------------- | ----------- | ------------- |
| `--enable-app-health-check` | `dapr.io/enable-app-health-check` | Boolean that enables the health checks | Disabled |
| `--app-health-check-path` | `dapr.io/app-health-check-path` | Path that Dapr invokes for health probes when the app channel is HTTP (this value is ignored if the app channel is using gRPC) | `/healthz` |
| `--app-health-probe-interval` | `dapr.io/app-health-probe-interval` | Number of *seconds* between each health probe | `5` |
| `--app-health-probe-timeout` | `dapr.io/app-health-probe-timeout` | Timeout in *milliseconds* for health probe requests | `500` |
| `--app-health-threshold` | `dapr.io/app-health-threshold` | Max number of consecutive failures before the app is considered unhealthy | `3` |
| [`--app-health-check-path`]({{< ref "app-health.md#health-check-paths" >}}) | `dapr.io/app-health-check-path` | Path that Dapr invokes for health probes when the app channel is HTTP (this value is ignored if the app channel is using gRPC) | `/healthz` |
| [`--app-health-probe-interval`]({{< ref "app-health.md#intervals-timeouts-and-thresholds" >}}) | `dapr.io/app-health-probe-interval` | Number of *seconds* between each health probe | `5` |
| [`--app-health-probe-timeout`]({{< ref "app-health.md#intervals-timeouts-and-thresholds" >}}) | `dapr.io/app-health-probe-timeout` | Timeout in *milliseconds* for health probe requests | `500` |
| [`--app-health-threshold`]({{< ref "app-health.md#intervals-timeouts-and-thresholds" >}}) | `dapr.io/app-health-threshold` | Max number of consecutive failures before the app is considered unhealthy | `3` |
> See the [full Dapr arguments and annotations reference]({{<ref arguments-annotations-overview>}}) for all options and how to enable them.
> See the [full Dapr arguments and annotations reference]({{< ref arguments-annotations-overview >}}) for all options and how to enable them.
Additionally, app health checks are impacted by the protocol used for the app channel, which is configured with the `--app-protocol` flag (self-hosted) or the `dapr.io/app-protocol` annotation (Kubernetes); supported values are `http` (default), `grpc`, `https`, `grpcs`, and `h2c` (HTTP/2 Cleartext).
Additionally, app health checks are impacted by the protocol used for the app channel, which is configured with the following flag or annotation:
| CLI flag | Kubernetes deployment annotation | Description | Default value |
| ----------------------------- | ----------------------------------- | ----------- | ------------- |
| [`--app-protocol`]({{< ref "app-health.md#health-check-paths" >}}) | `dapr.io/app-protocol` | Protocol used for the app channel. Supported values are `http`, `grpc`, `https`, `grpcs`, and `h2c` (HTTP/2 Cleartext). | `http` |
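As an illustration, here is a sketch of how these annotations might appear together on a Kubernetes deployment; the app name and port are hypothetical:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp
spec:
  template:
    metadata:
      annotations:
        dapr.io/enabled: "true"
        dapr.io/app-id: "myapp"          # hypothetical app
        dapr.io/app-port: "8080"
        dapr.io/app-protocol: "http"
        dapr.io/enable-app-health-check: "true"
        dapr.io/app-health-check-path: "/healthz"
        dapr.io/app-health-probe-interval: "5"
        dapr.io/app-health-probe-timeout: "500"
        dapr.io/app-health-threshold: "3"
```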
### Health check paths
#### HTTP
When using HTTP (including `http`, `https`, and `h2c`) for `app-protocol`, Dapr performs health probes by making an HTTP call to the path specified in `app-health-check-path`, which is `/healthz` by default.
For your app to be considered healthy, the response must have an HTTP status code in the 200-299 range. Any other status code is considered a failure. Dapr is only concerned with the status code of the response, and ignores any response header or body.
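As a sketch, a minimal HTTP handler satisfying this contract might look like the following; Flask and the port are assumptions, and any framework works, since Dapr only inspects the status code:

```python
from flask import Flask

app = Flask(__name__)

@app.route("/healthz")
def healthz():
    # Any 2xx status code marks the app healthy; headers and body are ignored.
    return "", 204

if __name__ == "__main__":
    app.run(port=8080)  # hypothetical app port
```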
#### gRPC
When using gRPC for the app channel (`app-protocol` set to `grpc` or `grpcs`), Dapr invokes the method `/dapr.proto.runtime.v1.AppCallbackHealthCheck/HealthCheck` in your application. Most likely, you will use a Dapr SDK to implement the handler for this method.
While responding to a health probe request, your app *may* decide to perform additional internal health checks to determine if it's ready to process work from the Dapr runtime. However, this is not required; it's a choice that depends on your application's needs.
### Intervals, timeouts, and thresholds
When app health checks are enabled, by default Dapr probes your application every 5 seconds. You can configure the interval, in seconds, with `app-health-probe-interval`. These probes happen regularly, regardless of whether your application is healthy or not.
#### Intervals
By default, when app health checks are enabled, Dapr probes your application every 5 seconds. You can configure the interval, in seconds, with `app-health-probe-interval`. These probes happen regularly, regardless of whether your application is healthy or not.
#### Timeouts
When the Dapr runtime (sidecar) is initially started, Dapr waits for a successful health probe before considering the app healthy. This means that pub/sub subscriptions, input bindings, and service invocation requests won't be enabled for your application until this first health check is complete and successful.
Health probe requests are considered successful if the application sends a successful response (as explained above) within the timeout configured in `app-health-probe-timeout`. The default value is 500, corresponding to 500 milliseconds (i.e. half a second).
Health probe requests are considered successful if the application sends a successful response (as explained above) within the timeout configured in `app-health-probe-timeout`. The default value is 500, corresponding to 500 milliseconds (half a second).
#### Thresholds
Before Dapr considers an app to have entered an unhealthy state, it will wait for `app-health-threshold` consecutive failures, whose default value is 3. This default value means that your application must fail health probes 3 times *in a row* to be considered unhealthy.
If you set the threshold to 1, any failure causes Dapr to assume your app is unhealthy and to stop delivering work to it.
A threshold greater than 1 can help exclude transient failures due to external circumstances. The right value for your application depends on your requirements.
Thresholds only apply to failures. A single successful response is enough for Dapr to consider your app to be healthy and resume normal operations.
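For self-hosted scenarios, a sketch of enabling these settings with the CLI flags from the table above; the app ID, port, and application command are hypothetical:

```bash
dapr run --app-id myapp --app-port 8080 \
  --enable-app-health-check \
  --app-health-check-path /healthz \
  --app-health-probe-interval 5 \
  --app-health-probe-timeout 500 \
  --app-health-threshold 3 \
  -- python app.py
```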

View File

@ -0,0 +1,7 @@
---
type: docs
title: "Tracing"
linkTitle: "Tracing"
weight: 300
description: Learn more about tracing scenarios and how to use tracing for visibility in your application
---

View File

@ -0,0 +1,113 @@
---
type: docs
title: "Distributed tracing"
linkTitle: "Distributed tracing"
weight: 300
description: "Use tracing to get visibility into your application"
---
Dapr uses the Open Telemetry (OTEL) and Zipkin protocols for distributed traces. OTEL is the industry standard and is the recommended trace protocol to use.
Most observability tools support OTEL, including:
- [Google Cloud Operations](https://cloud.google.com/products/operations)
- [New Relic](https://newrelic.com)
- [Azure Monitor](https://azure.microsoft.com/services/monitor/)
- [Datadog](https://www.datadoghq.com)
- Instana
- [Jaeger](https://www.jaegertracing.io/)
- [SignalFX](https://www.signalfx.com/)
## Scenarios
Tracing is used with the service invocation and pub/sub APIs. You can flow trace context between services that use these APIs. There are two scenarios for how tracing is used:
1. Dapr generates the trace context and you propagate the trace context to another service.
1. You generate the trace context and Dapr propagates the trace context to a service.
### Scenario 1: Dapr generates trace context headers
#### Propagating sequential service calls
Dapr takes care of creating the trace headers. However, when there are more than two services, you're responsible for propagating the trace headers between them. Let's go through the scenarios with examples:
##### Single service invocation call
For example, `service A -> service B`.
Dapr generates the trace headers in `service A`, which are then propagated from `service A` to `service B`. No further propagation is needed.
##### Multiple sequential service invocation calls
For example, `service A -> service B -> propagate trace headers to -> service C` and so on to further Dapr-enabled services.
Dapr generates the trace headers at the beginning of the request in `service A`, which are then propagated to `service B`. You are now responsible for taking the headers and propagating them to `service C`, since this is specific to your application.
In other words, if the app is calling Dapr and wants to trace with an existing trace header (span), it must always propagate to Dapr (from `service B` to `service C`, in this example). Dapr always propagates trace spans to an application.
{{% alert title="Note" color="primary" %}}
No helper methods are exposed in Dapr SDKs to propagate and retrieve trace context. You need to use HTTP/gRPC clients to propagate and retrieve trace headers through HTTP headers and gRPC metadata.
{{% /alert %}}
##### Request is from external endpoint
For example, `from a gateway service to a Dapr-enabled service A`.
An external gateway ingress calls Dapr, which generates the trace headers and calls `service A`. `Service A` then calls `service B` and further Dapr-enabled services.
You must propagate the headers from `service A` to `service B`. For example: `Ingress -> service A -> propagate trace headers -> service B`. This is similar to [case 2]({{< ref "tracing-overview.md#multiple-sequential-service-invocation-calls" >}}).
##### Pub/sub messages
Dapr generates the trace headers in the published message topic. These trace headers are propagated to any services listening on that topic.
#### Propagating multiple different service calls
In the following scenarios, Dapr does some of the work for you, with you then creating or propagating trace headers.
##### Multiple service calls to different services from single service
When you are calling multiple services from a single service, you need to propagate the trace headers. For example:
```
service A -> service B
[ .. some code logic ..]
service A -> service C
[ .. some code logic ..]
service A -> service D
[ .. some code logic ..]
```
In this case:
1. When `service A` first calls `service B`, Dapr generates the trace headers in `service A`.
1. The trace headers in `service A` are propagated to `service B`.
1. These trace headers are returned in the response from `service B` as part of response headers.
1. You then need to propagate the returned trace context to the next services, like `service C` and `service D`, as Dapr does not know you want to reuse the same header, as shown in the sketch below.
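For illustration, a sketch of this sequence over the Dapr HTTP invoke API, using Python; the service names, methods, and payloads are assumptions (for gRPC, the equivalent is passing `grpc-trace-bin` metadata):

```python
import requests

DAPR_INVOKE = "http://localhost:3500/v1.0/invoke"

# First call: Dapr generates the trace headers for service A.
resp_b = requests.post(f"{DAPR_INVOKE}/serviceB/method/neworder", json={"id": 1})

# Carry the returned trace context forward so all calls join one trace.
trace_headers = {
    k: v for k, v in resp_b.headers.items()
    if k.lower() in ("traceparent", "tracestate")
}

requests.post(f"{DAPR_INVOKE}/serviceC/method/neworder",
              json={"id": 1}, headers=trace_headers)
requests.post(f"{DAPR_INVOKE}/serviceD/method/neworder",
              json={"id": 1}, headers=trace_headers)
```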
### Scenario 2: You generate your own trace context headers from non-Daprized applications
Generating your own trace context headers is more unusual and typically not required when calling Dapr.
However, there are scenarios where you could specifically choose to add W3C trace headers into a service call. For example, you have an existing application that does not use Dapr. In this case, Dapr still propagates the trace context headers for you.
If you decide to generate trace headers yourself, there are three ways this can be done:
1. Standard OpenTelemetry SDK
You can use the industry standard [OpenTelemetry SDKs](https://opentelemetry.io/docs/instrumentation/) to generate trace headers and pass these trace headers to a Dapr-enabled service. _This is the preferred method_.
1. Vendor SDK
You can use a vendor SDK that provides a way to generate W3C trace headers and pass them to a Dapr-enabled service.
1. W3C trace context
You can handcraft a trace context following [W3C trace context specifications](https://www.w3.org/TR/trace-context/) and pass them to a Dapr-enabled service.
Read [the trace context overview]({{< ref w3c-tracing-overview >}}) for more background and examples on W3C trace context and headers.
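As a sketch of the first option, the OpenTelemetry Python SDK can generate a span and inject the W3C headers into a call to a Dapr-enabled service; the span name, target service, and payload are assumptions:

```python
# pip install opentelemetry-api opentelemetry-sdk requests
import requests
from opentelemetry import trace
from opentelemetry.propagate import inject
from opentelemetry.sdk.trace import TracerProvider

trace.set_tracer_provider(TracerProvider())
tracer = trace.get_tracer(__name__)

with tracer.start_as_current_span("checkout"):
    headers = {}
    inject(headers)  # writes the W3C traceparent (and tracestate) headers
    requests.post(
        "http://localhost:3500/v1.0/invoke/serviceA/method/neworder",
        json={"id": 1},
        headers=headers,
    )
```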
## Related Links
- [Observability concepts]({{< ref observability-concept.md >}})
- [W3C Trace Context for distributed tracing]({{< ref w3c-tracing-overview >}})
- [W3C Trace Context specification](https://www.w3.org/TR/trace-context/)
- [Observability quickstart](https://github.com/dapr/quickstarts/tree/master/tutorials/observability)

View File

@ -0,0 +1,91 @@
---
type: docs
title: "W3C trace context"
linkTitle: "W3C trace context"
weight: 2000
description: Background and scenarios for using W3C tracing with Dapr
---
Dapr uses the [Open Telemetry protocol](https://opentelemetry.io/), which in turn uses the [W3C trace context](https://www.w3.org/TR/trace-context/) for distributed tracing for both service invocation and pub/sub messaging. Dapr generates and propagates the trace context information, which can be sent to observability tools for visualization and querying.
## Background
Distributed tracing is a methodology implemented by tracing tools to follow, analyze, and debug a transaction across multiple software components.
Typically, a distributed trace traverses more than one service, which requires it to be uniquely identifiable. **Trace context propagation** passes along this unique identification.
In the past, trace context propagation was implemented individually by each different tracing vendor. In multi-vendor environments, this causes interoperability problems, such as:
- Traces collected by different tracing vendors can't be correlated, as there is no shared unique identifier.
- Traces crossing boundaries between different tracing vendors can't be propagated, as there is no forwarded, uniformly agreed set of identification.
- Vendor-specific metadata might be dropped by intermediaries.
- Cloud platform vendors, intermediaries, and service providers cannot guarantee to support trace context propagation, as there is no standard to follow.
Previously, most applications were monitored by a single tracing vendor and stayed within the boundaries of a single platform provider, so these problems didn't have a significant impact.
Today, an increasing number of applications are distributed and leverage multiple middleware services and cloud platforms. This transformation of modern applications requires a distributed tracing context propagation standard.
The [W3C trace context specification](https://www.w3.org/TR/trace-context/) defines a universally agreed-upon format for the exchange of trace context propagation data (referred to as trace context). Trace context solves the above problems by providing:
- A unique identifier for individual traces and requests, allowing trace data of multiple providers to be linked together.
- An agreed-upon mechanism to forward vendor-specific trace data and avoid broken traces when multiple tracing tools participate in a single transaction.
- An industry standard that intermediaries, platforms, and hardware providers can support.
This unified approach for propagating trace data improves visibility into the behavior of distributed applications, facilitating problem and performance analysis.
## W3C trace context and headers format
### W3C trace context
Dapr uses the standard W3C trace context headers.
- For HTTP requests, Dapr uses the `traceparent` header.
- For gRPC requests, Dapr uses the `grpc-trace-bin` header.
When a request arrives without a trace ID, Dapr creates a new one. Otherwise, it passes the trace ID along the call chain.
### W3C trace headers
These are the specific trace context headers that are generated and propagated by Dapr for HTTP and gRPC.
{{< tabs "HTTP" "gRPC" >}}
<!-- HTTP -->
{{% codetab %}}
Copy these headers when propagating a trace context header from an HTTP response to an HTTP request:
**Traceparent header**
The traceparent header represents the incoming request in a tracing system in a common format, understood by all vendors:
```
traceparent: 00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-01
```
[Learn more about the `traceparent` field details](https://www.w3.org/TR/trace-context/#traceparent-header).
**Tracestate header**
The tracestate header includes the parent in a potentially vendor-specific format:
```
tracestate: congo=t61rcWkgMzE
```
[Learn more about the `tracestate` field details](https://www.w3.org/TR/trace-context/#tracestate-header).
{{% /codetab %}}
<!-- gRPC -->
{{% codetab %}}
In gRPC API calls, trace context is passed through the `grpc-trace-bin` header.
{{% /codetab %}}
{{< /tabs >}}
## Related Links
- [Learn more about distributed tracing in Dapr]({{< ref tracing-overview.md >}})
- [W3C Trace Context specification](https://www.w3.org/TR/trace-context/)

View File

@ -11,7 +11,7 @@ Dapr provides a way to determine its health using an [HTTP `/healthz` endpoint](
- Probed for its health
- Determined for readiness and liveness
The Dapr `/healthz` endpoint can be used by health probes from the application hosting platform (for example Kubernetes). This topic describes how Dapr integrates with probes from different hosting platforms.
In this guide, you learn how the Dapr `/healthz` endpoint integrates with health probes from the application hosting platform (for example, Kubernetes).
When deploying Dapr to a hosting platform like Kubernetes, the Dapr health endpoint is automatically configured for you.
@ -23,20 +23,10 @@ Dapr actors also have a health API endpoint where Dapr probes the application fo
Kubernetes uses *readiness* and *liveness* probes to determine the health of the container.
The kubelet uses liveness probes to know when to restart a container.
For example, liveness probes could catch a deadlock, where an application is running but is unable to make progress. Restarting a container in such a state can help to make the application more available despite having bugs.
### Liveness
The kubelet uses liveness probes to know when to restart a container. For example, liveness probes could catch a deadlock (a running application that is unable to make progress). Restarting a container in such a state can help to make the application more available despite having bugs.
The kubelet uses readiness probes to know when a container is ready to start accepting traffic. A pod is considered ready when all of its containers are ready. One use of this readiness signal is to control which pods are used as backends for Kubernetes services. When a pod is not ready, it is removed from Kubernetes service load balancers.
{{% alert title="Note" color="primary" %}}
The Dapr sidecar will be in the ready state once the application is accessible on its configured port. The application cannot access the Dapr components during application startup/initialization.
{{% /alert %}}
When integrating with Kubernetes, the Dapr sidecar is injected with a Kubernetes probe configuration telling it to use the Dapr healthz endpoint. This is done by the "Sidecar Injector" system service. The integration with the kubelet is shown in the diagram below.
<img src="/images/security-mTLS-dapr-system-services.png" width="800" alt="Diagram of Dapr services interacting" />
### How to configure a liveness probe in Kubernetes
#### How to configure a liveness probe in Kubernetes
In the pod configuration file, the liveness probe is added in the containers spec section as shown below:
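The probe itself is elided in this diff; a representative sketch of such a configuration (the path, port, and timings here are illustrative):

```yaml
livenessProbe:
  httpGet:
    path: /healthz
    port: 8080
  initialDelaySeconds: 3
  periodSeconds: 3
```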
@ -53,7 +43,14 @@ In the above example, the `periodSeconds` field specifies that the kubelet shoul
Any HTTP status code between 200 and 399 indicates success; any other status code indicates failure.
### How to configure a readiness probe in Kubernetes
### Readiness
The kubelet uses readiness probes to know when a container is ready to start accepting traffic. A pod is considered ready when all of its containers are ready. One use of this readiness signal is to control which pods are used as backends for Kubernetes services. When a pod is not ready, it is removed from Kubernetes service load balancers.
{{% alert title="Note" color="primary" %}}
The Dapr sidecar will be in the ready state once the application is accessible on its configured port. The application cannot access the Dapr components during application startup/initialization.
{{% /alert %}}
#### How to configure a readiness probe in Kubernetes
Readiness probes are configured similarly to liveness probes. The only difference is that you use the `readinessProbe` field instead of the `livenessProbe` field:
@ -66,7 +63,13 @@ Readiness probes are configured similarly to liveness probes. The only differenc
periodSeconds: 3
```
### How the Dapr sidecar health endpoint is configured with Kubernetes
### Sidecar Injector
When integrating with Kubernetes, the Dapr sidecar is injected with a Kubernetes probe configuration telling it to use the Dapr `healthz` endpoint. This is done by the "Sidecar Injector" system service. The integration with the kubelet is shown in the diagram below.
<img src="/images/security-mTLS-dapr-system-services.png" width="800" alt="Diagram of Dapr services interacting" />
#### How the Dapr sidecar health endpoint is configured with Kubernetes
As mentioned above, this configuration is done automatically by the Sidecar Injector service. This section describes the specific values that are set on the liveness and readiness probes.
@ -91,7 +94,7 @@ Dapr has its HTTP health endpoint `/v1.0/healthz` on port 3500. This can be used
failureThreshold: 3
```
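Only the tail of the probe configuration survives in this diff; a sketch of the full injected probe, using the `/v1.0/healthz` endpoint on port 3500 described above (the timing values are illustrative, not the exact values set by the Sidecar Injector):

```yaml
livenessProbe:
  httpGet:
    path: /v1.0/healthz
    port: 3500
  initialDelaySeconds: 5
  periodSeconds: 10
  timeoutSeconds: 5
  failureThreshold: 3
```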
For more information refer to:
## Related links
- [Endpoint health API]({{< ref health_api.md >}})
- [Actor health API]({{< ref "actors_api.md#health-check" >}})

View File

@ -1,118 +0,0 @@
---
type: docs
title: "Distributed tracing"
linkTitle: "Distributed tracing"
weight: 100
description: "Use tracing to get visibility into your application"
---
Dapr uses the Open Telemetry (OTEL) and Zipkin protocols for distributed traces. OTEL is the industry standard and is the recommended trace protocol to use.
Most observability tools support OTEL. For example [Google Cloud Operations](https://cloud.google.com/products/operations), [New Relic](https://newrelic.com), [Azure Monitor](https://azure.microsoft.com/services/monitor/), [Datadog](https://www.datadoghq.com), Instana, [Jaeger](https://www.jaegertracing.io/), and [SignalFX](https://www.signalfx.com/).
## Scenarios
Tracing is used with the service invocation and pub/sub APIs. You can flow trace context between services that use these APIs.
There are two scenarios for how tracing is used:
1. Dapr generates the trace context and you propagate the trace context to another service.
2. You generate the trace context and Dapr propagates the trace context to a service.
### Propagating sequential service calls
Dapr takes care of creating the trace headers. However, when there are more than two services, you're responsible for propagating the trace headers between them. Let's go through the scenarios with examples:
1. Single service invocation call (`service A -> service B`)
Dapr generates the trace headers in service A, which are then propagated from service A to service B. No further propagation is needed.
2. Multiple sequential service invocation calls ( `service A -> service B -> service C`)
Dapr generates the trace headers at the beginning of the request in service A, which are then propagated to service B. You are now responsible for taking the headers and propagating them to service C, since this is specific to your application.
`service A -> service B -> propagate trace headers to -> service C` and so on to further Dapr-enabled services.
In other words, if the app is calling Dapr and wants to trace with an existing span (trace header), it must always propagate to Dapr (from service B to service C in this case). Dapr always propagates trace spans to an application.
{{% alert title="Note" color="primary" %}}
There are no helper methods exposed in Dapr SDKs to propagate and retrieve trace context. You need to use HTTP/gRPC clients to propagate and retrieve trace headers through HTTP headers and gRPC metadata.
{{% /alert %}}
3. Request is from external endpoint (for example, `from a gateway service to a Dapr-enabled service A`)
An external gateway ingress calls Dapr, which generates the trace headers and calls service A. Service A then calls service B and further Dapr-enabled services. You must propagate the headers from service A to service B: `Ingress -> service A -> propagate trace headers -> service B`. This is similar to case 2 above.
4. Pub/sub messages
Dapr generates the trace headers in the published message topic. These trace headers are propagated to any services listening on that topic.
### Propagating multiple different service calls
In the following scenarios, Dapr does some of the work for you and you need to either create or propagate trace headers.
1. Multiple service calls to different services from single service
When you are calling multiple services from a single service (see example below), you need to propagate the trace headers:
```
service A -> service B
[ .. some code logic ..]
service A -> service C
[ .. some code logic ..]
service A -> service D
[ .. some code logic ..]
```
In this case, when service A first calls service B, Dapr generates the trace headers in service A, which are then propagated to service B. These trace headers are returned in the response from service B as part of response headers. You then need to propagate the returned trace context to the next services, service C and service D, as Dapr does not know you want to reuse the same header.
### Generating your own trace context headers from non-Daprized applications
You may have chosen to generate your own trace context headers.
Generating your own trace context headers is more unusual and typically not required when calling Dapr. However, there are scenarios where you could specifically choose to add W3C trace headers into a service call; for example, you have an existing application that does not use Dapr. In this case, Dapr still propagates the trace context headers for you. If you decide to generate trace headers yourself, there are three ways this can be done:
1. You can use the industry standard [OpenTelemetry SDKs](https://opentelemetry.io/docs/instrumentation/) to generate trace headers and pass these trace headers to a Dapr-enabled service. This is the preferred method.
2. You can use a vendor SDK that provides a way to generate W3C trace headers and pass them to a Dapr-enabled service.
3. You can handcraft a trace context following [W3C trace context specifications](https://www.w3.org/TR/trace-context/) and pass them to a Dapr-enabled service.
## W3C trace context
Dapr uses the standard W3C trace context headers.
- For HTTP requests, Dapr uses `traceparent` header.
- For gRPC requests, Dapr uses `grpc-trace-bin` header.
When a request arrives without a trace ID, Dapr creates a new one. Otherwise, it passes the trace ID along the call chain.
Read [trace context overview]({{< ref w3c-tracing-overview >}}) for more background on W3C trace context.
## W3C trace headers
These are the specific trace context headers that are generated and propagated by Dapr for HTTP and gRPC.
### Trace context HTTP headers format
When propagating a trace context header from an HTTP response to an HTTP request, you copy these headers.
#### Traceparent header
The traceparent header represents the incoming request in a tracing system in a common format, understood by all vendors.
Here's an example of a traceparent header.
`traceparent: 00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-01`
Find the traceparent fields detailed [here](https://www.w3.org/TR/trace-context/#traceparent-header).
#### Tracestate header
The tracestate header includes the parent in a potentially vendor-specific format:
`tracestate: congo=t61rcWkgMzE`
Find the tracestate fields detailed [here](https://www.w3.org/TR/trace-context/#tracestate-header).
### Trace context gRPC headers format
In the gRPC API calls, trace context is passed through `grpc-trace-bin` header.
## Related Links
- [Observability concepts]({{< ref observability-concept.md >}})
- [W3C Trace Context for distributed tracing]({{< ref w3c-tracing-overview >}})
- [W3C Trace Context specification](https://www.w3.org/TR/trace-context/)
- [Observability quickstart](https://github.com/dapr/quickstarts/tree/master/tutorials/observability)

View File

@ -1,33 +0,0 @@
---
type: docs
title: "Trace context"
linkTitle: "Trace context"
weight: 4000
description: Background and scenarios for using W3C tracing with Dapr
---
Dapr uses the [Open Telemetry protocol](https://opentelemetry.io/), which in turn uses the [W3C trace context](https://www.w3.org/TR/trace-context/) for distributed tracing for both service invocation and pub/sub messaging. Dapr generates and propagates the trace context information, which can be sent to observability tools for visualization and querying.
## Background
Distributed tracing is a methodology implemented by tracing tools to follow, analyze, and debug a transaction across multiple software components. Typically, a distributed trace traverses more than one service which requires it to be uniquely identifiable. Trace context propagation passes along this unique identification.
In the past, trace context propagation has typically been implemented individually by each different tracing vendor. In multi-vendor environments, this causes interoperability problems, such as:
- Traces that are collected by different tracing vendors cannot be correlated as there is no shared unique identifier.
- Traces that cross boundaries between different tracing vendors cannot be propagated, as there is no uniformly agreed-upon set of identifiers to forward.
- Vendor-specific metadata might be dropped by intermediaries.
- Cloud platform vendors, intermediaries, and service providers cannot guarantee to support trace context propagation as there is no standard to follow.
In the past, these problems did not have a significant impact, as most applications were monitored by a single tracing vendor and stayed within the boundaries of a single platform provider. Today, an increasing number of applications are distributed and leverage multiple middleware services and cloud platforms.
This transformation of modern applications called for a distributed tracing context propagation standard. The [W3C trace context specification](https://www.w3.org/TR/trace-context/) defines a universally agreed-upon format for the exchange of trace context propagation data - referred to as trace context. Trace context solves the problems described above by:
* Providing a unique identifier for individual traces and requests, allowing trace data of multiple providers to be linked together.
* Providing an agreed-upon mechanism to forward vendor-specific trace data and avoid broken traces when multiple tracing tools participate in a single transaction.
* Providing an industry standard that intermediaries, platforms, and hardware providers can support.
A unified approach for propagating trace data improves visibility into the behavior of distributed applications, facilitating problem and performance analysis.
## Related Links
- [W3C Trace Context specification](https://www.w3.org/TR/trace-context/)

@ -5,3 +5,11 @@ linkTitle: "Publish & subscribe"
weight: 30
description: Secure, scalable messaging between services
---
{{% alert title="More about Dapr Pub/sub" color="primary" %}}
Learn more about how to use Dapr Pub/sub:
- Try the [Pub/sub quickstart]({{< ref pubsub-quickstart.md >}}).
- Explore pub/sub via any of the supporting [Dapr SDKs]({{< ref sdks >}}).
- Review the [Pub/sub API reference documentation]({{< ref pubsub_api.md >}}).
- Browse the supported [pub/sub component specs]({{< ref supported-pubsub >}}).
{{% /alert %}}

@ -14,9 +14,15 @@ Dapr uses CloudEvents to provide additional context to the event payload, enabli
- Content-type for proper deserialization of event data
- Verification of sender application
## CloudEvents example
You can choose any of three methods for publishing a CloudEvent via pub/sub:
A publish operation to Dapr results in a cloud event envelope containing the following fields:
1. Send a pub/sub event, which is then wrapped by Dapr in a CloudEvent envelope.
1. Replace specific CloudEvents attributes provided by Dapr by overriding the standard CloudEvent properties.
1. Write your own CloudEvent envelope as part of the pub/sub event.
## Dapr-generated CloudEvents example
Sending a publish operation to Dapr automatically wraps it in a CloudEvent envelope containing the following fields:
- `id`
- `source`
@ -30,7 +36,9 @@ A publish operation to Dapr results in a cloud event envelope containing the fol
- `time`
- `datacontenttype` (optional)
The following example demonstrates a cloud event generated by Dapr for a publish operation to the `orders` topic that includes a W3C `traceid` unique to the message, the `data` and the fields for the CloudEvent where the data content is serialized as JSON.
The following example demonstrates a CloudEvent generated by Dapr for a publish operation to the `orders` topic that includes:
- A W3C `traceid` unique to the message
- The `data` and the fields for the CloudEvent where the data content is serialized as JSON
```json
{
@ -55,20 +63,112 @@ As another example of a v1.0 CloudEvent, the following shows data as XML content
```json
{
"specversion" : "1.0",
"type" : "xml.message",
"source" : "https://example.com/message",
"subject" : "Test XML Message",
"id" : "id-1234-5678-9101",
"time" : "2020-09-23T06:23:21Z",
"datacontenttype" : "text/xml",
"data" : "<note><to>User1</to><from>user2</from><message>hi</message></note>"
"topic": "orders",
"pubsubname": "order_pub_sub",
"traceid": "00-113ad9c4e42b27583ae98ba698d54255-e3743e35ff56f219-01",
"tracestate": "",
"data" : "<note><to></to><from>user2</from><message>Order</message></note>",
"id" : "id-1234-5678-9101",
"specversion" : "1.0",
"datacontenttype" : "text/xml",
"subject" : "Test XML Message",
"source" : "https://example.com/message",
"type" : "xml.message",
"time" : "2020-09-23T06:23:21Z"
}
```
## Replace Dapr generated CloudEvents values
Dapr automatically generates several CloudEvent properties. You can replace these generated CloudEvent properties by providing the following optional metadata key/value pairs:
- `cloudevent-id`: overrides `id`
- `cloudevent-source`: overrides `source`
- `cloudevent-type`: overrides `type`
- `cloudevent-traceid`: overrides `traceid`
- `cloudevent-tracestate`: overrides `tracestate`
- `cloudevent-traceparent`: overrides `traceparent`
The ability to replace CloudEvents properties using these metadata properties applies to all pub/sub components.
### Example
For example, to replace the `source` and `id` values from [the CloudEvent example above]({{< ref "#cloudevents-example" >}}) in code:
{{< tabs "Python" ".NET" >}}
<!-- Python -->
{{% codetab %}}
```python
import json
from dapr.clients import DaprClient

with DaprClient() as client:
    order = {'orderId': 1}
    # Publish an event/message using Dapr PubSub, overriding the
    # generated CloudEvent id and source via publish metadata
    result = client.publish_event(
        pubsub_name='order_pub_sub',
        topic_name='orders',
        data=json.dumps(order),
        publish_metadata={'cloudevent-id': 'd99b228f-6c73-4e78-8c4d-3f80a043d317',
                          'cloudevent-source': 'payment'}
    )
```
{{% /codetab %}}
<!-- .NET -->
{{% codetab %}}
```csharp
var order = new Order(1);
using var client = new DaprClientBuilder().Build();

// Override CloudEvent metadata
var metadata = new Dictionary<string, string>() {
    { "cloudevent.source", "payment" },
    { "cloudevent.id", "d99b228f-6c73-4e78-8c4d-3f80a043d317" }
};
// Publish an event/message using Dapr PubSub
await client.PublishEventAsync("order_pub_sub", "orders", order, metadata);
Console.WriteLine("Published data: " + order);
await Task.Delay(TimeSpan.FromSeconds(1));
```
{{% /codetab %}}
{{< /tabs >}}
The JSON payload then reflects the new `source` and `id` values:
```json
{
  "topic": "orders",
  "pubsubname": "order_pub_sub",
  "traceid": "00-113ad9c4e42b27583ae98ba698d54255-e3743e35ff56f219-01",
  "tracestate": "",
  "data": {
    "orderId": 1
  },
  "id": "d99b228f-6c73-4e78-8c4d-3f80a043d317",
  "specversion": "1.0",
  "datacontenttype": "application/json; charset=utf-8",
  "source": "payment",
  "type": "com.dapr.event.sent",
  "time": "2020-09-23T06:23:21Z",
  "traceparent": "00-113ad9c4e42b27583ae98ba698d54255-e3743e35ff56f219-01"
}
```
{{% alert title="Important" color="warning" %}}
While you can replace `traceid`/`traceparent` and `tracestate`, doing so may interfere with tracing events and report inconsistent results in tracing tools. It's recommended to use OpenTelemetry for distributed traces. [Learn more about distributed tracing.]({{< ref tracing-overview.md >}})
{{% /alert %}}
## Publish your own CloudEvent
If you want to use your own CloudEvent, make sure to specify the [`datacontenttype`]({{< ref "pubsub-overview.md#setting-message-content-types" >}}) as `application/cloudevents+json`.
If the CloudEvent that was authored by the app does not contain the [minimum required fields](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#required-attributes) in the CloudEvent specification, the message is rejected. Dapr adds the following fields to the CloudEvent if they are missing:
- `time`
@ -92,7 +192,7 @@ You can add additional fields to a custom CloudEvent that are not part of the of
Publish a CloudEvent to the `orders` topic:
```bash
dapr publish --publish-app-id orderprocessing --pubsub order-pub-sub --topic orders --data '{"specversion" : "1.0", "type" : "com.dapr.cloudevent.sent", "source" : "testcloudeventspubsub", "subject" : "Cloud Events Test", "id" : "someCloudEventId", "time" : "2021-08-02T09:00:00Z", "datacontenttype" : "application/cloudevents+json", "data" : {"orderId": "100"}}'
dapr publish --publish-app-id orderprocessing --pubsub order-pub-sub --topic orders --data '{\"orderId\": \"100\"}'
```
{{% /codetab %}}
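The same custom CloudEvent could also be published through the Python SDK. A minimal sketch (assuming the `dapr` package; the component and topic names follow the CLI example above):
```python
import json
from dapr.clients import DaprClient

# A hand-authored CloudEvent; id, source, specversion, and type are required
cloud_event = {
    "specversion": "1.0",
    "type": "com.dapr.cloudevent.sent",
    "source": "testcloudeventspubsub",
    "subject": "Cloud Events Test",
    "id": "someCloudEventId",
    "time": "2021-08-02T09:00:00Z",
    "datacontenttype": "application/cloudevents+json",
    "data": {"orderId": "100"},
}

with DaprClient() as client:
    client.publish_event(
        pubsub_name="order-pub-sub",
        topic_name="orders",
        data=json.dumps(cloud_event),
        data_content_type="application/cloudevents+json",
    )
```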

View File

@ -25,13 +25,14 @@ The diagram below is an example of how dead letter topics work. First a message
The following YAML shows how to configure a subscription with a dead letter topic named `poisonMessages` for messages consumed from the `orders` topic. This subscription is scoped to an app with a `checkout` ID.
```yaml
apiVersion: dapr.io/v1alpha1
apiVersion: dapr.io/v2alpha1
kind: Subscription
metadata:
  name: order
spec:
  topic: orders
  route: /checkout
  routes:
    default: /checkout
  pubsubname: pubsub
  deadLetterTopic: poisonMessages
  scopes:
@ -86,13 +87,16 @@ spec:
Remember to now configure a subscription to handle the dead letter topic. For example, you can create another declarative subscription to receive these messages on the same or a different application. The example below shows the checkout application subscribing to the `poisonMessages` topic with another subscription, sending these messages to be handled by the `/failedmessages` endpoint; a sketch of such a handler follows the YAML.
```yaml
apiVersion: dapr.io/v1alpha1
apiVersion: dapr.io/v2alpha1
kind: Subscription
metadata:
  name: deadlettertopics
spec:
  topic: poisonMessages
  route: /failedMessages
  routes:
    rules:
    - match:
      path: /failedMessages
  pubsubname: pubsub
  scopes:
  - checkout
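```

A minimal sketch of the `/failedMessages` handler itself, assuming Flask (the port and logging are illustrative):
```python
from flask import Flask, jsonify, request

app = Flask(__name__)

# Handler for messages routed from the poisonMessages dead letter topic
@app.route("/failedMessages", methods=["POST"])
def failed_messages():
    event = request.get_json()
    # Log the failed message for later inspection; returning 200
    # acknowledges it so it is not redelivered
    print(f"Dead letter message: id={event.get('id')}, data={event.get('data')}")
    return jsonify(success=True)

if __name__ == "__main__":
    app.run(port=6002)
```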

View File

@ -141,13 +141,14 @@ $app->start();
Similarly, you can subscribe to raw events declaratively by adding the `rawPayload` metadata entry to your subscription specification.
```yaml
apiVersion: dapr.io/v1alpha1
apiVersion: dapr.io/v2alpha1
kind: Subscription
metadata:
  name: myevent-subscription
spec:
  topic: deathStarStatus
  route: /dsstatus
  routes:
    default: /dsstatus
  pubsubname: pubsub
  metadata:
    rawPayload: "true"
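```

On the receiving side, the raw payload is delivered base64-encoded in the `data_base64` field of the CloudEvent envelope. A minimal handler sketch, assuming Flask:
```python
import base64
from flask import Flask, jsonify, request

app = Flask(__name__)

@app.route("/dsstatus", methods=["POST"])
def ds_status():
    event = request.get_json()
    # With rawPayload enabled, the raw bytes arrive base64-encoded
    # in the data_base64 field of the CloudEvent envelope
    raw = base64.b64decode(event["data_base64"])
    print(f"Raw message: {raw}")
    return jsonify(success=True)

if __name__ == "__main__":
    app.run(port=6002)
```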

View File

@ -22,13 +22,14 @@ The examples below demonstrate pub/sub messaging between a `checkout` app and an
You can subscribe declaratively to a topic using an external component file. This example uses a YAML component file named `subscription.yaml`:
```yaml
apiVersion: dapr.io/v1alpha1
apiVersion: dapr.io/v2alpha1
kind: Subscription
metadata:
  name: order
spec:
  topic: orders
  route: /checkout
  routes:
    default: /checkout
  pubsubname: pubsub
  scopes:
  - orderprocessing
@ -186,7 +187,11 @@ The `/checkout` endpoint matches the `route` defined in the subscriptions and th
### Programmatic subscriptions
The programmatic approach returns the `routes` JSON structure within the code, unlike the declarative approach's `route` YAML structure. In the example below, you define the values found in the [declarative YAML subscription](#declarative-subscriptions) above within the application code.
The dynamic programmatic approach returns the `routes` JSON structure within the code, unlike the declarative approach's `route` YAML structure.
> **Note:** Programmatic subscriptions are only read once during application start-up. You cannot _dynamically_ add new programmatic subscriptions; you can only add new ones at compile time.
In the example below, you define the values found in the [declarative YAML subscription](#declarative-subscriptions) above within the application code.
{{< tabs ".NET" Java Python JavaScript Go>}}

View File

@ -5,3 +5,11 @@ linkTitle: "Secrets management"
weight: 70
description: Securely access secrets from your application
---
{{% alert title="More about Dapr Secrets" color="primary" %}}
Learn more about how to use Dapr Secrets:
- Try the [Secrets quickstart]({{< ref secrets-quickstart.md >}}).
- Explore secrets via any of the supporting [Dapr SDKs]({{< ref sdks >}}).
- Review the [Secrets API reference documentation]({{< ref secrets_api.md >}}).
- Browse the supported [secrets component specs]({{< ref supported-secret-stores >}}).
{{% /alert %}}

View File

@ -5,3 +5,10 @@ linkTitle: "Service invocation"
weight: 10
description: Perform direct, secure, service-to-service method calls
---
{{% alert title="More about Dapr Service Invocation" color="primary" %}}
Learn more about how to use Dapr Service Invocation:
- Try the [Service Invocation quickstart]({{< ref serviceinvocation-quickstart.md >}}).
- Explore service invocation via any of the supporting [Dapr SDKs]({{< ref sdks >}}).
- Review the [Service Invocation API reference documentation]({{< ref service_invocation_api.md >}}).
{{% /alert %}}

View File

@ -47,7 +47,7 @@ The diagram below is an overview of how Dapr's service invocation works when inv
## Using an HTTPEndpoint resource or FQDN URL for non-Dapr endpoints
There are two ways to invoke a non-Dapr endpoint when communicating either to Dapr applications or non-Dapr applications. A Dapr application can invoke a non-Dapr endpoint by providing one of the following:
- A named `HTTPEndpoint` resource, including defining an `HTTPEndpoint` resource type. See the [HTTPEndpoint reference]({{< ref httpendpoints-reference.md >}}) guide for an example.
- A named `HTTPEndpoint` resource, including defining an `HTTPEndpoint` resource type. See the [HTTPEndpoint reference]({{< ref httpendpoints-schema.md >}}) guide for an example.
```sh
localhost:3500/v1.0/invoke/<HTTPEndpoint-name>/method/<my-method>
@ -81,7 +81,7 @@ curl http://localhost:3602/v1.0/invoke/orderprocessor/method/checkout
## Related Links
- [HTTPEndpoint reference]({{< ref httpendpoints-reference.md >}})
- [HTTPEndpoint reference]({{< ref httpendpoints-schema.md >}})
- [Service invocation overview]({{< ref service-invocation-overview.md >}})
- [Service invocation API specification]({{< ref service_invocation_api.md >}})

View File

@ -5,3 +5,11 @@ linkTitle: "State management"
weight: 20
description: Create long running stateful services
---
{{% alert title="More about Dapr State Management" color="primary" %}}
Learn more about how to use Dapr State Management:
- Try the [State Management quickstart]({{< ref statemanagement-quickstart.md >}}).
- Explore state management via any of the supporting [Dapr SDKs]({{< ref sdks >}}).
- Review the [State Management API reference documentation]({{< ref state_api.md >}}).
- Browse the supported [state management component specs]({{< ref supported-state-stores >}}).
{{% /alert %}}

View File

@ -16,9 +16,15 @@ When state TTL has native support in the state store component, Dapr forwards th
When a TTL is not specified, the default behavior of the state store is retained.
## Persisting state (ignoring an existing TTL)
## Explicit persistence bypassing globally defined TTL
To explicitly persist a state (ignoring any TTLs set for the key), specify a `ttlInSeconds` value of `-1`.
Persisting state applies to all state stores that let you specify a default TTL used for all data, either:
- Setting a global TTL value via a Dapr component, or
- When creating the state store outside of Dapr and setting a global TTL value.
When no specific TTL is specified, the data expires after that global TTL period of time; this expiration is handled by the state store itself, not by Dapr.
In addition, all state stores also support the option to _explicitly_ persist data. This means you can override the default database policy (which may have been set outside of Dapr or via a Dapr component) to indefinitely retain a given database record. You can do this by setting `ttlInSeconds` to `-1`, which indicates that any TTL value set should be ignored.
## Supported components
@ -71,7 +77,7 @@ using Dapr.Client;
await client.SaveStateAsync(storeName, stateKeyName, state, metadata: new Dictionary<string, string>() {
{
"metadata.ttlInSeconds", "120"
"ttlInSeconds", "120"
}
});
```
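For comparison, a minimal sketch of the same request through the Python SDK (assuming the `dapr` package and a state store component named `statestore`):
```python
from dapr.clients import DaprClient

with DaprClient() as client:
    # Expire this key after 120 seconds; pass "-1" instead to persist
    # the record indefinitely, ignoring any store-level default TTL
    client.save_state(
        store_name="statestore",
        key="order_1",
        value='{"orderId": 1}',
        state_metadata={"ttlInSeconds": "120"},
    )
```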

View File

@ -4,4 +4,11 @@ title: "Workflow"
linkTitle: "Workflow"
weight: 100
description: "Orchestrate logic across various microservices"
---
---
{{% alert title="More about Dapr Workflow" color="primary" %}}
Learn more about how to use Dapr Workflow:
- Try the [Workflow quickstart]({{< ref workflow-quickstart.md >}}).
- Explore workflow via any of the supporting [Dapr SDKs]({{< ref sdks >}}).
- Review the [Workflow API reference documentation]({{< ref workflow_api.md >}}).
{{% /alert %}}

View File

@ -3,5 +3,5 @@ type: docs
title: "Authenticate to Azure"
linkTitle: "Authenticate to Azure"
weight: 1600
description: "Learn about authenticating Azure components using Azure Active Directory or Managed Service Identities"
description: "Learn about authenticating Azure components using Azure Active Directory or Managed Identities"
---

View File

@ -9,59 +9,74 @@ aliases:
weight: 10000
---
Certain Azure components for Dapr offer support for the *common Azure authentication layer*, which enables applications to access data stored in Azure resources by authenticating with Azure Active Directory (Azure AD). Thanks to this:
- Administrators can leverage all the benefits of fine-tuned permissions with Role-Based Access Control (RBAC).
- Applications running on Azure services such as Azure Container Apps, Azure Kubernetes Service, Azure VMs, or any other Azure platform services can leverage [Managed Service Identities (MSI)](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview).
Most Azure components for Dapr support authenticating with Azure AD (Azure Active Directory). Thanks to this:
- Administrators can leverage all the benefits of fine-tuned permissions with Azure Role-Based Access Control (RBAC).
- Applications running on Azure services such as Azure Container Apps, Azure Kubernetes Service, Azure VMs, or any other Azure platform services can leverage [Managed Identities (MI)](https://learn.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview) and [Workload Identity](https://learn.microsoft.com/azure/aks/workload-identity-overview). These offer the ability to authenticate your applications without having to manage sensitive credentials.
## About authentication with Azure AD
Azure AD is Azure's identity and access management (IAM) solution, which is used to authenticate and authorize users and services.
Azure AD is built on top of open standards such as OAuth 2.0, which allows services (applications) to obtain access tokens to make requests to Azure services, including Azure Storage, Azure Key Vault, Cosmos DB, etc.
Azure AD is built on top of open standards such as OAuth 2.0, which allows services (applications) to obtain access tokens to make requests to Azure services, including Azure Storage, Azure Service Bus, Azure Key Vault, Azure Cosmos DB, Azure Database for PostgreSQL, Azure SQL, etc.
> In Azure terminology, an application is also called a "Service Principal".
Some Azure components offer alternative authentication methods, such as systems based on "master keys" or "shared keys". Although both master keys and shared keys are valid and supported by Dapr, you should authenticate your Dapr components using Azure AD. Using Azure AD offers benefits like the following.
Some Azure components offer alternative authentication methods, such as systems based on "shared keys" or "access tokens". Although these are valid and supported by Dapr, you should authenticate your Dapr components using Azure AD whenever possible to take advantage of many benefits, including:
### Managed Service Identities
- [Managed Identities and Workload Identity](#managed-identities-and-workload-identity)
- [Role-Based Access Control](#role-based-access-control)
- [Auditing](#auditing)
- [(Optional) Authentication using certificates](#optional-authentication-using-certificates)
With Managed Service Identities (MSI), your application can authenticate with Azure AD and obtain an access token to make requests to Azure services. When your application is running on a supported Azure service, an identity for your application can be assigned at the infrastructure level.
### Managed Identities and Workload Identity
With Managed Identities (MI), your application can authenticate with Azure AD and obtain an access token to make requests to Azure services. When your application is running on a supported Azure service (such as Azure VMs, Azure Container Apps, Azure Web Apps, etc), an identity for your application can be assigned at the infrastructure level.
When using MI, your code doesn't have to deal with credentials, which:
When using MSI, your code doesn't have to deal with credentials, which:
- Removes the challenge of managing credentials safely
- Allows greater separation of concerns between development and operations teams
- Reduces the number of people with access to credentials
- Simplifies operational aspects, especially when multiple environments are used
### Role-based Access Control
Applications running on Azure Kubernetes Service can similarly leverage [Workload Identity](https://learn.microsoft.com/azure/aks/workload-identity-overview) to automatically provide an identity to individual pods.
When using Role-Based Access Control (RBAC) with supported services, permissions given to an application can be fine-tuned. For example, you can restrict access to a subset of data or make it read-only.
### Role-Based Access Control
When using Azure Role-Based Access Control (RBAC) with supported services, permissions given to an application can be fine-tuned. For example, you can restrict access to a subset of data or make the access read-only.
### Auditing
Using Azure AD provides an improved auditing experience for access.
Using Azure AD provides an improved auditing experience for access. Tenant administrators can consult audit logs to track authentication requests.
### (Optional) Authenticate using certificates
### (Optional) Authentication using certificates
While Azure AD allows you to use MSI or RBAC, you still have the option to authenticate using certificates.
While Azure AD allows you to use MI, you still have the option to authenticate using certificates.
## Support for other Azure environments
By default, Dapr components are configured to interact with Azure resources in the "public cloud". If your application is deployed to another cloud, such as Azure China, Azure Government, or Azure Germany, you can enable that for supported components by setting the `azureEnvironment` metadata property to one of the supported values:
By default, Dapr components are configured to interact with Azure resources in the "public cloud". If your application is deployed to another cloud, such as Azure China or Azure Government ("sovereign clouds"), you can enable that for supported components by setting the `azureEnvironment` metadata property to one of the supported values:
- Azure public cloud (default): `"AZUREPUBLICCLOUD"`
- Azure China: `"AZURECHINACLOUD"`
- Azure Government: `"AZUREUSGOVERNMENTCLOUD"`
- Azure Germany: `"AZUREGERMANCLOUD"`
- Azure public cloud (default): `"AzurePublicCloud"`
- Azure China: `"AzureChinaCloud"`
- Azure Government: `"AzureUSGovernmentCloud"`
> Support for sovereign clouds is experimental.
## Credentials metadata fields
To authenticate with Azure AD, you will need to add the following credentials as values in the metadata for your [Dapr component]({{< ref "#example-usage-in-a-dapr-component" >}}).
To authenticate with Azure AD, you will need to add the following credentials as values in the metadata for your [Dapr component](#example-usage-in-a-dapr-component).
### Metadata options
Depending on how you've passed credentials to your Dapr services, you have multiple metadata options.
Depending on how you've passed credentials to your Dapr services, you have multiple metadata options.
- [Using client credentials](#authenticating-using-client-credentials)
- [Using a certificate](#authenticating-using-a-certificate)
- [Using Managed Identities (MI)](#authenticating-with-managed-identities-mi)
- [Using Workload Identity on AKS](#authenticating-with-workload-identity-on-aks)
- [Using Azure CLI credentials (development-only)](#authenticating-using-azure-cli-credentials-development-only)
#### Authenticating using client credentials
@ -73,7 +88,7 @@ Depending on how you've passed credentials to your Dapr services, you have multi
When running on Kubernetes, you can also use references to Kubernetes secrets for any or all of the values above.
#### Authenticating using a PFX certificate
#### Authenticating using a certificate
| Field | Required | Details | Example |
|--------|--------|--------|--------|
@ -85,27 +100,30 @@ When running on Kubernetes, you can also use references to Kubernetes secrets fo
When running on Kubernetes, you can also use references to Kubernetes secrets for any or all of the values above.
#### Authenticating with Managed Service Identities (MSI)
#### Authenticating with Managed Identities (MI)
| Field | Required | Details | Example |
|-----------------|----------|----------------------------|------------------------------------------|
| `azureClientId` | N | Client ID (application ID) | `"c7dd251f-811f-4ba2-a905-acd4d3f8f08b"` |
Using MSI, you're not required to specify any value, although you may pass `azureClientId` if needed.
Using Managed Identities, the `azureClientId` field is generally recommended. The field is optional when using a system-assigned identity, but may be required when using user-assigned identities.
### Aliases
#### Authenticating with Workload Identity on AKS
For backwards-compatibility reasons, the following values in the metadata are supported as aliases. Their use is discouraged.
When running on Azure Kubernetes Service (AKS), you can authenticate components using Workload Identity. Refer to the Azure AKS documentation on [enabling Workload Identity](https://learn.microsoft.com/azure/aks/workload-identity-overview) for your Kubernetes resources.
| Metadata key | Aliases (supported but deprecated) |
|----------------------------|------------------------------------|
| `azureTenantId` | `spnTenantId`, `tenantId` |
| `azureClientId` | `spnClientId`, `clientId` |
| `azureClientSecret` | `spnClientSecret`, `clientSecret` |
| `azureCertificate` | `spnCertificate` |
| `azureCertificateFile` | `spnCertificateFile` |
| `azureCertificatePassword` | `spnCertificatePassword` |
#### Authenticating using Azure CLI credentials (development-only)
> **Important:** This authentication method is recommended for **development only**.
This authentication method can be useful while developing on a local machine. You will need:
- The [Azure CLI installed](https://learn.microsoft.com/cli/azure/install-azure-cli)
- To have successfully authenticated using the `az login` command
When Dapr is running on a host where credentials are available for the Azure CLI, components can use those credentials to authenticate automatically if no other authentication method is configured.
Using this authentication method does not require setting any metadata option.
### Example usage in a Dapr component

View File

@ -62,6 +62,7 @@ Save the output values returned; you'll need them for Dapr to authenticate with
```
When adding the returned values to your Dapr component's metadata:
- `appId` is the value for `azureClientId`
- `password` is the value for `azureClientSecret` (this was randomly-generated)
- `tenant` is the value for `azureTenantId`
@ -93,11 +94,12 @@ Save the output values returned; you'll need them for Dapr to authenticate with
```
When adding the returned values to your Dapr component's metadata:
- `appId` is the value for `azureClientId`
- `tenant` is the value for `azureTenantId`
- `fileWithCertAndPrivateKey` indicates the location of the self-signed PFX certificate and private key. Use the contents of that file as `azureCertificate` (or write it to a file on the server and use `azureCertificateFile`)
> **Note:** While the generated file has the `.pem` extension, it contains a certificate and private key encoded as _PFX (PKCS#12)_.
> **Note:** While the generated file has the `.pem` extension, it contains a certificate and private key encoded as PFX (PKCS#12).
{{% /codetab %}}
@ -122,26 +124,13 @@ Expected output:
Service Principal ID: 1d0ccf05-5427-4b5e-8eb4-005ac5f9f163
```
The returned value above is the **Service Principal ID**, which is different from the Azure AD application ID (client ID).
**The Service Principal ID** is:
- Defined within an Azure tenant
- Used to grant access to Azure resources to an application
The returned value above is the **Service Principal ID**, which is different from the Azure AD application ID (client ID). The Service Principal ID is defined within an Azure tenant and is used to grant access to Azure resources to an application.
You'll use the Service Principal ID to grant permissions to an application to access Azure resources.
Meanwhile, **the client ID** is used by your application to authenticate. You'll use the client ID in Dapr manifests to configure authentication with Azure services.
Keep in mind that the Service Principal that was just created does not have access to any Azure resource by default. Access will need to be granted to each resource as needed, as documented in the docs for the components.
{{% alert title="Note" color="primary" %}}
This step is different from the [official Azure documentation](https://docs.microsoft.com/cli/azure/create-an-azure-service-principal-azure-cli). The short-hand commands included in the official documentation create a Service Principal that has broad `read-write` access to all Azure resources in your subscription, which:
- Grants your Service Principal more access than you likely desire.
- Applies _only_ to the Azure management plane (Azure Resource Manager, or ARM), which is irrelevant for Dapr components, as they are designed to interact with the data plane of various services.
{{% /alert %}}
## Next steps
{{< button text="Use MSI >>" page="howto-msi.md" >}}
{{< button text="Use Managed Identities >>" page="howto-mi.md" >}}

View File

@ -1,14 +1,16 @@
---
type: docs
title: "How to: Use Managed Service Identities"
linkTitle: "How to: Use MSI"
title: "How to: Use Managed Identities"
linkTitle: "How to: Use MI"
weight: 40000
description: "Learn how to use Managed Service Identities"
aliases:
- "/developing-applications/integrations/azure/azure-authentication/howto-msi/"
description: "Learn how to use Managed Identities"
---
Using MSI, authentication happens automatically by virtue of your application running on top of an Azure service that has an assigned identity.
Using Managed Identities (MI), authentication happens automatically by virtue of your application running on top of an Azure service that has an assigned identity.
For example, let's say you enable a managed service identity for an Azure VM, Azure Container App, or an Azure Kubernetes Service cluster. When you do, an Azure AD application is created for you and automatically assigned to the service. Your Dapr services can then leverage that identity to authenticate with Azure AD, transparently and without you having to specify any credential.
For example, let's say you enable a managed service identity for an Azure VM, Azure Container App, or an Azure Kubernetes Service cluster. When you do, an Azure AD application is created for you and automatically assigned to the service. Your Dapr services can then leverage that identity to authenticate with Azure AD, transparently and without you having to specify any credentials.
To get started with managed identities, you need to assign an identity to a new or existing Azure resource. The instructions depend on the service in use. Check the following official documentation for the most appropriate instructions:
@ -19,8 +21,9 @@ To get started with managed identities, you need to assign an identity to a new
- [Azure Virtual Machines Scale Sets (VMSS)](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-cli-windows-vmss)
- [Azure Container Instance (ACI)](https://docs.microsoft.com/azure/container-instances/container-instances-managed-identity)
Dapr supports both system-assigned and user-assigned identities.
After assigning a managed identity to your Azure resource, you will have credentials such as:
After assigning an identity to your Azure resource, you will have credentials such as:
```json
{
@ -31,7 +34,7 @@ After assigning a managed identity to your Azure resource, you will have credent
}
```
From the returned values, take note of **`principalId`**, which is the Service Principal ID that was created. You'll use that to grant access to Azure resources to your Service Principal.
From the returned values, take note of **`principalId`**, which is the Service Principal ID that was created. You'll use that to grant access to Azure resources to your identity.
## Next steps

@ -14,4 +14,10 @@ The recommended approach for installing Dapr on AKS is to use the AKS Dapr exten
If you install Dapr through the AKS extension, best practice is to continue using the extension for future management of Dapr _instead of the Dapr CLI_. Combining the two tools can cause conflicts and result in undesired behavior.
{{% /alert %}}
Prerequisites for using the Dapr extension for AKS:
- [An Azure subscription](https://azure.microsoft.com/free/?WT.mc_id=A261C142F)
- [The latest version of the Azure CLI](https://learn.microsoft.com/cli/azure/install-azure-cli)
- [An existing AKS cluster](https://learn.microsoft.com/azure/aks/tutorial-kubernetes-deploy-cluster)
- [The Azure Kubernetes Service RBAC Admin role](https://learn.microsoft.com/azure/role-based-access-control/built-in-roles#azure-kubernetes-service-rbac-admin)
{{< button text="Learn more about the Dapr extension for AKS" link="https://learn.microsoft.com/azure/aks/dapr" >}}

@ -132,7 +132,7 @@ The following steps will show how to create an app that exposes a server for wit
"github.com/golang/protobuf/ptypes/empty"
commonv1pb "github.com/dapr/dapr/pkg/proto/common/v1"
pb "github.com/dapr/go-sdk/dapr/proto/runtime/v1"
pb "github.com/dapr/dapr/pkg/proto/runtime/v1"
"google.golang.org/grpc"
)
```

@ -83,7 +83,7 @@ apps:
appProtocol: http
appPort: 8080
appHealthCheckPath: "/healthz"
command: ["python3" "app.py"]
command: ["python3", "app.py"]
appLogDestination: file # (optional), can be file, console or fileAndConsole. default is fileAndConsole.
daprdLogDestination: file # (optional), can be file, console or fileAndConsole. default is file.
- appID: backend # optional

@ -11,34 +11,25 @@ The Dapr SDKs are the easiest way for you to get Dapr into your application. Cho
## SDK packages
- **Client SDK**: The Dapr client allows you to invoke Dapr building block APIs and perform actions such as:
- [Invoke]({{< ref service-invocation >}}) methods on other services
- Store and get [state]({{< ref state-management >}})
- [Publish and subscribe]({{< ref pubsub >}}) to message topics
- Interact with external resources through input and output [bindings]({{< ref bindings >}})
- Get [secrets]({{< ref secrets >}}) from secret stores
- Interact with [virtual actors]({{< ref actors >}})
- **Server extensions**: The Dapr service extensions allow you to create services that can:
- Be [invoked]({{< ref service-invocation >}}) by other services
- [Subscribe]({{< ref pubsub >}}) to topics
- **Actor SDK**: The Dapr Actor SDK allows you to build virtual actors with:
- Methods that can be [invoked]({{< ref "howto-actors.md#actor-method-invocation" >}}) by other services
- [State]({{< ref "howto-actors.md#actor-state-management" >}}) that can be stored and retrieved
- [Timers]({{< ref "howto-actors.md#actor-timers" >}}) with callbacks
- Persistent [reminders]({{< ref "howto-actors.md#actor-reminders" >}})
Select your [preferred language below]({{< ref "#sdk-languages" >}}) to learn more about client, server, actor, and workflow packages.
- **Client**: The Dapr client allows you to invoke Dapr building block APIs and perform each building block's actions
- **Server extensions**: The Dapr service extensions allow you to create services that can be invoked by other services and subscribe to topics
- **Actor**: The Dapr Actor SDK allows you to build virtual actors with methods, state, timers, and persistent reminders
- **Workflow**: Dapr Workflow makes it easy for you to write long running business logic and integrations in a reliable way
## SDK languages
| Language | Status | Client SDK | Server extensions | Actor SDK |
|----------|:------|:----------:|:-----------:|:---------:|
| [.NET]({{< ref dotnet >}}) | Stable | ✔ | [ASP.NET Core](https://github.com/dapr/dotnet-sdk/tree/master/examples/AspNetCore) | ✔ |
| [Python]({{< ref python >}}) | Stable | ✔ | [gRPC]({{< ref python-grpc.md >}}) <br />[FastAPI]({{< ref python-fastapi.md >}})<br />[Flask]({{< ref python-flask.md >}})| ✔ |
| [Java]({{< ref java >}}) | Stable | ✔ | Spring Boot | ✔ |
| [Go]({{< ref go >}}) | Stable | ✔ | ✔ | ✔ |
| [PHP]({{< ref php >}}) | Stable | ✔ | ✔ | ✔ |
| [Javascript]({{< ref js >}}) | Stable| ✔ | | ✔ |
| [C++](https://github.com/dapr/cpp-sdk) | In development | ✔ | |
| [Rust](https://github.com/dapr/rust-sdk) | In development | ✔ | | |
| Language | Status | Client | Server extensions | Actor | Workflow |
|----------|:------|:----------:|:-----------:|:---------:|:---------:|
| [.NET]({{< ref dotnet >}}) | Stable | ✔ | [ASP.NET Core](https://github.com/dapr/dotnet-sdk/tree/master/examples/AspNetCore) | ✔ | ✔ |
| [Python]({{< ref python >}}) | Stable | ✔ | [gRPC]({{< ref python-grpc.md >}}) <br />[FastAPI]({{< ref python-fastapi.md >}})<br />[Flask]({{< ref python-flask.md >}})| ✔ | ✔ |
| [Java]({{< ref java >}}) | Stable | ✔ | Spring Boot | ✔ | |
| [Go]({{< ref go >}}) | Stable | ✔ | ✔ | ✔ | |
| [PHP]({{< ref php >}}) | Stable | ✔ | ✔ | ✔ | |
| [JavaScript]({{< ref js >}}) | Stable| ✔ | | ✔ | |
| [C++](https://github.com/dapr/cpp-sdk) | In development | ✔ | | |
| [Rust](https://github.com/dapr/rust-sdk) | In development | ✔ | | | |
## Further reading

@ -12,12 +12,6 @@ The workflow building block is currently in **alpha**.
Let's take a look at the Dapr [Workflow building block]({{< ref workflow >}}). In this Quickstart, you'll create a simple console application to demonstrate Dapr's workflow programming model and the workflow management APIs.
The `order-processor` console app starts and manages the lifecycle of the `OrderProcessingWorkflow` workflow that stores and retrieves data in a state store. The workflow consists of four workflow activities, or tasks:
- `NotifyActivity`: Utilizes a logger to print out messages throughout the workflow
- `ReserveInventoryActivity`: Checks the state store to ensure that there is enough inventory for the purchase
- `ProcessPaymentActivity`: Processes and authorizes the payment
- `UpdateInventoryActivity`: Removes the requested items from the state store and updates the store with the new remaining inventory value
In this guide, you'll:
- Run the `order-processor` application.
@ -26,13 +20,19 @@ In this guide, you'll:
<img src="/images/workflow-quickstart-overview.png" width=800 style="padding-bottom:15px;">
Currently, you can experience the Dapr Workflow using the .NET SDK.
{{< tabs ".NET" "Python" >}}
<!-- .NET -->
{{% codetab %}}
The `order-processor` console app starts and manages the lifecycle of an order processing workflow that stores and retrieves data in a state store. The workflow consists of four workflow activities, or tasks:
- `NotifyActivity`: Utilizes a logger to print out messages throughout the workflow
- `ReserveInventoryActivity`: Checks the state store to ensure that there is enough inventory for the purchase
- `ProcessPaymentActivity`: Processes and authorizes the payment
- `UpdateInventoryActivity`: Removes the requested items from the state store and updates the store with the new remaining inventory value
### Step 1: Pre-requisites
For this example, you will need:
@ -259,6 +259,16 @@ The `Activities` directory holds the four workflow activities used by the workfl
<!-- Python -->
{{% codetab %}}
The `order-processor` console app starts and manages the `order_processing_workflow`, which simulates purchasing items from a store. The workflow consists of five unique workflow activities, or tasks:
- `notify_activity`: Utilizes a logger to print out messages throughout the workflow. These messages notify you when:
- You have insufficient inventory
- Your payment couldn't be processed, etc.
- `process_payment_activity`: Processes and authorizes the payment.
- `verify_inventory_activity`: Checks the state store to ensure there is enough inventory present for purchase.
- `update_inventory_activity`: Removes the requested items from the state store and updates the store with the new remaining inventory value.
- `request_approval_activity`: Seeks approval from the manager if payment is greater than 50,000 USD.
### Step 1: Pre-requisites
For this example, you will need:

@ -42,6 +42,9 @@ Even though metadata values can contain secrets in plain text, it is recommended
Depending on the pub/sub message bus you are using and how it is configured, topics may be created automatically. Even if the message bus supports automatic topic creation, it is a common governance practice to disable it in production environments. You may still need to use a CLI, admin console, or request form to manually create the topics required by your application.
{{% /alert %}}
While all pub/sub components support `consumerID` metadata, the runtime creates a consumer ID if you do not supply one. All component metadata field values can carry [templated metadata values]({{< ref "component-schema.md#templated-metadata-values" >}}), which are resolved on Dapr sidecar startup.
For example, you can choose to use `{namespace}` as the `consumerGroup` to enable using the same `appId` in different namespaces using the same topics as described in [this article]({{< ref "howto-namespace.md#with-namespace-consumer-groups">}}).
Visit [this guide]({{< ref "howto-publish-subscribe.md#step-3-publish-a-topic" >}}) for instructions on configuring and using pub/sub components.
## Related links

@ -66,6 +66,7 @@ From version 1.0.0 onwards, upgrading Dapr using Helm is no longer a disruptive
kubectl replace -f https://raw.githubusercontent.com/dapr/dapr/v{{% dapr-latest-version long="true" %}}/charts/dapr/crds/configuration.yaml
kubectl replace -f https://raw.githubusercontent.com/dapr/dapr/v{{% dapr-latest-version long="true" %}}/charts/dapr/crds/subscription.yaml
kubectl apply -f https://raw.githubusercontent.com/dapr/dapr/v{{% dapr-latest-version long="true" %}}/charts/dapr/crds/resiliency.yaml
kubectl apply -f https://raw.githubusercontent.com/dapr/dapr/v{{% dapr-latest-version long="true" %}}/charts/dapr/crds/httpendpoints.yaml
```
```bash

@ -15,118 +15,120 @@ description: "Enable Dapr metrics and logs with Azure Monitor for Azure Kubernet
## Enable Prometheus metric scrape using config map
1. Make sure that omsagents are running
1. Make sure that Azure Monitor Agents (AMA) are running.
```bash
$ kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
...
omsagent-75qjs 1/1 Running 1 44h
omsagent-c7c4t 1/1 Running 0 44h
omsagent-rs-74f488997c-dshpx 1/1 Running 1 44h
omsagent-smtk7 1/1 Running 1 44h
...
```
```bash
$ kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
...
ama-logs-48kpv 2/2 Running 0 2d13h
ama-logs-mx24c 2/2 Running 0 2d13h
ama-logs-rs-f9bbb9898-vbt6k 1/1 Running 0 30h
ama-logs-sm2mz 2/2 Running 0 2d13h
ama-logs-z7p4c 2/2 Running 0 2d13h
...
```
2. Apply config map to enable Prometheus metrics endpoint scrape.
1. Apply config map to enable Prometheus metrics endpoint scrape.
You can use [azm-config-map.yaml](/docs/azm-config-map.yaml) to enable prometheus metrics endpoint scrape.
You can use [azm-config-map.yaml](/docs/azm-config-map.yaml) to enable Prometheus metrics endpoint scrape.
If you installed Dapr to the different namespace, you need to change the `monitor_kubernetes_pod_namespaces` array values. For example:
If you installed Dapr to a different namespace, you need to change the `monitor_kubernetes_pod_namespaces` array values. For example:
```yaml
...
prometheus-data-collection-settings: |-
  [prometheus_data_collection_settings.cluster]
    interval = "1m"
    monitor_kubernetes_pods = true
    monitor_kubernetes_pods_namespaces = ["dapr-system", "default"]
  [prometheus_data_collection_settings.node]
    interval = "1m"
...
```
```yaml
...
prometheus-data-collection-settings: |-
  [prometheus_data_collection_settings.cluster]
    interval = "1m"
    monitor_kubernetes_pods = true
    monitor_kubernetes_pods_namespaces = ["dapr-system", "default"]
  [prometheus_data_collection_settings.node]
    interval = "1m"
...
```
Apply config map:
Apply config map:
```bash
kubectl apply -f ./azm-config.map.yaml
```
```bash
kubectl apply -f ./azm-config.map.yaml
```
## Install Dapr with JSON formatted logs
1. Install Dapr with JSON-formatted logs enabled
1. Install Dapr with JSON-formatted logs enabled.
```bash
helm install dapr dapr/dapr --namespace dapr-system --set global.logAsJson=true
```
```bash
helm install dapr dapr/dapr --namespace dapr-system --set global.logAsJson=true
```
2. Enable JSON formatted log in Dapr sidecar and add Prometheus annotations.
1. Enable JSON formatted log in Dapr sidecar and add Prometheus annotations.
> Note: The OMS Agent scrapes the metrics only if the replicaset has Prometheus annotations.
> Note: The Azure Monitor Agents (AMA) only send the metrics if the Prometheus annotations are set.
Add `dapr.io/log-as-json: "true"` annotation to your deployment yaml.
Add `dapr.io/log-as-json: "true"` annotation to your deployment yaml.
Example:
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: pythonapp
  namespace: default
  labels:
    app: python
spec:
  replicas: 1
  selector:
    matchLabels:
      app: python
  template:
    metadata:
      labels:
        app: python
      annotations:
        dapr.io/enabled: "true"
        dapr.io/app-id: "pythonapp"
        dapr.io/log-as-json: "true"
        prometheus.io/scrape: "true"
        prometheus.io/port: "9090"
        prometheus.io/path: "/"
...
```
Example:
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: pythonapp
  namespace: default
  labels:
    app: python
spec:
  replicas: 1
  selector:
    matchLabels:
      app: python
  template:
    metadata:
      labels:
        app: python
      annotations:
        dapr.io/enabled: "true"
        dapr.io/app-id: "pythonapp"
        dapr.io/log-as-json: "true"
        prometheus.io/scrape: "true"
        prometheus.io/port: "9090"
        prometheus.io/path: "/"
...
```
## Search metrics and logs with Azure Monitor
1. Go to Azure Monitor
1. Go to Azure Monitor in the Azure portal.
2. Search Dapr logs
1. Search Dapr **Logs**.
Here is an example query that parses JSON-formatted logs and queries logs from dapr system processes.
Here is an example query that parses JSON-formatted logs and queries logs from Dapr system processes.
```
ContainerLog
| extend parsed=parse_json(LogEntry)
| project Time=todatetime(parsed['time']), app_id=parsed['app_id'], scope=parsed['scope'],level=parsed['level'], msg=parsed['msg'], type=parsed['type'], ver=parsed['ver'], instance=parsed['instance']
| where level != ""
| sort by Time
```
```
ContainerLog
| extend parsed=parse_json(LogEntry)
| project Time=todatetime(parsed['time']), app_id=parsed['app_id'], scope=parsed['scope'],level=parsed['level'], msg=parsed['msg'], type=parsed['type'], ver=parsed['ver'], instance=parsed['instance']
| where level != ""
| sort by Time
```
3. Search metrics
1. Search **Metrics**.
This query retrieves the process_resident_memory_bytes Prometheus metrics for Dapr system processes and renders timecharts
This query retrieves the `process_resident_memory_bytes` Prometheus metrics for Dapr system processes and renders timecharts.
```
InsightsMetrics
| where Namespace == "prometheus" and Name == "process_resident_memory_bytes"
| extend tags=parse_json(Tags)
| project TimeGenerated, Name, Val, app=tostring(tags['app'])
| summarize memInBytes=percentile(Val, 99) by bin(TimeGenerated, 1m), app
| where app startswith "dapr-"
| render timechart
```
```
InsightsMetrics
| where Namespace == "prometheus" and Name == "process_resident_memory_bytes"
| extend tags=parse_json(Tags)
| project TimeGenerated, Name, Val, app=tostring(tags['app'])
| summarize memInBytes=percentile(Val, 99) by bin(TimeGenerated, 1m), app
| where app startswith "dapr-"
| render timechart
```
# References
## References
* [Configure scraping of Prometheus metrics with Azure Monitor for containers](https://docs.microsoft.com/azure/azure-monitor/insights/container-insights-prometheus-integration)
* [Configure agent data collection for Azure Monitor for containers](https://docs.microsoft.com/azure/azure-monitor/insights/container-insights-agent-config)
* [Azure Monitor Query](https://docs.microsoft.com/azure/azure-monitor/log-query/query-language)
- [Configure scraping of Prometheus metrics with Azure Monitor for containers](https://docs.microsoft.com/azure/azure-monitor/insights/container-insights-prometheus-integration)
- [Configure agent data collection for Azure Monitor for containers](https://docs.microsoft.com/azure/azure-monitor/insights/container-insights-agent-config)
- [Azure Monitor Query](https://docs.microsoft.com/azure/azure-monitor/log-query/query-language)

View File

@ -32,8 +32,8 @@ Patch support is for supported versions (current and previous).
Dapr's sidecar image is published to both [GitHub Container Registry](https://github.com/dapr/dapr/pkgs/container/daprd) and [Docker Registry](https://hub.docker.com/r/daprio/daprd/tags). The default image contains all components. From version 1.11, Dapr also offers a variation of the sidecar image, containing only stable components.
* Default sidecar images: `daprio/daprd:<version>` or `ghcr.io/dapr/daprd:<version>` (for example `ghcr.io/dapr/daprd:1.11.0`)
* Sidecar images for stable components: `daprio/daprd:<version>-stablecomponents` or `ghcr.io/dapr/daprd:<version>-stablecomponents` (for example `ghcr.io/dapr/daprd:1.11.0-stablecomponents`)
* Default sidecar images: `daprio/daprd:<version>` or `ghcr.io/dapr/daprd:<version>` (for example `ghcr.io/dapr/daprd:1.11.1`)
* Sidecar images for stable components: `daprio/daprd:<version>-stablecomponents` or `ghcr.io/dapr/daprd:<version>-stablecomponents` (for example `ghcr.io/dapr/daprd:1.11.1-stablecomponents`)
On Kubernetes, the sidecar image can be overwritten for the application Deployment resource with the `dapr.io/sidecar-image` annotation. See more about [Dapr's arguments and annotations]({{<ref "arguments-annotations-overview.md" >}}). The default 'daprio/daprd:latest' image is used if not specified.
@ -45,6 +45,7 @@ The table below shows the versions of Dapr releases that have been tested togeth
| Release date | Runtime | CLI | SDKs | Dashboard | Status |
|--------------------|:--------:|:--------|---------|---------|---------|
| June 22nd 2023 | 1.11.1</br> | 1.11.0 | Java 1.9.0 </br>Go 1.8.0 </br>PHP 1.1.0 </br>Python 1.10.0 </br>.NET 1.11.0 </br>JS 3.1.0 | 0.13.0 | Supported (current) |
| June 12th 2023 | 1.11.0</br> | 1.11.0 | Java 1.9.0 </br>Go 1.8.0 </br>PHP 1.1.0 </br>Python 1.10.0 </br>.NET 1.11.0 </br>JS 3.1.0 | 0.13.0 | Supported (current) |
| May 15th 2023 | 1.10.7</br> | 1.10.0 | Java 1.8.0 </br>Go 1.7.0 </br>PHP 1.1.0 </br>Python 1.9.0 </br>.NET 1.10.0 </br>JS 3.0.0 | 0.11.0 | Supported |
| May 12th 2023 | 1.10.6</br> | 1.10.0 | Java 1.8.0 </br>Go 1.7.0 </br>PHP 1.1.0 </br>Python 1.9.0 </br>.NET 1.10.0 </br>JS 3.0.0 | 0.11.0 | Supported |
@ -116,8 +117,8 @@ General guidance on upgrading can be found for [self hosted mode]({{< ref self-h
| | 1.9.6 | 1.10.7 |
| 1.8.0 to 1.8.6 | N/A | 1.9.6 |
| 1.9.0 | N/A | 1.9.6 |
| 1.10.0 | N/A | 1.10.7 |
| 1.11.0 | N/A | 1.11.0 |
| 1.10.0 | N/A | 1.10.8 |
| 1.11.0 | N/A | 1.11.1 |
## Upgrade on Hosting platforms

View File

@ -37,6 +37,45 @@ If running on kubernetes apply the component to your cluster.
> **Note:** In production, never place passwords or secrets within Dapr component files. For information on securely storing and retrieving secrets using secret stores, refer to [Setup Secret Store]({{< ref setup-secret-store >}})
### Binding direction (optional)
In some scenarios, it would be useful to provide additional information to Dapr to indicate the direction supported by the binding component.
Providing the supported binding direction helps the Dapr sidecar avoid the `"wait for the app to become ready"` state, where it waits indefinitely for the application to become available.
You can specify the `direction` field as part of the component's metadata. The valid values for this field are:
- `"input"`
- `"output"`
- `"input, output"`
Here are a few scenarios where the `"direction"` metadata field could help:
- When an application (detached from the sidecar) runs as a serverless workload and is scaled to zero, the `"wait for the app to become ready"` check done by the Dapr sidecar becomes pointless.
- If the detached Dapr sidecar is scaled to zero and the application reaches the sidecar (before even starting an HTTP server), the `"wait for the app to become ready"` deadlocks the app and the sidecar into waiting for each other.
### Example
```yaml
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: kafkaevent
spec:
  type: bindings.kafka
  version: v1
  metadata:
  - name: brokers
    value: "http://localhost:5050"
  - name: topics
    value: "someTopic"
  - name: publishTopic
    value: "someTopic2"
  - name: consumerGroup
    value: "group1"
  - name: "direction"
    value: "input, output"
```
## Invoking Service Code Through Input Bindings
A developer who wants to trigger their app using an input binding can listen on a `POST` http endpoint with the route name being the same as `metadata.name`.
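For example, with the `kafkaevent` component above, a minimal Flask sketch of such an endpoint might look like this (the port and logging are illustrative):
```python
from flask import Flask, jsonify, request

app = Flask(__name__)

# The route name matches the binding component's metadata.name ("kafkaevent")
@app.route("/kafkaevent", methods=["POST"])
def kafka_event():
    payload = request.get_json()
    print(f"Received event from input binding: {payload}")
    # A 2xx response acknowledges the event
    return jsonify(success=True)

if __name__ == "__main__":
    app.run(port=6000)
```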

View File

@ -179,7 +179,7 @@ Example:
"topic": "newOrder",
"route": "/orders",
"metadata": {
"rawPayload": "true",
"rawPayload": "true"
}
}
]

View File

@ -28,26 +28,20 @@ name | the name of the secret to get
#### Query Parameters
Some secret stores have **optional** metadata properties. Metadata is populated using query parameters:
Some secret stores support **optional**, per-request metadata properties. Use query parameters to provide those properties. For example:
```
GET http://localhost:<daprPort>/v1.0/secrets/<secret-store-name>/<name>?metadata.version_id=15
```
##### GCP Secret Manager
The following optional meta can be provided to the GCP Secret Manager component
Observe that not all secret stores support the same set of parameters. For example:
- Hashicorp Vault, GCP Secret Manager and AWS Secret Manager support the `version_id` parameter
- Only AWS Secret Manager supports the `version_stage` parameter
- Only Kubernetes Secrets supports the `namespace` parameter
Check each [secret store's documentation]({{< ref supported-secret-stores.md >}}) for the list of supported parameters.
Query Parameter | Description
--------- | -----------
metadata.version_id | version for the given secret key
##### AWS Secret Manager
The following optional meta can be provided to the AWS Secret Manager component
Query Parameter | Description
--------- | -----------
metadata.version_id | version for the given secret key
metadata.version_stage | version stage for the given secret key
### HTTP Response
@ -101,17 +95,11 @@ Code | Description
### Examples
```shell
curl http://localhost:3500/v1.0/secrets/vault/db-secret
curl http://localhost:3500/v1.0/secrets/mySecretStore/db-secret
```
```shell
curl http://localhost:3500/v1.0/secrets/vault/db-secret?metadata.version_id=15&metadata.version_stage=AAA
```
> Note: when deploying into a namespace other than `default`, the above query will also have to include the namespace metadata (e.g. `production` below)
```shell
curl http://localhost:3500/v1.0/secrets/vault/db-secret?metadata.version_id=15&?metadata.namespace=production
curl http://localhost:3500/v1.0/secrets/myAwsSecretStore/db-secret?metadata.version_id=15&metadata.version_stage=production
```
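The same request can be made through the Dapr SDKs. A minimal Python sketch (assuming the `dapr` package; as noted above, `version_id` support varies by store):
```python
from dapr.clients import DaprClient

with DaprClient() as client:
    # Request version 15 of the secret, where the store supports it
    secret = client.get_secret(
        store_name="mySecretStore",
        key="db-secret",
        secret_metadata={"version_id": "15"},
    )
    print(secret.secret)  # dict of secret name/value pairs
```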
## Get Bulk Secret

View File

@ -9,7 +9,7 @@ aliases:
## Component format
To setup Azure Pub/Sub binding create a component of type `bindings.gcp.pubsub`. See [this guide]({{< ref "howto-bindings.md#1-create-a-binding" >}}) on how to create and apply a binding configuration.
To set up a GCP Pub/Sub binding, create a component of type `bindings.gcp.pubsub`. See [this guide]({{< ref "howto-bindings.md#1-create-a-binding" >}}) on how to create and apply a binding configuration.
```yaml

@ -39,6 +39,8 @@ spec:
secretKeyRef:
name: kafka-secrets
key: saslPasswordSecret
- name: saslMechanism
value: "SHA-512"
- name: initialOffset # Optional. Used for input bindings.
value: "newest"
- name: maxMessageBytes # Optional.
@ -61,6 +63,7 @@ spec:
| authType | Y | Input/Output | Configure or disable authentication. Supported values: `none`, `password`, `mtls`, or `oidc` | `"password"`, `"none"` |
| saslUsername | N | Input/Output | The SASL username used for authentication. Only required if `authRequired` is set to `"true"`. | `"adminuser"` |
| saslPassword | N | Input/Output | The SASL password used for authentication. Can be `secretKeyRef` to use a [secret reference]({{< ref component-secrets.md >}}). Only required if `authRequired` is set to `"true"`. | `""`, `"KeFg23!"` |
| saslMechanism | N | Input/Output | The SASL authentication mechanism you'd like to use. Only required if `authType` is set to `"password"`. If not provided, defaults to `PLAINTEXT`, which could cause a break for some services, like Amazon Managed Service for Kafka. | `"SHA-512", "SHA-256", "PLAINTEXT"` |
| initialOffset | N | Input | The initial offset to use if no offset was previously committed. Should be "newest" or "oldest". Defaults to "newest". | `"oldest"` |
| maxMessageBytes | N | Input/Output | The maximum size in bytes allowed for a single Kafka message. Defaults to 1024. | `2048` |
| oidcTokenEndpoint | N | Input/Output | Full URL to an OAuth2 identity provider access token endpoint. Required when `authType` is set to `oidc` | "https://identity.example.com/v1/token" |

View File

@ -57,6 +57,8 @@ The above example uses secrets as plain strings. It is recommended to use a secr
## Spec metadata fields
> When a new RabbitMQ message gets published, all values from the associated metadata are added to the message's header values.
| Field | Required | Binding support | Details | Example |
|--------------------|:--------:|------------|-----|---------|
| queueName | Y | Input/Output | The RabbitMQ queue name | `"myqueue"` |
@ -73,6 +75,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr
| caCert | N | Input/Output | The CA certificate to use for TLS connection. Defaults to `null`. | `"-----BEGIN CERTIFICATE-----\nMI..."` |
| clientCert | N | Input/Output | The client certificate to use for TLS connection. Defaults to `null`. | `"-----BEGIN CERTIFICATE-----\nMI..."` |
| clientKey | N | Input/Output | The client key to use for TLS connection. Defaults to `null`. | `"-----BEGIN PRIVATE KEY-----\nMI..."` |
## Binding support
This component supports both **input and output** binding interfaces.

View File

@ -96,6 +96,8 @@ An HTTP 204 (No Content) and empty body is returned if successful.
You can get a record in Redis using the `get` operation. This gets a key that was previously set.
This takes an optional parameter `delete`, which is `false` by default. When set to `true`, the operation uses Redis's `GETDEL` command: it returns the `value` that was previously set and then deletes it.
#### Request
```json
@ -120,6 +122,20 @@ You can get a record in Redis using the `get` operation. This gets a key that wa
}
```
#### Request with delete flag
```json
{
"operation": "get",
"metadata": {
"key": "key1",
"delete": "true"
},
"data": {
}
}
```
### delete
You can delete a record in Redis using the `delete` operation. Returns success whether the key exists or not.

View File

@ -153,6 +153,36 @@ curl -X POST http://localhost:3500/v1.0/bindings/myServiceBusQueue \
{{< /tabs >}}
## Schedule a message
A message can be scheduled for delayed processing.
To schedule a message, use the `metadata` section in the request body during the binding invocation: the field name is `ScheduledEnqueueTimeUtc`.
The supported timestamp formats are [RFC1123](https://www.rfc-editor.org/rfc/rfc1123) and [RFC3339](https://www.rfc-editor.org/rfc/rfc3339).
{{< tabs "Linux">}}
{{% codetab %}}
```shell
curl -X POST http://localhost:3500/v1.0/bindings/myServiceBusQueue \
-H "Content-Type: application/json" \
-d '{
"data": {
"message": "Hi"
},
"metadata": {
"ScheduledEnqueueTimeUtc": "Tue, 02 Jan 2024 15:04:05 GMT"
},
"operation": "create"
}'
```
{{% /codetab %}}
{{< /tabs >}}
## Related links
- [Basic schema for a Dapr component]({{< ref component-schema >}})

View File

@ -27,6 +27,8 @@ spec:
value: "***********"
- name: queueName
value: "myqueue"
# - name: pollingInterval
# value: "30s"
# - name: ttlInSeconds
# value: "60"
# - name: decodeBase64
@ -50,6 +52,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr
| `accountName` | Y | Input/Output | The name of the Azure Storage account | `"account1"` |
| `accountKey` | Y* | Input/Output | The access key of the Azure Storage account. Only required when not using Azure AD authentication. | `"access-key"` |
| `queueName` | Y | Input/Output | The name of the Azure Storage queue | `"myqueue"` |
| `pollingInterval` | N | Input | Set the interval to poll Azure Storage Queues for new messages, as a Go duration value. Default: `"10s"` | `"30s"` |
| `ttlInSeconds` | N | Output | Parameter to set the default message time to live. If this parameter is omitted, messages will expire after 10 minutes. See [also](#specifying-a-ttl-per-message) | `"60"` |
| `decodeBase64` | N | Output | Configuration to decode base64 file content before saving to Storage Queues. (In case of saving a file with binary content). Defaults to `false` | `true`, `false` |
| `encodeBase64` | N | Output | If enabled base64 encodes the data payload before uploading to Azure storage queues. Default `false`. | `true`, `false` |

View File

@ -38,7 +38,7 @@ The Azure Key Vault cryptography component supports authentication with Azure AD
1. Read the [Authenticating to Azure]({{< ref "authenticating-azure.md" >}}) document.
1. Create an [Azure AD application]({{< ref "howto-aad.md" >}}) (also called a Service Principal).
1. Alternatively, create a [managed identity]({{< ref "howto-msi.md" >}}) for your application platform.
1. Alternatively, create a [managed identity]({{< ref "howto-mi.md" >}}) for your application platform.
## Spec metadata fields
@ -48,5 +48,6 @@ The Azure Key Vault cryptography component supports authentication with Azure AD
| Auth metadata | Y | See [Authenticating to Azure]({{< ref "authenticating-azure.md" >}}) for more information | |
## Related links
- [Cryptography building block]({{< ref cryptography >}})
- [Authenticating to Azure]({{< ref azure-authentication >}})

View File

@ -22,22 +22,30 @@ spec:
version: v1
metadata:
- name: audience
value: "<your token audience; e.g. the application's client ID>"
value: "<your token audience; i.e. the application's client ID>"
- name: issuer
value: "<your token issuer, e.g. 'https://accounts.google.com'>"
# Optional values
- name: jwksURL
value: "https://accounts.google.com/.well-known/openid-configuration"
value: "<JWKS URL, e.g. 'https://accounts.google.com/.well-known/openid-configuration'>"
```
## Spec metadata fields
| Field | Required | Details | Example |
|-------|:--------:|---------|---------|
| `audience` | Y | The audience expected in the tokens. Usually, this corresponds to the client ID of your application that is created as part of a credential hosted by a OpenID Connect platform. |
| `issuer` | Y | The issuer authority, which is the value expected in the issuer claim in the tokens. | `"https://accounts.google.com"`, `"https://login.salesforce.com"`
| `issuer` | Y | The issuer authority, which is the value expected in the issuer claim in the tokens. | `"https://accounts.google.com"`
| `jwksURL` | N | Address of the JWKS (JWK Set containing the public keys for verifying tokens). If empty, will try to fetch the URL set in the OpenID Configuration document `<issuer>/.well-known/openid-configuration`. | `"https://accounts.google.com/.well-known/openid-configuration"`
Common values for `issuer` include:
- Auth0: `https://{domain}`, where `{domain}` is the domain of your Auth0 application
- Azure AD: `https://login.microsoftonline.com/{tenant}/v2.0`, where `{tenant}` should be replaced with the tenant ID of your application, as a UUID
- Google: `https://accounts.google.com`
- Salesforce (Force.com): `https://login.salesforce.com`
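As a concrete sketch, a bearer middleware component configured against Azure AD might look like the following; the `audience` value and the `{tenant}` placeholder are illustrative, not real credentials:
```yaml
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: bearer-token
spec:
  type: middleware.http.bearer
  version: v1
  metadata:
  - name: audience
    # Placeholder client ID of your application's registration
    value: "00000000-0000-0000-0000-000000000000"
  - name: issuer
    # Replace {tenant} with your Azure AD tenant ID
    value: "https://login.microsoftonline.com/{tenant}/v2.0"
```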
## Dapr configuration
To be applied, the middleware must be referenced in [configuration]({{< ref configuration-concept.md >}}). See [middleware pipelines]({{< ref "middleware.md">}}).

View File

@ -9,7 +9,7 @@ aliases:
## Component format
To setup Apache Kafka pubsub create a component of type `pubsub.kafka`. See [this guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pubsub configuration. For details on using `secretKeyRef`, see the guide on [how to reference secrets in components]({{< ref component-secrets.md >}}).
To set up Apache Kafka pub/sub, create a component of type `pubsub.kafka`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration.
All component metadata field values can carry [templated metadata values]({{< ref "component-schema.md#templated-metadata-values" >}}), which are resolved on Dapr sidecar startup.
For example, you can choose to use `{namespace}` as the `consumerGroup` to enable using the same `appId` in different namespaces using the same topics as described in [this article]({{< ref "howto-namespace.md#with-namespace-consumer-groups">}}).
@ -27,6 +27,8 @@ spec:
value: "dapr-kafka.myapp.svc.cluster.local:9092"
- name: consumerGroup # Optional. Used for input bindings.
value: "{namespace}"
- name: consumerID # Optional. If not supplied, runtime will create one.
value: "channel1"
- name: clientID # Optional. Used as client tracing ID by Kafka brokers.
value: "my-dapr-app-id"
- name: authType # Required.
@ -49,12 +51,15 @@ spec:
value: "true"
```
> For details on using `secretKeyRef`, see the guide on [how to reference secrets in components]({{< ref component-secrets.md >}}).
## Spec metadata fields
| Field | Required | Details | Example |
|--------------------|:--------:|---------|---------|
| brokers | Y | A comma-separated list of Kafka brokers. | `"localhost:9092,dapr-kafka.myapp.svc.cluster.local:9093"`
| consumerGroup | N | A Kafka consumer group to listen on. Each record published to a topic is delivered to one consumer within each consumer group subscribed to the topic. | `"group1"`
| consumerID | N | Consumer ID (consumer tag) organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; for example, a message is processed only once by one of the consumers in the group. If the `consumerID` is not provided, the Dapr runtime sets it to the Dapr application ID (`appID`) value. | `"channel1"`
| clientID | N | A user-provided string sent with every request to the Kafka brokers for logging, debugging, and auditing purposes. Defaults to `"sarama"`. | `"my-dapr-app"`
| authRequired | N | *Deprecated* Enable [SASL](https://en.wikipedia.org/wiki/Simple_Authentication_and_Security_Layer) authentication with the Kafka brokers. | `"true"`, `"false"`
| authType | Y | Configure or disable authentication. Supported values: `none`, `password`, `mtls`, or `oidc` | `"password"`, `"none"`

View File

@ -9,7 +9,8 @@ aliases:
## Component format
To setup AWS SNS/SQS for pub/sub, create a component of type `pubsub.snssqs`. [Learn more on how to create and apply a pubsub configuration]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}).
To set up AWS SNS/SQS pub/sub, create a component of type `pubsub.aws.snssqs`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration.
```yaml
apiVersion: dapr.io/v1alpha1
@ -17,7 +18,7 @@ kind: Component
metadata:
name: snssqs-pubsub
spec:
type: pubsub.snssqs
type: pubsub.aws.snssqs
version: v1
metadata:
- name: accessKey
@ -26,9 +27,11 @@ spec:
value: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
- name: region
value: "us-east-1"
# - name: consumerID # Optional. If not supplied, runtime will create one.
# value: "channel1"
# - name: endpoint # Optional.
# value: "http://localhost:4566"
# - name: sessionToken # Optional (mandatory if using AssignedRole, i.e. temporary accessKey and secretKey)
# - name: sessionToken # Optional (mandatory if using AssignedRole; for example, temporary accessKey and secretKey)
# value: "TOKEN"
# - name: messageVisibilityTimeout # Optional
# value: 10
@ -59,7 +62,7 @@ spec:
```
{{% alert title="Warning" color="warning" %}}
The above example uses secrets as plain strings. It is recommended to use [a secret store for the secrets]]({{< ref component-secrets.md >}}).
The above example uses secrets as plain strings. It is recommended to use [a secret store for the secrets]({{< ref component-secrets.md >}}).
{{% /alert %}}
## Spec metadata fields
@ -69,6 +72,7 @@ The above example uses secrets as plain strings. It is recommended to use [a sec
| accessKey | Y | ID of the AWS account/role with appropriate permissions to SNS and SQS (see below) | `"AKIAIOSFODNN7EXAMPLE"`
| secretKey | Y | Secret for the AWS user/role. If using an `AssumeRole` access, you will also need to provide a `sessionToken` |`"wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"`
| region | Y | The AWS region where the SNS/SQS assets are located or be created in. See [this page](https://aws.amazon.com/about-aws/global-infrastructure/regional-product-services/?p=ugi&l=na) for valid regions. Ensure that SNS and SQS are available in that region | `"us-east-1"`
| consumerID | N | Consumer ID (consumer tag) organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; for example, a message is processed only once by one of the consumers in the group. If the `consumerID` is not provided, the Dapr runtime sets it to the Dapr application ID (`appID`) value. | `"channel1"`
| endpoint | N | AWS endpoint for the component to use. Only used for local development, for example with [localstack](https://github.com/localstack/localstack). The `endpoint` is unnecessary when running against production AWS | `"http://localhost:4566"`
| sessionToken | N | AWS session token to use. A session token is only required if you are using temporary security credentials | `"TOKEN"`
| messageReceiveLimit | N | The number of times a message can be received, after processing of that message fails, before it is removed from the queue. If `sqsDeadLettersQueueName` is specified, `messageReceiveLimit` is instead the number of failed receives after which the message is moved to the SQS dead-letters queue. Default: `10` | `10`
@ -143,7 +147,7 @@ kind: Component
metadata:
name: snssqs-pubsub
spec:
type: pubsub.snssqs
type: pubsub.aws.snssqs
version: v1
metadata:
- name: accessKey
@ -242,7 +246,7 @@ In order to run in AWS, create or assign an IAM user with permissions to the SNS
Plug the `AWS account ID` and `AWS account secret` into the `accessKey` and `secretKey` in the component metadata, using Kubernetes secrets and `secretKeyRef`.
Alternatively, let's say you want to provision the SNS and SQS assets using your own tool of choice (e.g. Terraform) while preventing Dapr from doing so dynamically. You need to enable `disableEntityManagement` and assign your Dapr-using application with an IAM Role, with a policy like:
Alternatively, let's say you want to provision the SNS and SQS assets using your own tool of choice (for example, Terraform) while preventing Dapr from doing so dynamically. You need to enable `disableEntityManagement` and assign your Dapr-using application with an IAM Role, with a policy like:
```json
{

View File

@ -9,7 +9,8 @@ aliases:
## Component format
To setup an Azure Event Hubs pub/sub, create a component of type `pubsub.azure.eventhubs`. See [this guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration.
To set up an Azure Event Hubs pub/sub, create a component of type `pubsub.azure.eventhubs`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration.
Apart from the configuration metadata fields shown below, Azure Event Hubs also supports [Azure Authentication]({{< ref "authenticating-azure.md" >}}) mechanisms.
```yaml
@ -28,6 +29,8 @@ spec:
# Use eventHubNamespace when using Azure AD
- name: eventHubNamespace
value: "namespace"
- name: consumerID # Optional. If not supplied, the runtime will create one.
value: "channel1"
- name: enableEntityManagement
value: "false"
# The following four properties are needed only if enableEntityManagement is set to true
@ -61,6 +64,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr
|--------------------|:--------:|---------|---------|
| `connectionString` | Y* | Connection string for the Event Hub or the Event Hub namespace.<br>* Mutually exclusive with `eventHubNamespace` field.<br>* Required when not using [Azure AD Authentication]({{< ref "authenticating-azure.md" >}}) | `"Endpoint=sb://{EventHubNamespace}.servicebus.windows.net/;SharedAccessKeyName={PolicyName};SharedAccessKey={Key};EntityPath={EventHub}"` or `"Endpoint=sb://{EventHubNamespace}.servicebus.windows.net/;SharedAccessKeyName={PolicyName};SharedAccessKey={Key}"`
| `eventHubNamespace` | Y* | The Event Hub Namespace name.<br>* Mutually exclusive with `connectionString` field.<br>* Required when using [Azure AD Authentication]({{< ref "authenticating-azure.md" >}}) | `"namespace"`
| `consumerID` | N | Consumer ID (consumer tag) organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; for example, a message is processed only once by one of the consumers in the group. If the `consumerID` is not provided, the Dapr runtime sets it to the Dapr application ID (`appID`) value. | `"channel1"`
| `storageAccountName` | Y | Storage account name to use for the checkpoint store. |`"myeventhubstorage"`
| `storageAccountKey` | Y* | Storage account key for the checkpoint store account.<br>* When using Azure AD, it's possible to omit this if the service principal has access to the storage account too. | `"112233445566778899"`
| `storageConnectionString` | Y* | Connection string for the checkpoint store, alternative to specifying `storageAccountKey` | `"DefaultEndpointsProtocol=https;AccountName=myeventhubstorage;AccountKey=<account-key>"`

View File

@ -9,7 +9,7 @@ aliases:
## Component format
To setup Azure Service Bus Queues pubsub create a component of type `pubsub.azure.servicebus.queues`. See [this guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pubsub configuration.
To set up Azure Service Bus Queues pub/sub, create a component of type `pubsub.azure.servicebus.queues`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration.
> This component uses queues on Azure Service Bus; see the official documentation for the differences between [topics and queues](https://learn.microsoft.com/azure/service-bus-messaging/service-bus-queues-topics-subscriptions).
> For using topics, see the [Azure Service Bus Topics pubsub component]({{< ref "setup-azure-servicebus-topics" >}}).
@ -28,6 +28,8 @@ spec:
# Required when not using Azure AD Authentication
- name: connectionString
value: "Endpoint=sb://{ServiceBusNamespace}.servicebus.windows.net/;SharedAccessKeyName={PolicyName};SharedAccessKey={Key};EntityPath={ServiceBus}"
# - name: consumerID # Optional
# value: channel1
# - name: timeoutInSec # Optional
# value: 60
# - name: handlerTimeoutInSec # Optional
@ -69,6 +71,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr
| Field | Required | Details | Example |
|--------------------|:--------:|---------|---------|
| `connectionString` | Y | Shared access policy connection string for the Service Bus. Required unless using Azure AD authentication. | See example above
| `consumerID` | N | Consumer ID (consumer tag) organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; for example, a message is processed only once by one of the consumers in the group. If the `consumerID` is not provided, the Dapr runtime sets it to the Dapr application ID (`appID`) value. | `"channel1"`
| `namespaceName`| N | Parameter to set the address of the Service Bus namespace, as a fully-qualified domain name. Required if using Azure AD authentication. | `"namespace.servicebus.windows.net"` |
| `timeoutInSec` | N | Timeout for sending messages and for management operations. Default: `60` |`30`
| `handlerTimeoutInSec`| N | Timeout for invoking the app's handler. Default: `60` | `30`
@ -134,6 +137,8 @@ To set Azure Service Bus metadata when sending a message, set the query paramete
> **Note:** The `metadata.MessageId` property does not set the `id` property of the cloud event returned by Dapr and should be treated in isolation.
> **Note:** The `metadata.ScheduledEnqueueTimeUtc` property supports the [RFC1123](https://www.rfc-editor.org/rfc/rfc1123) and [RFC3339](https://www.rfc-editor.org/rfc/rfc3339) timestamp formats.
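For example, a minimal sketch of scheduling a message when publishing through the Dapr HTTP API; the component name `pubsub` and topic `orders` are assumptions, and the RFC1123 timestamp is URL-encoded because it is passed as a query parameter:
```shell
# Assumed pub/sub component "pubsub" and topic "orders"
curl -X POST \
  'http://localhost:3500/v1.0/publish/pubsub/orders?metadata.ScheduledEnqueueTimeUtc=Tue%2C%2002%20Jan%202024%2015%3A04%3A05%20GMT' \
  -H "Content-Type: application/json" \
  -d '{"message": "Hi"}'
```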
### Receiving a message with metadata
When Dapr calls your application, it will attach Azure Service Bus message metadata to the request using either HTTP headers or gRPC metadata.

View File

@ -10,7 +10,7 @@ aliases:
## Component format
To setup Azure Service Bus Topics pubsub create a component of type `pubsub.azure.servicebus.topics`. See [this guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pubsub configuration.
To set up Azure Service Bus Topics pub/sub, create a component of type `pubsub.azure.servicebus.topics`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration.
> This component uses topics on Azure Service Bus; see the official documentation for the differences between [topics and queues](https://learn.microsoft.com/azure/service-bus-messaging/service-bus-queues-topics-subscriptions).
> For using queues, see the [Azure Service Bus Queues pubsub component]({{< ref "setup-azure-servicebus-queues" >}}).
@ -75,7 +75,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr
|--------------------|:--------:|---------|---------|
| `connectionString` | Y | Shared access policy connection string for the Service Bus. Required unless using Azure AD authentication. | See example above
| `namespaceName`| N | Parameter to set the address of the Service Bus namespace, as a fully-qualified domain name. Required if using Azure AD authentication. | `"namespace.servicebus.windows.net"` |
| `consumerID` | N | Consumer ID (a.k.a consumer tag) organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer, i.e. a message is processed only once by one of the consumers in the group. If the consumer ID is not set, the dapr runtime will set it to the dapr application ID. |
| `consumerID` | N | Consumer ID (consumer tag) organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; for example, a message is processed only once by one of the consumers in the group. If the `consumerID` is not provided, the Dapr runtime sets it to the Dapr application ID (`appID`) value. |
| `timeoutInSec` | N | Timeout for sending messages and for management operations. Default: `60` |`30`
| `handlerTimeoutInSec`| N | Timeout for invoking the app's handler. Default: `60` | `30`
| `lockRenewalInSec` | N | Defines the frequency at which buffered message locks will be renewed. Default: `20`. | `20`
@ -142,6 +142,8 @@ To set Azure Service Bus metadata when sending a message, set the query paramete
> **NOTE:** If the `metadata.SessionId` property is not set but the topic requires sessions then an empty session id will be used.
> **NOTE:** The `metadata.ScheduledEnqueueTimeUtc` property supports the [RFC1123](https://www.rfc-editor.org/rfc/rfc1123) and [RFC3339](https://www.rfc-editor.org/rfc/rfc3339) timestamp formats.
### Receiving a message with metadata
When Dapr calls your application, it will attach Azure Service Bus message metadata to the request using either HTTP headers or gRPC metadata.

View File

@ -1,7 +1,7 @@
---
type: docs
title: "GCP Pub/Sub"
linkTitle: "GCP Pub/Sub"
title: "GCP"
linkTitle: "GCP"
description: "Detailed documentation on the GCP Pub/Sub component"
aliases:
- "/operations/components/setup-pubsub/supported-pubsub/setup-gcp/"
@ -10,7 +10,7 @@ aliases:
## Create a Dapr component
To setup GCP pubsub create a component of type `pubsub.gcp.pubsub`. See [this guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pubsub configuration
To set up GCP pub/sub, create a component of type `pubsub.gcp.pubsub`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration.
```yaml
apiVersion: dapr.io/v1alpha1
@ -72,7 +72,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr
|--------------------|:--------:|---------|---------|
| projectId | Y | GCP project ID | `myproject-123`
| endpoint | N | GCP endpoint for the component to use. Only used for local development, for example with the [GCP Pub/Sub Emulator](https://cloud.google.com/pubsub/docs/emulator). The `endpoint` is unnecessary when running against the GCP production API. | `"http://localhost:8085"`
| `consumerID` | N | The Consumer ID organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; for example, a message is processed only once by one of the consumers in the group. If the consumer ID is not set, the Dapr runtime will set it to the Dapr application ID. The `consumerID`, along with the `topic` provided as part of the request, are used to build the Pub/Sub subscription ID |
| `consumerID` | N | The Consumer ID organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; for example, a message is processed only once by one of the consumers in the group. If the `consumerID` is not provided, the Dapr runtime sets it to the Dapr application ID (`appID`) value. The `consumerID`, along with the `topic` provided as part of the request, are used to build the Pub/Sub subscription ID |
| identityProjectId | N | If the GCP pubsub project is different from the identity project, specify the identity project using this attribute | `"myproject-123"`
| privateKeyId | N | If using explicit credentials, this field should contain the `private_key_id` field from the service account json document | `"my-private-key"`
| privateKey | N | If using explicit credentials, this field should contain the `private_key` field from the service account json | `-----BEGIN PRIVATE KEY-----MIIBVgIBADANBgkqhkiG9w0B`

View File

@ -1,13 +1,13 @@
---
type: docs
title: "In Memory"
linkTitle: "In Memory"
title: "In-memory"
linkTitle: "In-memory"
description: "Detailed documentation on the In Memory pubsub component"
aliases:
- "/operations/components/setup-pubsub/supported-pubsub/setup-inmemory/"
---
The In Memory pub/sub component is useful for development purposes and works inside of a single machine boundary.
The in-memory pub/sub component operates within a single Dapr sidecar. This is primarily meant for development purposes. State is not replicated across multiple sidecars and is lost when the Dapr sidecar is restarted.
## Component format
@ -25,6 +25,7 @@ spec:
> Note: While in-memory does not require any specific metadata for the component to work, `spec.metadata` is a required field.
## Related links
- [Basic schema for a Dapr component]({{< ref component-schema >}})
- Read [this guide]({{< ref "howto-publish-subscribe.md#step-2-publish-a-topic" >}}) for instructions on configuring pub/sub components
- [Pub/Sub building block]({{< ref pubsub >}})

View File

@ -8,10 +8,7 @@ aliases:
---
## Component format
To setup JetStream pubsub create a component of type `pubsub.jetstream`. See
[this guide]({{< ref
"howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to
create and apply a pubsub configuration.
To set up JetStream pub/sub, create a component of type `pubsub.jetstream`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration.
```yaml
apiVersion: dapr.io/v1alpha1
@ -39,9 +36,9 @@ spec:
- name: streamName
value: "my-stream"
- name: durableName
value: "my-durable"
value: "my-durable-subscription"
- name: queueGroupName
value: "my-queue"
value: "my-queue-group"
- name: startSequence
value: 1
- name: startTime # In Unix format
@ -146,6 +143,31 @@ It is essential to create a NATS JetStream for a specific subject. For example,
nats -s localhost:4222 stream add myStream --subjects mySubject
```
## Example: Competing consumers pattern
Let's say you'd like each message to be processed by only one application or pod with the same app-id. Typically, the `consumerID` metadata spec helps you define competing consumers.
Since `consumerID` is not supported in NATS JetStream, you need to specify `durableName` and `queueGroupName` to achieve the competing consumers pattern. For example:
```yml
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: pubsub
spec:
type: pubsub.jetstream
version: v1
metadata:
- name: name
value: "my-conn-name"
- name: streamName
value: "my-stream"
- name: durableName
value: "my-durable-subscription"
- name: queueGroupName
value: "my-queue-group"
```
## Related links
- [Basic schema for a Dapr component]({{< ref component-schema >}})
- Read [this guide]({{< ref "howto-publish-subscribe.md#step-2-publish-a-topic" >}}) for instructions on configuring pub/sub components

View File

@ -9,7 +9,7 @@ aliases:
## Component format
To setup KubeMQ pub/sub, create a component of type `pubsub.kubemq`. See [this guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration.
To set up KubeMQ pub/sub, create a component of type `pubsub.kubemq`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration.
```yaml
apiVersion: dapr.io/v1alpha1
@ -24,6 +24,8 @@ spec:
value: localhost:50000
- name: store
value: false
- name: consumerID
value: channel1
```
## Spec metadata fields
@ -32,6 +34,7 @@ spec:
|-------------------|:--------:|-----------------------------------------------------------------------------------------------------------------------------|----------------------------------------|
| address | Y | Address of the KubeMQ server | `"localhost:50000"` |
| store | N | Type of pub/sub. `true`: pub/sub persisted (EventsStore); `false`: pub/sub in-memory (Events) | `true` or `false` (default is `false`) |
| consumerID | N | Consumer ID (consumer tag) organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; for example, a message is processed only once by one of the consumers in the group. If the `consumerID` is not provided, the Dapr runtime sets it to the Dapr application ID (`appID`) value. | `"channel1"` |
| clientID | N | Name for the client ID connection | `sub-client-12345` |
| authToken | N | Auth JWT token for the connection. Check out [KubeMQ Authentication](https://docs.kubemq.io/learn/access-control/authentication) | `ew...` |
| group | N | Subscriber group for load balancing | `g1` |

View File

@ -9,7 +9,7 @@ aliases:
## Component format
To setup MQTT pubsub create a component of type `pubsub.mqtt`. See [this guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pubsub configuration
To set up MQTT pub/sub, create a component of type `pubsub.mqtt`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration.
```yaml
apiVersion: dapr.io/v1alpha1
@ -28,6 +28,8 @@ spec:
value: "false"
- name: cleanSession
value: "false"
- name: consumerID
value: "channel1"
```
{{% alert title="Warning" color="warning" %}}
@ -62,7 +64,7 @@ There is a crucial difference between the two ways of retries:
### Communication using TLS
To configure communication using TLS, ensure that the MQTT broker (e.g. mosquitto) is configured to support certificates and provide the `caCert`, `clientCert`, `clientKey` metadata in the component configuration. For example:
To configure communication using TLS, ensure that the MQTT broker (for example, mosquitto) is configured to support certificates and provide the `caCert`, `clientCert`, `clientKey` metadata in the component configuration. For example:
```yaml
apiVersion: dapr.io/v1alpha1

View File

@ -10,7 +10,7 @@ aliases:
## Component format
To setup a MQTT3 pubsub create a component of type `pubsub.mqtt3`. See [this guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pubsub configuration
To set up an MQTT3 pub/sub, create a component of type `pubsub.mqtt3`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration.
```yaml
apiVersion: dapr.io/v1alpha1
@ -30,6 +30,8 @@ spec:
value: "false"
- name: qos
value: "1"
- name: consumerID
value: "channel1"
```
{{% alert title="Warning" color="warning" %}}
@ -51,7 +53,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr
### Communication using TLS
To configure communication using TLS, ensure that the MQTT broker (e.g. emqx) is configured to support certificates and provide the `caCert`, `clientCert`, `clientKey` metadata in the component configuration. For example:
To configure communication using TLS, ensure that the MQTT broker (for example, emqx) is configured to support certificates and provide the `caCert`, `clientCert`, `clientKey` metadata in the component configuration. For example:
```yaml
apiVersion: dapr.io/v1alpha1

View File

@ -8,7 +8,7 @@ aliases:
---
## Component format
To set up NATS Streaming pub/sub, create a component of type `pubsub.natsstreaming`. See [the how-to guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pubsub configuration.
To set up NATS Streaming pub/sub, create a component of type `pubsub.natsstreaming`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration.
```yaml
apiVersion: dapr.io/v1alpha1
@ -25,6 +25,8 @@ spec:
value: "clusterId"
- name: concurrencyMode
value: parallel
- name: consumerID # Optional. If not supplied, runtime will create one.
value: "channel1"
# below are subscription configuration.
- name: subscriptionType
value: <REPLACE-WITH-SUBSCRIPTION-TYPE> # Required. Allowed values: topic, queue.
@ -66,6 +68,7 @@ NATS Streaming has been [deprecated](https://github.com/nats-io/nats-streaming-s
| natsURL | Y | NATS server address URL | "`nats://localhost:4222`"|
| natsStreamingClusterID | Y | NATS cluster ID |`"clusterId"`|
| subscriptionType | Y | Subscription type. Allowed values `"topic"`, `"queue"` | `"topic"` |
| consumerID | N | Consumer ID (consumer tag) organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; for example, a message is processed only once by one of the consumers in the group. If the `consumerID` is not provided, the Dapr runtime sets it to the Dapr application ID (`appID`) value. | `"channel1"`
| ackWaitTime | N | See [here](https://docs.nats.io/developing-with-nats-streaming/acks#acknowledgements) | `"300ms"`|
| maxInFlight | N | See [here](https://docs.nats.io/developing-with-nats-streaming/acks#acknowledgements) | `"25"` |
| durableSubscriptionName | N | [Durable subscriptions](https://docs.nats.io/developing-with-nats-streaming/durables) identification name. | `"my-durable"`|

View File

@ -9,7 +9,9 @@ aliases:
## Component format
To setup Apache Pulsar pubsub create a component of type `pubsub.pulsar`. See [this guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pubsub configuration. For more information on Apache Pulsar [read the docs](https://pulsar.apache.org/docs/en/concepts-overview/)
To set up Apache Pulsar pub/sub, create a component of type `pubsub.pulsar`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration.
For more information on Apache Pulsar, [read the official docs](https://pulsar.apache.org/docs/en/concepts-overview/).
```yaml
apiVersion: dapr.io/v1alpha1
@ -29,7 +31,7 @@ spec:
- name: token
value: "eyJrZXlJZCI6InB1bHNhci1wajU0cXd3ZHB6NGIiLCJhbGciOiJIUzI1NiJ9.eyJzd"
- name: consumerID
value: "topic1"
value: "channel1"
- name: namespace
value: "default"
- name: persistent
@ -60,6 +62,11 @@ spec:
}
```
{{% alert title="Warning" color="warning" %}}
The above example uses secrets as plain strings. It is recommended to use a [secret store for the secrets]({{< ref component-secrets.md >}}). This component supports storing the `token` parameter and any other sensitive parameter and data as Kubernetes Secrets.
{{% /alert %}}
## Spec metadata fields
| Field | Required | Details | Example |
@ -68,7 +75,7 @@ spec:
| enableTLS | N | Enable TLS. Default: `"false"` | `"true"`, `"false"` |
| token | N | Enable Authentication. | [How to create pulsar token](https://pulsar.apache.org/docs/en/security-jwt/#generate-tokens)|
| tenant | N | The topic tenant within the instance. Tenants are essential to multi-tenancy in Pulsar, and spread across clusters. Default: `"public"` | `"public"` |
| consumerID | N | Used to set the subscription name or consumer ID. | `"topic1"`
| consumerID | N | Used to set the subscription name or consumer ID. | `"channel1"`
| namespace | N | The administrative unit of the topic, which acts as a grouping mechanism for related topics. Default: `"default"` | `"default"`
| persistent | N | Pulsar supports two kinds of topics: [persistent](https://pulsar.apache.org/docs/en/concepts-architecture-overview#persistent-storage) and [non-persistent](https://pulsar.apache.org/docs/en/concepts-messaging/#non-persistent-topics). With persistent topics, all messages are durably persisted on disks (if the broker is not standalone, messages are durably persisted on multiple disks), whereas data for non-persistent topics is not persisted to storage disks.
| disableBatching | N | Disable batching. When batching is enabled, the default batch delay is 10 ms and the default batch size is 1000 messages. Setting `disableBatching: true` makes the producer send messages individually. Default: `"false"` | `"true"`, `"false"`|
@ -94,8 +101,8 @@ When invoking the Pulsar pub/sub, it's possible to provide an optional delay que
These optional parameter names are `metadata.deliverAt` or `metadata.deliverAfter`:
- `deliverAt`: Delay message to deliver at a specified time (RFC3339 format), e.g. `"2021-09-01T10:00:00Z"`
- `deliverAfter`: Delay message to deliver after a specified amount of time, e.g.`"4h5m3s"`
- `deliverAt`: Delay message to deliver at a specified time (RFC3339 format); for example, `"2021-09-01T10:00:00Z"`
- `deliverAfter`: Delay message to deliver after a specified amount of time; for example, `"4h5m3s"`
Examples:
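As a minimal sketch (the component name `pulsar-pubsub` and topic `myTopic` are assumptions), a delayed publish through the Dapr HTTP API could look like:
```shell
# Delays delivery by 10 seconds via the deliverAfter metadata query parameter
curl -X POST \
  'http://localhost:3500/v1.0/publish/pulsar-pubsub/myTopic?metadata.deliverAfter=10s' \
  -H "Content-Type: application/json" \
  -d '{"status": "completed"}'
```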

View File

@ -9,6 +9,8 @@ aliases:
## Component format
To set up RabbitMQ pub/sub, create a component of type `pubsub.rabbitmq`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration.
```yaml
apiVersion: dapr.io/v1alpha1
kind: Component
@ -73,7 +75,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr
| hostname | N* | The RabbitMQ hostname. *Mutually exclusive with connectionString field | `localhost` |
| username | N* | The RabbitMQ username. *Mutually exclusive with connectionString field | `username` |
| password | N* | The RabbitMQ password. *Mutually exclusive with connectionString field | `password` |
| consumerID | N | Consumer ID a.k.a consumer tag organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer, i.e. a message is processed only once by one of the consumers in the group. If the consumer ID is not set, the dapr runtime will set it to the dapr application ID. |
| consumerID | N | Consumer ID (consumer tag) organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; for example, a message is processed only once by one of the consumers in the group. If the `consumerID` is not provided, the Dapr runtime sets it to the Dapr application ID (`appID`) value. |
| durable | N | Whether or not to use [durable](https://www.rabbitmq.com/queues.html#durability) queues. Defaults to `"false"` | `"true"`, `"false"`
| deletedWhenUnused | N | Whether or not the queue should be configured to [auto-delete](https://www.rabbitmq.com/queues.html) Defaults to `"true"` | `"true"`, `"false"`
| autoAck | N | Whether or not the queue consumer should [auto-ack](https://www.rabbitmq.com/confirms.html) messages. Defaults to `"false"` | `"true"`, `"false"`
@ -87,7 +89,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr
| maxLen | N | The maximum number of messages of a queue and its dead letter queue (if dead letter enabled). If both `maxLen` and `maxLenBytes` are set then both will apply; whichever limit is hit first will be enforced. Defaults to no limit. | `"1000"` |
| maxLenBytes | N | Maximum length in bytes of a queue and its dead letter queue (if dead letter enabled). If both `maxLen` and `maxLenBytes` are set then both will apply; whichever limit is hit first will be enforced. Defaults to no limit. | `"1048576"` |
| exchangeKind | N | Exchange kind of the rabbitmq exchange. Defaults to `"fanout"`. | `"fanout"`,`"topic"` |
| saslExternal | N | With TLS, should the username be taken from an additional field (e.g. CN.) See [RabbitMQ Authentication Mechanisms](https://www.rabbitmq.com/access-control.html#mechanisms). Defaults to `"false"`. | `"true"`, `"false"` |
| saslExternal | N | With TLS, should the username be taken from an additional field (for example, CN). See [RabbitMQ Authentication Mechanisms](https://www.rabbitmq.com/access-control.html#mechanisms). Defaults to `"false"`. | `"true"`, `"false"` |
| caCert | Required for using TLS | Certificate Authority (CA) certificate in PEM format for verifying server TLS certificates. | `"-----BEGIN CERTIFICATE-----\n<base64-encoded DER>\n-----END CERTIFICATE-----"`
| clientCert | Required for using TLS | TLS client certificate in PEM format. Must be used with `clientKey`. | `"-----BEGIN CERTIFICATE-----\n<base64-encoded DER>\n-----END CERTIFICATE-----"`
| clientKey | Required for using TLS | TLS client key in PEM format. Must be used with `clientCert`. Can be `secretKeyRef` to use a secret reference. | `"-----BEGIN RSA PRIVATE KEY-----\n<base64-encoded PKCS8>\n-----END RSA PRIVATE KEY-----"`

View File

@ -9,7 +9,7 @@ aliases:
## Component format
To setup Redis Streams pubsub create a component of type `pubsub.redis`. See [this guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pubsub configuration.
To set up Redis Streams pub/sub, create a component of type `pubsub.redis`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration.
```yaml
apiVersion: dapr.io/v1alpha1

View File

@ -8,7 +8,7 @@ aliases:
---
## Component format
To setup RocketMQ pubsub, create a component of type `pubsub.rocketmq`. See [this guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pubsub configuration.
To set up RocketMQ pub/sub, create a component of type `pubsub.rocketmq`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration.
```yaml
apiVersion: dapr.io/v1alpha1
@ -25,6 +25,8 @@ spec:
value: dapr-rocketmq-test-g-c
- name: producerGroup
value: dapr-rocketmq-test-g-p
- name: consumerID
value: topic
- name: nameSpace
value: dapr-test
- name: nameServer
@ -47,6 +49,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr
| instanceName | N | Instance name | `time.Now().String()` | `dapr-rocketmq-test` |
| consumerGroup | N | Consumer group name. Recommended. If `producerGroup` is `null`, `groupName` is used. | | `dapr-rocketmq-test-g-c` |
| producerGroup (consumerID) | N | Producer group name. Recommended. If `producerGroup` is `null`, `consumerID` is used. If `consumerID` is also `null`, `groupName` is used. | | `dapr-rocketmq-test-g-p` |
| consumerID | N | Consumer ID (consumer tag) organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; for example, a message is processed only once by one of the consumers in the group. If the `consumerID` is not provided, the Dapr runtime sets it to the Dapr application ID (`appID`) value. | | `"channel1"` |
| groupName | N | Consumer/Producer group name. **Deprecated**. | | `dapr-rocketmq-test-g` |
| nameSpace | N | RocketMQ namespace | | `dapr-rocketmq` |
| nameServerDomain | N | RocketMQ name server domain | | `https://my-app.net:8080/nsaddr` |

View File

@ -9,7 +9,7 @@ aliases:
## Component format
To setup Solace-AMQP pub/sub, create a component of type `pubsub.solace.amqp`. See [this guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration.
To set up Solace-AMQP pub/sub, create a component of type `pubsub.solace.amqp`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration.
```yaml
apiVersion: dapr.io/v1alpha1
@ -26,6 +26,8 @@ spec:
value: 'default'
- name: password
value: 'default'
- name: consumerID
value: 'channel1'
```
{{% alert title="Warning" color="warning" %}}
@ -39,6 +41,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr
| url | Y | Address of the AMQP broker. Can be `secretKeyRef` to use a secret reference. <br> Use the **`amqp://`** URI scheme for non-TLS communication. <br> Use the **`amqps://`** URI scheme for TLS communication. | `"amqp://host.domain[:port]"`
| username | Y | The username to connect to the broker. Only required if anonymous is not specified or set to `false`. | `default`
| password | Y | The password to connect to the broker. Only required if anonymous is not specified or set to `false`. | `default`
| consumerID | N | Consumer ID (consumer tag) organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; for example, a message is processed only once by one of the consumers in the group. If the `consumerID` is not provided, the Dapr runtime sets it to the Dapr application ID (`appID`) value. | `"channel1"`
| anonymous | N | To connect to the broker without credential validation. Only works if enabled on the broker. A username and password would not be required if this is set to `true`. | `true`
| caCert | Required for using TLS | Certificate Authority (CA) certificate in PEM format for verifying server TLS certificates. | `"-----BEGIN CERTIFICATE-----\n<base64-encoded DER>\n-----END CERTIFICATE-----"`
| clientCert | Required for using TLS | TLS client certificate in PEM format. Must be used with `clientKey`. | `"-----BEGIN CERTIFICATE-----\n<base64-encoded DER>\n-----END CERTIFICATE-----"`

View File

@ -43,6 +43,15 @@ The above example uses secrets as plain strings. It is recommended to use a loca
| accessKeySecret | Y | The AlibabaCloud Access Key Secret to access this resource | `"accessKeySecret"` |
| securityToken | N | The AlibabaCloud Security Token to use | `"securityToken"` |
## Optional per-request metadata properties
The following [optional query parameters]({{< ref "secrets_api.md#query-parameters" >}}) can be provided when retrieving secrets from this secret store:
Query Parameter | Description
--------- | -----------
`metadata.version_id` | Version for the given secret key
`metadata.path` | (For bulk requests only) The path from the metadata. If not set, defaults to root path (all secrets).
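For instance, a bulk read scoped to a sub-path might look like the following; the store name `alibabacloud-oos` is an assumption:
```shell
# Returns only the secrets under the /myapp path of the store
curl 'http://localhost:3500/v1.0/secrets/alibabacloud-oos/bulk?metadata.path=/myapp'
```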
## Create an AlibabaCloud OOS Parameter Store instance
Set up AlibabaCloud OOS Parameter Store using the AlibabaCloud documentation: https://www.alibabacloud.com/help/en/doc-detail/186828.html.

View File

@ -48,6 +48,15 @@ The above example uses secrets as plain strings. It is recommended to use a loca
When running the Dapr sidecar (daprd) with your application on EKS (AWS Kubernetes), if you're using a node/pod that has already been attached to an IAM policy defining access to AWS resources, you **must not** provide AWS access-key, secret-key, and tokens in the definition of the component spec you're using.
{{% /alert %}}
## Optional per-request metadata properties
The following [optional query parameters]({{< ref "secrets_api#query-parameters" >}}) can be provided when retrieving secrets from this secret store:
Query Parameter | Description
--------- | -----------
`metadata.version_id` | Version for the given secret key.
`metadata.version_stage` | Version stage for the given secret key.
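For instance, pinning a secret read to a specific version and stage might look like this; the store name `aws-secret-store` and the stage label are assumptions:
```shell
# Requests version 1 of "db-secret" at the AWSCURRENT stage
curl 'http://localhost:3500/v1.0/secrets/aws-secret-store/db-secret?metadata.version_id=1&metadata.version_stage=AWSCURRENT'
```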
## Create an AWS Secrets Manager instance
Set up AWS Secrets Manager using the AWS documentation: https://docs.aws.amazon.com/secretsmanager/latest/userguide/tutorials_basic.html.

View File

@ -53,6 +53,15 @@ The Azure Key Vault secret store component supports authentication with Azure AD
Additionally, you must provide the authentication fields as explained in the [Authenticating to Azure]({{< ref authenticating-azure.md >}}) document.
## Optional per-request metadata properties
The following [optional query parameters]({{< ref "secrets_api#query-parameters" >}}) can be provided when retrieving secrets from this secret store:
Query Parameter | Description
--------- | -----------
`metadata.version_id` | Version for the given secret key.
`metadata.maxresults` | (For bulk requests only) Number of secrets to return, after which the request will be truncated.
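For instance, truncating a bulk read might look like this; the store name `azurekeyvault` is an assumption:
```shell
# Returns at most 10 secrets from the bulk endpoint
curl 'http://localhost:3500/v1.0/secrets/azurekeyvault/bulk?metadata.maxresults=10'
```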
## Example
### Prerequisites

View File

@ -61,6 +61,14 @@ The above example uses secrets as plain strings. It is recommended to use a loca
| client_x509_cert_url | N | The certificate URL for the client | `"https://www.googleapis.com/robot/v1/metadata/x509/<project-name>.iam.gserviceaccount.com"`|
| private_key | Y | The private key for authentication | `"privateKey"` |
## Optional per-request metadata properties
The following [optional query parameters]({{< ref "secrets_api#query-parameters" >}}) can be provided to the GCP Secret Manager component:
Query Parameter | Description
--------- | -----------
`metadata.version_id` | Version for the given secret key.
## Setup GCP Secret Manager instance
Set up GCP Secret Manager using the GCP documentation: https://cloud.google.com/secret-manager/docs/quickstart.

View File

@ -66,6 +66,14 @@ The above example uses secrets as plain strings. It is recommended to use a loca
| enginePath | N | The [engine](https://www.vaultproject.io/api-docs/secret/kv/kv-v2) path in vault. Defaults to `"secret"` | `"kv"`, `"any"` |
| vaultValueType | N | Vault value type. `map` means to parse the value into `map[string]string`, `text` means to use the value as a string. `map` sets the `multipleKeyValuesPerSecret` behavior. `text` makes Vault behave as a secret store with name/value semantics. Defaults to `"map"` | `"map"`, `"text"` |
## Optional per-request metadata properties
The following [optional query parameters]({{< ref "secrets_api#query-parameters" >}}) can be provided to Hashicorp Vault secret store component:
Query Parameter | Description
--------- | -----------
`metadata.version_id` | Version for the given secret key.
## Setup Hashicorp Vault instance
{{< tabs "Self-Hosted" "Kubernetes" >}}

View File

@ -40,6 +40,15 @@ The above example uses secrets as plain strings. It is recommended to use a loca
| accessKey | Y | The HuaweiCloud Access Key to access this resource | `"accessKey"` |
| secretAccessKey | Y | The HuaweiCloud Secret Access Key to access this resource | `"secretAccessKey"` |
## Optional per-request metadata properties
The following [optional query parameters]({{< ref "secrets_api#query-parameters" >}}) can be provided when retrieving secrets from this secret store:
Query Parameter | Description
--------- | -----------
`metadata.version_id` | Version for the given secret key.
## Setup HuaweiCloud Cloud Secret Management Service (CSMS) instance
Set up HuaweiCloud Cloud Secret Management Service (CSMS) using the HuaweiCloud documentation: https://support.huaweicloud.com/intl/en-us/usermanual-dew/dew_01_9993.html.

View File

@ -34,6 +34,14 @@ spec:
## Spec metadata fields
For the Kubernetes secret store component, there are no metadata attributes.
## Optional per-request metadata properties
The following [optional query parameters]({{< ref "secrets_api#query-parameters" >}}) can be provided to Kubernetes secret store component:
Query Parameter | Description
--------- | -----------
`metadata.namespace` | The namespace of the secret. If not specified, the namespace of the pod is used.
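For instance, reading a secret from a non-default namespace might look like this; the `production` namespace and the secret name are assumptions, while `kubernetes` is the store name used here:
```shell
# Reads "db-secret" from the "production" namespace
curl 'http://localhost:3500/v1.0/secrets/kubernetes/db-secret?metadata.namespace=production'
```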
## Related links
- [Secrets building block]({{< ref secrets >}})
- [How-To: Retrieve a secret]({{< ref "howto-secrets.md" >}})

View File

@ -1,20 +1,16 @@
---
type: docs
title: "In Memory"
linkTitle: "In Memory"
description: "Detailed documentation on the In Memory state component"
title: "In-memory"
linkTitle: "In-memory"
description: "Detailed documentation on the in-memory state component"
aliases:
- "/operations/components/setup-state-store/supported-state-stores/setup-inmemory/"
---
The In Memory state store component is useful for development purposes and works inside of a single machine boundary.
{{% alert title="Warning" color="warning" %}}
This component **shouldn't be used for production**. It is intended for development only and will never be stable. If you come across a scenario where you want to use it in production, you can submit an issue and discuss it with the community.
{{% /alert %}}
The in-memory state store component maintains state in the Dapr sidecar's memory. This is primarily meant for development purposes. State is not replicated across multiple sidecars and is lost when the Dapr sidecar is restarted.
## Component format
To set up the in-memory state store, create a component of type `state.in-memory`. See [this guide]({{< ref "howto-get-save-state.md#step-1-setup-a-state-store" >}}) on how to create and apply a state store configuration.
```yaml
@ -31,6 +27,7 @@ spec:
> Note: While in-memory does not require any specific metadata for the component to work, `spec.metadata` is a required field.
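For reference, a minimal, complete component definition might look like the following sketch (the name `statestore` is arbitrary); note that `spec.metadata` is present even though it is empty:
```yaml
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: statestore
spec:
  type: state.in-memory
  version: v1
  metadata: []
```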
## Related links
- [Basic schema for a Dapr component]({{< ref component-schema >}})
- Learn [how to create and configure state store components]({{< ref howto-get-save-state.md >}})
- Read more about the [state management building block]({{< ref state-management >}})

View File

@ -1,15 +1,17 @@
---
type: docs
title: "SQL Server"
linkTitle: "SQL Server"
description: Detailed information on the SQL Server state store component
title: "Microsoft SQL Server & Azure SQL"
linkTitle: "Microsoft SQL Server & Azure SQL"
description: Detailed information on the Microsoft SQL Server state store component
aliases:
- "/operations/components/setup-state-store/supported-state-stores/setup-sqlserver/"
---
## Component format
To set up the SQL Server state store, create a component of type `state.sqlserver`. See [this guide]({{< ref "howto-get-save-state.md#step-1-setup-a-state-store" >}}) on how to create and apply a state store configuration.
This state store component can be used with both [Microsoft SQL Server](https://learn.microsoft.com/sql/) and [Azure SQL](https://learn.microsoft.com/azure/azure-sql/).
To set up this state store, create a component of type `state.sqlserver`. See [this guide]({{< ref "howto-get-save-state.md#step-1-setup-a-state-store" >}}) on how to create and apply a state store configuration.
```yaml
@ -21,30 +23,42 @@ spec:
  type: state.sqlserver
  version: v1
  metadata:
  - name: connectionString
    value: <REPLACE-WITH-CONNECTION-STRING> # Required.
  - name: tableName
    value: <REPLACE-WITH-TABLE-NAME> # Optional. Defaults to "state"
  - name: keyType
    value: <REPLACE-WITH-KEY-TYPE> # Optional. Defaults to "string"
  - name: keyLength
    value: <KEY-LENGTH> # Optional. Defaults to 200. Only used with the "string" keyType
  - name: schema
    value: <SCHEMA> # Optional. Defaults to "dbo"
  - name: indexedProperties
    value: <INDEXED-PROPERTIES> # Optional. List of IndexedProperties.
  - name: metadataTableName # Optional. Name of the table where Dapr stores metadata
    value: "dapr_metadata"
  - name: cleanupIntervalInSeconds # Optional. Cleanup interval in seconds, to remove expired rows
    value: 300
  # Authenticate using SQL Server credentials
  - name: connectionString
    value: |
      Server=myServerName\myInstanceName;Database=myDataBase;User Id=myUsername;Password=myPassword;
  # Authenticate with Azure AD (Azure SQL only)
  # "useAzureAD" must be set to "true"
  - name: useAzureAD
    value: true
  # Connection string or URL of the Azure SQL database, optionally containing the database
  - name: connectionString
    value: |
      sqlserver://myServerName.database.windows.net:1433?database=myDataBase
  # Other optional fields (listing default values)
  - name: tableName
    value: "state"
  - name: metadataTableName
    value: "dapr_metadata"
  - name: schema
    value: "dbo"
  - name: keyType
    value: "string"
  - name: keyLength
    value: "200"
  - name: indexedProperties
    value: ""
  - name: cleanupIntervalInSeconds
    value: "3600"
```
{{% alert title="Warning" color="warning" %}}
The above example uses secrets as plain strings. It is recommended to use a secret store for the secrets as described [here]({{< ref component-secrets.md >}}).
{{% /alert %}}
If you wish to use SQL server as an [actor state store]({{< ref "state_api.md#configuring-state-store-for-actors" >}}), append the following to the yaml.
If you wish to use SQL server as an [actor state store]({{< ref "state_api.md#configuring-state-store-for-actors" >}}), append the following to the metadata:
```yaml
- name: actorStateStore
@ -53,24 +67,43 @@ If you wish to use SQL server as an [actor state store]({{< ref "state_api.md#co
## Spec metadata fields
### Authenticate using SQL Server credentials
The following metadata options are **required** to authenticate using SQL Server credentials. This is supported on both SQL Server and Azure SQL.
| Field | Required | Details | Example |
|--------|:--------:|---------|---------|
| `connectionString` | Y | The connection string used to connect.<br>If the connection string contains the database, it must already exist. Otherwise, if the database is omitted, a default database named "Dapr" is created. | `"Server=myServerName\myInstanceName;Database=myDataBase;User Id=myUsername;Password=myPassword;"` |
### Authenticate using Azure AD
Authenticating with Azure AD is supported with Azure SQL only. All authentication methods supported by Dapr can be used, including client credentials ("service principal") and Managed Identity.
| Field | Required | Details | Example |
|--------|:--------:|---------|---------|
| `useAzureAD` | Y | Must be set to `true` to enable the component to retrieve access tokens from Azure AD. | `"true"` |
| `connectionString` | Y | The connection string or URL of the Azure SQL database, **without credentials**.<br>If the connection string contains the database, it must already exist. Otherwise, if the database is omitted, a default database named "Dapr" is created. | `"sqlserver://myServerName.database.windows.net:1433?database=myDataBase"` |
| `azureTenantId` | N | ID of the Azure AD tenant | `"cd4b2887-304c-47e1-b4d5-65447fdd542b"` |
| `azureClientId` | N | Client ID (application ID) | `"c7dd251f-811f-4ba2-a905-acd4d3f8f08b"` |
| `azureClientSecret` | N | Client secret (application password) | `"Ecy3XG7zVZK3/vl/a2NSB+a1zXLa8RnMum/IgD0E"` |
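For service principal ("client credentials") authentication, these fields map onto the component metadata as in the following sketch (all values are placeholders). When using a managed identity, the three `azure*` fields can typically be omitted.
```yaml
spec:
  type: state.sqlserver
  version: v1
  metadata:
  - name: useAzureAD
    value: "true"
  - name: connectionString
    value: "sqlserver://myServerName.database.windows.net:1433?database=myDataBase"
  - name: azureTenantId
    value: "<TENANT-ID>"
  - name: azureClientId
    value: "<CLIENT-ID>"
  - name: azureClientSecret
    value: "<CLIENT-SECRET>"
```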
### Other metadata options
| Field | Required | Details | Example |
|--------------------|:--------:|---------|---------|
| connectionString | Y | The connection string used to connect. If the connection string contains the database it must already exist. If the database is omitted a default database named `"Dapr"` is created. | `"Server=myServerName\myInstanceName;Database=myDataBase;User Id=myUsername;Password=myPassword;"`
| tableName | N | The name of the table to use. Alpha-numeric with underscores. Defaults to `"state"` | `"table_name"`
| keyType | N | The type of key used. Defaults to `"string"` | `"string"`
| keyLength | N | The max length of key. Used along with `"string"` keytype. Defaults to `"200"` | `"200"`
| schema | N | The schema to use. Defaults to `"dbo"` | `"dapr"`,`"dbo"`
| indexedProperties | N | List of IndexedProperties. | `'[{"column": "transactionid", "property": "id", "type": "int"}, {"column": "customerid", "property": "customer", "type": "nvarchar(100)"}]'`
| actorStateStore | N | Indicates that Dapr should configure this component for the actor state store ([more information]({{< ref "state_api.md#configuring-state-store-for-actors" >}})). | `"true"`
| metadataTableName | N | Name of the table Dapr uses to store a few metadata properties. Defaults to `dapr_metadata`. | `"dapr_metadata"`
| cleanupIntervalInSeconds | N | Interval, in seconds, to clean up rows with an expired TTL. Default: `3600` (i.e. 1 hour). Setting this to values <=0 disables the periodic cleanup. | `1800`, `-1`
| `tableName` | N | The name of the table to use. Alpha-numeric with underscores. Defaults to `"state"` | `"table_name"`
| `metadataTableName` | N | Name of the table Dapr uses to store a few metadata properties. Defaults to `dapr_metadata`. | `"dapr_metadata"`
| `keyType` | N | The type of key used. Supported values: `"string"` (default), `"uuid"`, `"integer"`.| `"string"`
| `keyLength` | N | The max length of key. Ignored if "keyType" is not `string`. Defaults to `"200"` | `"200"`
| `schema` | N | The schema to use. Defaults to `"dbo"` | `"dapr"`,`"dbo"`
| `indexedProperties` | N | List of indexed properties, as a string containing a JSON document. | `'[{"column": "transactionid", "property": "id", "type": "int"}, {"column": "customerid", "property": "customer", "type": "nvarchar(100)"}]'`
| `actorStateStore` | N | Indicates that Dapr should configure this component for the actor state store ([more information]({{< ref "state_api.md#configuring-state-store-for-actors" >}})). | `"true"`
| `cleanupIntervalInSeconds` | N | Interval, in seconds, to clean up rows with an expired TTL. Default: `"3600"` (i.e. 1 hour). Setting this to values <=0 disables the periodic cleanup. | `"1800"`, `"-1"`
## Create Azure SQL instance
## Create a Microsoft SQL Server/Azure SQL instance
[Follow the instructions](https://docs.microsoft.com/azure/azure-sql/database/single-database-create-quickstart?view=azuresql&tabs=azure-portal) from the Azure documentation on how to create a SQL database. The database must be created before Dapr consumes it.
**Note: SQL Server state store also supports SQL Server running on VMs and in Docker.**
[Follow the instructions](https://docs.microsoft.com/azure/azure-sql/database/single-database-create-quickstart?view=azuresql&tabs=azure-portal) from the Azure documentation on how to create a SQL database. The database must be created before Dapr consumes it.
In order to set up SQL Server as a state store, you need the following properties:
@ -104,6 +137,7 @@ CREATE CLUSTERED INDEX expiredate_idx ON state(ExpireDate ASC)
```
## Related links
- [Basic schema for a Dapr component]({{< ref component-schema >}})
- Read [this guide]({{< ref "howto-get-save-state.md#step-2-save-and-retrieve-a-single-state" >}}) for instructions on configuring state store components
- [State management building block]({{< ref state-management >}})

View File

@ -12,7 +12,7 @@ The following table lists the environment variables used by the Dapr runtime, CL
| -------------------- | ---------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| APP_ID | Your application | The id for your application, used for service discovery |
| APP_PORT | Dapr sidecar | The port your application is listening on |
| APP_API_TOKEN | Your application | The token used by the application to authenticate requests from Dapr API. Read [authenticate requests from Dapr using token authentication]({{< ref app-api-token >}}) for more information. |
| DAPR_HTTP_PORT | Your application | The HTTP port that the Dapr sidecar is listening on. Your application should use this variable to connect to Dapr sidecar instead of hardcoding the port value. Set by the Dapr CLI run command for self-hosted or injected by the `dapr-sidecar-injector` into all the containers in the pod. |
| DAPR_GRPC_PORT | Your application | The gRPC port that the Dapr sidecar is listening on. Your application should use this variable to connect to Dapr sidecar instead of hardcoding the port value. Set by the Dapr CLI run command for self-hosted or injected by the `dapr-sidecar-injector` into all the containers in the pod. |
| DAPR_API_TOKEN | Dapr sidecar | The token used for Dapr API authentication for requests from the application. [Enable API token authentication in Dapr]({{< ref api-token >}}). |
@ -24,4 +24,6 @@ The following table lists the environment variables used by the Dapr runtime, CL
| DAPR_HELM_REPO_PASSWORD | A password for a private Helm chart | The password required to access the private Dapr helm chart. If it can be accessed publicly, this env variable does not need to be set |
| OTEL_EXPORTER_OTLP_ENDPOINT | OpenTelemetry Tracing | Sets the Open Telemetry (OTEL) server address, turns on tracing. (Example: `http://localhost:4318`) |
| OTEL_EXPORTER_OTLP_INSECURE | OpenTelemetry Tracing | Sets the connection to the endpoint as unencrypted. (`true`, `false`) |
| OTEL_EXPORTER_OTLP_PROTOCOL | OpenTelemetry Tracing | The OTLP transport protocol to use. (`grpc`, `http/protobuf`, `http/json`) |
| DAPR_COMPONENTS_SOCKETS_FOLDER | Dapr runtime and the .NET, Go, and Java pluggable component SDKs | The location or path where Dapr looks for Pluggable Components Unix Domain Socket files. If unset, this location defaults to `/tmp/dapr-components-sockets` |
| DAPR_COMPONENTS_SOCKETS_EXTENSION | .NET and Java pluggable component SDKs | A per-SDK configuration that indicates the default file extension applied to socket files created by the SDKs. Not a Dapr-enforced behavior. |
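As an illustration, an application can combine these variables when calling the sidecar. The following is a sketch using the state API (the store name `statestore` and the key `mykey` are hypothetical):
```bash
# Read a state key through the sidecar, using DAPR_HTTP_PORT to find it and
# passing the Dapr API token (if API token authentication is enabled)
curl "http://localhost:${DAPR_HTTP_PORT}/v1.0/state/statestore/mykey" \
  -H "dapr-api-token: ${DAPR_API_TOKEN}"
```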

View File

@ -1,9 +1,9 @@
---
type: docs
title: "Component schema"
linkTitle: "Component schema"
weight: 100
description: "The basic schema for a Dapr component"
title: "Component spec"
linkTitle: "Component"
weight: 1000
description: "The basic spec for a Dapr component"
---
Dapr defines and registers components using a [CustomResourceDefinition](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/). All components are defined as a CRD and can be applied to any hosting environment where Dapr is running, not just Kubernetes.
@ -26,7 +26,7 @@ spec:
value: [METADATA-VALUE]
```
## Fields
## Spec fields
| Field | Required | Details | Example |
|--------------------|:--------:|---------|---------|

View File

@ -0,0 +1,105 @@
---
type: docs
title: "Configuration spec"
linkTitle: "Configuration"
description: "The basic spec for a Dapr Configuration resource"
weight: 5000
---
The `Configuration` is a Dapr resource used to configure the Dapr sidecar, the control plane, and other settings.
## Sidecar format
```yaml
apiVersion: dapr.io/v1alpha1
kind: Configuration
metadata:
  name: <REPLACE-WITH-NAME>
  namespace: <REPLACE-WITH-NAMESPACE>
spec:
  api:
    allowed:
      - name: <REPLACE-WITH-API>
        version: <VERSION>
        protocol: <HTTP-OR-GRPC>
  tracing:
    samplingRate: <REPLACE-WITH-INTEGER>
    stdout: true
    otel:
      endpointAddress: <REPLACE-WITH-ENDPOINT-ADDRESS>
      isSecure: false
      protocol: <HTTP-OR-GRPC>
  httpPipeline: # for incoming http calls
    handlers:
      - name: <HANDLER-NAME>
        type: <HANDLER-TYPE>
  appHttpPipeline: # for outgoing http calls
    handlers:
      - name: <HANDLER-NAME>
        type: <HANDLER-TYPE>
  secrets:
    scopes:
      - storeName: <NAME-OF-SCOPED-STORE>
        defaultAccess: <ALLOW-OR-DENY>
        deniedSecrets: <REPLACE-WITH-DENIED-SECRET>
  components:
    deny:
      - <COMPONENT-TO-DENY>
  accessControl:
    defaultAction: <ALLOW-OR-DENY>
    trustDomain: <REPLACE-WITH-TRUST-DOMAIN>
    policies:
      - appId: <APP-NAME>
        defaultAction: <ALLOW-OR-DENY>
        trustDomain: <REPLACE-WITH-TRUST-DOMAIN>
        namespace: "default"
        operations:
          - name: <OPERATION-NAME>
            httpVerb: ['POST', 'GET']
            action: <ALLOW-OR-DENY>
```
### Spec fields
| Field | Required | Details | Example |
|--------------------|:--------:|---------|---------|
| accessControl | N | Applied to Dapr sidecar for the called application. Enables the configuration of policies that restrict what operations calling applications can perform (via service invocation) on the called application. | [Learn more about the `accessControl` configuration.]({{< ref invoke-allowlist.md >}}) |
| api | N | Used to enable only the Dapr sidecar APIs used by the application. | [Learn more about the `api` configuration.]({{< ref api-allowlist.md >}}) |
| httpPipeline | N | Configure API middleware pipelines | [Middleware pipeline configuration overview]({{< ref "configuration-overview.md#middleware" >}})<br>[Learn more about the `httpPipeline` configuration.]({{< ref "middleware.md#configure-api-middleware-pipelines" >}}) |
| appHttpPipeline | N | Configure application middleware pipelines | [Middleware pipeline configuration overview]({{< ref "configuration-overview.md#middleware" >}})<br>[Learn more about the `appHttpPipeline` configuration.]({{< ref "middleware.md#configure-app-middleware-pipelines" >}}) |
| components | N | Used to specify a denylist of component types that can't be initialized. | [Learn more about the `components` configuration.]({{< ref "configuration-overview.md#disallow-usage-of-certain-component-types" >}}) |
| features | N | Defines the preview features that are enabled/disabled. | [Learn more about the `features` configuration.]({{< ref preview-features.md >}}) |
| logging | N | Configure how logging works in the Dapr runtime. | [Learn more about the `logging` configuration.]({{< ref "configuration-overview.md#logging" >}}) |
| metrics | N | Enable or disable metrics for an application. | [Learn more about the `metrics` configuration.]({{< ref "configuration-overview.md#metrics" >}}) |
| nameResolution | N | Name resolution configuration spec for the service invocation building block. | [Learn more about the `nameResolution` configuration per components.]({{< ref supported-name-resolution.md >}}) |
| secrets | N | Limit the secrets to which your Dapr application has access. | [Learn more about the `secrets` configuration.]({{< ref secret-scope.md >}}) |
| tracing | N | Turns on tracing for an application. | [Learn more about the `tracing` configuration.]({{< ref "configuration-overview.md#tracing" >}}) |
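For example, a sidecar Configuration that only enables tracing might look like the following sketch (the resource name `tracing-config` and the collector address are hypothetical):
```yaml
apiVersion: dapr.io/v1alpha1
kind: Configuration
metadata:
  name: tracing-config
spec:
  tracing:
    samplingRate: "1"   # sample all traces
    otel:
      endpointAddress: "localhost:4317"
      isSecure: false
      protocol: grpc
```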
## Control-plane format
The `daprsystem` configuration file installed with Dapr applies global settings and is only set up when Dapr is deployed to Kubernetes.
```yml
apiVersion: dapr.io/v1alpha1
kind: Configuration
metadata:
  name: daprsystem
  namespace: default
spec:
  mtls:
    enabled: true
    allowedClockSkew: 15m
    workloadCertTTL: 24h
```
### Spec fields
| Field | Required | Details | Example |
|--------------------|:--------:|---------|---------|
| mtls | N | Defines the mTLS configuration | `allowedClockSkew: 15m`<br>`workloadCertTTL: 24h`<br>[Learn more about the `mtls` configuration.]({{< ref "configuration-overview.md#mtls-mutual-tls" >}}) |
## Related links
- [Learn more about how to use configuration specs]({{< ref configuration-overview.md >}})

View File

@ -1,16 +1,16 @@
---
type: docs
title: "HTTPEndpoint spec"
linkTitle: "HTTPEndpoint spec"
description: "The HTTPEndpoint resource spec"
weight: 300
linkTitle: "HTTPEndpoint"
description: "The basic spec for a Dapr HTTPEndpoint resource"
weight: 4000
aliases:
- "/operations/httpEndpoints/"
---
The `HTTPEndpoint` is a Dapr resource that is used to enable the invocation of non-Dapr endpoints from a Dapr application.
## HTTPEndpoint format
## Format
```yaml
apiVersion: dapr.io/v1alpha1
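kind: HTTPEndpoint
metadata:
  name: <REPLACE-WITH-NAME>
spec:
  # A sketch of the remaining fields; baseUrl is required, headers are optional.
  baseUrl: <REPLACE-WITH-BASE-URL> # e.g. "https://api.example.com"
  headers: # Optional
  - name: <REPLACE-WITH-A-HEADER-NAME>
    value: <REPLACE-WITH-A-HEADER-VALUE>
```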

View File

@ -0,0 +1,65 @@
---
type: docs
title: "Resiliency spec"
linkTitle: "Resiliency"
weight: 3000
description: "The basic spec for a Dapr resiliency resource"
---
The `Resiliency` Dapr resource allows you to define and apply fault-tolerance policies. Resiliency specs are applied when the Dapr sidecar starts.
## Format
```yml
apiVersion: dapr.io/v1alpha1
kind: Resiliency
metadata:
  name: <REPLACE-WITH-RESOURCE-NAME>
version: v1alpha1
scopes:
  - <REPLACE-WITH-SCOPED-APPIDS>
spec:
  policies: # Required
    timeouts:
      timeoutName: <REPLACE-WITH-TIME-VALUE> # Replace with any unique name
    retries:
      retryName: # Replace with any unique name
        policy: <REPLACE-WITH-VALUE>
        duration: <REPLACE-WITH-VALUE>
        maxInterval: <REPLACE-WITH-VALUE>
        maxRetries: <REPLACE-WITH-VALUE>
    circuitBreakers:
      circuitBreakerName: # Replace with any unique name
        maxRequests: <REPLACE-WITH-VALUE>
        timeout: <REPLACE-WITH-VALUE>
        trip: <REPLACE-WITH-CONSECUTIVE-FAILURE-VALUE>
  targets: # Required
    apps:
      appID: # Replace with scoped app ID
        timeout: <REPLACE-WITH-TIMEOUT-NAME>
        retry: <REPLACE-WITH-RETRY-NAME>
        circuitBreaker: <REPLACE-WITH-CIRCUIT-BREAKER-NAME>
    actors:
      myActorType:
        timeout: <REPLACE-WITH-TIMEOUT-NAME>
        retry: <REPLACE-WITH-RETRY-NAME>
        circuitBreaker: <REPLACE-WITH-CIRCUIT-BREAKER-NAME>
        circuitBreakerCacheSize: <REPLACE-WITH-VALUE>
    components:
      componentName: # Replace with your component name
        outbound:
          timeout: <REPLACE-WITH-TIMEOUT-NAME>
          retry: <REPLACE-WITH-RETRY-NAME>
          circuitBreaker: <REPLACE-WITH-CIRCUIT-BREAKER-NAME>
```
## Spec fields
| Field | Required | Details | Example |
|--------------------|:--------:|---------|---------|
| policies | Y | The configuration of resiliency policies, including: <br><ul><li>`timeouts`</li><li>`retries`</li><li>`circuitBreakers`</li></ul> <br> [See more examples with all of the built-in policies]({{< ref policies.md >}}) | timeout: `general`<br>retry: `retryForever`<br>circuit breaker: `simpleCB` |
| targets | Y | The configuration for the applications, actors, or components that use the resiliency policies. <br>[See more examples in the resiliency targets guide]({{< ref targets.md >}}) | `apps` <br>`components`<br>`actors` |
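For illustration, the following sketch fills in concrete values (the names `myresiliency`, `myapp`, and the policy names are hypothetical):
```yml
apiVersion: dapr.io/v1alpha1
kind: Resiliency
metadata:
  name: myresiliency
scopes:
  - myapp
spec:
  policies:
    timeouts:
      general: 5s
    retries:
      retryForever:
        policy: constant
        duration: 5s
        maxRetries: -1   # retry indefinitely
    circuitBreakers:
      simpleCB:
        maxRequests: 1
        timeout: 60s
        trip: consecutiveFailures >= 5
  targets:
    apps:
      myapp:
        timeout: general
        retry: retryForever
        circuitBreaker: simpleCB
```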
## Related links
[Learn more about resiliency policies and targets]({{< ref resiliency-overview.md >}})

View File

@ -0,0 +1,88 @@
---
type: docs
title: "Subscription spec"
linkTitle: "Subscription"
weight: 2000
description: "The basic spec for a Dapr subscription"
---
The `Subscription` Dapr resource allows you to subscribe declaratively to a topic using an external component YAML file. This guide demonstrates two subscription API versions:
- `v2alpha1` (default spec)
- `v1alpha1` (deprecated)
## `v2alpha1` format
The following is the basic `v2alpha1` spec for a `Subscription` resource. `v2alpha1` is the default spec for the subscription API.
```yml
apiVersion: dapr.io/v2alpha1
kind: Subscription
metadata:
  name: <REPLACE-WITH-NAME>
spec:
  version: v2alpha1
  topic: <REPLACE-WITH-TOPIC-NAME> # Required
  routes: # Required
    rules:
      - match: <REPLACE-WITH-EVENT-TYPE>
        path: <REPLACE-WITH-PATH>
  pubsubname: <REPLACE-WITH-PUBSUB-NAME> # Required
  deadlettertopic: <REPLACE-WITH-TOPIC-NAME> # Optional
  bulksubscribe: # Optional
    enabled: <REPLACE-WITH-BOOLEAN-VALUE>
    maxmessages: <REPLACE-WITH-VALUE>
    maxawaitduration: <REPLACE-WITH-VALUE>
scopes:
  - <REPLACE-WITH-SCOPED-APPIDS>
```
### Spec fields
| Field | Required | Details | Example |
|--------------------|:--------:|---------|---------|
| topic | Y | The name of the topic to which your component subscribes. | `orders` |
| routes | Y | The routes configuration for this topic, including the conditions for sending a message to a specific path. Includes the following fields: <br><ul><li>`match`: _Optional._ The CEL expression used to match the event. If not specified, the route is considered the default. </li><li>`path`: The endpoint to which events matching this rule are sent. </li></ul> | `match: event.type == "widget"` <br>`path: /widgets` |
| pubsubname | Y | The name of your pub/sub component. | `pubsub` |
| deadlettertopic | N | The name of the dead letter topic that forwards undeliverable messages. | `poisonMessages` |
| bulksubscribe | N | Enable bulk subscribe properties. | `true`, `false` |
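For example, the following sketch routes `widget` events to `/widgets` and all other events to `/products` (the topic, app, and pub/sub names are hypothetical):
```yml
apiVersion: dapr.io/v2alpha1
kind: Subscription
metadata:
  name: order-subscription
spec:
  topic: orders
  routes:
    rules:
      - match: event.type == "widget"
        path: /widgets
    default: /products
  pubsubname: pubsub
scopes:
  - checkout
```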
## `v1alpha1` format
The following is the basic `v1alpha1` spec for a `Subscription` resource. `v1alpha1` is now deprecated.
```yml
apiVersion: dapr.io/v1alpha1
kind: Subscription
metadata:
  name: <REPLACE-WITH-RESOURCE-NAME>
spec:
  version: v1alpha1
  topic: <REPLACE-WITH-TOPIC-NAME> # Required
  route: <REPLACE-WITH-ROUTE-NAME> # Required
  pubsubname: <REPLACE-WITH-PUBSUB-NAME> # Required
  deadLetterTopic: <REPLACE-WITH-DEAD-LETTER-TOPIC-NAME> # Optional
  bulkSubscribe: # Optional
    enabled: <REPLACE-WITH-BOOLEAN-VALUE>
    maxmessages: <REPLACE-WITH-VALUE>
    maxawaitduration: <REPLACE-WITH-VALUE>
scopes:
  - <REPLACE-WITH-SCOPED-APPIDS>
```
### Spec fields
| Field | Required | Details | Example |
|--------------------|:--------:|---------|---------|
| topic | Y | The name of the topic to which your component subscribes. | `orders` |
| route | Y | The endpoint to which all topic messages are sent. | `/checkout` |
| pubsubname | Y | The name of your pub/sub component. | `pubsub` |
| deadLetterTopic | N | The name of the dead letter topic that forwards undeliverable messages. | `poisonMessages` |
| bulkSubscribe | N | Enable bulk subscribe properties. | `true`, `false` |
## Related links
- [Learn more about the declarative subscription method]({{< ref "subscription-methods.md#declarative-subscriptions" >}})
- [Learn more about dead letter topics]({{< ref pubsub-deadletter.md >}})
- [Learn more about routing messages]({{< ref "howto-route-messages.md#declarative-subscription" >}})
- [Learn more about bulk subscribing]({{< ref pubsub-bulk.md >}})

View File

@ -1,6 +1,6 @@
- component: In-memory
link: setup-inmemory
state: Beta
state: Stable
version: v1
since: "1.7"
features:

View File

@ -20,7 +20,7 @@
etag: true
ttl: true
query: true
- component: Azure SQL Server
- component: Microsoft SQL Server
link: setup-sqlserver
state: Stable
version: v1

View File

@ -77,9 +77,9 @@
query: false
- component: In-memory
link: setup-inmemory
state: Developer-only
state: Stable
version: v1
since: "1.8"
since: "1.9"
features:
crud: true
transactions: true

View File

@ -1,19 +1,13 @@
<script src="/js/copy-code-button.js"></script>
{{ with .Site.Params.algolia_docsearch }}
<script src="https://cdn.jsdelivr.net/npm/docsearch.js@2.6.3/dist/cdn/docsearch.min.js"></script>
<script>
<script src="https://cdn.jsdelivr.net/npm/@docsearch/js@3"></script>
<script type="text/javascript">
docsearch({
// Your apiKey and indexName will be given to you once
// we create your config
apiKey: '54ae43aa28ce8f00c54c8d5f544d29b9',
indexName: 'crawler_dapr',
container: '#docsearch',
appId: 'O0QLQGNF38',
// Replace inputSelector with a CSS selector
// matching your search input
inputSelector: '.td-search-input',
// Set debug to true to inspect the dropdown
debug: false,
apiKey: '54ae43aa28ce8f00c54c8d5f544d29b9',
indexName: 'daprdocs',
});
</script>
{{ end }}
<script src="/js/copy-code-button.js"></script>

View File

@ -1,3 +1,3 @@
{{ with .Site.Params.algolia_docsearch }}
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/docsearch.js@2/dist/cdn/docsearch.min.css" />
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/@docsearch/css@3" />
{{ end }}

View File

@ -1,28 +1,53 @@
{{ if .Path }}
{{ $pathFormatted := replace .Path "\\" "/" }}
{{ $gh_repo := ($.Param "github_repo") }}
{{ $gh_subdir := ($.Param "github_subdir") }}
{{ $gh_project_repo := ($.Param "github_project_repo") }}
{{ $gh_branch := (default "master" ($.Param "github_branch")) }}
{{ if $gh_repo }}
<div class="td-page-meta ml-2 pb-1 pt-2 mb-0">
{{ $gh_repo_path := printf "%s/content/%s" $gh_branch $pathFormatted }}
{{ if and ($gh_subdir) (.Site.Language.Lang) }}
{{ $gh_repo_path = printf "%s/%s/content/%s/%s" $gh_branch $gh_subdir ($.Site.Language.Lang) $pathFormatted }}
{{ else if .Site.Language.Lang }}
{{ $gh_repo_path = printf "%s/content/%s/%s" $gh_branch ($.Site.Language.Lang) $pathFormatted }}
{{ else if $gh_subdir }}
{{ $gh_repo_path = printf "%s/%s/content/%s" $gh_branch $gh_subdir $pathFormatted }}
{{ end }}
{{ $editURL := printf "%s/edit/%s" $gh_repo $gh_repo_path }}
{{ $createURL := printf "%s/edit/%s" $gh_repo $gh_repo_path }}
{{ $issuesURL := printf "%s/issues/new/choose" $gh_repo}}
{{ $newPageStub := resources.Get "stubs/new-page-template.md" }}
{{ $newPageQS := querify "value" $newPageStub.Content "filename" "change-me.md" | safeURL }}
{{ $newPageURL := printf "%s/new/%s?%s" $gh_repo $gh_repo_path $newPageQS }}
{{ if .File }}
{{ $pathFormatted := replace .File.Path "\\" "/" -}}
{{ $gh_repo := ($.Param "github_repo") -}}
{{ $gh_url := ($.Param "github_url") -}}
{{ $gh_subdir := ($.Param "github_subdir") -}}
{{ $gh_project_repo := ($.Param "github_project_repo") -}}
{{ $gh_branch := (default "main" ($.Param "github_branch")) -}}
<div class="td-page-meta ms-2 pb-1 pt-2 mb-0">
{{ if $gh_url -}}
{{ warnf "Warning: use of `github_url` is deprecated. For details see https://www.docsy.dev/docs/adding-content/repository-links/#github_url-optional" -}}
<a href="{{ $gh_url }}" target="_blank"><i class="fa-solid fa-pen-to-square fa-fw"></i> {{ T "post_edit_this" }}</a>
{{ else if $gh_repo -}}
{{ $gh_repo_path := printf "%s/content/%s" $gh_branch $pathFormatted -}}
{{ if and ($gh_subdir) (.Site.Language.Lang) -}}
{{ $gh_repo_path = printf "%s/%s/content/%s/%s" $gh_branch $gh_subdir ($.Site.Language.Lang) $pathFormatted -}}
{{ else if .Site.Language.Lang -}}
{{ $gh_repo_path = printf "%s/content/%s/%s" $gh_branch ($.Site.Language.Lang) $pathFormatted -}}
{{ else if $gh_subdir -}}
{{ $gh_repo_path = printf "%s/%s/content/%s" $gh_branch $gh_subdir $pathFormatted -}}
{{ end -}}
<a href="{{ $editURL }}" target="_blank" rel="nofollow noopener noreferrer"><i class="fa fa-edit fa-fw"></i> {{ T "post_edit_this" }}</a>
<a href="{{ $issuesURL }}" target="_blank" rel="nofollow noopener noreferrer"><i class="fab fa-github fa-fw"></i> {{ T "post_create_issue" }}</a>
{{/* Adjust $gh_repo_path based on path_base_for_github_subdir */ -}}
{{ $ghs_base := $.Param "path_base_for_github_subdir" -}}
{{ $ghs_rename := "" -}}
{{ if reflect.IsMap $ghs_base -}}
{{ $ghs_rename = $ghs_base.to -}}
{{ $ghs_base = $ghs_base.from -}}
{{ end -}}
{{ with $ghs_base -}}
{{ $gh_repo_path = replaceRE . $ghs_rename $gh_repo_path -}}
{{ end -}}
{{ $viewURL := printf "%s/tree/%s" $gh_repo $gh_repo_path -}}
{{ $editURL := printf "%s/edit/%s" $gh_repo $gh_repo_path -}}
{{ $issuesURL := printf "%s/issues/new/choose" $gh_repo -}}
{{ $newPageStub := resources.Get "stubs/new-page-template.md" -}}
{{ $newPageQS := querify "value" $newPageStub.Content "filename" "change-me.md" | safeURL -}}
{{ $newPageURL := printf "%s/new/%s?%s" $gh_repo $gh_repo_path $newPageQS -}}
<a href="{{ $editURL }}" target="_blank" rel="nofollow noopener noreferrer"><i class="fa fa-edit fa-fw"></i> {{ T "post_edit_this" }}</a>
<a href="{{ $issuesURL }}" target="_blank" rel="nofollow noopener noreferrer"><i class="fab fa-github fa-fw"></i> {{ T "post_create_issue" }}</a>
{{ with $gh_project_repo -}}
{{ $project_issueURL := printf "%s/issues/new/choose" . -}}
<a href="{{ $project_issueURL }}" class="td-page-meta--project-issue" target="_blank" rel="noopener"><i class="fab fa-github fa-fw"></i> {{ T "post_create_project_issue" }}</a>
{{ end -}}
{{ end -}}
{{ with .CurrentSection.AlternativeOutputFormats.Get "print" -}}
<a id="print" href="{{ .Permalink | safeURL }}"><i class="fa-solid fa-print fa-fw"></i> {{ T "print_entire_section" }}</a>
{{ end }}
</div>
{{ end }}
{{ end }}
{{ end -}}

View File

@ -0,0 +1,30 @@
{{ if .Site.Params.gcs_engine_id -}}
<input type="search" class="form-control td-search-input" placeholder="&#xf002; {{ T "ui_search" }}" aria-label="{{ T "ui_search" }}" autocomplete="off">
{{ else if .Site.Params.algolia_docsearch -}}
<div id="docsearch"></div>
{{ else if .Site.Params.offlineSearch -}}
{{ $offlineSearchIndex := resources.Get "json/offline-search-index.json" | resources.ExecuteAsTemplate "offline-search-index.json" . -}}
{{ if hugo.IsProduction -}}
{{/* Use `md5` as finger print hash function to shorten file name to avoid `file name too long` error. */ -}}
{{ $offlineSearchIndex = $offlineSearchIndex | fingerprint "md5" -}}
{{ end -}}
{{ $offlineSearchLink := $offlineSearchIndex.RelPermalink -}}
<input
type="search"
class="form-control td-search-input"
placeholder="&#xf002; {{ T "ui_search" }}"
aria-label="{{ T "ui_search" }}"
autocomplete="off"
{{/*
The data attribute name of the json file URL must end with `src` since
Hugo's absurlreplacer requires `src`, `href`, `action` or `srcset` suffix for the attribute name.
If the absurlreplacer is not applied, the URL will start with `/`.
It causes the json file loading error when relativeURLs is enabled.
https://github.com/google/docsy/issues/181
*/}}
data-offline-search-index-json-src="{{ $offlineSearchLink }}"
data-offline-search-base-href="/"
data-offline-search-max-results="{{ .Site.Params.offlineSearchMaxResults | default 10 }}"
>
{{ end -}}

View File

@ -1 +1 @@
{{- if .Get "short" }}1.11{{ else if .Get "long" }}1.11.0{{ else if .Get "cli" }}1.11.0{{ else }}1.11.0{{ end -}}
{{- if .Get "short" }}1.11{{ else if .Get "long" }}1.11.1{{ else if .Get "cli" }}1.11.0{{ else }}1.11.1{{ end -}}

View File

@ -71,7 +71,7 @@ spec:
spec:
containers:
- name: otel-collector
image: otel/opentelemetry-collector-contrib:0.50.0
image: otel/opentelemetry-collector-contrib:0.77.0
command:
- "/otelcol-contrib"
- "--config=/conf/otel-collector-config.yaml"

Binary file not shown.

Before: 56 KiB → After: 31 KiB

View File

@ -1,49 +1,35 @@
function addCopyButtons(clipboard) {
    document.querySelectorAll('pre > code').forEach(function(codeBlock) {
        var button = document.createElement('button');
        button.className = 'copy-code-button';
        button.type = 'button';
        button.innerText = 'Copy';

        button.addEventListener('click', function() {
            clipboard.writeText(codeBlock.textContent).then(
                function() {
                    button.blur();
                    button.innerText = 'Copied!';
                    setTimeout(function() {
                        button.innerText = 'Copy';
                    }, 2000);
                },
                function(error) {
                    button.innerText = 'Error';
                    console.error(error);
                }
            );
        });

        var pre = codeBlock.parentNode;
        if (pre.parentNode.classList.contains('highlight')) {
            var highlight = pre.parentNode;
            highlight.parentNode.insertBefore(button, highlight);
        } else {
            pre.parentNode.insertBefore(button, pre);
        }
    });
}

if (navigator && navigator.clipboard) {
    addCopyButtons(navigator.clipboard);
} else {
    var script = document.createElement('script');
    script.src =
        'https://cdnjs.cloudflare.com/ajax/libs/clipboard-polyfill/2.7.0/clipboard-polyfill.promise.js';
    script.integrity = 'sha256-waClS2re9NUbXRsryKoof+F9qc1gjjIhc2eT7ZbIv94=';
    script.crossOrigin = 'anonymous';
    script.onload = function() {
        addCopyButtons(clipboard);
    };
    document.body.appendChild(script);
}

const highlightClass = document.querySelectorAll('.highlight');

highlightClass.forEach(element => {
    const copyIcon = document.createElement('i');
    copyIcon.classList.add('fas', 'fa-copy', 'copy-icon');
    copyIcon.style.color = 'white';
    copyIcon.style.display = 'none';
    element.appendChild(copyIcon);

    element.addEventListener('mouseenter', () => {
        copyIcon.style.display = 'inline';
    });

    element.addEventListener('mouseleave', () => {
        copyIcon.style.display = 'none';
        copyIcon.classList.replace('fa-check', 'fa-copy');
    });

    copyIcon.addEventListener('click', async () => {
        const selection = window.getSelection();
        const range = document.createRange();
        range.selectNodeContents(element);
        selection.removeAllRanges();
        selection.addRange(range);

        try {
            await navigator.clipboard.writeText(selection.toString());
            console.log('Text copied to clipboard');
            copyIcon.classList.replace('fa-copy', 'fa-check');
            selection.removeAllRanges();
        } catch (error) {
            console.error('Failed to copy: ', error);
        }
    });
});

Some files were not shown because too many files have changed in this diff Show More