mirror of https://github.com/dapr/docs.git
Merge branch 'v1.11' into issue_1298
This commit is contained in:
commit
afdb7f880a
|
@ -0,0 +1,118 @@
|
|||
import os
|
||||
from re import S
|
||||
import sys
|
||||
import json
|
||||
from bs4 import BeautifulSoup
|
||||
from algoliasearch.search_client import SearchClient
|
||||
|
||||
# Host name of the published docs site; og:url meta values are split on this
# to derive each page's site-relative path.
url = "docs.dapr.io"

# Optional first CLI argument selects the directory to index; default is the
# current working directory.
if len(sys.argv) > 1:
    starting_directory = os.path.join(os.getcwd(), str(sys.argv[1]))
else:
    starting_directory = os.getcwd()

# Algolia credentials and target index are supplied via the environment
# (set by the CI workflow); os.getenv returns None when a variable is unset.
ALGOLIA_APP_ID = os.getenv('ALGOLIA_APP_ID')
ALGOLIA_API_KEY = os.getenv('ALGOLIA_API_WRITE_KEY')
ALGOLIA_INDEX_NAME = os.getenv('ALGOLIA_INDEX_NAME')

client = SearchClient.create(ALGOLIA_APP_ID, ALGOLIA_API_KEY)
index = client.init_index(ALGOLIA_INDEX_NAME)

# File names that must never be indexed.
excluded_files = [
    "404.html",
]

# Directory basenames that must never be indexed.
# (Renamed from the original misspelling "exluded_directories".)
excluded_directories = [
    "zh-hans",
]

# Base rank per top-level docs section; lower values sort higher in search.
rankings = {
    "Getting started": 0,
    "Concepts": 100,
    "Developing applications": 200,
    "Operations": 300,
    "Reference": 400,
    "Contributing": 500,
    "Home": 600
}


def scan_directory(directory: str, pages: list):
    """Recursively collect indexable HTML page paths under ``directory``.

    Paths are appended to the caller-supplied ``pages`` list. Excluded
    directories, excluded file names, and any page whose HTML contains the
    '<!-- DISABLE_ALGOLIA -->' marker are skipped.
    """
    if os.path.basename(directory) in excluded_directories:
        print(f'Skipping directory: {directory}')
        return
    for file in os.listdir(directory):
        path = os.path.join(directory, file)
        if os.path.isfile(path):
            if file.endswith(".html") and file not in excluded_files:
                # Read via a context manager so the handle is closed promptly;
                # the original open(...).read() leaked the handle until GC.
                with open(path, encoding="utf8") as page_file:
                    page_html = page_file.read()
                if '<!-- DISABLE_ALGOLIA -->' not in page_html:
                    print(f'Indexing: {path}')
                    pages.append(path)
                else:
                    print(f'Skipping hidden page: {path}')
        else:
            scan_directory(path, pages)
|
||||
|
||||
def parse_file(path: str):
    """Parse one rendered HTML page into an Algolia record dict.

    The record carries the page hierarchy, rank, URL-derived objectID, and the
    concatenated paragraph text. A page with no og:url meta tag yields a
    record without an "objectID" key; the caller filters those out.
    """
    data = {}
    data["hierarchy"] = {}
    data["rank"] = 999
    data["subrank"] = 99
    data["type"] = "lvl2"
    data["lvl0"] = ""
    data["lvl1"] = ""
    data["lvl2"] = ""
    data["lvl3"] = ""
    text = ""
    subrank = 0
    # utf8 matches the encoding used while scanning pages; errors='ignore'
    # tolerates stray bytes in generated HTML.
    with open(path, "r", encoding="utf8", errors='ignore') as file:
        content = file.read()
    soup = BeautifulSoup(content, "html.parser")
    for meta in soup.find_all("meta"):
        if meta.get("name") == "description":
            data["lvl2"] = meta.get("content")
            data["hierarchy"]["lvl1"] = meta.get("content")
        elif meta.get("property") == "og:title":
            data["lvl0"] = meta.get("content")
            data["hierarchy"]["lvl0"] = meta.get("content")
            data["hierarchy"]["lvl2"] = meta.get("content")
        elif meta.get("property") == "og:url":
            # og:url is assumed to contain the site host; the part after it is
            # the site-relative path, reused as the unique objectID.
            # NOTE(review): a malformed og:url without the host would raise
            # IndexError here, as in the original — confirm upstream pages.
            data["url"] = meta.get("content")
            data["path"] = meta.get("content").split(url)[1]
            data["objectID"] = meta.get("content").split(url)[1]
    breadcrumbs = soup.find_all("li", class_="breadcrumb-item")
    # len() on a result set cannot raise, so the original bare try/except
    # around these two lines was dead code and has been removed.
    subrank = len(breadcrumbs)
    data["subrank"] = subrank
    for bc in breadcrumbs:
        # Only the first breadcrumb (the top-level section) is consulted.
        section = bc.text.strip()
        data["lvl1"] = section
        data["hierarchy"]["lvl0"] = section
        try:
            data["rank"] = rankings[section] + subrank
        except KeyError:
            # Unknown sections sort last, just ahead of the 999 default.
            print(f"Rank not found for section {section}")
            data["rank"] = 998
        break
    for p in soup.find_all("p"):
        if p.text != "":
            text = text + p.text
    data["text"] = text
    return data
|
||||
|
||||
def index_payload(payload):
    """Replace the entire contents of the Algolia index with ``payload``
    and block until the indexing operation has finished."""
    index.replace_all_objects(payload).wait()
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Collect every indexable page, parse each into a record, then upload
    # only records that resolved an objectID (i.e. had an og:url meta tag).
    pages = []
    scan_directory(starting_directory, pages)
    records = [parse_file(page) for page in pages]
    payload = [record for record in records if "objectID" in record]
    index_payload(payload)
|
|
@ -1,43 +1,75 @@
|
|||
name: Azure Static Web App Root
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
branches:
|
||||
- v1.10
|
||||
- v1.11
|
||||
pull_request:
|
||||
types: [opened, synchronize, reopened, closed]
|
||||
branches:
|
||||
- v1.10
|
||||
- v1.11
|
||||
|
||||
concurrency:
|
||||
# Cancel the previously triggered build for only PR build.
|
||||
group: website-${{ github.event.pull_request.number || github.sha }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
build_and_deploy_job:
|
||||
if: github.event_name == 'push' || (github.event_name == 'pull_request' && github.event.action != 'closed')
|
||||
name: Build Hugo Website
|
||||
if: github.event.action != 'closed'
|
||||
runs-on: ubuntu-latest
|
||||
name: Build and Deploy Job
|
||||
env:
|
||||
SWA_BASE: 'proud-bay-0e9e0e81e'
|
||||
HUGO_ENV: production
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Checkout docs repo
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
submodules: recursive
|
||||
fetch-depth: 0
|
||||
submodules: true
|
||||
- name: Setup Node
|
||||
uses: actions/setup-node@v2
|
||||
with:
|
||||
node-version: '14'
|
||||
- name: Setup Hugo
|
||||
uses: peaceiris/actions-hugo@v2.5.0
|
||||
with:
|
||||
hugo-version: 0.102.3
|
||||
extended: true
|
||||
- name: Setup Docsy
|
||||
run: cd daprdocs && git submodule update --init --recursive && sudo npm install -D --save autoprefixer && sudo npm install -D --save postcss-cli
|
||||
- name: Build And Deploy
|
||||
id: builddeploy
|
||||
run: |
|
||||
cd daprdocs
|
||||
git submodule update --init --recursive
|
||||
sudo npm install -D --save autoprefixer
|
||||
sudo npm install -D --save postcss-cli
|
||||
- name: Build Hugo Website
|
||||
run: |
|
||||
cd daprdocs
|
||||
git config --global --add safe.directory /github/workspace
|
||||
if [ $GITHUB_EVENT_NAME == 'pull_request' ]; then
|
||||
STAGING_URL="https://${SWA_BASE}-${{github.event.number}}.westus2.azurestaticapps.net/"
|
||||
fi
|
||||
hugo ${STAGING_URL+-b "$STAGING_URL"}
|
||||
- name: Deploy docs site
|
||||
uses: Azure/static-web-apps-deploy@v1
|
||||
env:
|
||||
HUGO_ENV: production
|
||||
HUGO_VERSION: "0.100.2"
|
||||
with:
|
||||
azure_static_web_apps_api_token: ${{ secrets.AZURE_STATIC_WEB_APPS_API_TOKEN_PROUD_BAY_0E9E0E81E }}
|
||||
skip_deploy_on_missing_secrets: true
|
||||
repo_token: ${{ secrets.GITHUB_TOKEN }} # Used for Github integrations (i.e. PR comments)
|
||||
repo_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
action: "upload"
|
||||
app_location: "/daprdocs"
|
||||
app_build_command: "git config --global --add safe.directory /github/workspace && hugo"
|
||||
output_location: "public"
|
||||
skip_api_build: true
|
||||
app_location: "daprdocs/public/"
|
||||
api_location: "daprdocs/public/"
|
||||
output_location: ""
|
||||
skip_app_build: true
|
||||
skip_deploy_on_missing_secrets: true
|
||||
- name: Upload Hugo artifacts
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: hugo_build
|
||||
path: ./daprdocs/public/
|
||||
if-no-files-found: error
|
||||
|
||||
close_pull_request_job:
|
||||
close_staging_site:
|
||||
if: github.event_name == 'pull_request' && github.event.action == 'closed'
|
||||
runs-on: ubuntu-latest
|
||||
name: Close Pull Request Job
|
||||
|
@ -48,3 +80,30 @@ jobs:
|
|||
with:
|
||||
azure_static_web_apps_api_token: ${{ secrets.AZURE_STATIC_WEB_APPS_API_TOKEN_PROUD_BAY_0E9E0E81E }}
|
||||
action: "close"
|
||||
skip_deploy_on_missing_secrets: true
|
||||
|
||||
algolia_index:
|
||||
name: Index site for Algolia
|
||||
if: github.event_name == 'push'
|
||||
needs: ['build_and_deploy_job']
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
ALGOLIA_APP_ID: ${{ secrets.ALGOLIA_APP_ID }}
|
||||
ALGOLIA_API_WRITE_KEY: ${{ secrets.ALGOLIA_API_WRITE_KEY }}
|
||||
ALGOLIA_INDEX_NAME: daprdocs
|
||||
steps:
|
||||
- name: Checkout docs repo
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
submodules: false
|
||||
- name: Download Hugo artifacts
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: hugo_build
|
||||
path: site/
|
||||
- name: Install Python packages
|
||||
run: |
|
||||
pip install --upgrade bs4
|
||||
pip install --upgrade 'algoliasearch>=2.0,<3.0'
|
||||
- name: Index site
|
||||
run: python ./.github/scripts/algolia.py ./site
|
||||
|
|
|
@ -14,8 +14,8 @@ The following branches are currently maintained:
|
|||
|
||||
| Branch | Website | Description |
|
||||
| ------------------------------------------------------------ | -------------------------- | ------------------------------------------------------------------------------------------------ |
|
||||
| [v1.10](https://github.com/dapr/docs) (primary) | https://docs.dapr.io | Latest Dapr release documentation. Typo fixes, clarifications, and most documentation goes here. |
|
||||
| [v1.11](https://github.com/dapr/docs/tree/v1.11) (pre-release) | https://v1-11.docs.dapr.io/ | Pre-release documentation. Doc updates that are only applicable to v1.11+ go here. |
|
||||
| [v1.11](https://github.com/dapr/docs) (primary) | https://docs.dapr.io | Latest Dapr release documentation. Typo fixes, clarifications, and most documentation goes here. |
|
||||
| [v1.12](https://github.com/dapr/docs/tree/v1.12) (pre-release) | https://v1-12.docs.dapr.io/ | Pre-release documentation. Doc updates that are only applicable to v1.12+ go here. |
|
||||
|
||||
For more information visit the [Dapr branch structure](https://docs.dapr.io/contributing/docs-contrib/contributing-docs/#branch-guidance) document.
|
||||
|
||||
|
|
|
@ -1,38 +1,12 @@
|
|||
// Code formatting.
|
||||
|
||||
.copy-code-button {
|
||||
color: #272822;
|
||||
background-color: #FFF;
|
||||
border-color: #0D2192;
|
||||
border: 2px solid;
|
||||
border-radius: 3px 3px 0px 0px;
|
||||
|
||||
/* right-align */
|
||||
display: block;
|
||||
margin-left: auto;
|
||||
margin-right: 0;
|
||||
|
||||
margin-bottom: -2px;
|
||||
padding: 3px 8px;
|
||||
font-size: 0.8em;
|
||||
.highlight .copy-icon {
|
||||
position: absolute;
|
||||
right: 20px;
|
||||
top: 18px;
|
||||
opacity: 0.7;
|
||||
}
|
||||
|
||||
.copy-code-button:hover {
|
||||
cursor: pointer;
|
||||
background-color: #F2F2F2;
|
||||
}
|
||||
|
||||
.copy-code-button:focus {
|
||||
/* Avoid an ugly focus outline on click in Chrome,
|
||||
but darken the button for accessibility.
|
||||
See https://stackoverflow.com/a/25298082/1481479 */
|
||||
background-color: #E6E6E6;
|
||||
outline: 0;
|
||||
}
|
||||
|
||||
.copy-code-button:active {
|
||||
background-color: #D9D9D9;
|
||||
}
|
||||
|
||||
.highlight pre {
|
||||
/* Avoid pushing up the copy buttons. */
|
||||
|
@ -40,25 +14,31 @@
|
|||
}
|
||||
|
||||
.td-content {
|
||||
// Highlighted code.
|
||||
|
||||
// Highlighted code.
|
||||
.highlight {
|
||||
@extend .card;
|
||||
|
||||
|
||||
margin: 0rem 0;
|
||||
padding: 0rem;
|
||||
|
||||
margin-bottom: 2rem;
|
||||
|
||||
max-width: 100%;
|
||||
|
||||
|
||||
border: none;
|
||||
|
||||
pre {
|
||||
margin: 0;
|
||||
padding: 1rem;
|
||||
border-radius: 10px;
|
||||
}
|
||||
}
|
||||
|
||||
// Inline code
|
||||
p code, li > code, table code {
|
||||
p code,
|
||||
li>code,
|
||||
table code {
|
||||
color: inherit;
|
||||
padding: 0.2em 0.4em;
|
||||
margin: 0;
|
||||
|
@ -78,11 +58,11 @@
|
|||
word-wrap: normal;
|
||||
background-color: $gray-100;
|
||||
padding: $spacer;
|
||||
|
||||
|
||||
max-width: 100%;
|
||||
|
||||
> code {
|
||||
background-color: inherit !important;
|
||||
>code {
|
||||
background-color: inherit !important;
|
||||
padding: 0;
|
||||
margin: 0;
|
||||
font-size: 100%;
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
# Site Configuration
|
||||
baseURL = "https://v1-11.docs.dapr.io"
|
||||
baseURL = "https://docs.dapr.io"
|
||||
title = "Dapr Docs"
|
||||
theme = "docsy"
|
||||
disableFastRender = true
|
||||
|
@ -171,17 +171,20 @@ github_subdir = "daprdocs"
|
|||
github_branch = "v1.11"
|
||||
|
||||
# Versioning
|
||||
version_menu = "v1.11 (preview)"
|
||||
version_menu = "v1.11 (latest)"
|
||||
version = "v1.11"
|
||||
archived_version = false
|
||||
url_latest_version = "https://docs.dapr.io"
|
||||
|
||||
[[params.versions]]
|
||||
version = "v1.11 (preview)"
|
||||
version = "v1.12 (preview)"
|
||||
url = "#"
|
||||
[[params.versions]]
|
||||
version = "v1.10 (latest)"
|
||||
version = "v1.11 (latest)"
|
||||
url = "https://docs.dapr.io"
|
||||
[[params.versions]]
|
||||
version = "v1.10"
|
||||
url = "https://v1-10.docs.dapr.io"
|
||||
[[params.versions]]
|
||||
version = "v1.9"
|
||||
url = "https://v1-9.docs.dapr.io"
|
||||
|
@ -203,27 +206,6 @@ url_latest_version = "https://docs.dapr.io"
|
|||
[[params.versions]]
|
||||
version = "v1.3"
|
||||
url = "https://v1-3.docs.dapr.io"
|
||||
[[params.versions]]
|
||||
version = "v1.2"
|
||||
url = "https://v1-2.docs.dapr.io"
|
||||
[[params.versions]]
|
||||
version = "v1.1"
|
||||
url = "https://v1-1.docs.dapr.io"
|
||||
[[params.versions]]
|
||||
version = "v1.0"
|
||||
url = "https://v1-0.docs.dapr.io"
|
||||
[[params.versions]]
|
||||
version = "v0.11"
|
||||
url = "https://v0-11.docs.dapr.io"
|
||||
[[params.versions]]
|
||||
version = "v0.10"
|
||||
url = "https://github.com/dapr/docs/tree/v0.10.0"
|
||||
[[params.versions]]
|
||||
version = "v0.9"
|
||||
url = "https://github.com/dapr/docs/tree/v0.9.0"
|
||||
[[params.versions]]
|
||||
version = "v0.8"
|
||||
url = "https://github.com/dapr/docs/tree/v0.8.0"
|
||||
|
||||
# UI Customization
|
||||
[params.ui]
|
||||
|
|
|
@ -6,15 +6,15 @@ weight: 200
|
|||
description: "Modular best practices accessible over standard HTTP or gRPC APIs"
|
||||
---
|
||||
|
||||
A [building block]({{< ref building-blocks >}}) is an HTTP or gRPC API that can be called from your code and uses one or more Dapr components.
|
||||
|
||||
Building blocks address common challenges in building resilient, microservices applications and codify best practices and patterns. Dapr consists of a set of building blocks, with extensibility to add new building blocks.
|
||||
A [building block]({{< ref building-blocks >}}) is an HTTP or gRPC API that can be called from your code and uses one or more Dapr components. Dapr consists of a set of API building blocks, with extensibility to add new building blocks. Dapr's building blocks:
|
||||
- Address common challenges in building resilient, microservices applications
|
||||
- Codify best practices and patterns
|
||||
|
||||
The diagram below shows how building blocks expose a public API that is called from your code, using components to implement the building blocks' capability.
|
||||
|
||||
<img src="/images/concepts-building-blocks.png" width=250>
|
||||
|
||||
The following are the building blocks provided by Dapr:
|
||||
Dapr provides the following building blocks:
|
||||
|
||||
<img src="/images/building_blocks.png" width=1200>
|
||||
|
||||
|
@ -25,7 +25,6 @@ The following are the building blocks provided by Dapr:
|
|||
| [**Publish and subscribe**]({{< ref "pubsub-overview.md" >}}) | `/v1.0/publish` `/v1.0/subscribe`| Pub/Sub is a loosely coupled messaging pattern where senders (or publishers) publish messages to a topic, to which subscribers subscribe. Dapr supports the pub/sub pattern between applications.
|
||||
| [**Bindings**]({{< ref "bindings-overview.md" >}}) | `/v1.0/bindings` | A binding provides a bi-directional connection to an external cloud/on-premise service or system. Dapr allows you to invoke the external service through the Dapr binding API, and it allows your application to be triggered by events sent by the connected service.
|
||||
| [**Actors**]({{< ref "actors-overview.md" >}}) | `/v1.0/actors` | An actor is an isolated, independent unit of compute and state with single-threaded execution. Dapr provides an actor implementation based on the virtual actor pattern which provides a single-threaded programming model and where actors are garbage collected when not in use.
|
||||
| [**Observability**]({{< ref "observability-concept.md" >}}) | `N/A` | Dapr system components and runtime emit metrics, logs, and traces to debug, operate and monitor Dapr system services, components and user applications.
|
||||
| [**Secrets**]({{< ref "secrets-overview.md" >}}) | `/v1.0/secrets` | Dapr provides a secrets building block API and integrates with secret stores such as public cloud stores, local stores and Kubernetes to store the secrets. Services can call the secrets API to retrieve secrets, for example to get a connection string to a database.
|
||||
| [**Configuration**]({{< ref "configuration-api-overview.md" >}}) | `/v1.0/configuration` | The Configuration API enables you to retrieve and subscribe to application configuration items for supported configuration stores. This enables an application to retrieve specific configuration information, for example, at start up or when configuration changes are made in the store.
|
||||
| [**Distributed lock**]({{< ref "distributed-lock-api-overview.md" >}}) | `/v1.0-alpha1/lock` | The distributed lock API enables you to take a lock on a resource so that multiple instances of an application can access the resource without conflicts and provide consistency guarantees.
|
||||
|
|
|
@ -11,7 +11,7 @@ Dapr uses a modular design where functionality is delivered as a component. Each
|
|||
You can contribute implementations and extend Dapr's component interfaces capabilities via:
|
||||
|
||||
- The [components-contrib repository](https://github.com/dapr/components-contrib)
|
||||
- [Pluggable components]({{<ref "components-concept.md#built-in-and-pluggable-components" >}}).
|
||||
- [Pluggable components]({{< ref "components-concept.md#built-in-and-pluggable-components" >}}).
|
||||
|
||||
A building block can use any combination of components. For example, the [actors]({{< ref "actors-overview.md" >}}) and the [state management]({{< ref "state-management-overview.md" >}}) building blocks both use [state components](https://github.com/dapr/components-contrib/tree/master/state).
|
||||
|
||||
|
@ -19,6 +19,10 @@ As another example, the [pub/sub]({{< ref "pubsub-overview.md" >}}) building blo
|
|||
|
||||
You can get a list of current components available in the hosting environment using the `dapr components` CLI command.
|
||||
|
||||
{{% alert title="Note" color="primary" %}}
|
||||
For any component that returns data to the app, it is recommended to set the memory capacity of the Dapr sidecar accordingly (process or container) to avoid potential OOM panics. For example, in Docker use the `--memory` option. For Kubernetes, use the `dapr.io/sidecar-memory-limit` annotation. For processes this depends on the OS and/or process orchestration tools.
|
||||
{{% /alert %}}
|
||||
|
||||
## Component specification
|
||||
|
||||
Each component has a specification (or spec) that it conforms to. Components are configured at design-time with a YAML file which is stored in either:
|
||||
|
|
|
@ -49,25 +49,31 @@ For a detailed list of all available arguments run `daprd --help` or see this [t
|
|||
daprd --app-id myapp
|
||||
```
|
||||
|
||||
2. Specify the port your application is listening to
|
||||
1. Specify the port your application is listening to
|
||||
|
||||
```bash
|
||||
daprd --app-id --app-port 5000
|
||||
```
|
||||
|
||||
3. If you are using several custom resources and want to specify the location of the resource definition files, use the `--resources-path` argument:
|
||||
1. If you are using several custom resources and want to specify the location of the resource definition files, use the `--resources-path` argument:
|
||||
|
||||
```bash
|
||||
daprd --app-id myapp --resources-path <PATH-TO-RESOURCES-FILES>
|
||||
```
|
||||
|
||||
4. Enable collection of Prometheus metrics while running your app
|
||||
1. If you've organized your components and other resources (for example, resiliency policies, subscriptions, or configuration) into separate folders or a shared folder, you can specify multiple resource paths:
|
||||
|
||||
```bash
|
||||
daprd --app-id myapp --resources-path <PATH-1-TO-RESOURCES-FILES> --resources-path <PATH-2-TO-RESOURCES-FILES>
|
||||
```
|
||||
|
||||
1. Enable collection of Prometheus metrics while running your app
|
||||
|
||||
```bash
|
||||
daprd --app-id myapp --enable-metrics
|
||||
```
|
||||
|
||||
5. Listen to IPv4 and IPv6 loopback only
|
||||
1. Listen to IPv4 and IPv6 loopback only
|
||||
|
||||
```bash
|
||||
daprd --app-id myapp --dapr-listen-addresses '127.0.0.1,[::1]'
|
||||
|
|
|
@ -7,42 +7,68 @@ description: >
|
|||
Observe applications through tracing, metrics, logs and health
|
||||
---
|
||||
|
||||
When building an application, understanding how the system is behaving is an important part of operating it - this includes having the ability to observe the internal calls of an application, gauging its performance and becoming aware of problems as soon as they occur. This is challenging for any system, but even more so for a distributed system comprised of multiple microservices where a flow, made of several calls, may start in one microservice but continue in another. Observability is critical in production environments, but also useful during development to understand bottlenecks, improve performance and perform basic debugging across the span of microservices.
|
||||
When building an application, understanding the system behavior is an important, yet challenging part of operating it, such as:
|
||||
- Observing the internal calls of an application
|
||||
- Gauging its performance
|
||||
- Becoming aware of problems as soon as they occur
|
||||
|
||||
While some data points about an application can be gathered from the underlying infrastructure (for example memory consumption, CPU usage), other meaningful information must be collected from an "application-aware" layer–one that can show how an important series of calls is executed across microservices. This usually means a developer must add some code to instrument an application for this purpose. Often, instrumentation code is simply meant to send collected data such as traces and metrics to observability tools or services that can help store, visualize and analyze all this information.
|
||||
This can be particularly challenging for a distributed system comprised of multiple microservices, where a flow made of several calls may start in one microservice and continue in another.
|
||||
|
||||
Having to maintain this code, which is not part of the core logic of the application, is a burden on the developer, sometimes requiring understanding the observability tools' APIs, using additional SDKs etc. This instrumentation may also add to the portability challenges of an application, which may require different instrumentation depending on where the application is deployed. For example, different cloud providers offer different observability tools and an on-premises deployment might require a self-hosted solution.
|
||||
Observability into your application is critical in production environments, and can be useful during development to:
|
||||
- Understand bottlenecks
|
||||
- Improve performance
|
||||
- Perform basic debugging across the span of microservices
|
||||
|
||||
While some data points about an application can be gathered from the underlying infrastructure (memory consumption, CPU usage), other meaningful information must be collected from an "application-aware" layer – one that can show how an important series of calls is executed across microservices. Typically, you'd add some code to instrument an application, which simply sends collected data (such as traces and metrics) to observability tools or services that can help store, visualize, and analyze all this information.
|
||||
|
||||
Maintaining this instrumentation code, which is not part of the core logic of the application, requires understanding the observability tools' APIs, using additional SDKs, etc. This instrumentation may also present portability challenges for your application, requiring different instrumentation depending on where the application is deployed. For example:
|
||||
- Different cloud providers offer different observability tools
|
||||
- An on-premises deployment might require a self-hosted solution
|
||||
|
||||
## Observability for your application with Dapr
|
||||
|
||||
When building an application which leverages Dapr API building blocks to perform service-to-service calls and pub/sub messaging, Dapr offers an advantage with respect to [distributed tracing]({{<ref tracing>}}). Because this inter-service communication flows through the Dapr runtime (or "sidecar"), Dapr is in a unique position to offload the burden of application-level instrumentation.
|
||||
When you leverage Dapr API building blocks to perform service-to-service calls, pub/sub messaging, and other APIs, Dapr offers an advantage with respect to [distributed tracing]({{< ref tracing >}}). Since this inter-service communication flows through the Dapr runtime (or "sidecar"), Dapr is in a unique position to offload the burden of application-level instrumentation.
|
||||
|
||||
### Distributed tracing
|
||||
|
||||
Dapr can be [configured to emit tracing data]({{<ref setup-tracing.md>}}), and because Dapr does so using the widely adopted protocols of [Open Telemetry (OTEL)](https://opentelemetry.io/) and [Zipkin](https://zipkin.io), it can be easily integrated with multiple observability tools.
|
||||
Dapr can be [configured to emit tracing data]({{< ref setup-tracing.md >}}) using the widely adopted protocols of [Open Telemetry (OTEL)](https://opentelemetry.io/) and [Zipkin](https://zipkin.io). This makes it easily integrated with multiple observability tools.
|
||||
|
||||
<img src="/images/observability-tracing.png" width=1000 alt="Distributed tracing with Dapr">
|
||||
|
||||
### Automatic tracing context generation
|
||||
|
||||
Dapr uses [W3C tracing]({{<ref w3c-tracing-overview>}}) specification for tracing context, included as part Open Telemetry (OTEL), to generate and propagate the context header for the application or propagate user-provided context headers. This means that you get tracing by default with Dapr.
|
||||
Dapr uses the [W3C tracing]({{< ref tracing >}}) specification for tracing context, included as part of Open Telemetry (OTEL), to generate and propagate the context header for the application or propagate user-provided context headers. This means that you get tracing by default with Dapr.
|
||||
|
||||
## Observability for the Dapr sidecar and control plane
|
||||
|
||||
You also want to be able to observe Dapr itself, by collecting metrics on performance, throughput and latency and logs emitted by the Dapr sidecar, as well as the Dapr control plane services. Dapr sidecars have a health endpoint that can be probed to indicate their health status.
|
||||
You can also observe Dapr itself, by:
|
||||
- Generating logs emitted by the Dapr sidecar and the Dapr control plane services
|
||||
- Collecting metrics on performance, throughput, and latency
|
||||
- Using health endpoints probes to indicate the Dapr sidecar health status
|
||||
|
||||
<img src="/images/observability-sidecar.png" width=1000 alt="Dapr sidecar metrics, logs and health checks">
|
||||
|
||||
### Logging
|
||||
|
||||
Dapr generates [logs]({{<ref "logs.md">}}) to provide visibility into sidecar operation and to help users identify issues and perform debugging. Log events contain warning, error, info, and debug messages produced by Dapr system services. Dapr can also be configured to send logs to collectors such as [Fluentd]({{< ref fluentd.md >}}), [Azure Monitor]({{< ref azure-monitor.md >}}), and other observability tools, so that logs can be searched and analyzed to provide insights.
|
||||
Dapr generates [logs]({{< ref logs.md >}}) to:
|
||||
- Provide visibility into sidecar operation
|
||||
- Help users identify issues and perform debugging
|
||||
|
||||
Log events contain warning, error, info, and debug messages produced by Dapr system services. You can also configure Dapr to send logs to collectors, such as [Open Telemetry Collector]({{< ref otel-collector >}}), [Fluentd]({{< ref fluentd.md >}}), [New Relic]({{< ref "operations/observability/logging/newrelic.md" >}}), [Azure Monitor]({{< ref azure-monitor.md >}}), and other observability tools, so that logs can be searched and analyzed to provide insights.
|
||||
|
||||
### Metrics
|
||||
|
||||
Metrics are the series of measured values and counts that are collected and stored over time. [Dapr metrics]({{<ref "metrics">}}) provide monitoring capabilities to understand the behavior of the Dapr sidecar and control plane. For example, the metrics between a Dapr sidecar and the user application show call latency, traffic failures, error rates of requests, etc. Dapr [control plane metrics](https://github.com/dapr/dapr/blob/master/docs/development/dapr-metrics.md) show sidecar injection failures and the health of control plane services, including CPU usage, number of actor placements made, etc.
|
||||
Metrics are a series of measured values and counts collected and stored over time. [Dapr metrics]({{< ref metrics >}}) provide monitoring capabilities to understand the behavior of the Dapr sidecar and control plane. For example, the metrics between a Dapr sidecar and the user application show call latency, traffic failures, error rates of requests, etc.
|
||||
|
||||
Dapr [control plane metrics](https://github.com/dapr/dapr/blob/master/docs/development/dapr-metrics.md) show sidecar injection failures and the health of control plane services, including CPU usage, number of actor placements made, etc.
|
||||
|
||||
### Health checks
|
||||
|
||||
The Dapr sidecar exposes an HTTP endpoint for [health checks]({{<ref sidecar-health.md>}}). With this API, user code or hosting environments can probe the Dapr sidecar to determine its status and identify issues with sidecar readiness.
|
||||
The Dapr sidecar exposes an HTTP endpoint for [health checks]({{< ref sidecar-health.md >}}). With this API, user code or hosting environments can probe the Dapr sidecar to determine its status and identify issues with sidecar readiness.
|
||||
|
||||
Conversely, Dapr can be configured to probe for the [health of your application]({{<ref app-health.md >}}), and react to changes in the app's health, including stopping pub/sub subscriptions and short-circuiting service invocation calls.
|
||||
Conversely, Dapr can be configured to probe for the [health of your application]({{< ref app-health.md >}}), and react to changes in the app's health, including stopping pub/sub subscriptions and short-circuiting service invocation calls.
|
||||
|
||||
## Next steps
|
||||
|
||||
- [Learn more about observability in developing with Dapr]({{< ref tracing >}})
|
||||
- [Learn more about observability in operating with Dapr]({{< ref tracing >}})
|
|
@ -7,7 +7,7 @@ description: >
|
|||
Introduction to the Distributed Application Runtime
|
||||
---
|
||||
|
||||
Dapr is a portable, event-driven runtime that makes it easy for any developer to build resilient, stateless and stateful applications that run on the cloud and edge and embraces the diversity of languages and developer frameworks.
|
||||
Dapr is a portable, event-driven runtime that makes it easy for any developer to build resilient, stateless, and stateful applications that run on the cloud and edge and embraces the diversity of languages and developer frameworks.
|
||||
|
||||
<div class="embed-responsive embed-responsive-16by9">
|
||||
<iframe width="1120" height="630" src="https://www.youtube-nocookie.com/embed/9o9iDAgYBA8" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
|
||||
|
@ -15,23 +15,32 @@ Dapr is a portable, event-driven runtime that makes it easy for any developer to
|
|||
|
||||
## Any language, any framework, anywhere
|
||||
|
||||
<img src="/images/overview.png" width=1200>
|
||||
<img src="/images/overview.png" width=1200 style="padding-bottom:15px;">
|
||||
|
||||
Today we are experiencing a wave of cloud adoption. Developers are comfortable with web + database application architectures, for example classic 3-tier designs, but not with microservice application architectures which are inherently distributed. It’s hard to become a distributed systems expert, nor should you have to. Developers want to focus on business logic, while leaning on the platforms to imbue their applications with scale, resiliency, maintainability, elasticity and the other attributes of cloud-native architectures.
|
||||
With the current wave of cloud adoption, web + database application architectures (such as classic 3-tier designs) are trending more toward microservice application architectures, which are inherently distributed. You shouldn't have to become a distributed systems expert just to create microservices applications.
|
||||
|
||||
This is where Dapr comes in. Dapr codifies the *best practices* for building microservice applications into open, independent APIs called building blocks, that enable you to build portable applications with the language and framework of your choice. Each building block is completely independent and you can use one, some, or all of them in your application.
|
||||
This is where Dapr comes in. Dapr codifies the *best practices* for building microservice applications into open, independent APIs called [building blocks]({{< ref "#microservice-building-blocks-for-cloud-and-edge" >}}). Dapr's building blocks:
|
||||
- Enable you to build portable applications using the language and framework of your choice.
|
||||
- Are completely independent
|
||||
- Have no limit to how many you use in your application
|
||||
|
||||
Using Dapr you can incrementally migrate your existing applications to a microservices architecture, thereby adopting cloud native patterns such scale out/in, resiliency and independent deployments.
|
||||
Using Dapr, you can incrementally migrate your existing applications to a microservices architecture, thereby adopting cloud native patterns such as scale out/in, resiliency, and independent deployments.
|
||||
|
||||
In addition, Dapr is platform agnostic, meaning you can run your applications locally, on any Kubernetes cluster, on virtual or physical machines and in other hosting environments that Dapr integrates with. This enables you to build microservice applications that can run on the cloud and edge.
|
||||
Dapr is platform agnostic, meaning you can run your applications:
|
||||
- Locally
|
||||
- On any Kubernetes cluster
|
||||
- On virtual or physical machines
|
||||
- In other hosting environments that Dapr integrates with.
|
||||
|
||||
This enables you to build microservice applications that can run on the cloud and edge.
|
||||
|
||||
## Microservice building blocks for cloud and edge
|
||||
|
||||
<img src="/images/building_blocks.png" width=1200>
|
||||
<img src="/images/building_blocks.png" width=1200 style="padding-bottom:15px;">
|
||||
|
||||
There are many considerations when architecting microservices applications. Dapr provides best practices for common capabilities when building microservice applications that developers can use in a standard way, and deploy to any environment. It does this by providing distributed system building blocks.
|
||||
Dapr provides distributed system building blocks for you to build microservice applications in a standard way and to deploy to any environment.
|
||||
|
||||
Each of these building block APIs is independent, meaning that you can use one, some, or all of them in your application. The following building blocks are available:
|
||||
Each of these building block APIs is independent, meaning that you can use any number of them in your application.
|
||||
|
||||
| Building Block | Description |
|
||||
|----------------|-------------|
|
||||
|
@ -40,13 +49,22 @@ Each of these building block APIs is independent, meaning that you can use one,
|
|||
| [**Publish and subscribe**]({{< ref "pubsub-overview.md" >}}) | Publishing events and subscribing to topics between services enables event-driven architectures to simplify horizontal scalability and make them resilient to failure. Dapr provides at-least-once message delivery guarantee, message TTL, consumer groups and other advance features.
|
||||
| [**Resource bindings**]({{< ref "bindings-overview.md" >}}) | Resource bindings with triggers builds further on event-driven architectures for scale and resiliency by receiving and sending events to and from any external source such as databases, queues, file systems, etc.
|
||||
| [**Actors**]({{< ref "actors-overview.md" >}}) | A pattern for stateful and stateless objects that makes concurrency simple, with method and state encapsulation. Dapr provides many capabilities in its actor runtime, including concurrency, state, and life-cycle management for actor activation/deactivation, and timers and reminders to wake up actors.
|
||||
| [**Observability**]({{< ref "observability-concept.md" >}}) | Dapr emits metrics, logs, and traces to debug and monitor both Dapr and user applications. Dapr supports distributed tracing to easily diagnose and serve inter-service calls in production using the W3C Trace Context standard and Open Telemetry to send to different monitoring tools.
|
||||
| [**Secrets**]({{< ref "secrets-overview.md" >}}) | The secrets management API integrates with public cloud and local secret stores to retrieve the secrets for use in application code.
|
||||
| [**Configuration**]({{< ref "configuration-api-overview.md" >}}) | The configuration API enables you to retrieve and subscribe to application configuration items from configuration stores.
|
||||
| [**Distributed lock**]({{< ref "distributed-lock-api-overview.md" >}}) | The distributed lock API enables your application to acquire a lock for any resource that gives it exclusive access until either the lock is released by the application, or a lease timeout occurs.
|
||||
| [**Workflows**]({{< ref "workflow-overview.md" >}}) | The workflow API can be combined with other Dapr building blocks to define long running, persistent processes or data flows that span multiple microservices using Dapr workflows or workflow components.
|
||||
| [**Cryptography**]({{< ref "cryptography-overview.md" >}}) | The cryptography API provides an abstraction layer on top of security infrastructure such as key vaults. It contains APIs that allow you to perform cryptographic operations, such as encrypting and decrypting messages, without exposing keys to your applications.
|
||||
|
||||
### Cross-cutting APIs
|
||||
|
||||
Alongside its building blocks, Dapr provides cross-cutting APIs that apply across all the building blocks you use.
|
||||
|
||||
| Building Block | Description |
|
||||
|----------------|-------------|
|
||||
| [**Resiliency**]({{< ref "resiliency-concept.md" >}}) | Dapr provides the capability to define and apply fault tolerance resiliency policies via a resiliency spec. Supported specs define policies for resiliency patterns such as timeouts, retries/back-offs, and circuit breakers.
|
||||
| [**Observability**]({{< ref "observability-concept.md" >}}) | Dapr emits metrics, logs, and traces to debug and monitor both Dapr and user applications. Dapr supports distributed tracing to easily diagnose and serve inter-service calls in production using the W3C Trace Context standard and Open Telemetry to send to different monitoring tools.
|
||||
| [**Security**]({{< ref "security-concept.md" >}}) | Dapr supports in-transit encryption of communication between Dapr instances using the Dapr control plane, Sentry service. You can bring in your own certificates, or let Dapr automatically create and persist self-signed root and issuer certificates.
|
||||
|
||||
## Sidecar architecture
|
||||
|
||||
Dapr exposes its HTTP and gRPC APIs as a sidecar architecture, either as a container or as a process, not requiring the application code to include any Dapr runtime code. This makes integration with Dapr easy from other runtimes, as well as providing separation of the application logic for improved supportability.
|
||||
|
@ -55,33 +73,41 @@ Dapr exposes its HTTP and gRPC APIs as a sidecar architecture, either as a conta
|
|||
|
||||
## Hosting environments
|
||||
|
||||
Dapr can be hosted in multiple environments, including self-hosted on a Windows/Linux/macOS machines for local development and on Kubernetes or clusters of physical or virtual machines in production.
|
||||
Dapr can be hosted in multiple environments, including:
|
||||
- Self-hosted on a Windows/Linux/macOS machine for local development
|
||||
- On Kubernetes or clusters of physical or virtual machines in production
|
||||
|
||||
### Self-hosted local development
|
||||
|
||||
In [self-hosted mode]({{< ref self-hosted-overview.md >}}) Dapr runs as a separate sidecar process which your service code can call via HTTP or gRPC. Each running service has a Dapr runtime process (or sidecar) which is configured to use state stores, pub/sub, binding components and the other building blocks.
|
||||
In [self-hosted mode]({{< ref self-hosted-overview.md >}}), Dapr runs as a separate sidecar process, which your service code can call via HTTP or gRPC. Each running service has a Dapr runtime process (or sidecar) configured to use state stores, pub/sub, binding components, and the other building blocks.
|
||||
|
||||
You can use the [Dapr CLI](https://github.com/dapr/cli#launch-dapr-and-your-app) to run a Dapr-enabled application on your local machine. The diagram below show Dapr's local development environment when configured with the CLI `init` command. Try this out with the [getting started samples]({{< ref getting-started >}}).
|
||||
You can use the [Dapr CLI](https://github.com/dapr/cli#launch-dapr-and-your-app) to run a Dapr-enabled application on your local machine. In the following diagram, Dapr's local development environment gets configured with the CLI `init` command. Try this out with the [getting started samples]({{< ref getting-started >}}).
|
||||
|
||||
<img src="/images/overview-standalone.png" width=1200 alt="Architecture diagram of Dapr in self-hosted mode">
|
||||
|
||||
### Kubernetes
|
||||
|
||||
Kubernetes can be used for either local development (for example with [minikube](https://minikube.sigs.k8s.io/docs/), [k3S](https://k3s.io/)) or in [production]({{< ref kubernetes >}}). In container hosting environments such as Kubernetes, Dapr runs as a sidecar container with the application container in the same pod.
|
||||
Kubernetes can be used for either:
|
||||
- Local development (for example, with [minikube](https://minikube.sigs.k8s.io/docs/) and [K3s](https://k3s.io/)), or
|
||||
- In [production]({{< ref kubernetes >}}).
|
||||
|
||||
Dapr has control plane services. The `dapr-sidecar-injector` and `dapr-operator` services provide first-class integration to launch Dapr as a sidecar container in the same pod as the service container and provide notifications of Dapr component updates provisioned in the cluster.
|
||||
In container hosting environments such as Kubernetes, Dapr runs as a sidecar container with the application container in the same pod.
|
||||
|
||||
Dapr's `dapr-sidecar-injector` and `dapr-operator` control plane services provide first-class integration to:
|
||||
- Launch Dapr as a sidecar container in the same pod as the service container
|
||||
- Provide notifications of Dapr component updates provisioned in the cluster
|
||||
|
||||
<!-- IGNORE_LINKS -->
|
||||
The `dapr-sentry` service is a certificate authority that enables mutual TLS between Dapr sidecar instances for secure data encryption, as well as providing identity via [SPIFFE](https://spiffe.io/). For more information on the `Sentry` service, read the [security overview]({{< ref "security-concept.md#dapr-to-dapr-communication" >}}).
|
||||
<!-- END_IGNORE -->
|
||||
|
||||
Deploying and running a Dapr-enabled application into your Kubernetes cluster is as simple as adding a few annotations to the deployment schemes. Visit the [Dapr on Kubernetes docs]({{< ref kubernetes >}})
|
||||
Deploying and running a Dapr-enabled application into your Kubernetes cluster is as simple as adding a few annotations to the deployment schemes. Visit the [Dapr on Kubernetes docs]({{< ref kubernetes >}}).
|
||||
|
||||
<img src="/images/overview-kubernetes.png" width=1200 alt="Architecture diagram of Dapr in Kubernetes mode">
|
||||
|
||||
### Clusters of physical or virtual machines
|
||||
|
||||
The Dapr control plane services can be deployed in High Availability (HA) mode to clusters of physical or virtual machines in production, for example, as shown in the diagram below. Here the Actor `Placement` and `Sentry` services are started on three different VMs to provide HA control plane. In order to provide name resolution using DNS for the applications running in the cluster, Dapr uses [Hashicorp Consul service]({{< ref setup-nr-consul >}}), also running in HA mode.
|
||||
The Dapr control plane services can be deployed in high availability (HA) mode to clusters of physical or virtual machines in production. In the diagram below, the Actor `Placement` and security `Sentry` services are started on three different VMs to provide an HA control plane. In order to provide name resolution using DNS for the applications running in the cluster, Dapr uses [Hashicorp Consul service]({{< ref setup-nr-consul >}}), also running in HA mode.
|
||||
|
||||
<img src="/images/overview-vms-hosting.png" width=1200 alt="Architecture diagram of Dapr control plane and Consul deployed to VMs in high availability mode">
|
||||
|
||||
|
@ -91,17 +117,15 @@ Dapr offers a variety of SDKs and frameworks to make it easy to begin developing
|
|||
|
||||
### Dapr SDKs
|
||||
|
||||
To make using Dapr more natural for different languages, it also includes [language specific SDKs]({{<ref sdks>}}) for:
|
||||
- C++
|
||||
To make using Dapr more natural for different languages, it also includes [language specific SDKs]({{< ref sdks >}}) for:
|
||||
- Go
|
||||
- Java
|
||||
- JavaScript
|
||||
- .NET
|
||||
- PHP
|
||||
- Python
|
||||
- Rust
|
||||
|
||||
These SDKs expose the functionality of the Dapr building blocks through a typed language API, rather than calling the http/gRPC API. This enables you to write a combination of stateless and stateful functions and actors all in the language of your choice. And because these SDKs share the Dapr runtime, you get cross-language actor and function support.
|
||||
These SDKs expose the functionality of the Dapr building blocks through a typed language API, rather than calling the http/gRPC API. This enables you to write a combination of stateless and stateful functions and actors all in the language of your choice. Since these SDKs share the Dapr runtime, you get cross-language actor and function support.
|
||||
|
||||
### Developer frameworks
|
||||
|
||||
|
@ -120,7 +144,7 @@ Dapr can be used from any developer framework. Here are some that have been inte
|
|||
#### Integrations and extensions
|
||||
|
||||
Visit the [integrations]({{< ref integrations >}}) page to learn about some of the first-class support Dapr has for various frameworks and external products, including:
|
||||
- Public cloud services
|
||||
- Public cloud services, like Azure and AWS
|
||||
- Visual Studio Code
|
||||
- GitHub
|
||||
|
||||
|
@ -128,6 +152,6 @@ Visit the [integrations]({{< ref integrations >}}) page to learn about some of t
|
|||
|
||||
Dapr is designed for [operations]({{< ref operations >}}) and security. The Dapr sidecars, runtime, components, and configuration can all be managed and deployed easily and securely to match your organization's needs.
|
||||
|
||||
The [dashboard](https://github.com/dapr/dashboard), installed via the Dapr CLI, provides a web-based UI enabling you to see information, view logs and more for running Dapr applications.
|
||||
The [dashboard](https://github.com/dapr/dashboard), installed via the Dapr CLI, provides a web-based UI enabling you to see information, view logs, and more for running Dapr applications.
|
||||
|
||||
The [monitoring tools support]({{< ref monitoring >}}) provides deeper visibility into the Dapr system services and side-cars and the [observability capabilities]({{<ref "observability-concept.md">}}) of Dapr provide insights into your application such as tracing and metrics.
|
||||
Dapr supports [monitoring tools]({{< ref observability >}}) for deeper visibility into the Dapr system services and sidecars, while the [observability capabilities]({{< ref "observability-concept.md" >}}) of Dapr provide insights into your application, such as tracing and metrics.
|
||||
|
|
|
@ -211,6 +211,36 @@ The Dapr threat model is below.
|
|||
|
||||
## Security audit
|
||||
|
||||
### September 2023
|
||||
|
||||
In September 2023, Dapr completed a security audit done by Ada Logics.
|
||||
|
||||
The audit was a holistic security audit with the following goals:
|
||||
|
||||
- Formalize a threat model of Dapr
|
||||
- Perform manual code review
|
||||
- Evaluate Dapr's fuzzing suite against the formalized threat model
|
||||
- Carry out a SLSA review of Dapr.
|
||||
|
||||
You can find the full report [here](/docs/Dapr-september-2023-security-audit-report.pdf).
|
||||
|
||||
The audit found 7 issues, none of which were of high or critical severity. One CVE was assigned from an issue in a 3rd-party dependency to Dapr Components Contrib.
|
||||
|
||||
### June 2023
|
||||
|
||||
In June 2023, Dapr completed a fuzzing audit done by Ada Logics.
|
||||
|
||||
The audit achieved the following:
|
||||
|
||||
- OSS-Fuzz integration
|
||||
- 39 new fuzzers for Dapr
|
||||
- Fuzz test coverage for Dapr Runtime, Kit and Components-contrib
|
||||
- All fuzzers running continuously after the audit has completed
|
||||
|
||||
You can find the full report [here](/docs/Dapr-june-2023-fuzzing-audit-report.pdf).
|
||||
|
||||
Three issues were found during the audit.
|
||||
|
||||
### February 2021
|
||||
|
||||
In February 2021, Dapr went through a 2nd security audit targeting its 1.0 release by Cure53.
|
||||
|
@ -255,4 +285,4 @@ Visit [this page]({{< ref support-security-issues.md >}}) to report a security i
|
|||
|
||||
## Related links
|
||||
|
||||
[Operational Security]({{< ref "security.md" >}})
|
||||
[Operational Security]({{< ref "security.md" >}})
|
||||
|
|
|
@ -39,11 +39,11 @@ Style and tone conventions should be followed throughout all Dapr documentation
|
|||
|
||||
## Diagrams and images
|
||||
|
||||
Diagrams and images are invaluable visual aids for documentation pages. Diagrams are kept in a [Dapr Diagrams Deck](https://github.com/dapr/docs/tree/v1.10/daprdocs/static/presentations), which includes guidance on style and icons.
|
||||
Diagrams and images are invaluable visual aids for documentation pages. Diagrams are kept in a [Dapr Diagrams Deck](https://github.com/dapr/docs/tree/v1.11/daprdocs/static/presentations), which includes guidance on style and icons.
|
||||
|
||||
As you create diagrams for your documentation:
|
||||
|
||||
- Save them as high-res PNG files into the [images folder](https://github.com/dapr/docs/tree/v1.10/daprdocs/static/images).
|
||||
- Save them as high-res PNG files into the [images folder](https://github.com/dapr/docs/tree/v1.11/daprdocs/static/images).
|
||||
- Name your PNG files using the convention of a concept or building block so that they are grouped.
|
||||
- For example: `service-invocation-overview.png`.
|
||||
- For more information on calling out images using shortcode, see the [Images guidance](#images) section below.
|
||||
|
@ -458,4 +458,4 @@ Steps to add a language:
|
|||
|
||||
## Next steps
|
||||
|
||||
Get started by copying and working from one of [the Dapr doc templates]({{< ref docs-templates >}}).
|
||||
Get started by copying and working from one of [the Dapr doc templates]({{< ref docs-templates >}}).
|
||||
|
|
|
@ -8,4 +8,5 @@ description: "Dapr capabilities that solve common development challenges for dis
|
|||
|
||||
Get a high-level [overview of Dapr building blocks]({{< ref building-blocks-concept >}}) in the **Concepts** section.
|
||||
|
||||
<img src="/images/buildingblocks-overview.png" alt="Diagram showing the different Dapr API building blocks" width=1000>
|
||||
<img src="/images/buildingblocks-overview.png" alt="Diagram showing the different Dapr API building blocks" width=1000>
|
||||
|
||||
|
|
|
@ -5,3 +5,10 @@ linkTitle: "Actors"
|
|||
weight: 50
|
||||
description: Encapsulate code and data in reusable actor objects as a common microservices design pattern
|
||||
---
|
||||
|
||||
{{% alert title="More about Dapr Actors" color="primary" %}}
|
||||
Learn more about how to use Dapr Actors:
|
||||
- Try the [Actors quickstart]({{< ref actors-quickstart.md >}}).
|
||||
- Explore actors via any of the [Dapr SDKs]({{< ref sdks >}}).
|
||||
- Review the [Actors API reference documentation]({{< ref actors_api.md >}}).
|
||||
{{% /alert %}}
|
||||
|
|
|
@ -20,7 +20,11 @@ Dapr includes a runtime that specifically implements the [Virtual Actor pattern]
|
|||
|
||||
Every actor is defined as an instance of an actor type, identical to the way an object is an instance of a class. For example, there may be an actor type that implements the functionality of a calculator and there could be many actors of that type that are distributed on various nodes across a cluster. Each such actor is uniquely identified by an actor ID.
|
||||
|
||||
<img src="/images/actor_background_game_example.png" width=400>
|
||||
<img src="/images/actor_background_game_example.png" width=400 style="padding-bottom:25px;">
|
||||
|
||||
[The following overview video and demo](https://www.youtube.com/live/0y7ne6teHT4?si=dWNgtsp61f3Sjq0n&t=10797) demonstrates how actors in Dapr work.
|
||||
|
||||
<iframe width="560" height="315" src="https://www.youtube-nocookie.com/embed/0y7ne6teHT4?si=dWNgtsp61f3Sjq0n&start=10797" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" allowfullscreen></iframe>
|
||||
|
||||
## Actor types and IDs
|
||||
|
||||
|
@ -109,6 +113,10 @@ The functionality of timers and reminders is very similar. The main difference i
|
|||
|
||||
This distinction allows users to trade off between light-weight but stateless timers vs. more resource-demanding but stateful reminders.
|
||||
|
||||
[The following overview video and demo](https://www.youtube.com/live/0y7ne6teHT4?si=2_xX6mkU3UCy2Plr&t=6607) demonstrates how actor timers and reminders work.
|
||||
|
||||
<iframe width="560" height="315" src="https://www.youtube-nocookie.com/embed/0y7ne6teHT4?si=73VqYUUvNfFw3x5_&start=12184" title="YouTube video player" style="padding-bottom:25px;" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" allowfullscreen></iframe>
|
||||
|
||||
- [Learn more about actor timers.]({{< ref "actors-features-concepts.md#timers" >}})
|
||||
- [Learn more about actor reminders.]({{< ref "actors-features-concepts.md#reminders" >}})
|
||||
- [Learn more about timer and reminder error handling and failover.]({{< ref "actors-features-concepts.md#timers-and-reminders-error-handling" >}})
|
||||
|
|
|
@ -5,3 +5,12 @@ linkTitle: "Bindings"
|
|||
weight: 40
|
||||
description: Interface with or be triggered from external systems
|
||||
---
|
||||
|
||||
|
||||
{{% alert title="More about Dapr Bindings" color="primary" %}}
|
||||
Learn more about how to use Dapr Bindings:
|
||||
- Try the [Bindings quickstart]({{< ref bindings-quickstart.md >}}).
|
||||
- Explore input and output bindings via any of the supporting [Dapr SDKs]({{< ref sdks >}}).
|
||||
- Review the [Bindings API reference documentation]({{< ref bindings_api.md >}}).
|
||||
- Browse the supported [input and output bindings component specs]({{< ref supported-bindings >}}).
|
||||
{{% /alert %}}
|
|
@ -15,14 +15,18 @@ Using Dapr's bindings API, you can trigger your app with events coming in from e
|
|||
- Switch between bindings at runtime.
|
||||
- Build portable applications with environment-specific bindings set-up and no required code changes.
|
||||
|
||||
For example, with bindings, your microservice can respond to incoming Twilio/SMS messages without:
|
||||
For example, with bindings, your application can respond to incoming Twilio/SMS messages without:
|
||||
|
||||
- Adding or configuring a third-party Twilio SDK
|
||||
- Worrying about polling from Twilio (or using WebSockets, etc.)
|
||||
|
||||
{{% alert title="Note" color="primary" %}}
|
||||
<img src="/images/binding-overview.png" width=1000 alt="Diagram showing bindings" style="padding-bottom:25px;">
|
||||
|
||||
In the above diagram:
|
||||
- The input binding triggers a method on your application.
|
||||
- Output binding operations, such as `"create"`, are executed on the component.
|
||||
|
||||
Bindings are developed independently of the Dapr runtime. You can [view and contribute to the bindings](https://github.com/dapr/components-contrib/tree/master/bindings).
|
||||
{{% /alert %}}
|
||||
|
||||
{{% alert title="Note" color="primary" %}}
|
||||
If you are using the HTTP Binding, then it is preferable to use [service invocation]({{< ref service_invocation_api.md >}}) instead. Read [How-To: Invoke Non-Dapr Endpoints using HTTP]({{< ref "howto-invoke-non-dapr-endpoints.md" >}}) for more information.
|
||||
|
@ -32,6 +36,10 @@ If you are using the HTTP Binding, then it is preferable to use [service invocat
|
|||
|
||||
With input bindings, you can trigger your application when an event from an external resource occurs. An optional payload and metadata may be sent with the request.
|
||||
|
||||
[The following overview video and demo](https://www.youtube.com/live/0y7ne6teHT4?si=wlmAi7BJBWS8KNK7&t=8261) demonstrates how Dapr input binding works.
|
||||
|
||||
<iframe width="560" height="315" src="https://www.youtube-nocookie.com/embed/0y7ne6teHT4?si=wlmAi7BJBWS8KNK7&start=8261" title="YouTube video player" style="padding-bottom:25px;" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" allowfullscreen></iframe>
|
||||
|
||||
To receive events from an input binding:
|
||||
|
||||
1. Define the component YAML that describes the binding type and its metadata (connection info, etc.).
|
||||
|
@ -50,13 +58,36 @@ Read the [Create an event-driven app using input bindings guide]({{< ref howto-t
|
|||
|
||||
With output bindings, you can invoke external resources. An optional payload and metadata can be sent with the invocation request.
|
||||
|
||||
[The following overview video and demo](https://www.youtube.com/live/0y7ne6teHT4?si=PoA4NEqL5mqNj6Il&t=7668) demonstrates how Dapr output binding works.
|
||||
|
||||
<iframe width="560" height="315" src="https://www.youtube-nocookie.com/embed/0y7ne6teHT4?si=PoA4NEqL5mqNj6Il&start=7668" title="YouTube video player" style="padding-bottom:25px;" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" allowfullscreen></iframe>
|
||||
|
||||
To invoke an output binding:
|
||||
|
||||
1. Define the component YAML that describes the binding type and its metadata (connection info, etc.).
|
||||
2. Use the HTTP endpoint or gRPC method to invoke the binding with an optional payload.
|
||||
1. Use the HTTP endpoint or gRPC method to invoke the binding with an optional payload.
|
||||
1. Specify an output operation. Output operations depend on the binding component you use, and can include:
|
||||
- `"create"`
|
||||
- `"update"`
|
||||
- `"delete"`
|
||||
- `"exec"`
|
||||
|
||||
Read the [Use output bindings to interface with external resources guide]({{< ref howto-bindings.md >}}) to get started with output bindings.
|
||||
|
||||
## Binding directions (optional)
|
||||
|
||||
You can provide the `direction` metadata field to indicate the direction(s) supported by the binding component. In doing so, the Dapr sidecar avoids the `"wait for the app to become ready"` state, reducing the lifecycle dependency between the Dapr sidecar and the application:
|
||||
|
||||
- `"input"`
|
||||
- `"output"`
|
||||
- `"input, output"`
|
||||
|
||||
{{% alert title="Note" color="primary" %}}
|
||||
It is highly recommended that all bindings include the `direction` property.
|
||||
{{% /alert %}}
|
||||
|
||||
[See a full example of the bindings `direction` metadata.]({{< ref "bindings_api.md#binding-direction-optional" >}})
|
||||
|
||||
## Try out bindings
|
||||
|
||||
### Quickstarts and tutorials
|
||||
|
|
|
@ -32,6 +32,8 @@ Create a new binding component named `checkout`. Within the `metadata` section,
|
|||
- The topic to which you'll publish the message
|
||||
- The broker
|
||||
|
||||
When creating the binding component, [specify the supported `direction` of the binding]({{< ref "bindings_api.md#binding-direction-optional" >}}).
|
||||
|
||||
{{< tabs "Self-Hosted (CLI)" Kubernetes >}}
|
||||
|
||||
{{% codetab %}}
|
||||
|
@ -59,7 +61,9 @@ spec:
|
|||
- name: publishTopic
|
||||
value: sample
|
||||
- name: authRequired
|
||||
value: "false"
|
||||
value: false
|
||||
- name: direction
|
||||
value: output
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
|
@ -89,7 +93,9 @@ spec:
|
|||
- name: publishTopic
|
||||
value: sample
|
||||
- name: authRequired
|
||||
value: "false"
|
||||
value: false
|
||||
- name: direction
|
||||
value: output
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
|
|
|
@ -37,6 +37,8 @@ Create a new binding component named `checkout`. Within the `metadata` section,
|
|||
- The topic to which you'll publish the message
|
||||
- The broker
|
||||
|
||||
When creating the binding component, [specify the supported `direction` of the binding]({{< ref "bindings_api.md#binding-direction-optional" >}}).
|
||||
|
||||
{{< tabs "Self-Hosted (CLI)" Kubernetes >}}
|
||||
|
||||
{{% codetab %}}
|
||||
|
@ -64,7 +66,9 @@ spec:
|
|||
- name: publishTopic
|
||||
value: sample
|
||||
- name: authRequired
|
||||
value: "false"
|
||||
value: false
|
||||
- name: direction
|
||||
value: input
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
|
@ -94,7 +98,9 @@ spec:
|
|||
- name: publishTopic
|
||||
value: sample
|
||||
- name: authRequired
|
||||
value: "false"
|
||||
value: false
|
||||
- name: direction
|
||||
value: input
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
|
@ -256,15 +262,15 @@ async function start() {
|
|||
|
||||
{{< /tabs >}}
|
||||
|
||||
### ACK-ing an event
|
||||
### ACK an event
|
||||
|
||||
Tell Dapr you've successfully processed an event in your application by returning a `200 OK` response from your HTTP handler.
|
||||
|
||||
### Rejecting an event
|
||||
### Reject an event
|
||||
|
||||
Tell Dapr the event was not processed correctly in your application and schedule it for redelivery by returning any response other than `200 OK`. For example, a `500 Error`.
|
||||
|
||||
### Specifying a custom route
|
||||
### Specify a custom route
|
||||
|
||||
By default, incoming events will be sent to an HTTP endpoint that corresponds to the name of the input binding. You can override this by setting the following metadata property in `binding.yaml`:
|
||||
|
||||
|
|
|
@ -5,3 +5,11 @@ linkTitle: "Configuration"
|
|||
weight: 80
|
||||
description: Manage and be notified of application configuration changes
|
||||
---
|
||||
|
||||
{{% alert title="More about Dapr Configuration" color="primary" %}}
|
||||
Learn more about how to use Dapr Configuration:
|
||||
- Try the [Configuration quickstart]({{< ref configuration-quickstart.md >}}).
|
||||
- Explore configuration via any of the supporting [Dapr SDKs]({{< ref sdks >}}).
|
||||
- Review the [Configuration API reference documentation]({{< ref configuration_api.md >}}).
|
||||
- Browse the supported [configuration component specs]({{< ref supported-configuration-stores >}}).
|
||||
{{% /alert %}}
|
|
@ -4,4 +4,11 @@ title: "Cryptography"
|
|||
linkTitle: "Cryptography"
|
||||
weight: 110
|
||||
description: "Perform cryptographic operations without exposing keys to your application"
|
||||
---
|
||||
---
|
||||
|
||||
{{% alert title="More about Dapr Cryptography" color="primary" %}}
|
||||
Learn more about how to use Dapr Cryptography:
|
||||
- Try the [Cryptography quickstart]({{< ref cryptography-quickstart.md >}}).
|
||||
- Explore cryptography via any of the supporting [Dapr SDKs]({{< ref sdks >}}).
|
||||
- Browse the supported [cryptography component specs]({{< ref supported-cryptography >}}).
|
||||
{{% /alert %}}
|
|
@ -5,3 +5,10 @@ linkTitle: "Distributed lock"
|
|||
weight: 90
|
||||
description: Distributed locks provide mutually exclusive access to shared resources from an application.
|
||||
---
|
||||
|
||||
{{% alert title="More about Dapr Distributed Lock" color="primary" %}}
|
||||
Learn more about how to use Dapr Distributed Lock:
|
||||
- Explore distributed locks via any of the supporting [Dapr SDKs]({{< ref sdks >}}).
|
||||
- Review the [Distributed Lock API reference documentation]({{< ref distributed_lock_api.md >}}).
|
||||
- Browse the supported [distributed locks component specs]({{< ref supported-locks >}}).
|
||||
{{% /alert %}}
|
|
@ -31,6 +31,7 @@ metadata:
|
|||
name: lockstore
|
||||
spec:
|
||||
type: lock.redis
|
||||
version: v1
|
||||
metadata:
|
||||
- name: redisHost
|
||||
value: localhost:6379
|
||||
|
|
|
@ -1,9 +0,0 @@
|
|||
---
|
||||
type: docs
|
||||
title: "Observability"
|
||||
linkTitle: "Observability"
|
||||
weight: 60
|
||||
description: See and measure the message calls to components and between networked services
|
||||
---
|
||||
|
||||
This section includes guides for developers in the context of observability. See other sections for a [general overview of the observability concept]({{< ref observability-concept >}}) in Dapr and for [operations guidance on monitoring]({{< ref monitoring >}}).
|
|
@ -1,118 +0,0 @@
|
|||
---
|
||||
type: docs
|
||||
title: "Distributed tracing"
|
||||
linkTitle: "Distributed tracing"
|
||||
weight: 100
|
||||
description: "Use tracing to get visibility into your application"
|
||||
---
|
||||
|
||||
Dapr uses the Open Telemetry (OTEL) and Zipkin protocols for distributed traces. OTEL is the industry standard and is the recommended trace protocol to use.
|
||||
|
||||
Most observability tools support OTEL. For example [Google Cloud Operations](https://cloud.google.com/products/operations), [New Relic](https://newrelic.com), [Azure Monitor](https://azure.microsoft.com/services/monitor/), [Datadog](https://www.datadoghq.com), Instana, [Jaeger](https://www.jaegertracing.io/), and [SignalFX](https://www.signalfx.com/).
|
||||
|
||||
## Scenarios
|
||||
Tracing is used with the service invocation and pub/sub APIs. You can flow trace context between services that use these APIs.
|
||||
|
||||
There are two scenarios for how tracing is used:
|
||||
|
||||
1. Dapr generates the trace context and you propagate the trace context to another service.
|
||||
2. You generate the trace context and Dapr propagates the trace context to a service.
|
||||
|
||||
### Propagating sequential service calls
|
||||
|
||||
Dapr takes care of creating the trace headers. However, when there are more than two services, you're responsible for propagating the trace headers between them. Let's go through the scenarios with examples:
|
||||
|
||||
1. Single service invocation call (`service A -> service B`)
|
||||
|
||||
Dapr generates the trace headers in service A, which are then propagated from service A to service B. No further propagation is needed.
|
||||
|
||||
2. Multiple sequential service invocation calls ( `service A -> service B -> service C`)
|
||||
|
||||
Dapr generates the trace headers at the beginning of the request in service A, which are then propagated to service B. You are now responsible for taking the headers and propagating them to service C, since this is specific to your application.
|
||||
|
||||
`service A -> service B -> propagate trace headers to -> service C` and so on to further Dapr-enabled services.
|
||||
|
||||
In other words, if the app is calling to Dapr and wants to trace with an existing span (trace header), it must always propagate to Dapr (from service B to service C in this case). Dapr always propagates trace spans to an application.
|
||||
|
||||
{{% alert title="Note" color="primary" %}}
|
||||
There are no helper methods exposed in Dapr SDKs to propagate and retrieve trace context. You need to use HTTP/gRPC clients to propagate and retrieve trace headers through HTTP headers and gRPC metadata.
|
||||
{{% /alert %}}
|
||||
|
||||
3. Request is from external endpoint (for example, `from a gateway service to a Dapr-enabled service A`)
|
||||
|
||||
An external gateway ingress calls Dapr, which generates the trace headers and calls service A. Service A then calls service B and further Dapr-enabled services. You must propagate the headers from service A to service B: `Ingress -> service A -> propagate trace headers -> service B`. This is similar to case 2 above.
|
||||
|
||||
4. Pub/sub messages
|
||||
Dapr generates the trace headers in the published message topic. These trace headers are propagated to any services listening on that topic.
|
||||
|
||||
### Propagating multiple different service calls
|
||||
|
||||
In the following scenarios, Dapr does some of the work for you and you need to either create or propagate trace headers.
|
||||
|
||||
1. Multiple service calls to different services from single service
|
||||
|
||||
When you are calling multiple services from a single service (see example below), you need to propagate the trace headers:
|
||||
|
||||
```
|
||||
service A -> service B
|
||||
[ .. some code logic ..]
|
||||
service A -> service C
|
||||
[ .. some code logic ..]
|
||||
service A -> service D
|
||||
[ .. some code logic ..]
|
||||
```
|
||||
|
||||
In this case, when service A first calls service B, Dapr generates the trace headers in service A, which are then propagated to service B. These trace headers are returned in the response from service B as part of response headers. You then need to propagate the returned trace context to the next services, service C and service D, as Dapr does not know you want to reuse the same header.
|
||||
|
||||
### Generating your own trace context headers from non-Daprized applications
|
||||
|
||||
You may have chosen to generate your own trace context headers.
|
||||
Generating your own trace context headers is more unusual and typically not required when calling Dapr. However, there are scenarios where you could specifically choose to add W3C trace headers into a service call; for example, you have an existing application that does not use Dapr. In this case, Dapr still propagates the trace context headers for you. If you decide to generate trace headers yourself, there are three ways this can be done:
|
||||
|
||||
1. You can use the industry standard [OpenTelemetry SDKs](https://opentelemetry.io/docs/instrumentation/) to generate trace headers and pass these trace headers to a Dapr-enabled service. This is the preferred method.
|
||||
|
||||
2. You can use a vendor SDK that provides a way to generate W3C trace headers and pass them to a Dapr-enabled service.
|
||||
|
||||
3. You can handcraft a trace context following [W3C trace context specifications](https://www.w3.org/TR/trace-context/) and pass them to a Dapr-enabled service.
|
||||
|
||||
## W3C trace context
|
||||
|
||||
Dapr uses the standard W3C trace context headers.
|
||||
|
||||
- For HTTP requests, Dapr uses `traceparent` header.
|
||||
- For gRPC requests, Dapr uses `grpc-trace-bin` header.
|
||||
|
||||
When a request arrives without a trace ID, Dapr creates a new one. Otherwise, it passes the trace ID along the call chain.
|
||||
|
||||
Read [trace context overview]({{< ref w3c-tracing-overview >}}) for more background on W3C trace context.
|
||||
|
||||
## W3C trace headers
|
||||
These are the specific trace context headers that are generated and propagated by Dapr for HTTP and gRPC.
|
||||
|
||||
### Trace context HTTP headers format
|
||||
When propagating a trace context header from an HTTP response to an HTTP request, you copy these headers.
|
||||
|
||||
#### Traceparent header
|
||||
The traceparent header represents the incoming request in a tracing system in a common format, understood by all vendors.
|
||||
Here’s an example of a traceparent header.
|
||||
|
||||
`traceparent: 00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-01`
|
||||
|
||||
Find the traceparent fields detailed [here](https://www.w3.org/TR/trace-context/#traceparent-header).
|
||||
|
||||
#### Tracestate header
|
||||
The tracestate header includes the parent in a potentially vendor-specific format:
|
||||
|
||||
`tracestate: congo=t61rcWkgMzE`
|
||||
|
||||
Find the tracestate fields detailed [here](https://www.w3.org/TR/trace-context/#tracestate-header).
|
||||
|
||||
### Trace context gRPC headers format
|
||||
In the gRPC API calls, trace context is passed through `grpc-trace-bin` header.
|
||||
|
||||
## Related Links
|
||||
|
||||
- [Observability concepts]({{< ref observability-concept.md >}})
|
||||
- [W3C Trace Context for distributed tracing]({{< ref w3c-tracing-overview >}})
|
||||
- [W3C Trace Context specification](https://www.w3.org/TR/trace-context/)
|
||||
- [Observability quickstart](https://github.com/dapr/quickstarts/tree/master/tutorials/observability)
|
|
@ -1,33 +0,0 @@
|
|||
---
|
||||
type: docs
|
||||
title: "Trace context"
|
||||
linkTitle: "Trace context"
|
||||
weight: 4000
|
||||
description: Background and scenarios for using W3C tracing with Dapr
|
||||
type: docs
|
||||
---
|
||||
|
||||
Dapr uses the [Open Telemetry protocol](https://opentelemetry.io/), which in turn uses the [W3C trace context](https://www.w3.org/TR/trace-context/) for distributed tracing for both service invocation and pub/sub messaging. Dapr generates and propagates the trace context information, which can be sent to observability tools for visualization and querying.
|
||||
|
||||
## Background
|
||||
Distributed tracing is a methodology implemented by tracing tools to follow, analyze, and debug a transaction across multiple software components. Typically, a distributed trace traverses more than one service which requires it to be uniquely identifiable. Trace context propagation passes along this unique identification.
|
||||
|
||||
In the past, trace context propagation has typically been implemented individually by each different tracing vendor. In multi-vendor environments, this causes interoperability problems, such as:
|
||||
|
||||
- Traces that are collected by different tracing vendors cannot be correlated as there is no shared unique identifier.
|
||||
- Traces that cross boundaries between different tracing vendors can not be propagated as there is no forwarded, uniformly agreed set of identification.
|
||||
- Vendor-specific metadata might be dropped by intermediaries.
|
||||
- Cloud platform vendors, intermediaries, and service providers cannot guarantee to support trace context propagation as there is no standard to follow.
|
||||
|
||||
In the past, these problems did not have a significant impact, as most applications were monitored by a single tracing vendor and stayed within the boundaries of a single platform provider. Today, an increasing number of applications are distributed and leverage multiple middleware services and cloud platforms.
|
||||
|
||||
This transformation of modern applications called for a distributed tracing context propagation standard. The [W3C trace context specification](https://www.w3.org/TR/trace-context/) defines a universally agreed-upon format for the exchange of trace context propagation data - referred to as trace context. Trace context solves the problems described above by:
|
||||
|
||||
* Providing a unique identifier for individual traces and requests, allowing trace data of multiple providers to be linked together.
|
||||
* Providing an agreed-upon mechanism to forward vendor-specific trace data and avoid broken traces when multiple tracing tools participate in a single transaction.
|
||||
* Providing an industry standard that intermediaries, platforms, and hardware providers can support.
|
||||
|
||||
A unified approach for propagating trace data improves visibility into the behavior of distributed applications, facilitating problem and performance analysis.
|
||||
|
||||
## Related Links
|
||||
- [W3C Trace Context specification](https://www.w3.org/TR/trace-context/)
|
|
@ -5,3 +5,11 @@ linkTitle: "Publish & subscribe"
|
|||
weight: 30
|
||||
description: Secure, scalable messaging between services
|
||||
---
|
||||
|
||||
{{% alert title="More about Dapr Pub/sub" color="primary" %}}
|
||||
Learn more about how to use Dapr Pub/sub:
|
||||
- Try the [Pub/sub quickstart]({{< ref pubsub-quickstart.md >}}).
|
||||
- Explore pub/sub via any of the supporting [Dapr SDKs]({{< ref sdks >}}).
|
||||
- Review the [Pub/sub API reference documentation]({{< ref pubsub_api.md >}}).
|
||||
- Browse the supported [pub/sub component specs]({{< ref supported-pubsub >}}).
|
||||
{{% /alert %}}
|
|
@ -658,6 +658,12 @@ dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-g
|
|||
|
||||
In order to tell Dapr that a message was processed successfully, return a `200 OK` response. If Dapr receives any other return status code than `200`, or if your app crashes, Dapr will attempt to redeliver the message following at-least-once semantics.
|
||||
|
||||
## Demo video
|
||||
|
||||
Watch [this demo video](https://youtu.be/1dqe1k-FXJQ?si=s3gvWxRxeOsmXuE1) to learn more about pub/sub messaging with Dapr.
|
||||
|
||||
<iframe width="560" height="315" src="https://www.youtube-nocookie.com/embed/1dqe1k-FXJQ?si=s3gvWxRxeOsmXuE1" title="YouTube video player" style="padding-bottom:25px;" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" allowfullscreen></iframe>
|
||||
|
||||
## Next steps
|
||||
|
||||
- Try the [pub/sub tutorial](https://github.com/dapr/quickstarts/tree/master/tutorials/pub-sub).
|
||||
|
|
|
@ -14,9 +14,15 @@ Dapr uses CloudEvents to provide additional context to the event payload, enabli
|
|||
- Content-type for proper deserialization of event data
|
||||
- Verification of sender application
|
||||
|
||||
## CloudEvents example
|
||||
You can choose any of three methods for publishing a CloudEvent via pub/sub:
|
||||
|
||||
A publish operation to Dapr results in a cloud event envelope containing the following fields:
|
||||
1. Send a pub/sub event, which is then wrapped by Dapr in a CloudEvent envelope.
|
||||
1. Replace specific CloudEvents attributes provided by Dapr by overriding the standard CloudEvent properties.
|
||||
1. Write your own CloudEvent envelope as part of the pub/sub event.
|
||||
|
||||
## Dapr-generated CloudEvents example
|
||||
|
||||
Sending a publish operation to Dapr automatically wraps it in a CloudEvent envelope containing the following fields:
|
||||
|
||||
- `id`
|
||||
- `source`
|
||||
|
@ -30,7 +36,9 @@ A publish operation to Dapr results in a cloud event envelope containing the fol
|
|||
- `time`
|
||||
- `datacontenttype` (optional)
|
||||
|
||||
The following example demonstrates a cloud event generated by Dapr for a publish operation to the `orders` topic that includes a W3C `traceid` unique to the message, the `data` and the fields for the CloudEvent where the data content is serialized as JSON.
|
||||
The following example demonstrates a CloudEvent generated by Dapr for a publish operation to the `orders` topic that includes:
|
||||
- A W3C `traceid` unique to the message
|
||||
- The `data` and the fields for the CloudEvent where the data content is serialized as JSON
|
||||
|
||||
```json
|
||||
{
|
||||
|
@ -55,20 +63,112 @@ As another example of a v1.0 CloudEvent, the following shows data as XML content
|
|||
|
||||
```json
|
||||
{
|
||||
"specversion" : "1.0",
|
||||
"type" : "xml.message",
|
||||
"source" : "https://example.com/message",
|
||||
"subject" : "Test XML Message",
|
||||
"id" : "id-1234-5678-9101",
|
||||
"time" : "2020-09-23T06:23:21Z",
|
||||
"datacontenttype" : "text/xml",
|
||||
"data" : "<note><to>User1</to><from>user2</from><message>hi</message></note>"
|
||||
"topic": "orders",
|
||||
"pubsubname": "order_pub_sub",
|
||||
"traceid": "00-113ad9c4e42b27583ae98ba698d54255-e3743e35ff56f219-01",
|
||||
"tracestate": "",
|
||||
"data" : "<note><to></to><from>user2</from><message>Order</message></note>",
|
||||
"id" : "id-1234-5678-9101",
|
||||
"specversion" : "1.0",
|
||||
"datacontenttype" : "text/xml",
|
||||
"subject" : "Test XML Message",
|
||||
"source" : "https://example.com/message",
|
||||
"type" : "xml.message",
|
||||
"time" : "2020-09-23T06:23:21Z"
|
||||
}
|
||||
```
|
||||
|
||||
## Replace Dapr generated CloudEvents values
|
||||
|
||||
Dapr automatically generates several CloudEvent properties. You can replace these generated CloudEvent properties by providing the following optional metadata key/value:
|
||||
|
||||
- `cloudevent.id`: overrides `id`
|
||||
- `cloudevent.source`: overrides `source`
|
||||
- `cloudevent.type`: overrides `type`
|
||||
- `cloudevent.traceid`: overrides `traceid`
|
||||
- `cloudevent.tracestate`: overrides `tracestate`
|
||||
- `cloudevent.traceparent`: overrides `traceparent`
|
||||
|
||||
The ability to replace CloudEvents properties using these metadata properties applies to all pub/sub components.
|
||||
|
||||
### Example
|
||||
|
||||
For example, to replace the `source` and `id` values from [the CloudEvent example above]({{< ref "#cloudevents-example" >}}) in code:
|
||||
|
||||
{{< tabs "Python" ".NET" >}}
|
||||
<!-- Python -->
|
||||
{{% codetab %}}
|
||||
|
||||
```python
|
||||
with DaprClient() as client:
|
||||
order = {'orderId': i}
|
||||
# Publish an event/message using Dapr PubSub
|
||||
result = client.publish_event(
|
||||
pubsub_name='order_pub_sub',
|
||||
topic_name='orders',
|
||||
        publish_metadata={'cloudevent.id': 'd99b228f-6c73-4e78-8c4d-3f80a043d317', 'cloudevent.source': 'payment'}
|
||||
)
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
<!-- .NET -->
|
||||
{{% codetab %}}
|
||||
|
||||
```csharp
|
||||
var order = new Order(i);
|
||||
using var client = new DaprClientBuilder().Build();
|
||||
|
||||
// Override cloudevent metadata
|
||||
var metadata = new Dictionary<string,string>() {
|
||||
{ "cloudevent.source", "payment" },
|
||||
{ "cloudevent.id", "d99b228f-6c73-4e78-8c4d-3f80a043d317" }
|
||||
}
|
||||
|
||||
// Publish an event/message using Dapr PubSub
|
||||
await client.PublishEventAsync("order_pub_sub", "orders", order, metadata);
|
||||
Console.WriteLine("Published data: " + order);
|
||||
|
||||
await Task.Delay(TimeSpan.FromSeconds(1));
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{< /tabs >}}
|
||||
|
||||
|
||||
The JSON payload then reflects the new `source` and `id` values:
|
||||
|
||||
|
||||
```json
|
||||
{
|
||||
"topic": "orders",
|
||||
"pubsubname": "order_pub_sub",
|
||||
"traceid": "00-113ad9c4e42b27583ae98ba698d54255-e3743e35ff56f219-01",
|
||||
"tracestate": "",
|
||||
"data": {
|
||||
"orderId": 1
|
||||
},
|
||||
"id": "d99b228f-6c73-4e78-8c4d-3f80a043d317",
|
||||
"specversion": "1.0",
|
||||
"datacontenttype": "application/json; charset=utf-8",
|
||||
"source": "payment",
|
||||
"type": "com.dapr.event.sent",
|
||||
"time": "2020-09-23T06:23:21Z",
|
||||
"traceparent": "00-113ad9c4e42b27583ae98ba698d54255-e3743e35ff56f219-01"
|
||||
}
|
||||
```
|
||||
|
||||
{{% alert title="Important" color="warning" %}}
|
||||
While you can replace `traceid`/`traceparent` and `tracestate`, doing this may interfere with tracing events and report inconsistent results in tracing tools. It's recommended to use OpenTelemetry for distributed traces. [Learn more about distributed tracing.]({{< ref tracing-overview.md >}})
|
||||
|
||||
{{% /alert %}}
|
||||
|
||||
|
||||
## Publish your own CloudEvent
|
||||
|
||||
If you want to use your own CloudEvent, make sure to specify the [`datacontenttype`]({{< ref "pubsub-overview.md#setting-message-content-types" >}}) as `application/cloudevents+json`.
|
||||
|
||||
If the CloudEvent that was authored by the app does not contain the [minimum required fields](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#required-attributes) in the CloudEvent specification, the message is rejected. Dapr adds the following fields to the CloudEvent if they are missing:
|
||||
|
||||
- `time`
|
||||
|
@ -92,7 +192,7 @@ You can add additional fields to a custom CloudEvent that are not part of the of
|
|||
Publish a CloudEvent to the `orders` topic:
|
||||
|
||||
```bash
|
||||
dapr publish --publish-app-id orderprocessing --pubsub order-pub-sub --topic orders --data '{"specversion" : "1.0", "type" : "com.dapr.cloudevent.sent", "source" : "testcloudeventspubsub", "subject" : "Cloud Events Test", "id" : "someCloudEventId", "time" : "2021-08-02T09:00:00Z", "datacontenttype" : "application/cloudevents+json", "data" : {"orderId": "100"}}'
|
||||
dapr publish --publish-app-id orderprocessing --pubsub order-pub-sub --topic orders --data '{\"orderId\": \"100\"}'
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
|
|
|
@ -25,13 +25,14 @@ The diagram below is an example of how dead letter topics work. First a message
|
|||
The following YAML shows how to configure a subscription with a dead letter topic named `poisonMessages` for messages consumed from the `orders` topic. This subscription is scoped to an app with a `checkout` ID.
|
||||
|
||||
```yaml
|
||||
apiVersion: dapr.io/v1alpha1
|
||||
apiVersion: dapr.io/v2alpha1
|
||||
kind: Subscription
|
||||
metadata:
|
||||
name: order
|
||||
spec:
|
||||
topic: orders
|
||||
route: /checkout
|
||||
routes:
|
||||
default: /checkout
|
||||
pubsubname: pubsub
|
||||
deadLetterTopic: poisonMessages
|
||||
scopes:
|
||||
|
@ -86,13 +87,16 @@ spec:
|
|||
Remember to now configure a subscription to handle the dead letter topics. For example, you can create another declarative subscription to receive these on the same or a different application. The example below shows the checkout application subscribing to the `poisonMessages` topic with another subscription and sending these to be handled by the `/failedmessages` endpoint.
|
||||
|
||||
```yaml
|
||||
apiVersion: dapr.io/v1alpha1
|
||||
apiVersion: dapr.io/v2alpha1
|
||||
kind: Subscription
|
||||
metadata:
|
||||
name: deadlettertopics
|
||||
spec:
|
||||
topic: poisonMessages
|
||||
route: /failedMessages
|
||||
routes:
|
||||
rules:
|
||||
- match:
|
||||
path: /failedMessages
|
||||
pubsubname: pubsub
|
||||
scopes:
|
||||
- checkout
|
||||
|
|
|
@ -13,7 +13,7 @@ Publish and subscribe (pub/sub) enables microservices to communicate with each o
|
|||
|
||||
An intermediary message broker copies each message from a publisher's input channel to an output channel for all subscribers interested in that message. This pattern is especially useful when you need to decouple microservices from one another.
|
||||
|
||||
<img src="/images/pubsub-overview-pattern.png" width=1000>
|
||||
<img src="/images/pubsub-overview-pattern.png" width=1000 style="padding-bottom:25px;">
|
||||
|
||||
<br></br>
|
||||
|
||||
|
@ -32,15 +32,17 @@ When using pub/sub in Dapr:
|
|||
1. The pub/sub building block makes calls into a Dapr pub/sub component that encapsulates a specific message broker.
|
||||
1. To receive messages on a topic, Dapr subscribes to the pub/sub component on behalf of your service with a topic and delivers the messages to an endpoint on your service when they arrive.
|
||||
|
||||
[The following overview video and demo](https://www.youtube.com/live/0y7ne6teHT4?si=FMg2Y7bRuljKism-&t=5384) demonstrates how Dapr pub/sub works.
|
||||
|
||||
<iframe width="560" height="315" src="https://www.youtube-nocookie.com/embed/0y7ne6teHT4?si=FMg2Y7bRuljKism-&start=5384" title="YouTube video player" style="padding-bottom:25px;" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" allowfullscreen></iframe>
|
||||
|
||||
In the diagram below, a "shipping" service and an "email" service have both subscribed to topics published by a "cart" service. Each service loads pub/sub component configuration files that point to the same pub/sub message broker component; for example: Redis Streams, NATS Streaming, Azure Service Bus, or GCP pub/sub.
|
||||
|
||||
<img src="/images/pubsub-overview-components.png" width=1000>
|
||||
<br></br>
|
||||
<img src="/images/pubsub-overview-components.png" width=1000 style="padding-bottom:25px;">
|
||||
|
||||
In the diagram below, the Dapr API posts an "order" topic from the publishing "cart" service to "order" endpoints on the "shipping" and "email" subscribing services.
|
||||
|
||||
<img src="/images/pubsub-overview-publish-API.png" width=1000>
|
||||
<br></br>
|
||||
<img src="/images/pubsub-overview-publish-API.png" width=1000 style="padding-bottom:25px;">
|
||||
|
||||
[View the complete list of pub/sub components that Dapr supports]({{< ref supported-pubsub >}}).
|
||||
|
||||
|
@ -100,16 +102,29 @@ Dapr solves multi-tenancy at-scale with [namespaces for consumer groups]({{< ref
|
|||
|
||||
### At-least-once guarantee
|
||||
|
||||
Dapr guarantees at-least-once semantics for message delivery. When an application publishes a message to a topic using the pub/sub API, Dapr ensures the message is delivered *at least once* to every subscriber.
|
||||
Dapr guarantees at-least-once semantics for message delivery. When an application publishes a message to a topic using the pub/sub API, Dapr ensures the message is delivered *at least once* to every subscriber.
|
||||
|
||||
Even if the message fails to deliver, or your application crashes, Dapr attempts to redeliver the message until successful delivery.
|
||||
|
||||
All Dapr pub/sub components support the at-least-once guarantee.
|
||||
|
||||
### Consumer groups and competing consumers pattern
|
||||
|
||||
Dapr automatically handles the burden of dealing with concepts like consumer groups and competing consumers pattern. The competing consumers pattern refers to multiple application instances using a single consumer group. When multiple instances of the same application (running same Dapr app ID) subscribe to a topic, Dapr delivers each message to *only one instance of **that** application*. This concept is illustrated in the diagram below.
|
||||
Dapr handles the burden of dealing with consumer groups and the competing consumers pattern. In the competing consumers pattern, multiple application instances using a single consumer group compete for the message. Dapr enforces the competing consumer pattern when replicas use the same `app-id` without explicit consumer group overrides.
|
||||
|
||||
When multiple instances of the same application (with same `app-id`) subscribe to a topic, Dapr delivers each message to *only one instance of **that** application*. This concept is illustrated in the diagram below.
|
||||
|
||||
<img src="/images/pubsub-overview-pattern-competing-consumers.png" width=1000>
|
||||
<br></br>
|
||||
|
||||
Similarly, if two different applications (with different app-IDs) subscribe to the same topic, Dapr delivers each message to *only one instance of **each** application*.
|
||||
Similarly, if two different applications (with different `app-id`) subscribe to the same topic, Dapr delivers each message to *only one instance of **each** application*.
|
||||
|
||||
Not all Dapr pub/sub components support the competing consumer pattern. Currently, the following (non-exhaustive) pub/sub components support this:
|
||||
|
||||
- [Apache Kafka]({{< ref setup-apache-kafka >}})
|
||||
- [Azure Service Bus Queues]({{< ref setup-azure-servicebus-queues >}})
|
||||
- [RabbitMQ]({{< ref setup-rabbitmq >}})
|
||||
- [Redis Streams]({{< ref setup-redis-pubsub >}})
|
||||
|
||||
### Scoping topics for added security
|
||||
|
||||
|
|
|
@ -141,13 +141,14 @@ $app->start();
|
|||
Similarly, you can subscribe to raw events declaratively by adding the `rawPayload` metadata entry to your subscription specification.
|
||||
|
||||
```yaml
|
||||
apiVersion: dapr.io/v1alpha1
|
||||
apiVersion: dapr.io/v2alpha1
|
||||
kind: Subscription
|
||||
metadata:
|
||||
name: myevent-subscription
|
||||
spec:
|
||||
topic: deathStarStatus
|
||||
route: /dsstatus
|
||||
routes:
|
||||
default: /dsstatus
|
||||
pubsubname: pubsub
|
||||
metadata:
|
||||
rawPayload: "true"
|
||||
|
|
|
@ -22,13 +22,14 @@ The examples below demonstrate pub/sub messaging between a `checkout` app and an
|
|||
You can subscribe declaratively to a topic using an external component file. This example uses a YAML component file named `subscription.yaml`:
|
||||
|
||||
```yaml
|
||||
apiVersion: dapr.io/v1alpha1
|
||||
apiVersion: dapr.io/v2alpha1
|
||||
kind: Subscription
|
||||
metadata:
|
||||
name: order
|
||||
spec:
|
||||
topic: orders
|
||||
route: /checkout
|
||||
routes:
|
||||
default: /checkout
|
||||
pubsubname: pubsub
|
||||
scopes:
|
||||
- orderprocessing
|
||||
|
@ -186,7 +187,11 @@ The `/checkout` endpoint matches the `route` defined in the subscriptions and th
|
|||
|
||||
### Programmatic subscriptions
|
||||
|
||||
The programmatic approach returns the `routes` JSON structure within the code, unlike the declarative approach's `route` YAML structure. In the example below, you define the values found in the [declarative YAML subscription](#declarative-subscriptions) above within the application code.
|
||||
The dynamic programmatic approach returns the `routes` JSON structure within the code, unlike the declarative approach's `route` YAML structure.
|
||||
|
||||
> **Note:** Programmatic subscriptions are only read once during application start-up. You cannot _dynamically_ add new programmatic subscriptions; you can only add new ones at compile time.
|
||||
|
||||
In the example below, you define the values found in the [declarative YAML subscription](#declarative-subscriptions) above within the application code.
|
||||
|
||||
{{< tabs ".NET" Java Python JavaScript Go>}}
|
||||
|
||||
|
@ -218,7 +223,7 @@ Both of the handlers defined above also need to be mapped to configure the `dapr
|
|||
app.UseEndpoints(endpoints =>
|
||||
{
|
||||
endpoints.MapSubscribeHandler();
|
||||
}
|
||||
});
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
|
@ -316,6 +321,7 @@ app.listen(port, () => console.log(`consumer app listening on port ${port}!`))
|
|||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
|
|
|
@ -5,3 +5,11 @@ linkTitle: "Secrets management"
|
|||
weight: 70
|
||||
description: Securely access secrets from your application
|
||||
---
|
||||
|
||||
{{% alert title="More about Dapr Secrets" color="primary" %}}
|
||||
Learn more about how to use Dapr Secrets:
|
||||
- Try the [Secrets quickstart]({{< ref secrets-quickstart.md >}}).
|
||||
- Explore secrets via any of the supporting [Dapr SDKs]({{< ref sdks >}}).
|
||||
- Review the [Secrets API reference documentation]({{< ref secrets_api.md >}}).
|
||||
- Browse the supported [secrets component specs]({{< ref supported-secret-stores >}}).
|
||||
{{% /alert %}}
|
|
@ -18,6 +18,10 @@ Dapr's dedicated secrets building block API makes it easier for developers to co
|
|||
1. Retrieve secrets using the Dapr secrets API in the application code.
|
||||
1. Optionally, reference secrets in Dapr component files.
|
||||
|
||||
[The following overview video and demo](https://www.youtube.com/live/0y7ne6teHT4?si=3bmNSSyIEIVSF-Ej&t=9931) demonstrates how Dapr secrets management works.
|
||||
|
||||
<iframe width="560" height="315" src="https://www.youtube-nocookie.com/embed/0y7ne6teHT4?si=3bmNSSyIEIVSF-Ej&start=9931" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" allowfullscreen></iframe>
|
||||
|
||||
## Features
|
||||
|
||||
The secrets management API building block brings several features to your application.
|
||||
|
|
|
@ -5,3 +5,10 @@ linkTitle: "Service invocation"
|
|||
weight: 10
|
||||
description: Perform direct, secure, service-to-service method calls
|
||||
---
|
||||
|
||||
{{% alert title="More about Dapr Service Invocation" color="primary" %}}
|
||||
Learn more about how to use Dapr Service Invocation:
|
||||
- Try the [Service Invocation quickstart]({{< ref serviceinvocation-quickstart.md >}}).
|
||||
- Explore service invocation via any of the supporting [Dapr SDKs]({{< ref sdks >}}).
|
||||
- Review the [Service Invocation API reference documentation]({{< ref service_invocation_api.md >}}).
|
||||
{{% /alert %}}
|
|
@ -3,7 +3,7 @@ type: docs
|
|||
title: "How-To: Invoke services using HTTP"
|
||||
linkTitle: "How-To: Invoke with HTTP"
|
||||
description: "Call between services using service invocation"
|
||||
weight: 2000
|
||||
weight: 20
|
||||
---
|
||||
|
||||
This article demonstrates how to deploy services each with an unique application ID for other services to discover and call endpoints on them using service invocation over HTTP.
|
||||
|
@ -19,26 +19,22 @@ This article demonstrates how to deploy services each with an unique application
|
|||
|
||||
Dapr allows you to assign a global, unique ID for your app. This ID encapsulates the state for your application, regardless of the number of instances it may have.
|
||||
|
||||
{{< tabs Dotnet Java Python Go JavaScript Kubernetes>}}
|
||||
{{< tabs Python JavaScript ".NET" Java Go Kubernetes >}}
|
||||
|
||||
{{% codetab %}}
|
||||
|
||||
```bash
|
||||
dapr run --app-id checkout --app-protocol http --dapr-http-port 3500 -- python3 checkout/app.py
|
||||
|
||||
dapr run --app-id checkout --app-port 6002 --dapr-http-port 3602 --dapr-grpc-port 60002 dotnet run
|
||||
|
||||
dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-grpc-port 60001 dotnet run
|
||||
|
||||
dapr run --app-id order-processor --app-port 8001 --app-protocol http --dapr-http-port 3501 -- python3 order-processor/app.py
|
||||
```
|
||||
|
||||
If your app uses a TLS, you can tell Dapr to invoke your app over a TLS connection by setting `--app-protocol https`:
|
||||
|
||||
```bash
|
||||
dapr run --app-id checkout --app-protocol https --dapr-http-port 3500 -- python3 checkout/app.py
|
||||
|
||||
dapr run --app-id checkout --app-port 6002 --dapr-http-port 3602 --dapr-grpc-port 60002 --app-protocol https dotnet run
|
||||
|
||||
dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-grpc-port 60001 --app-protocol https dotnet run
|
||||
|
||||
dapr run --app-id order-processor --app-port 8001 --app-protocol https --dapr-http-port 3501 -- python3 order-processor/app.py
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
|
@ -46,21 +42,17 @@ dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-g
|
|||
{{% codetab %}}
|
||||
|
||||
```bash
|
||||
dapr run --app-id checkout --app-protocol http --dapr-http-port 3500 -- npm start
|
||||
|
||||
dapr run --app-id checkout --app-port 6002 --dapr-http-port 3602 --dapr-grpc-port 60002 mvn spring-boot:run
|
||||
|
||||
dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-grpc-port 60001 mvn spring-boot:run
|
||||
|
||||
dapr run --app-id order-processor --app-port 5001 --app-protocol http --dapr-http-port 3501 -- npm start
|
||||
```
|
||||
|
||||
If your app uses a TLS, you can tell Dapr to invoke your app over a TLS connection by setting `--app-protocol https`:
|
||||
|
||||
```bash
|
||||
dapr run --app-id checkout --dapr-http-port 3500 --app-protocol https -- npm start
|
||||
|
||||
dapr run --app-id checkout --app-port 6002 --dapr-http-port 3602 --dapr-grpc-port 60002 --app-protocol https mvn spring-boot:run
|
||||
|
||||
dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-grpc-port 60001 --app-protocol https mvn spring-boot:run
|
||||
|
||||
dapr run --app-id order-processor --app-port 5001 --dapr-http-port 3501 --app-protocol https -- npm start
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
|
@ -68,21 +60,17 @@ dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-g
|
|||
{{% codetab %}}
|
||||
|
||||
```bash
|
||||
dapr run --app-id checkout --app-protocol http --dapr-http-port 3500 -- dotnet run
|
||||
|
||||
dapr run --app-id checkout --app-port 6002 --dapr-http-port 3602 --dapr-grpc-port 60002 -- python3 CheckoutService.py
|
||||
|
||||
dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-grpc-port 60001 -- python3 OrderProcessingService.py
|
||||
|
||||
dapr run --app-id order-processor --app-port 7001 --app-protocol http --dapr-http-port 3501 -- dotnet run
|
||||
```
|
||||
|
||||
If your app uses a TLS, you can tell Dapr to invoke your app over a TLS connection by setting `--app-protocol https`:
|
||||
|
||||
```bash
|
||||
dapr run --app-id checkout --dapr-http-port 3500 --app-protocol https -- dotnet run
|
||||
|
||||
dapr run --app-id checkout --app-port 6002 --dapr-http-port 3602 --dapr-grpc-port 60002 --app-protocol https -- python3 CheckoutService.py
|
||||
|
||||
dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-grpc-port 60001 --app-protocol https -- python3 OrderProcessingService.py
|
||||
|
||||
dapr run --app-id order-processor --app-port 7001 --dapr-http-port 3501 --app-protocol https -- dotnet run
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
|
@ -90,21 +78,17 @@ dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-g
|
|||
{{% codetab %}}
|
||||
|
||||
```bash
|
||||
dapr run --app-id checkout --app-protocol http --dapr-http-port 3500 -- java -jar target/CheckoutService-0.0.1-SNAPSHOT.jar
|
||||
|
||||
dapr run --app-id checkout --app-port 6002 --dapr-http-port 3602 --dapr-grpc-port 60002 go run CheckoutService.go
|
||||
|
||||
dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-grpc-port 60001 go run OrderProcessingService.go
|
||||
|
||||
dapr run --app-id order-processor --app-port 9001 --app-protocol http --dapr-http-port 3501 -- java -jar target/OrderProcessingService-0.0.1-SNAPSHOT.jar
|
||||
```
|
||||
|
||||
If your app uses a TLS, you can tell Dapr to invoke your app over a TLS connection by setting `--app-protocol https`:
|
||||
|
||||
```bash
|
||||
dapr run --app-id checkout --dapr-http-port 3500 --app-protocol https -- java -jar target/CheckoutService-0.0.1-SNAPSHOT.jar
|
||||
|
||||
dapr run --app-id checkout --app-port 6002 --dapr-http-port 3602 --dapr-grpc-port 60002 --app-protocol https go run CheckoutService.go
|
||||
|
||||
dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-grpc-port 60001 --app-protocol https go run OrderProcessingService.go
|
||||
|
||||
dapr run --app-id order-processor --app-port 9001 --dapr-http-port 3501 --app-protocol https -- java -jar target/OrderProcessingService-0.0.1-SNAPSHOT.jar
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
|
@ -112,21 +96,17 @@ dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-g
|
|||
{{% codetab %}}
|
||||
|
||||
```bash
|
||||
dapr run --app-id checkout --dapr-http-port 3500 -- go run .
|
||||
|
||||
dapr run --app-id checkout --app-port 6002 --dapr-http-port 3602 --dapr-grpc-port 60002 npm start
|
||||
|
||||
dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-grpc-port 60001 npm start
|
||||
|
||||
dapr run --app-id order-processor --app-port 6006 --app-protocol http --dapr-http-port 3501 -- go run .
|
||||
```
|
||||
|
||||
If your app uses a TLS, you can tell Dapr to invoke your app over a TLS connection by setting `--app-protocol https`:
|
||||
|
||||
```bash
|
||||
dapr run --app-id checkout --dapr-http-port 3500 --app-protocol https -- go run .
|
||||
|
||||
dapr run --app-id checkout --app-port 6002 --dapr-http-port 3602 --dapr-grpc-port 60002 --app-protocol https npm start
|
||||
|
||||
dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-grpc-port 60001 --app-protocol https npm start
|
||||
|
||||
dapr run --app-id order-processor --app-port 6006 --dapr-http-port 3501 --app-protocol https -- go run .
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
|
@ -156,7 +136,7 @@ spec:
|
|||
app: <language>-app
|
||||
annotations:
|
||||
dapr.io/enabled: "true"
|
||||
dapr.io/app-id: "orderprocessingservice"
|
||||
dapr.io/app-id: "order-processor"
|
||||
dapr.io/app-port: "6001"
|
||||
...
|
||||
```
|
||||
|
@ -173,88 +153,7 @@ To invoke an application using Dapr, you can use the `invoke` API on any Dapr in
|
|||
|
||||
Below are code examples that leverage Dapr SDKs for service invocation.
|
||||
|
||||
{{< tabs Dotnet Java Python Go Javascript>}}
|
||||
|
||||
{{% codetab %}}
|
||||
|
||||
```csharp
|
||||
//dependencies
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.Net.Http;
|
||||
using System.Net.Http.Headers;
|
||||
using System.Threading.Tasks;
|
||||
using Dapr.Client;
|
||||
using Microsoft.AspNetCore.Mvc;
|
||||
using System.Threading;
|
||||
|
||||
//code
|
||||
namespace EventService
|
||||
{
|
||||
class Program
|
||||
{
|
||||
static async Task Main(string[] args)
|
||||
{
|
||||
while(true) {
|
||||
System.Threading.Thread.Sleep(5000);
|
||||
Random random = new Random();
|
||||
int orderId = random.Next(1,1000);
|
||||
using var client = new DaprClientBuilder().Build();
|
||||
|
||||
//Using Dapr SDK to invoke a method
|
||||
var result = client.CreateInvokeMethodRequest(HttpMethod.Get, "checkout", "checkout/" + orderId);
|
||||
await client.InvokeMethodAsync(result);
|
||||
Console.WriteLine("Order requested: " + orderId);
|
||||
Console.WriteLine("Result: " + result);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{% codetab %}}
|
||||
|
||||
```java
|
||||
//dependencies
|
||||
import io.dapr.client.DaprClient;
|
||||
import io.dapr.client.DaprClientBuilder;
|
||||
import io.dapr.client.domain.HttpExtension;
|
||||
import org.springframework.boot.autoconfigure.SpringBootApplication;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import java.util.Random;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
//code
|
||||
@SpringBootApplication
|
||||
public class OrderProcessingServiceApplication {
|
||||
|
||||
private static final Logger log = LoggerFactory.getLogger(OrderProcessingServiceApplication.class);
|
||||
|
||||
public static void main(String[] args) throws InterruptedException{
|
||||
while(true) {
|
||||
TimeUnit.MILLISECONDS.sleep(5000);
|
||||
Random random = new Random();
|
||||
int orderId = random.nextInt(1000-1) + 1;
|
||||
DaprClient daprClient = new DaprClientBuilder().build();
|
||||
//Using Dapr SDK to invoke a method
|
||||
var result = daprClient.invokeMethod(
|
||||
"checkout",
|
||||
"checkout/" + orderId,
|
||||
null,
|
||||
HttpExtension.GET,
|
||||
String.class
|
||||
);
|
||||
log.info("Order requested: " + orderId);
|
||||
log.info("Result: " + result);
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
{{< tabs Python JavaScript ".NET" Java Go >}}
|
||||
|
||||
{{% codetab %}}
|
||||
|
||||
|
@ -263,20 +162,18 @@ public class OrderProcessingServiceApplication {
|
|||
import random
|
||||
from time import sleep
|
||||
import logging
|
||||
from dapr.clients import DaprClient
|
||||
import requests
|
||||
|
||||
#code
|
||||
logging.basicConfig(level = logging.INFO)
|
||||
while True:
|
||||
sleep(random.randrange(50, 5000) / 1000)
|
||||
orderId = random.randint(1, 1000)
|
||||
with DaprClient() as daprClient:
|
||||
#Using Dapr SDK to invoke a method
|
||||
result = daprClient.invoke_method(
|
||||
"checkout",
|
||||
f"checkout/{orderId}",
|
||||
data=b'',
|
||||
http_verb="GET"
|
||||
#Invoke a service
|
||||
result = requests.post(
|
||||
url='%s/orders' % (base_url),
|
||||
data=json.dumps(order),
|
||||
headers=headers
|
||||
)
|
||||
logging.basicConfig(level = logging.INFO)
|
||||
logging.info('Order requested: ' + str(orderId))
|
||||
|
@ -287,50 +184,9 @@ while True:
|
|||
|
||||
{{% codetab %}}
|
||||
|
||||
```go
|
||||
//dependencies
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"math/rand"
|
||||
"time"
|
||||
"strconv"
|
||||
dapr "github.com/dapr/go-sdk/client"
|
||||
|
||||
)
|
||||
|
||||
//code
|
||||
type Order struct {
|
||||
orderName string
|
||||
orderNum string
|
||||
}
|
||||
|
||||
func main() {
|
||||
for i := 0; i < 10; i++ {
|
||||
time.Sleep(5000)
|
||||
orderId := rand.Intn(1000-1) + 1
|
||||
client, err := dapr.NewClient()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer client.Close()
|
||||
ctx := context.Background()
|
||||
//Using Dapr SDK to invoke a method
|
||||
result, err := client.InvokeMethod(ctx, "checkout", "checkout/" + strconv.Itoa(orderId), "get")
|
||||
log.Println("Order requested: " + strconv.Itoa(orderId))
|
||||
log.Println("Result: ")
|
||||
log.Println(result)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{% codetab %}}
|
||||
|
||||
```javascript
|
||||
//dependencies
|
||||
import { DaprClient, HttpMethod, CommunicationProtocolEnum } from '@dapr/dapr';
|
||||
import axios from "axios";
|
||||
|
||||
//code
|
||||
const daprHost = "127.0.0.1";
|
||||
|
@ -346,18 +202,11 @@ var main = function() {
|
|||
}
|
||||
}
|
||||
|
||||
async function start(orderId) {
|
||||
const client = new DaprClient({
|
||||
daprHost: daprHost,
|
||||
daprPort: process.env.DAPR_HTTP_PORT,
|
||||
communicationProtocol: CommunicationProtocolEnum.HTTP
|
||||
});
|
||||
|
||||
//Using Dapr SDK to invoke a method
|
||||
const result = await client.invoker.invoke('checkoutservice' , "checkout/" + orderId , HttpMethod.GET);
|
||||
//Invoke a service
|
||||
const result = await axios.post('order-processor' , "orders/" + orderId , axiosConfig);
|
||||
console.log("Order requested: " + orderId);
|
||||
console.log("Result: " + result);
|
||||
}
|
||||
console.log("Result: " + result.config.data);
|
||||
|
||||
|
||||
function sleep(ms) {
|
||||
return new Promise(resolve => setTimeout(resolve, ms));
|
||||
|
@ -368,6 +217,158 @@ main();
|
|||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{% codetab %}}
|
||||
|
||||
```csharp
|
||||
//dependencies
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.Net.Http;
|
||||
using System.Net.Http.Headers;
|
||||
using System.Threading.Tasks;
|
||||
using Microsoft.AspNetCore.Mvc;
|
||||
using System.Threading;
|
||||
|
||||
//code
|
||||
namespace EventService
|
||||
{
|
||||
class Program
|
||||
{
|
||||
static async Task Main(string[] args)
|
||||
{
|
||||
while(true) {
|
||||
await Task.Delay(5000)
|
||||
var random = new Random();
|
||||
var orderId = random.Next(1,1000);
|
||||
|
||||
//Using Dapr SDK to invoke a method
|
||||
var order = new Order("1");
|
||||
var orderJson = JsonSerializer.Serialize<Order>(order);
|
||||
var content = new StringContent(orderJson, Encoding.UTF8, "application/json");
|
||||
|
||||
var httpClient = DaprClient.CreateInvokeHttpClient();
|
||||
await httpClient.PostAsJsonAsync($"http://order-processor/orders", content);
|
||||
Console.WriteLine("Order requested: " + orderId);
|
||||
Console.WriteLine("Result: " + result);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{% codetab %}}
|
||||
|
||||
```java
|
||||
//dependencies
|
||||
import java.io.IOException;
|
||||
import java.net.URI;
|
||||
import java.net.http.HttpClient;
|
||||
import java.net.http.HttpRequest;
|
||||
import java.net.http.HttpResponse;
|
||||
import java.time.Duration;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.Random;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
//code
|
||||
@SpringBootApplication
|
||||
public class CheckoutServiceApplication {
|
||||
private static final HttpClient httpClient = HttpClient.newBuilder()
|
||||
.version(HttpClient.Version.HTTP_2)
|
||||
.connectTimeout(Duration.ofSeconds(10))
|
||||
.build();
|
||||
|
||||
public static void main(String[] args) throws InterruptedException, IOException {
|
||||
while (true) {
|
||||
TimeUnit.MILLISECONDS.sleep(5000);
|
||||
Random random = new Random();
|
||||
int orderId = random.nextInt(1000 - 1) + 1;
|
||||
|
||||
// Create a Map to represent the request body
|
||||
Map<String, Object> requestBody = new HashMap<>();
|
||||
requestBody.put("orderId", orderId);
|
||||
// Add other fields to the requestBody Map as needed
|
||||
|
||||
HttpRequest request = HttpRequest.newBuilder()
|
||||
.POST(HttpRequest.BodyPublishers.ofString(new JSONObject(requestBody).toString()))
|
||||
.uri(URI.create(dapr_url))
|
||||
.header("Content-Type", "application/json")
|
||||
.header("dapr-app-id", "order-processor")
|
||||
.build();
|
||||
|
||||
HttpResponse<String> response = httpClient.send(request, HttpResponse.BodyHandlers.ofString());
|
||||
|
||||
System.out.println("Order passed: " + orderId);
|
||||
TimeUnit.MILLISECONDS.sleep(1000);
|
||||
|
||||
log.info("Order requested: " + orderId);
|
||||
log.info("Result: " + response.body());
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{% codetab %}}
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
func main() {
|
||||
daprHttpPort := os.Getenv("DAPR_HTTP_PORT")
|
||||
if daprHttpPort == "" {
|
||||
daprHttpPort = "3500"
|
||||
}
|
||||
|
||||
client := &http.Client{
|
||||
Timeout: 15 * time.Second,
|
||||
}
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
time.Sleep(5000)
|
||||
orderId := rand.Intn(1000-1) + 1
|
||||
|
||||
url := fmt.Sprintf("http://localhost:%s/checkout/%v", daprHttpPort, orderId)
|
||||
req, err := http.NewRequest(http.MethodGet, url, nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Adding target app id as part of the header
|
||||
req.Header.Add("dapr-app-id", "order-processor")
|
||||
|
||||
// Invoking a service
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
log.Fatal(err.Error())
|
||||
}
|
||||
|
||||
b, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
fmt.Println(string(b))
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{< /tabs >}}
|
||||
|
||||
### Additional URL formats
|
||||
|
@ -432,4 +433,4 @@ For more information on tracing and logs, see the [observability]({{< ref observ
|
|||
## Related Links
|
||||
|
||||
- [Service invocation overview]({{< ref service-invocation-overview.md >}})
|
||||
- [Service invocation API specification]({{< ref service_invocation_api.md >}})
|
||||
- [Service invocation API specification]({{< ref service_invocation_api.md >}})
|
||||
|
|
|
@ -3,7 +3,7 @@ type: docs
|
|||
title: "How-To: Invoke Non-Dapr Endpoints using HTTP"
|
||||
linkTitle: "How-To: Invoke Non-Dapr Endpoints"
|
||||
description: "Call Non-Dapr endpoints from Dapr applications using service invocation"
|
||||
weight: 2000
|
||||
weight: 40
|
||||
---
|
||||
|
||||
This article demonstrates how to call a non-Dapr endpoint using Dapr over HTTP.
|
||||
|
@ -47,7 +47,7 @@ The diagram below is an overview of how Dapr's service invocation works when inv
|
|||
## Using an HTTPEndpoint resource or FQDN URL for non-Dapr endpoints
|
||||
There are two ways to invoke a non-Dapr endpoint when communicating either to Dapr applications or non-Dapr applications. A Dapr application can invoke a non-Dapr endpoint by providing one of the following:
|
||||
|
||||
- A named `HTTPEndpoint` resource, including defining an `HTTPEndpoint` resource type. See the [HTTPEndpoint reference]({{< ref httpendpoints-reference.md >}}) guide for an example.
|
||||
- A named `HTTPEndpoint` resource, including defining an `HTTPEndpoint` resource type. See the [HTTPEndpoint reference]({{< ref httpendpoints-schema.md >}}) guide for an example.
|
||||
|
||||
```sh
|
||||
localhost:3500/v1.0/invoke/<HTTPEndpoint-name>/method/<my-method>
|
||||
|
@ -70,7 +70,7 @@ There are two ways to invoke a non-Dapr endpoint when communicating either to Da
|
|||
```
|
||||
|
||||
### Using appId when calling Dapr enabled applications
|
||||
AppIDs are always used to call Dapr applications with the `appID` and `my-method. Read the [How-To: Invoke services using HTTP]({{< ref howto-invoke-discover-services.md >}}) guide for more information. For example:
|
||||
AppIDs are always used to call Dapr applications with the `appID` and `my-method``. Read the [How-To: Invoke services using HTTP]({{< ref howto-invoke-discover-services.md >}}) guide for more information. For example:
|
||||
|
||||
```sh
|
||||
localhost:3500/v1.0/invoke/<appID>/method/<my-method>
|
||||
|
@ -81,7 +81,7 @@ curl http://localhost:3602/v1.0/invoke/orderprocessor/method/checkout
|
|||
|
||||
## Related Links
|
||||
|
||||
- [HTTPEndpoint reference]({{< ref httpendpoints-reference.md >}})
|
||||
- [HTTPEndpoint reference]({{< ref httpendpoints-schema.md >}})
|
||||
- [Service invocation overview]({{< ref service-invocation-overview.md >}})
|
||||
- [Service invocation API specification]({{< ref service_invocation_api.md >}})
|
||||
|
||||
|
|
|
@ -3,7 +3,7 @@ type: docs
|
|||
title: "How-To: Invoke services using gRPC"
|
||||
linkTitle: "How-To: Invoke with gRPC"
|
||||
description: "Call between services using service invocation"
|
||||
weight: 3000
|
||||
weight: 30
|
||||
---
|
||||
|
||||
This article describe how to use Dapr to connect services using gRPC.
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
type: docs
|
||||
title: "How to: Service invocation across namespaces"
|
||||
linkTitle: "How to: Service invocation namespaces"
|
||||
weight: 1000
|
||||
weight: 50
|
||||
description: "Call between services deployed to different namespaces"
|
||||
---
|
||||
|
||||
|
|
|
@ -2,13 +2,13 @@
|
|||
type: docs
|
||||
title: "Service invocation overview"
|
||||
linkTitle: "Overview"
|
||||
weight: 900
|
||||
weight: 10
|
||||
description: "Overview of the service invocation API building block"
|
||||
---
|
||||
|
||||
Using service invocation, your application can reliably and securely communicate with other applications using the standard [gRPC](https://grpc.io) or [HTTP](https://www.w3.org/Protocols/) protocols.
|
||||
|
||||
In many microservice-based applications multiple services need the ability to communicate with one another. This inter-service communication requires that application developers handle problems like:
|
||||
In many microservice-based applications, multiple services need the ability to communicate with one another. This inter-service communication requires that application developers handle problems like:
|
||||
|
||||
- **Service discovery.** How do I discover my different services?
|
||||
- **Standardizing API calls between services.** How do I invoke methods between services?
|
||||
|
@ -25,6 +25,10 @@ Dapr uses a sidecar architecture. To invoke an application using Dapr:
|
|||
- Each application communicates with its own instance of Dapr.
|
||||
- The Dapr instances discover and communicate with each other.
|
||||
|
||||
[The following overview video and demo](https://www.youtube.com/live/0y7ne6teHT4?si=mtLMrajE5wVXJYz8&t=3598) demonstrates how Dapr service invocation works.
|
||||
|
||||
<iframe width="560" height="315" src="https://www.youtube-nocookie.com/embed/0y7ne6teHT4?si=Flsd8PRlF8nYu693&start=3598" title="YouTube video player" style="padding-bottom:25px;" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" allowfullscreen></iframe>
|
||||
|
||||
The diagram below is an overview of how Dapr's service invocation works between two Dapr-ized applications.
|
||||
|
||||
<img src="/images/service-invocation-overview.png" width=800 alt="Diagram showing the steps of service invocation">
|
||||
|
@ -61,7 +65,6 @@ In the event of call failures and transient errors, service invocation provides
|
|||
|
||||
By default, all calls between applications are traced and metrics are gathered to provide insights and diagnostics for applications. This is especially important in production scenarios, providing call graphs and metrics on the calls between your services. For more information read about [observability]({{< ref observability-concept.md >}}).
|
||||
|
||||
|
||||
### Access control
|
||||
|
||||
With access policies, applications can control:
|
||||
|
@ -83,7 +86,7 @@ Dapr provides round robin load balancing of service invocation requests with the
|
|||
|
||||
The diagram below shows an example of how this works. If you have 1 instance of an application with app ID `FrontEnd` and 3 instances of application with app ID `Cart` and you call from `FrontEnd` app to `Cart` app, Dapr round robins' between the 3 instances. These instance can be on the same machine or on different machines. .
|
||||
|
||||
<img src="/images/service-invocation-mdns-round-robin.png" width=600 alt="Diagram showing the steps of service invocation">
|
||||
<img src="/images/service-invocation-mdns-round-robin.png" width=600 alt="Diagram showing the steps of service invocation" style="padding-bottom:25px;">
|
||||
|
||||
**Note**: App ID is unique per _application_, not application instance. Regardless how many instances of that application exist (due to scaling), all of them will share the same app ID.
|
||||
|
||||
|
@ -97,7 +100,7 @@ Following the above call sequence, suppose you have the applications as describe
|
|||
|
||||
The diagram below shows sequence 1-7 again on a local machine showing the API calls:
|
||||
|
||||
<img src="/images/service-invocation-overview-example.png" width=800 />
|
||||
<img src="/images/service-invocation-overview-example.png" width=800 style="padding-bottom:25px;">
|
||||
|
||||
1. The Node.js app has a Dapr app ID of `nodeapp`. The python app invokes the Node.js app's `neworder` method by POSTing `http://localhost:3500/v1.0/invoke/nodeapp/method/neworder`, which first goes to the python app's local Dapr sidecar.
|
||||
2. Dapr discovers the Node.js app's location using name resolution component (in this case mDNS while self-hosted) which runs on your local machine.
|
||||
|
@ -135,5 +138,5 @@ For quick testing, try using the Dapr CLI for service invocation:
|
|||
## Next steps
|
||||
- Read the [service invocation API specification]({{< ref service_invocation_api.md >}}). This reference guide for service invocation describes how to invoke methods on other services.
|
||||
- Understand the [service invocation performance numbers]({{< ref perf-service-invocation.md >}}).
|
||||
- Take a look at [observability]({{< ref monitoring.md >}}). Here you can dig into Dapr's monitoring tools like tracing, metrics and logging.
|
||||
- Take a look at [observability]({{< ref observability >}}). Here you can dig into Dapr's monitoring tools like tracing, metrics and logging.
|
||||
- Read up on our [security practices]({{< ref security-concept.md >}}) around mTLS encryption, token authentication, and endpoint authorization.
|
||||
|
|
|
@ -5,3 +5,11 @@ linkTitle: "State management"
|
|||
weight: 20
|
||||
description: Create long running stateful services
|
||||
---
|
||||
|
||||
{{% alert title="More about Dapr State Management" color="primary" %}}
|
||||
Learn more about how to use Dapr State Management:
|
||||
- Try the [State Management quickstart]({{< ref statemanagement-quickstart.md >}}).
|
||||
- Explore state management via any of the supporting [Dapr SDKs]({{< ref sdks >}}).
|
||||
- Review the [State Management API reference documentation]({{< ref state_api.md >}}).
|
||||
- Browse the supported [state management component specs]({{< ref supported-state-stores >}}).
|
||||
{{% /alert %}}
|
|
@ -11,7 +11,11 @@ Your application can use Dapr's state management API to save, read, and query ke
|
|||
- Use **HTTP POST** to save or query key/value pairs.
|
||||
- Use **HTTP GET** to read a specific key and have its value returned.
|
||||
|
||||
<img src="/images/state-management-overview.png" width=1000>
|
||||
<img src="/images/state-management-overview.png" width=1000 style="padding-bottom:25px;">
|
||||
|
||||
[The following overview video and demo](https://www.youtube.com/live/0y7ne6teHT4?si=2_xX6mkU3UCy2Plr&t=6607) demonstrates how Dapr state management works.
|
||||
|
||||
<iframe width="560" height="315" src="https://www.youtube-nocookie.com/embed/0y7ne6teHT4?si=2_xX6mkU3UCy2Plr&start=6607" title="YouTube video player" style="padding-bottom:25px;" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" allowfullscreen></iframe>
|
||||
|
||||
## Features
|
||||
|
||||
|
|
|
@ -16,9 +16,15 @@ When state TTL has native support in the state store component, Dapr forwards th
|
|||
|
||||
When a TTL is not specified, the default behavior of the state store is retained.
|
||||
|
||||
## Persisting state (ignoring an existing TTL)
|
||||
## Explicit persistence bypassing globally defined TTL
|
||||
|
||||
To explicitly persist a state (ignoring any TTLs set for the key), specify a `ttlInSeconds` value of `-1`.
|
||||
Persisting state applies to all state stores that let you specify a default TTL used for all data, either:
|
||||
- Setting a global TTL value via a Dapr component, or
|
||||
- When creating the state store outside of Dapr and setting a global TTL value.
|
||||
|
||||
When no specific TTL is specified, the data expires after that global TTL period of time. This is not facilitated by Dapr.
|
||||
|
||||
In addition, all state stores also support the option to _explicitly_ persist data. This means you can ignore the default database policy (which may have been set outside of Dapr or via a Dapr Component) to indefinitely retain a given database record. You can do this by setting `ttlInSeconds` to the value of `-1`. This value indicates to ignore any TTL value set.
|
||||
|
||||
## Supported components
|
||||
|
||||
|
@ -71,7 +77,7 @@ using Dapr.Client;
|
|||
|
||||
await client.SaveStateAsync(storeName, stateKeyName, state, metadata: new Dictionary<string, string>() {
|
||||
{
|
||||
"metadata.ttlInSeconds", "120"
|
||||
"ttlInSeconds", "120"
|
||||
}
|
||||
});
|
||||
```
|
||||
|
|
|
@ -4,4 +4,11 @@ title: "Workflow"
|
|||
linkTitle: "Workflow"
|
||||
weight: 100
|
||||
description: "Orchestrate logic across various microservices"
|
||||
---
|
||||
---
|
||||
|
||||
{{% alert title="More about Dapr Workflow" color="primary" %}}
|
||||
Learn more about how to use Dapr Workflow:
|
||||
- Try the [Workflow quickstart]({{< ref workflow-quickstart.md >}}).
|
||||
- Explore workflow via any of the supporting [Dapr SDKs]({{< ref sdks >}}).
|
||||
- Review the [Workflow API reference documentation]({{< ref workflow_api.md >}}).
|
||||
{{% /alert %}}
|
|
@ -141,12 +141,10 @@ Sometimes workflows will need to wait for events that are raised by external sys
|
|||
|
||||
External events have a _name_ and a _payload_ and are delivered to a single workflow instance. Workflows can create "_wait for external event_" tasks that subscribe to external events and _await_ those tasks to block execution until the event is received. The workflow can then read the payload of these events and make decisions about which next steps to take. External events can be processed serially or in parallel. External events can be raised by other workflows or by workflow code.
|
||||
|
||||
{{% alert title="Note" color="primary" %}}
|
||||
The ability to raise external events to workflows is not included in the alpha version of Dapr's workflow API.
|
||||
{{% /alert %}}
|
||||
|
||||
Workflows can also wait for multiple external event signals of the same name, in which case they are dispatched to the corresponding workflow tasks in a first-in, first-out (FIFO) manner. If a workflow receives an external event signal but has not yet created a "wait for external event" task, the event will be saved into the workflow's history and consumed immediately after the workflow requests the event.
|
||||
|
||||
Learn more about [external system interaction.]({{< ref "workflow-patterns.md#external-system-interaction" >}})
|
||||
|
||||
## Limitations
|
||||
|
||||
### Workflow determinism and code restraints
|
||||
|
|
|
@ -16,7 +16,7 @@ The durable, resilient Dapr Workflow capability:
|
|||
|
||||
- Offers a built-in workflow runtime for driving Dapr Workflow execution.
|
||||
- Provides SDKs for authoring workflows in code, using any language.
|
||||
- Provides HTTP and gRPC APIs for managing workflows (start, query, suspend/resume, terminate).
|
||||
- Provides HTTP and gRPC APIs for managing workflows (start, query, pause/resume, raise event, terminate, purge).
|
||||
- Integrates with any other workflow runtime via workflow components.
|
||||
|
||||
<img src="/images/workflow-overview/workflow-overview.png" width=800 alt="Diagram showing basics of Dapr Workflow">
|
||||
|
@ -56,7 +56,10 @@ Same as Dapr actors, you can schedule reminder-like durable delays for any time
|
|||
When you create an application with workflow code and run it with Dapr, you can call specific workflows that reside in the application. Each individual workflow can be:
|
||||
|
||||
- Started or terminated through a POST request
|
||||
- Queried through a GET request
|
||||
- Triggered to deliver a named event through a POST request
|
||||
- Paused and then resumed through a POST request
|
||||
- Purged from your state store through a POST request
|
||||
- Queried for workflow status through a GET request
|
||||
|
||||
[Learn more about how to manage a workflow using HTTP calls.]({{< ref workflow_api.md >}})
|
||||
|
||||
|
|
|
@ -25,9 +25,10 @@ While the pattern is simple, there are many complexities hidden in the implement
|
|||
|
||||
Dapr Workflow solves these complexities by allowing you to implement the task chaining pattern concisely as a simple function in the programming language of your choice, as shown in the following example.
|
||||
|
||||
{{< tabs ".NET" >}}
|
||||
{{< tabs ".NET" Python >}}
|
||||
|
||||
{{% codetab %}}
|
||||
<!--dotnet-->
|
||||
|
||||
```csharp
|
||||
// Exponential backoff retry policy that survives long outages
|
||||
|
@ -45,7 +46,6 @@ try
|
|||
var result1 = await context.CallActivityAsync<string>("Step1", wfInput, retryOptions);
|
||||
var result2 = await context.CallActivityAsync<byte[]>("Step2", result1, retryOptions);
|
||||
var result3 = await context.CallActivityAsync<long[]>("Step3", result2, retryOptions);
|
||||
var result4 = await context.CallActivityAsync<Guid[]>("Step4", result3, retryOptions);
|
||||
return string.Join(", ", result4);
|
||||
}
|
||||
catch (TaskFailedException) // Task failures are surfaced as TaskFailedException
|
||||
|
@ -56,14 +56,61 @@ catch (TaskFailedException) // Task failures are surfaced as TaskFailedException
|
|||
}
|
||||
```
|
||||
|
||||
{{% alert title="Note" color="primary" %}}
|
||||
In the example above, `"Step1"`, `"Step2"`, `"Step3"`, and `"MyCompensation"` represent workflow activities, which are functions in your code that actually implement the steps of the workflow. For brevity, these activity implementations are left out of this example.
|
||||
{{% /alert %}}
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{% codetab %}}
|
||||
<!--python-->
|
||||
|
||||
```python
|
||||
import dapr.ext.workflow as wf
|
||||
|
||||
|
||||
def task_chain_workflow(ctx: wf.DaprWorkflowContext, wf_input: int):
|
||||
try:
|
||||
result1 = yield ctx.call_activity(step1, input=wf_input)
|
||||
result2 = yield ctx.call_activity(step2, input=result1)
|
||||
result3 = yield ctx.call_activity(step3, input=result2)
|
||||
except Exception as e:
|
||||
yield ctx.call_activity(error_handler, input=str(e))
|
||||
raise
|
||||
return [result1, result2, result3]
|
||||
|
||||
|
||||
def step1(ctx, activity_input):
|
||||
print(f'Step 1: Received input: {activity_input}.')
|
||||
# Do some work
|
||||
return activity_input + 1
|
||||
|
||||
|
||||
def step2(ctx, activity_input):
|
||||
print(f'Step 2: Received input: {activity_input}.')
|
||||
# Do some work
|
||||
return activity_input * 2
|
||||
|
||||
|
||||
def step3(ctx, activity_input):
|
||||
print(f'Step 3: Received input: {activity_input}.')
|
||||
# Do some work
|
||||
return activity_input ^ 2
|
||||
|
||||
|
||||
def error_handler(ctx, error):
|
||||
print(f'Executing error handler: {error}.')
|
||||
# Do some compensating work
|
||||
```
|
||||
|
||||
{{% alert title="Note" color="primary" %}}
|
||||
Workflow retry policies will be available in a future version of the Python SDK.
|
||||
{{% /alert %}}
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{< /tabs >}}
|
||||
|
||||
{{% alert title="Note" color="primary" %}}
|
||||
In the example above, `"Step1"`, `"Step2"`, `"MyCompensation"`, etc. represent workflow activities, which are functions in your code that actually implement the steps of the workflow. For brevity, these activity implementations are left out of this example.
|
||||
{{% /alert %}}
|
||||
|
||||
As you can see, the workflow is expressed as a simple series of statements in the programming language of your choice. This allows any engineer in the organization to quickly understand the end-to-end flow without necessarily needing to understand the end-to-end system architecture.
|
||||
|
||||
Behind the scenes, the Dapr Workflow runtime:
|
||||
|
@ -88,9 +135,10 @@ In addition to the challenges mentioned in [the previous pattern]({{< ref "workf
|
|||
|
||||
Dapr Workflows provides a way to express the fan-out/fan-in pattern as a simple function, as shown in the following example:
|
||||
|
||||
{{< tabs ".NET" >}}
|
||||
{{< tabs ".NET" Python >}}
|
||||
|
||||
{{% codetab %}}
|
||||
<!--dotnet-->
|
||||
|
||||
```csharp
|
||||
// Get a list of N work items to process in parallel.
|
||||
|
@ -114,6 +162,46 @@ await context.CallActivityAsync("PostResults", sum);
|
|||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{% codetab %}}
|
||||
<!--python-->
|
||||
|
||||
```python
|
||||
import time
|
||||
from typing import List
|
||||
import dapr.ext.workflow as wf
|
||||
|
||||
|
||||
def batch_processing_workflow(ctx: wf.DaprWorkflowContext, wf_input: int):
|
||||
# get a batch of N work items to process in parallel
|
||||
work_batch = yield ctx.call_activity(get_work_batch, input=wf_input)
|
||||
|
||||
# schedule N parallel tasks to process the work items and wait for all to complete
|
||||
parallel_tasks = [ctx.call_activity(process_work_item, input=work_item) for work_item in work_batch]
|
||||
outputs = yield wf.when_all(parallel_tasks)
|
||||
|
||||
# aggregate the results and send them to another activity
|
||||
total = sum(outputs)
|
||||
yield ctx.call_activity(process_results, input=total)
|
||||
|
||||
|
||||
def get_work_batch(ctx, batch_size: int) -> List[int]:
|
||||
return [i + 1 for i in range(batch_size)]
|
||||
|
||||
|
||||
def process_work_item(ctx, work_item: int) -> int:
|
||||
print(f'Processing work item: {work_item}.')
|
||||
time.sleep(5)
|
||||
result = work_item * 2
|
||||
print(f'Work item {work_item} processed. Result: {result}.')
|
||||
return result
|
||||
|
||||
|
||||
def process_results(ctx, final_result: int):
|
||||
print(f'Final result: {final_result}.')
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{< /tabs >}}
|
||||
|
||||
The key takeaways from this example are:
|
||||
|
@ -214,9 +302,10 @@ Depending on the business needs, there may be a single monitor or there may be m
|
|||
|
||||
Dapr Workflow supports this pattern natively by allowing you to implement _eternal workflows_. Rather than writing infinite while-loops ([which is an anti-pattern]({{< ref "workflow-features-concepts.md#infinite-loops-and-eternal-workflows" >}})), Dapr Workflow exposes a _continue-as-new_ API that workflow authors can use to restart a workflow function from the beginning with a new input.
|
||||
|
||||
{{< tabs ".NET" >}}
|
||||
{{< tabs ".NET" Python >}}
|
||||
|
||||
{{% codetab %}}
|
||||
<!--dotnet-->
|
||||
|
||||
```csharp
|
||||
public override async Task<object> RunAsync(WorkflowContext context, MyEntityState myEntityState)
|
||||
|
@ -256,6 +345,53 @@ public override async Task<object> RunAsync(WorkflowContext context, MyEntitySta
|
|||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{% codetab %}}
|
||||
<!--python-->
|
||||
|
||||
```python
|
||||
from dataclasses import dataclass
|
||||
from datetime import timedelta
|
||||
import random
|
||||
import dapr.ext.workflow as wf
|
||||
|
||||
|
||||
@dataclass
|
||||
class JobStatus:
|
||||
job_id: str
|
||||
is_healthy: bool
|
||||
|
||||
|
||||
def status_monitor_workflow(ctx: wf.DaprWorkflowContext, job: JobStatus):
|
||||
# poll a status endpoint associated with this job
|
||||
status = yield ctx.call_activity(check_status, input=job)
|
||||
if not ctx.is_replaying:
|
||||
print(f"Job '{job.job_id}' is {status}.")
|
||||
|
||||
if status == "healthy":
|
||||
job.is_healthy = True
|
||||
next_sleep_interval = 60 # check less frequently when healthy
|
||||
else:
|
||||
if job.is_healthy:
|
||||
job.is_healthy = False
|
||||
ctx.call_activity(send_alert, input=f"Job '{job.job_id}' is unhealthy!")
|
||||
next_sleep_interval = 5 # check more frequently when unhealthy
|
||||
|
||||
yield ctx.create_timer(fire_at=ctx.current_utc_datetime + timedelta(seconds=next_sleep_interval))
|
||||
|
||||
# restart from the beginning with a new JobStatus input
|
||||
ctx.continue_as_new(job)
|
||||
|
||||
|
||||
def check_status(ctx, _) -> str:
|
||||
return random.choice(["healthy", "unhealthy"])
|
||||
|
||||
|
||||
def send_alert(ctx, message: str):
|
||||
print(f'*** Alert: {message}')
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{< /tabs >}}
|
||||
|
||||
A workflow implementing the monitor pattern can loop forever or it can terminate itself gracefully by not calling _continue-as-new_.
|
||||
|
@ -284,9 +420,10 @@ The following diagram illustrates this flow.
|
|||
|
||||
The following example code shows how this pattern can be implemented using Dapr Workflow.
|
||||
|
||||
{{< tabs ".NET" >}}
|
||||
{{< tabs ".NET" Python >}}
|
||||
|
||||
{{% codetab %}}
|
||||
<!--dotnet-->
|
||||
|
||||
```csharp
|
||||
public override async Task<OrderResult> RunAsync(WorkflowContext context, OrderPayload order)
|
||||
|
@ -331,13 +468,73 @@ In the example above, `RequestApprovalActivity` is the name of a workflow activi
|
|||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{% codetab %}}
|
||||
<!--python-->
|
||||
|
||||
```python
|
||||
from dataclasses import dataclass
|
||||
from datetime import timedelta
|
||||
import dapr.ext.workflow as wf
|
||||
|
||||
|
||||
@dataclass
|
||||
class Order:
|
||||
cost: float
|
||||
product: str
|
||||
quantity: int
|
||||
|
||||
def __str__(self):
|
||||
return f'{self.product} ({self.quantity})'
|
||||
|
||||
|
||||
@dataclass
|
||||
class Approval:
|
||||
approver: str
|
||||
|
||||
@staticmethod
|
||||
def from_dict(dict):
|
||||
return Approval(**dict)
|
||||
|
||||
|
||||
def purchase_order_workflow(ctx: wf.DaprWorkflowContext, order: Order):
|
||||
# Orders under $1000 are auto-approved
|
||||
if order.cost < 1000:
|
||||
return "Auto-approved"
|
||||
|
||||
# Orders of $1000 or more require manager approval
|
||||
yield ctx.call_activity(send_approval_request, input=order)
|
||||
|
||||
# Approvals must be received within 24 hours or they will be canceled.
|
||||
approval_event = ctx.wait_for_external_event("approval_received")
|
||||
timeout_event = ctx.create_timer(timedelta(hours=24))
|
||||
winner = yield wf.when_any([approval_event, timeout_event])
|
||||
if winner == timeout_event:
|
||||
return "Cancelled"
|
||||
|
||||
# The order was approved
|
||||
yield ctx.call_activity(place_order, input=order)
|
||||
approval_details = Approval.from_dict(approval_event.get_result())
|
||||
return f"Approved by '{approval_details.approver}'"
|
||||
|
||||
|
||||
def send_approval_request(_, order: Order) -> None:
|
||||
print(f'*** Sending approval request for order: {order}')
|
||||
|
||||
|
||||
def place_order(_, order: Order) -> None:
|
||||
print(f'*** Placing order: {order}')
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{< /tabs >}}
|
||||
|
||||
The code that delivers the event to resume the workflow execution is external to the workflow. Workflow events can be delivered to a waiting workflow instance using the [raise event]({{< ref "howto-manage-workflow.md#raise-an-event" >}}) workflow management API, as shown in the following example:
|
||||
|
||||
{{< tabs ".NET" >}}
|
||||
{{< tabs ".NET" Python >}}
|
||||
|
||||
{{% codetab %}}
|
||||
<!--dotnet-->
|
||||
|
||||
```csharp
|
||||
// Raise the workflow event to the waiting workflow
|
||||
|
@ -350,6 +547,23 @@ await daprClient.RaiseWorkflowEventAsync(
|
|||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{% codetab %}}
|
||||
<!--python-->
|
||||
|
||||
```python
|
||||
from dapr.clients import DaprClient
|
||||
from dataclasses import asdict
|
||||
|
||||
with DaprClient() as d:
|
||||
d.raise_workflow_event(
|
||||
instance_id=instance_id,
|
||||
workflow_component="dapr",
|
||||
event_name="approval_received",
|
||||
event_data=asdict(Approval("Jane Doe")))
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{< /tabs >}}
|
||||
|
||||
External events don't have to be directly triggered by humans. They can also be triggered by other systems. For example, a workflow may need to pause and wait for a payment to be received. In this case, a payment system might publish an event to a pub/sub topic on receipt of a payment, and a listener on that topic can raise an event to the workflow using the raise event workflow API.
|
||||
|
|
|
@ -3,5 +3,5 @@ type: docs
|
|||
title: "Authenticate to Azure"
|
||||
linkTitle: "Authenticate to Azure"
|
||||
weight: 1600
|
||||
description: "Learn about authenticating Azure components using Azure Active Directory or Managed Service Identities"
|
||||
description: "Learn about authenticating Azure components using Azure Active Directory or Managed Identities"
|
||||
---
|
|
@ -9,59 +9,74 @@ aliases:
|
|||
weight: 10000
|
||||
---
|
||||
|
||||
Certain Azure components for Dapr offer support for the *common Azure authentication layer*, which enables applications to access data stored in Azure resources by authenticating with Azure Active Directory (Azure AD). Thanks to this:
|
||||
- Administrators can leverage all the benefits of fine-tuned permissions with Role-Based Access Control (RBAC).
|
||||
- Applications running on Azure services such as Azure Container Apps, Azure Kubernetes Service, Azure VMs, or any other Azure platform services can leverage [Managed Service Identities (MSI)](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview).
|
||||
Most Azure components for Dapr support authenticating with Azure AD (Azure Active Directory). Thanks to this:
|
||||
|
||||
- Administrators can leverage all the benefits of fine-tuned permissions with Azure Role-Based Access Control (RBAC).
|
||||
- Applications running on Azure services such as Azure Container Apps, Azure Kubernetes Service, Azure VMs, or any other Azure platform services can leverage [Managed Identities (MI)](https://learn.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview) and [Workload Identity](https://learn.microsoft.com/azure/aks/workload-identity-overview). These offer the ability to authenticate your applications without having to manage sensitive credentials.
|
||||
|
||||
## About authentication with Azure AD
|
||||
|
||||
Azure AD is Azure's identity and access management (IAM) solution, which is used to authenticate and authorize users and services.
|
||||
|
||||
Azure AD is built on top of open standards such OAuth 2.0, which allows services (applications) to obtain access tokens to make requests to Azure services, including Azure Storage, Azure Key Vault, Cosmos DB, etc.
|
||||
Azure AD is built on top of open standards such as OAuth 2.0, which allows services (applications) to obtain access tokens to make requests to Azure services, including Azure Storage, Azure Service Bus, Azure Key Vault, Azure Cosmos DB, Azure Database for Postgres, Azure SQL, etc.
|
||||
|
||||
> In Azure terminology, an application is also called a "Service Principal".
|
||||
|
||||
Some Azure components offer alternative authentication methods, such as systems based on "master keys" or "shared keys". Although both master keys and shared keys are valid and supported by Dapr, you should authenticate your Dapr components using Azure AD. Using Azure AD offers benefits like the following.
|
||||
Some Azure components offer alternative authentication methods, such as systems based on "shared keys" or "access tokens". Although these are valid and supported by Dapr, you should authenticate your Dapr components using Azure AD whenever possible to take advantage of many benefits, including:
|
||||
|
||||
### Managed Service Identities
|
||||
- [Managed Identities and Workload Identity](#managed-identities-and-workload-identity)
|
||||
- [Role-Based Access Control](#role-based-access-control)
|
||||
- [Auditing](#auditing)
|
||||
- [(Optional) Authentication using certificates](#optional-authentication-using-certificates)
|
||||
|
||||
With Managed Service Identities (MSI), your application can authenticate with Azure AD and obtain an access token to make requests to Azure services. When your application is running on a supported Azure service, an identity for your application can be assigned at the infrastructure level.
|
||||
### Managed Identities and Workload Identity
|
||||
|
||||
With Managed Identities (MI), your application can authenticate with Azure AD and obtain an access token to make requests to Azure services. When your application is running on a supported Azure service (such as Azure VMs, Azure Container Apps, Azure Web Apps, etc), an identity for your application can be assigned at the infrastructure level.
|
||||
|
||||
Once using MI, your code doesn't have to deal with credentials, which:
|
||||
|
||||
Once using MSI, your code doesn't have to deal with credentials, which:
|
||||
- Removes the challenge of managing credentials safely
|
||||
- Allows greater separation of concerns between development and operations teams
|
||||
- Reduces the number of people with access to credentials
|
||||
- Simplifies operational aspects–especially when multiple environments are used
|
||||
|
||||
### Role-based Access Control
|
||||
Applications running on Azure Kubernetes Service can similarly leverage [Workload Identity](https://learn.microsoft.com/azure/aks/workload-identity-overview) to automatically provide an identity to individual pods.
|
||||
|
||||
When using Role-Based Access Control (RBAC) with supported services, permissions given to an application can be fine-tuned. For example, you can restrict access to a subset of data or make it read-only.
|
||||
### Role-Based Access Control
|
||||
|
||||
When using Azure Role-Based Access Control (RBAC) with supported services, permissions given to an application can be fine-tuned. For example, you can restrict access to a subset of data or make the access read-only.
|
||||
|
||||
### Auditing
|
||||
|
||||
Using Azure AD provides an improved auditing experience for access.
|
||||
Using Azure AD provides an improved auditing experience for access. Tenant administrators can consult audit logs to track authentication requests.
|
||||
|
||||
### (Optional) Authenticate using certificates
|
||||
### (Optional) Authentication using certificates
|
||||
|
||||
While Azure AD allows you to use MSI or RBAC, you still have the option to authenticate using certificates.
|
||||
While Azure AD allows you to use MI, you still have the option to authenticate using certificates.
|
||||
|
||||
## Support for other Azure environments
|
||||
|
||||
By default, Dapr components are configured to interact with Azure resources in the "public cloud". If your application is deployed to another cloud, such as Azure China, Azure Government, or Azure Germany, you can enable that for supported components by setting the `azureEnvironment` metadata property to one of the supported values:
|
||||
By default, Dapr components are configured to interact with Azure resources in the "public cloud". If your application is deployed to another cloud, such as Azure China or Azure Government ("sovereign clouds"), you can enable that for supported components by setting the `azureEnvironment` metadata property to one of the supported values:
|
||||
|
||||
- Azure public cloud (default): `"AZUREPUBLICCLOUD"`
|
||||
- Azure China: `"AZURECHINACLOUD"`
|
||||
- Azure Government: `"AZUREUSGOVERNMENTCLOUD"`
|
||||
- Azure Germany: `"AZUREGERMANCLOUD"`
|
||||
- Azure public cloud (default): `"AzurePublicCloud"`
|
||||
- Azure China: `"AzureChinaCloud"`
|
||||
- Azure Government: `"AzureUSGovernmentCloud"`
|
||||
|
||||
> Support for sovereign clouds is experimental.
|
||||
|
||||
## Credentials metadata fields
|
||||
|
||||
To authenticate with Azure AD, you will need to add the following credentials as values in the metadata for your [Dapr component]({{< ref "#example-usage-in-a-dapr-component" >}}).
|
||||
To authenticate with Azure AD, you will need to add the following credentials as values in the metadata for your [Dapr component](#example-usage-in-a-dapr-component).
|
||||
|
||||
### Metadata options
|
||||
|
||||
Depending on how you've passed credentials to your Dapr services, you have multiple metadata options.
|
||||
Depending on how you've passed credentials to your Dapr services, you have multiple metadata options.
|
||||
|
||||
- [Using client credentials](#authenticating-using-client-credentials)
|
||||
- [Using a certificate](#authenticating-using-a-certificate)
|
||||
- [Using Managed Identities (MI)](#authenticating-with-managed-identities-mi)
|
||||
- [Using Workload Identity on AKS](#authenticating-with-workload-identity-on-aks)
|
||||
- [Using Azure CLI credentials (development-only)](#authenticating-using-azure-cli-credentials-development-only)
|
||||
|
||||
#### Authenticating using client credentials
|
||||
|
||||
|
@ -73,7 +88,7 @@ Depending on how you've passed credentials to your Dapr services, you have multi
|
|||
|
||||
When running on Kubernetes, you can also use references to Kubernetes secrets for any or all of the values above.
|
||||
|
||||
#### Authenticating using a PFX certificate
|
||||
#### Authenticating using a certificate
|
||||
|
||||
| Field | Required | Details | Example |
|
||||
|--------|--------|--------|--------|
|
||||
|
@ -85,27 +100,30 @@ When running on Kubernetes, you can also use references to Kubernetes secrets fo
|
|||
|
||||
When running on Kubernetes, you can also use references to Kubernetes secrets for any or all of the values above.
|
||||
|
||||
#### Authenticating with Managed Service Identities (MSI)
|
||||
#### Authenticating with Managed Identities (MI)
|
||||
|
||||
| Field | Required | Details | Example |
|
||||
|-----------------|----------|----------------------------|------------------------------------------|
|
||||
| `azureClientId` | N | Client ID (application ID) | `"c7dd251f-811f-4ba2-a905-acd4d3f8f08b"` |
|
||||
|
||||
Using MSI, you're not required to specify any value, although you may pass `azureClientId` if needed.
|
||||
Using Managed Identities, the `azureClientId` field is generally recommended. The field is optional when using a system-assigned identity, but may be required when using user-assigned identities.
|
||||
|
||||
### Aliases
|
||||
#### Authenticating with Workload Identity on AKS
|
||||
|
||||
For backwards-compatibility reasons, the following values in the metadata are supported as aliases. Their use is discouraged.
|
||||
When running on Azure Kubernetes Service (AKS), you can authenticate components using Workload Identity. Refer to the Azure AKS documentation on [enabling Workload Identity](https://learn.microsoft.com/azure/aks/workload-identity-overview) for your Kubernetes resources.
|
||||
|
||||
| Metadata key | Aliases (supported but deprecated) |
|
||||
|----------------------------|------------------------------------|
|
||||
| `azureTenantId` | `spnTenantId`, `tenantId` |
|
||||
| `azureClientId` | `spnClientId`, `clientId` |
|
||||
| `azureClientSecret` | `spnClientSecret`, `clientSecret` |
|
||||
| `azureCertificate` | `spnCertificate` |
|
||||
| `azureCertificateFile` | `spnCertificateFile` |
|
||||
| `azureCertificatePassword` | `spnCertificatePassword` |
|
||||
#### Authenticating using Azure CLI credentials (development-only)
|
||||
|
||||
> **Important:** This authentication method is recommended for **development only**.
|
||||
|
||||
This authentication method can be useful while developing on a local machine. You will need:
|
||||
|
||||
- The [Azure CLI installed](https://learn.microsoft.com/cli/azure/install-azure-cli)
|
||||
- Have successfully authenticated using the `az login` command
|
||||
|
||||
When Dapr is running on a host where there are credentials available for the Azure CLI, components can use those to authenticate automatically if no other authentication method is configured.
|
||||
|
||||
Using this authentication method does not require setting any metadata option.
|
||||
|
||||
### Example usage in a Dapr component
|
||||
|
||||
|
|
|
@ -62,6 +62,7 @@ Save the output values returned; you'll need them for Dapr to authenticate with
|
|||
```
|
||||
|
||||
When adding the returned values to your Dapr component's metadata:
|
||||
|
||||
- `appId` is the value for `azureClientId`
|
||||
- `password` is the value for `azureClientSecret` (this was randomly-generated)
|
||||
- `tenant` is the value for `azureTenantId`
|
||||
|
@ -93,11 +94,12 @@ Save the output values returned; you'll need them for Dapr to authenticate with
|
|||
```
|
||||
|
||||
When adding the returned values to your Dapr component's metadata:
|
||||
|
||||
- `appId` is the value for `azureClientId`
|
||||
- `tenant` is the value for `azureTenantId`
|
||||
- `fileWithCertAndPrivateKey` indicates the location of the self-signed PFX certificate and private key. Use the contents of that file as `azureCertificate` (or write it to a file on the server and use `azureCertificateFile`)
|
||||
|
||||
> **Note:** While the generated file has the `.pem` extension, it contains a certificate and private key encoded as _PFX (PKCS#12)_.
|
||||
> **Note:** While the generated file has the `.pem` extension, it contains a certificate and private key encoded as PFX (PKCS#12).
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
|
@ -122,26 +124,13 @@ Expected output:
|
|||
Service Principal ID: 1d0ccf05-5427-4b5e-8eb4-005ac5f9f163
|
||||
```
|
||||
|
||||
The returned value above is the **Service Principal ID**, which is different from the Azure AD application ID (client ID).
|
||||
|
||||
**The Service Principal ID** is:
|
||||
- Defined within an Azure tenant
|
||||
- Used to grant access to Azure resources to an application
|
||||
|
||||
The returned value above is the **Service Principal ID**, which is different from the Azure AD application ID (client ID). The Service Principal ID is defined within an Azure tenant and used to grant access to Azure resources to an application.
|
||||
You'll use the Service Principal ID to grant permissions to an application to access Azure resources.
|
||||
|
||||
Meanwhile, **the client ID** is used by your application to authenticate. You'll use the client ID in Dapr manifests to configure authentication with Azure services.
|
||||
|
||||
Keep in mind that the Service Principal that was just created does not have access to any Azure resource by default. Access will need to be granted to each resource as needed, as documented in the docs for the components.
|
||||
|
||||
{{% alert title="Note" color="primary" %}}
|
||||
This step is different from the [official Azure documentation](https://docs.microsoft.com/cli/azure/create-an-azure-service-principal-azure-cli). The short-hand commands included in the official documentation creates a Service Principal that has broad `read-write` access to all Azure resources in your subscription, which:
|
||||
|
||||
- Grants your Service Principal more access than you likely desire.
|
||||
- Applies _only_ to the Azure management plane (Azure Resource Manager, or ARM), which is irrelevant for Dapr components, which are designed to interact with the data plane of various services.
|
||||
|
||||
{{% /alert %}}
|
||||
|
||||
## Next steps
|
||||
|
||||
{{< button text="Use MSI >>" page="howto-msi.md" >}}
|
||||
{{< button text="Use Managed Identities >>" page="howto-mi.md" >}}
|
||||
|
|
|
@ -1,14 +1,16 @@
|
|||
---
|
||||
type: docs
|
||||
title: "How to: Use Managed Service Identities"
|
||||
linkTitle: "How to: Use MSI"
|
||||
title: "How to: Use Managed Identities"
|
||||
linkTitle: "How to: Use MI"
|
||||
weight: 40000
|
||||
description: "Learn how to use Managed Service Identities"
|
||||
aliases:
|
||||
- "/developing-applications/integrations/azure/azure-authentication/howto-msi/"
|
||||
description: "Learn how to use Managed Identities"
|
||||
---
|
||||
|
||||
Using MSI, authentication happens automatically by virtue of your application running on top of an Azure service that has an assigned identity.
|
||||
Using Managed Identities (MI), authentication happens automatically by virtue of your application running on top of an Azure service that has an assigned identity.
|
||||
|
||||
For example, let's say you enable a managed service identity for an Azure VM, Azure Container App, or an Azure Kubernetes Service cluster. When you do, an Azure AD application is created for you and automatically assigned to the service. Your Dapr services can then leverage that identity to authenticate with Azure AD, transparently and without you having to specify any credential.
|
||||
For example, let's say you enable a managed service identity for an Azure VM, Azure Container App, or an Azure Kubernetes Service cluster. When you do, an Azure AD application is created for you and automatically assigned to the service. Your Dapr services can then leverage that identity to authenticate with Azure AD, transparently and without you having to specify any credentials.
|
||||
|
||||
To get started with managed identities, you need to assign an identity to a new or existing Azure resource. The instructions depend on the service in use. Check the following official documentation for the most appropriate instructions:
|
||||
|
||||
|
@ -19,8 +21,9 @@ To get started with managed identities, you need to assign an identity to a new
|
|||
- [Azure Virtual Machines Scale Sets (VMSS)](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-cli-windows-vmss)
|
||||
- [Azure Container Instance (ACI)](https://docs.microsoft.com/azure/container-instances/container-instances-managed-identity)
|
||||
|
||||
Dapr supports both system-assigned and user-assigned identities.
|
||||
|
||||
After assigning a managed identity to your Azure resource, you will have credentials such as:
|
||||
After assigning an identity to your Azure resource, you will have credentials such as:
|
||||
|
||||
```json
|
||||
{
|
||||
|
@ -31,7 +34,7 @@ After assigning a managed identity to your Azure resource, you will have credent
|
|||
}
|
||||
```
|
||||
|
||||
From the returned values, take note of **`principalId`**, which is the Service Principal ID that was created. You'll use that to grant access to Azure resources to your Service Principal.
|
||||
From the returned values, take note of **`principalId`**, which is the Service Principal ID that was created. You'll use that to grant access to Azure resources to your identity.
|
||||
|
||||
## Next steps
|
||||
|
|
@ -14,4 +14,10 @@ The recommended approach for installing Dapr on AKS is to use the AKS Dapr exten
|
|||
If you install Dapr through the AKS extension, best practice is to continue using the extension for future management of Dapr _instead of the Dapr CLI_. Combining the two tools can cause conflicts and result in undesired behavior.
|
||||
{{% /alert %}}
|
||||
|
||||
Prerequisites for using the Dapr extension for AKS:
|
||||
- [An Azure subscription](https://azure.microsoft.com/free/?WT.mc_id=A261C142F)
|
||||
- [The latest version of the Azure CLI](https://learn.microsoft.com/cli/azure/install-azure-cli)
|
||||
- [An existing AKS cluster](https://learn.microsoft.com/azure/aks/tutorial-kubernetes-deploy-cluster)
|
||||
- [The Azure Kubernetes Service RBAC Admin role](https://learn.microsoft.com/azure/role-based-access-control/built-in-roles#azure-kubernetes-service-rbac-admin)
|
||||
|
||||
{{< button text="Learn more about the Dapr extension for AKS" link="https://learn.microsoft.com/azure/aks/dapr" >}}
|
||||
|
|
|
@ -132,7 +132,7 @@ The following steps will show how to create an app that exposes a server for wit
|
|||
"github.com/golang/protobuf/ptypes/empty"
|
||||
|
||||
commonv1pb "github.com/dapr/dapr/pkg/proto/common/v1"
|
||||
pb "github.com/dapr/go-sdk/dapr/proto/runtime/v1"
|
||||
pb "github.com/dapr/dapr/pkg/proto/runtime/v1"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
```
|
||||
|
|
|
@ -176,7 +176,7 @@ Below are the supported parameters for VS Code tasks. These parameters are equiv
|
|||
| `appProtocol` | Tells Dapr which protocol your application is using. Valid options are `http`, `grpc`, `https`, `grpcs`, `h2c`. Default is `http`. | No | `"appProtocol": "http"`
|
||||
| `args` | Sets a list of arguments to pass on to the Dapr app | No | "args": []
|
||||
| `componentsPath` | Path for components directory. If empty, components will not be loaded. | No | `"componentsPath": "./components"`
|
||||
| `config` | Tells Dapr which Configuration CRD to use | No | `"config": "./config"`
|
||||
| `config` | Tells Dapr which Configuration resource to use | No | `"config": "./config"`
|
||||
| `controlPlaneAddress` | Address for a Dapr control plane | No | `"controlPlaneAddress": "http://localhost:1366/"`
|
||||
| `enableProfiling` | Enable profiling | No | `"enableProfiling": false`
|
||||
| `enableMtls` | Enables automatic mTLS for daprd to daprd communication channels | No | `"enableMtls": false`
|
||||
|
|
|
@ -83,7 +83,7 @@ apps:
|
|||
appProtocol: http
|
||||
appPort: 8080
|
||||
appHealthCheckPath: "/healthz"
|
||||
command: ["python3" "app.py"]
|
||||
command: ["python3", "app.py"]
|
||||
appLogDestination: file # (optional), can be file, console or fileAndConsole. default is fileAndConsole.
|
||||
daprdLogDestination: file # (optional), can be file, console or fileAndConsole. default is file.
|
||||
- appID: backend # optional
|
||||
|
|
|
@ -11,34 +11,25 @@ The Dapr SDKs are the easiest way for you to get Dapr into your application. Cho
|
|||
|
||||
## SDK packages
|
||||
|
||||
- **Client SDK**: The Dapr client allows you to invoke Dapr building block APIs and perform actions such as:
|
||||
- [Invoke]({{< ref service-invocation >}}) methods on other services
|
||||
- Store and get [state]({{< ref state-management >}})
|
||||
- [Publish and subscribe]({{< ref pubsub >}}) to message topics
|
||||
- Interact with external resources through input and output [bindings]({{< ref bindings >}})
|
||||
- Get [secrets]({{< ref secrets >}}) from secret stores
|
||||
- Interact with [virtual actors]({{< ref actors >}})
|
||||
- **Server extensions**: The Dapr service extensions allow you to create services that can:
|
||||
- Be [invoked]({{< ref service-invocation >}}) by other services
|
||||
- [Subscribe]({{< ref pubsub >}}) to topics
|
||||
- **Actor SDK**: The Dapr Actor SDK allows you to build virtual actors with:
|
||||
- Methods that can be [invoked]({{< ref "howto-actors.md#actor-method-invocation" >}}) by other services
|
||||
- [State]({{< ref "howto-actors.md#actor-state-management" >}}) that can be stored and retrieved
|
||||
- [Timers]({{< ref "howto-actors.md#actor-timers" >}}) with callbacks
|
||||
- Persistent [reminders]({{< ref "howto-actors.md#actor-reminders" >}})
|
||||
Select your [preferred language below]({{< ref "#sdk-languages" >}}) to learn more about client, server, actor, and workflow packages.
|
||||
|
||||
- **Client**: The Dapr client allows you to invoke Dapr building block APIs and perform each building block's actions
|
||||
- **Server extensions**: The Dapr service extensions allow you to create services that can be invoked by other services and subscribe to topics
|
||||
- **Actor**: The Dapr Actor SDK allows you to build virtual actors with methods, state, timers, and persistent reminders
|
||||
- **Workflow**: Dapr Workflow makes it easy for you to write long running business logic and integrations in a reliable way
|
||||
|
||||
## SDK languages
|
||||
|
||||
| Language | Status | Client SDK | Server extensions | Actor SDK |
|
||||
|----------|:------|:----------:|:-----------:|:---------:|
|
||||
| [.NET]({{< ref dotnet >}}) | Stable | ✔ | [ASP.NET Core](https://github.com/dapr/dotnet-sdk/tree/master/examples/AspNetCore) | ✔ |
|
||||
| [Python]({{< ref python >}}) | Stable | ✔ | [gRPC]({{< ref python-grpc.md >}}) <br />[FastAPI]({{< ref python-fastapi.md >}})<br />[Flask]({{< ref python-flask.md >}})| ✔ |
|
||||
| [Java]({{< ref java >}}) | Stable | ✔ | Spring Boot | ✔ |
|
||||
| [Go]({{< ref go >}}) | Stable | ✔ | ✔ | ✔ |
|
||||
| [PHP]({{< ref php >}}) | Stable | ✔ | ✔ | ✔ |
|
||||
| [Javascript]({{< ref js >}}) | Stable| ✔ | | ✔ |
|
||||
| [C++](https://github.com/dapr/cpp-sdk) | In development | ✔ | |
|
||||
| [Rust](https://github.com/dapr/rust-sdk) | In development | ✔ | | |
|
||||
| Language | Status | Client | Server extensions | Actor | Workflow |
|
||||
|----------|:------|:----------:|:-----------:|:---------:|:---------:|
|
||||
| [.NET]({{< ref dotnet >}}) | Stable | ✔ | [ASP.NET Core](https://github.com/dapr/dotnet-sdk/tree/master/examples/AspNetCore) | ✔ | ✔ |
|
||||
| [Python]({{< ref python >}}) | Stable | ✔ | [gRPC]({{< ref python-grpc.md >}}) <br />[FastAPI]({{< ref python-fastapi.md >}})<br />[Flask]({{< ref python-flask.md >}})| ✔ | ✔ |
|
||||
| [Java]({{< ref java >}}) | Stable | ✔ | Spring Boot | ✔ | |
|
||||
| [Go]({{< ref go >}}) | Stable | ✔ | ✔ | ✔ | |
|
||||
| [PHP]({{< ref php >}}) | Stable | ✔ | ✔ | ✔ | |
|
||||
| [Javascript]({{< ref js >}}) | Stable| ✔ | | ✔ | |
|
||||
| [C++](https://github.com/dapr/cpp-sdk) | In development | ✔ | | |
|
||||
| [Rust](https://github.com/dapr/rust-sdk) | In development | ✔ | | | |
|
||||
|
||||
## Further reading
|
||||
|
||||
|
|
|
@ -189,6 +189,8 @@ spec:
|
|||
metadata:
|
||||
- name: schedule
|
||||
value: "@every 10s" # valid cron schedule
|
||||
- name: direction
|
||||
value: "input" # direction of the cron binding
|
||||
```
|
||||
|
||||
**Note:** The `metadata` section of `binding-cron.yaml` contains a [Cron expression]({{< ref cron.md >}}) that specifies how often the binding is invoked.
|
||||
|
@ -216,6 +218,8 @@ spec:
|
|||
metadata:
|
||||
- name: url # Required
|
||||
value: "user=postgres password=docker host=localhost port=5432 dbname=orders pool_min_conns=1 pool_max_conns=10"
|
||||
- name: direction
|
||||
value: "output" # direction of the postgresql binding
|
||||
```
|
||||
|
||||
In the YAML file:
|
||||
|
@ -391,6 +395,8 @@ spec:
|
|||
metadata:
|
||||
- name: schedule
|
||||
value: "@every 10s" # valid cron schedule
|
||||
- name: direction
|
||||
value: "input" # direction of the cron binding
|
||||
```
|
||||
|
||||
**Note:** The `metadata` section of `binding-cron.yaml` contains a [Cron expression]({{< ref cron.md >}}) that specifies how often the binding is invoked.
|
||||
|
@ -418,6 +424,8 @@ spec:
|
|||
metadata:
|
||||
- name: url # Required
|
||||
value: "user=postgres password=docker host=localhost port=5432 dbname=orders pool_min_conns=1 pool_max_conns=10"
|
||||
- name: direction
|
||||
value: "output" # direction of the postgresql binding
|
||||
```
|
||||
|
||||
In the YAML file:
|
||||
|
@ -595,6 +603,8 @@ spec:
|
|||
metadata:
|
||||
- name: schedule
|
||||
value: "@every 10s" # valid cron schedule
|
||||
- name: direction
|
||||
value: "input" # direction of the cron binding
|
||||
```
|
||||
|
||||
**Note:** The `metadata` section of `binding-cron.yaml` contains a [Cron expression]({{< ref cron.md >}}) that specifies how often the binding is invoked.
|
||||
|
@ -622,6 +632,8 @@ spec:
|
|||
metadata:
|
||||
- name: url # Required
|
||||
value: "user=postgres password=docker host=localhost port=5432 dbname=orders pool_min_conns=1 pool_max_conns=10"
|
||||
- name: direction
|
||||
value: "output" # direction of the postgresql binding
|
||||
```
|
||||
|
||||
In the YAML file:
|
||||
|
@ -805,6 +817,8 @@ spec:
|
|||
metadata:
|
||||
- name: schedule
|
||||
value: "@every 10s" # valid cron schedule
|
||||
- name: direction
|
||||
value: "input" # direction of the cron binding
|
||||
```
|
||||
|
||||
**Note:** The `metadata` section of `binding-cron.yaml` contains a [Cron expression]({{< ref cron.md >}}) that specifies how often the binding is invoked.
|
||||
|
@ -832,6 +846,8 @@ spec:
|
|||
metadata:
|
||||
- name: url # Required
|
||||
value: "user=postgres password=docker host=localhost port=5432 dbname=orders pool_min_conns=1 pool_max_conns=10"
|
||||
- name: direction
|
||||
value: "output" # direction of the postgresql binding
|
||||
```
|
||||
|
||||
In the YAML file:
|
||||
|
@ -1017,6 +1033,8 @@ spec:
|
|||
metadata:
|
||||
- name: schedule
|
||||
value: "@every 10s" # valid cron schedule
|
||||
- name: direction
|
||||
value: "input" # direction of the cron binding
|
||||
```
|
||||
|
||||
**Note:** The `metadata` section of `binding-cron.yaml` contains a [Cron expression]({{< ref cron.md >}}) that specifies how often the binding is invoked.
|
||||
|
@ -1044,6 +1062,8 @@ spec:
|
|||
metadata:
|
||||
- name: url # Required
|
||||
value: "user=postgres password=docker host=localhost port=5432 dbname=orders pool_min_conns=1 pool_max_conns=10"
|
||||
- name: direction
|
||||
value: "output" # direction of the postgresql binding
|
||||
```
|
||||
|
||||
In the YAML file:
|
||||
|
|
|
@ -600,7 +600,7 @@ go build .
|
|||
Run the `order-processor` service alongside a Dapr sidecar.
|
||||
|
||||
```bash
|
||||
dapr run --app-port 6001 --app-id order-processor --app-protocol http --dapr-http-port 3501 -- go run .
|
||||
dapr run --app-port 6006 --app-id order-processor --app-protocol http --dapr-http-port 3501 -- go run .
|
||||
```
|
||||
|
||||
Each order is received via an HTTP POST request and processed by the
|
||||
|
|
|
@ -12,12 +12,6 @@ The workflow building block is currently in **alpha**.
|
|||
|
||||
Let's take a look at the Dapr [Workflow building block]({{< ref workflow >}}). In this Quickstart, you'll create a simple console application to demonstrate Dapr's workflow programming model and the workflow management APIs.
|
||||
|
||||
The `order-processor` console app starts and manages the lifecycle of the `OrderProcessingWorkflow` workflow that stores and retrieves data in a state store. The workflow consists of four workflow activities, or tasks:
|
||||
- `NotifyActivity`: Utilizes a logger to print out messages throughout the workflow
|
||||
- `ReserveInventoryActivity`: Checks the state store to ensure that there is enough inventory for the purchase
|
||||
- `ProcessPaymentActivity`: Processes and authorizes the payment
|
||||
- `UpdateInventoryActivity`: Removes the requested items from the state store and updates the store with the new remaining inventory value
|
||||
|
||||
In this guide, you'll:
|
||||
|
||||
- Run the `order-processor` application.
|
||||
|
@ -26,13 +20,19 @@ In this guide, you'll:
|
|||
|
||||
<img src="/images/workflow-quickstart-overview.png" width=800 style="padding-bottom:15px;">
|
||||
|
||||
Currently, you can experience the Dapr Workflow using the .NET SDK.
|
||||
|
||||
{{< tabs ".NET" "Python" >}}
|
||||
|
||||
<!-- .NET -->
|
||||
{{% codetab %}}
|
||||
|
||||
The `order-processor` console app starts and manages the lifecycle of an order processing workflow that stores and retrieves data in a state store. The workflow consists of four workflow activities, or tasks:
|
||||
- `NotifyActivity`: Utilizes a logger to print out messages throughout the workflow
|
||||
- `ReserveInventoryActivity`: Checks the state store to ensure that there is enough inventory for the purchase
|
||||
- `ProcessPaymentActivity`: Processes and authorizes the payment
|
||||
- `UpdateInventoryActivity`: Removes the requested items from the state store and updates the store with the new remaining inventory value
|
||||
|
||||
|
||||
### Step 1: Pre-requisites
|
||||
|
||||
For this example, you will need:
|
||||
|
@ -259,6 +259,16 @@ The `Activities` directory holds the four workflow activities used by the workfl
|
|||
<!-- Python -->
|
||||
{{% codetab %}}
|
||||
|
||||
The `order-processor` console app starts and manages the `order_processing_workflow`, which simulates purchasing items from a store. The workflow consists of five unique workflow activities, or tasks:
|
||||
|
||||
- `notify_activity`: Utilizes a logger to print out messages throughout the workflow. These messages notify you when:
|
||||
- You have insufficient inventory
|
||||
- Your payment couldn't be processed, etc.
|
||||
- `process_payment_activity`: Processes and authorizes the payment.
|
||||
- `verify_inventory_activity`: Checks the state store to ensure there is enough inventory present for purchase.
|
||||
- `update_inventory_activity`: Removes the requested items from the state store and updates the store with the new remaining inventory value.
|
||||
- `request_approval_activity`: Seeks approval from the manager if payment is greater than 50,000 USD.
|
||||
|
||||
### Step 1: Pre-requisites
|
||||
|
||||
For this example, you will need:
|
||||
|
|
|
@ -42,6 +42,9 @@ Even though metadata values can contain secrets in plain text, it is recommended
|
|||
Depending on the pub/sub message bus you are using and how it is configured, topics may be created automatically. Even if the message bus supports automatic topic creation, it is a common governance practice to disable it in production environments. You may still need to use a CLI, admin console, or request form to manually create the topics required by your application.
|
||||
{{% /alert %}}
|
||||
|
||||
While all pub/sub components support `consumerID` metadata, the runtime creates a consumer ID if you do not supply one. All component metadata field values can carry [templated metadata values]({{< ref "component-schema.md#templated-metadata-values" >}}), which are resolved on Dapr sidecar startup.
|
||||
For example, you can choose to use `{namespace}` as the `consumerGroup` to enable using the same `appId` in different namespaces using the same topics as described in [this article]({{< ref "howto-namespace.md#with-namespace-consumer-groups">}}).
|
||||
|
||||
Visit [this guide]({{< ref "howto-publish-subscribe.md#step-3-publish-a-topic" >}}) for instructions on configuring and using pub/sub components.
|
||||
|
||||
## Related links
|
||||
|
|
|
@ -30,6 +30,10 @@ The table below shows which resources are deployed to which namespaces:
|
|||
| Python subscriber | X | |
|
||||
| React UI publisher | | X |
|
||||
|
||||
{{% alert title="Note" color="primary" %}}
|
||||
All pub/sub components support limiting pub/sub topics to specific applications using [namespace or component scopes]({{< ref pubsub-scopes.md >}}).
|
||||
{{% /alert %}}
|
||||
|
||||
## Pre-requisites
|
||||
|
||||
* [Dapr installed on Kubernetes]({{< ref "kubernetes-deploy.md" >}}) in any namespace since Dapr works at the cluster level.
|
||||
|
|
|
@ -18,13 +18,13 @@ A Dapr sidecar can also apply a configuration by using a `--config` flag to the
|
|||
|
||||
#### Kubernetes sidecar
|
||||
|
||||
In Kubernetes mode the Dapr configuration is a Configuration CRD, that is applied to the cluster. For example:
|
||||
In Kubernetes mode the Dapr configuration is a Configuration resource, that is applied to the cluster. For example:
|
||||
|
||||
```bash
|
||||
kubectl apply -f myappconfig.yaml
|
||||
```
|
||||
|
||||
You can use the Dapr CLI to list the Configuration CRDs
|
||||
You can use the Dapr CLI to list the Configuration resources
|
||||
|
||||
```bash
|
||||
dapr configurations -k
|
||||
|
@ -269,11 +269,11 @@ spec:
|
|||
action: allow
|
||||
```
|
||||
|
||||
## Control-plane configuration
|
||||
## Control plane configuration
|
||||
|
||||
There is a single configuration file called `daprsystem` installed with the Dapr control plane system services that applies global settings. This is only set up when Dapr is deployed to Kubernetes.
|
||||
|
||||
### Control-plane configuration settings
|
||||
### Control plane configuration settings
|
||||
|
||||
A Dapr control plane configuration contains the following sections:
|
||||
|
||||
|
|
|
@ -3,12 +3,12 @@ type: docs
|
|||
title: "How-To: Limit the secrets that can be read from secret stores"
|
||||
linkTitle: "Limit secret store access"
|
||||
weight: 3000
|
||||
description: "To limit the secrets to which the Dapr application has access, users can define secret scopes by augmenting existing configuration CRD with restrictive permissions."
|
||||
description: "To limit the secrets to which the Dapr application has access, users can define secret scopes by augmenting existing configuration resource with restrictive permissions."
|
||||
---
|
||||
|
||||
In addition to scoping which applications can access a given component, for example a secret store component (see [Scoping components]({{< ref "component-scopes.md">}})), a named secret store component itself can be scoped to one or more secrets for an application. By defining `allowedSecrets` and/or `deniedSecrets` list, applications can be restricted to access only specific secrets.
|
||||
|
||||
Follow [these instructions]({{< ref "configuration-overview.md" >}}) to define a configuration CRD.
|
||||
Follow [these instructions]({{< ref "configuration-overview.md" >}}) to define a configuration resource.
|
||||
|
||||
## Configure secrets access
|
||||
|
||||
|
|
|
@ -1,56 +1,63 @@
|
|||
---
|
||||
type: docs
|
||||
title: "Setup an Azure Kubernetes Service (AKS) cluster"
|
||||
title: "Set up an Azure Kubernetes Service (AKS) cluster"
|
||||
linkTitle: "Azure Kubernetes Service (AKS)"
|
||||
weight: 2000
|
||||
description: >
|
||||
How to setup Dapr on an Azure Kubernetes Cluster.
|
||||
Learn how to set up an Azure Kubernetes Cluster
|
||||
---
|
||||
|
||||
# Set up an Azure Kubernetes Service cluster
|
||||
This guide walks you through installing an Azure Kubernetes Service (AKS) cluster. If you need more information, refer to [Quickstart: Deploy an AKS cluster using the Azure CLI](https://docs.microsoft.com/azure/aks/kubernetes-walkthrough)
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- [Docker](https://docs.docker.com/install/)
|
||||
- [kubectl](https://kubernetes.io/docs/tasks/tools/)
|
||||
- [Azure CLI](https://docs.microsoft.com/cli/azure/install-azure-cli?view=azure-cli-latest)
|
||||
- Install:
|
||||
- [Docker](https://docs.docker.com/install/)
|
||||
- [kubectl](https://kubernetes.io/docs/tasks/tools/)
|
||||
- [Azure CLI](https://docs.microsoft.com/cli/azure/install-azure-cli)
|
||||
|
||||
## Deploy an Azure Kubernetes Service cluster
|
||||
## Deploy an AKS cluster
|
||||
|
||||
This guide walks you through installing an Azure Kubernetes Service cluster. If you need more information, refer to [Quickstart: Deploy an Azure Kubernetes Service (AKS) cluster using the Azure CLI](https://docs.microsoft.com/azure/aks/kubernetes-walkthrough)
|
||||
1. In the terminal, log into Azure.
|
||||
|
||||
1. Login to Azure
|
||||
```bash
|
||||
az login
|
||||
```
|
||||
|
||||
```bash
|
||||
az login
|
||||
```
|
||||
1. Set your default subscription:
|
||||
|
||||
2. Set the default subscription
|
||||
```bash
|
||||
az account set -s [your_subscription_id]
|
||||
```
|
||||
|
||||
```bash
|
||||
az account set -s [your_subscription_id]
|
||||
```
|
||||
1. Create a resource group.
|
||||
|
||||
3. Create a resource group
|
||||
```bash
|
||||
az group create --name [your_resource_group] --location [region]
|
||||
```
|
||||
|
||||
```bash
|
||||
az group create --name [your_resource_group] --location [region]
|
||||
```
|
||||
1. Create an AKS cluster. To use a specific version of Kubernetes, use `--kubernetes-version` (1.13.x or newer version required).
|
||||
|
||||
4. Create an Azure Kubernetes Service cluster
|
||||
```bash
|
||||
az aks create --resource-group [your_resource_group] --name [your_aks_cluster_name] --node-count 2 --enable-addons http_application_routing --generate-ssh-keys
|
||||
```
|
||||
|
||||
> **Note:** To use a specific version of Kubernetes use `--kubernetes-version` (1.13.x or newer version required)
|
||||
1. Get the access credentials for the AKS cluster.
|
||||
|
||||
```bash
|
||||
az aks create --resource-group [your_resource_group] --name [your_aks_cluster_name] --node-count 2 --enable-addons http_application_routing --generate-ssh-keys
|
||||
```
|
||||
```bash
|
||||
az aks get-credentials -n [your_aks_cluster_name] -g [your_resource_group]
|
||||
```
|
||||
|
||||
5. Get the access credentials for the Azure Kubernetes cluster
|
||||
## AKS Edge Essentials
|
||||
To create a single-machine K8s/K3s Linux-only cluster using Azure Kubernetes Service (AKS) Edge Essentials, follow the [AKS Edge Essentials quickstart guide](https://learn.microsoft.com/en-us/azure/aks/hybrid/aks-edge-quickstart).
|
||||
|
||||
```bash
|
||||
az aks get-credentials -n [your_aks_cluster_name] -g [your_resource_group]
|
||||
```
|
||||
{{% alert title="Note" color="primary" %}}
|
||||
AKS Edge Essentials does not come with a default storage class, which may cause issues when deploying Dapr. To avoid this, make sure to enable the **local-path-provisioner** storage class on the cluster before deploying Dapr. If you need more information, refer to [Local Path Provisioner on AKS EE](https://learn.microsoft.com/azure/aks/hybrid/aks-edge-howto-use-storage-local-path).
|
||||
{{% /alert %}}
|
||||
|
||||
## Next steps
|
||||
## Related links
|
||||
|
||||
{{< button text="Install Dapr using the AKS Dapr extension >>" page="azure-kubernetes-service-extension" >}}
|
||||
- Learn more about [the Dapr extension for AKS]({{< ref azure-kubernetes-service-extension >}})
|
||||
- [Install the Dapr extension for AKS](https://learn.microsoft.com/azure/aks/dapr)
|
||||
- [Configure the Dapr extension for AKS](https://learn.microsoft.com/azure/aks/dapr-settings)
|
||||
- [Deploy and run workflows with the Dapr extension for AKS](https://learn.microsoft.com/azure/aks/dapr-workflow)
|
||||
|
|
|
@ -1,55 +1,86 @@
|
|||
---
|
||||
type: docs
|
||||
title: "Setup a Google Kubernetes Engine (GKE) cluster"
|
||||
title: "Set up a Google Kubernetes Engine (GKE) cluster"
|
||||
linkTitle: "Google Kubernetes Engine (GKE)"
|
||||
weight: 3000
|
||||
description: "Setup a Google Kubernetes Engine cluster"
|
||||
description: "Set up a Google Kubernetes Engine cluster"
|
||||
---
|
||||
|
||||
### Prerequisites
|
||||
|
||||
- [kubectl](https://kubernetes.io/docs/tasks/tools/)
|
||||
- [Google Cloud SDK](https://cloud.google.com/sdk)
|
||||
- Install:
|
||||
- [kubectl](https://kubernetes.io/docs/tasks/tools/)
|
||||
- [Google Cloud SDK](https://cloud.google.com/sdk)
|
||||
|
||||
## Create a new cluster
|
||||
|
||||
Create a GKE cluster by running the following:
|
||||
|
||||
```bash
|
||||
$ gcloud services enable container.googleapis.com && \
|
||||
gcloud container clusters create $CLUSTER_NAME \
|
||||
--zone $ZONE \
|
||||
--project $PROJECT_ID
|
||||
```
|
||||
For more options refer to the [Google Cloud SDK docs](https://cloud.google.com/sdk/gcloud/reference/container/clusters/create), or instead create a cluster through the [Cloud Console](https://console.cloud.google.com/kubernetes) for a more interactive experience.
|
||||
For more options:
|
||||
- Refer to the [Google Cloud SDK docs](https://cloud.google.com/sdk/gcloud/reference/container/clusters/create).
|
||||
- Create a cluster through the [Cloud Console](https://console.cloud.google.com/kubernetes) for a more interactive experience.
|
||||
|
||||
{{% alert title="For private GKE clusters" color="warning" %}}
|
||||
Sidecar injection will not work for private clusters without extra steps. An automatically created firewall rule for master access does not open port 4000. This is needed for Dapr sidecar injection.
|
||||
## Sidecar injection for private GKE clusters
|
||||
|
||||
_**Sidecar injection for private clusters requires extra steps.**_
|
||||
|
||||
In private GKE clusters, an automatically created firewall rule for master access doesn't open port 4000, which Dapr needs for sidecar injection.
|
||||
|
||||
Review the relevant firewall rule:
|
||||
|
||||
To review the relevant firewall rule:
|
||||
```bash
|
||||
$ gcloud compute firewall-rules list --filter="name~gke-${CLUSTER_NAME}-[0-9a-z]*-master"
|
||||
```
|
||||
|
||||
To replace the existing rule and allow kubernetes master access to port 4000:
|
||||
Replace the existing rule and allow Kubernetes master access to port 4000:
|
||||
|
||||
```bash
|
||||
$ gcloud compute firewall-rules update <firewall-rule-name> --allow tcp:10250,tcp:443,tcp:4000
|
||||
```
|
||||
{{% /alert %}}
|
||||
|
||||
## Retrieve your credentials for `kubectl`
|
||||
|
||||
Run the following command to retrieve your credentials:
|
||||
|
||||
```bash
|
||||
$ gcloud container clusters get-credentials $CLUSTER_NAME \
|
||||
--zone $ZONE \
|
||||
--project $PROJECT_ID
|
||||
```
|
||||
|
||||
## (optional) Install Helm v3
|
||||
## Install Helm v3 (optional)
|
||||
|
||||
1. [Install Helm v3 client](https://helm.sh/docs/intro/install/)
|
||||
If you are using Helm, install the [Helm v3 client](https://helm.sh/docs/intro/install/).
|
||||
|
||||
> **Note:** The latest Dapr helm chart no longer supports Helm v2. Please migrate from helm v2 to helm v3 by following [this guide](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/).
|
||||
{{% alert title="Important" color="warning" %}}
|
||||
The latest Dapr Helm chart no longer supports Helm v2. [Migrate from Helm v2 to Helm v3](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/).
|
||||
{{% /alert %}}
|
||||
|
||||
2. In case you need permissions the kubernetes dashboard (i.e. configmaps is forbidden: User "system:serviceaccount:kube-system:kubernetes-dashboard" cannot list configmaps in the namespace "default", etc.) execute this command
|
||||
## Troubleshooting
|
||||
|
||||
### Kubernetes dashboard permissions
|
||||
|
||||
Let's say you receive an error message similar to the following:
|
||||
|
||||
```
|
||||
configmaps is forbidden: User "system:serviceaccount:kube-system:kubernetes-dashboard" cannot list configmaps in the namespace "default"
|
||||
```
|
||||
|
||||
Execute this command:
|
||||
|
||||
```bash
|
||||
kubectl create clusterrolebinding kubernetes-dashboard -n kube-system --clusterrole=cluster-admin --serviceaccount=kube-system:kubernetes-dashboard
|
||||
```
|
||||
|
||||
## Related links
|
||||
- [Learn more about GKE clusters](https://cloud.google.com/kubernetes-engine/docs)
|
||||
- [Try out a Dapr quickstart]({{< ref quickstarts.md >}})
|
||||
- Learn how to [deploy Dapr on your cluster]({{< ref kubernetes-deploy.md >}})
|
||||
- [Upgrade Dapr on Kubernetes]({{< ref kubernetes-upgrade.md >}})
|
||||
- [Kubernetes production guidelines]({{< ref kubernetes-production.md >}})
|
|
@ -4,108 +4,117 @@ title: "Set up a KiND cluster"
|
|||
linkTitle: "KiND"
|
||||
weight: 1100
|
||||
description: >
|
||||
How to set up Dapr on a KiND cluster.
|
||||
How to set up a KiND cluster
|
||||
---
|
||||
|
||||
# Set up a KiND cluster
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- [Docker](https://docs.docker.com/install/)
|
||||
- [kubectl](https://kubernetes.io/docs/tasks/tools/)
|
||||
|
||||
> Note: For Windows, enable Virtualization in BIOS and [install Hyper-V](https://docs.microsoft.com/virtualization/hyper-v-on-windows/quick-start/enable-hyper-v)
|
||||
- Install:
|
||||
- [Docker](https://docs.docker.com/install/)
|
||||
- [kubectl](https://kubernetes.io/docs/tasks/tools/)
|
||||
- For Windows:
|
||||
- Enable Virtualization in BIOS
|
||||
- [Install Hyper-V](https://docs.microsoft.com/virtualization/hyper-v-on-windows/quick-start/enable-hyper-v)
|
||||
|
||||
## Install and configure KiND
|
||||
|
||||
Make sure you follow one of the [Installation](https://kind.sigs.k8s.io/docs/user/quick-start) options for KiND.
|
||||
[Refer to the KiND documentation to install.](https://kind.sigs.k8s.io/docs/user/quick-start)
|
||||
|
||||
In case you are using Docker Desktop, double-check that you have performed the recommended [settings](https://kind.sigs.k8s.io/docs/user/quick-start#settings-for-docker-desktop) (4 CPUs and 8 GiB of RAM available to Docker Engine).
|
||||
If you are using Docker Desktop, verify that you have [the recommended settings](https://kind.sigs.k8s.io/docs/user/quick-start#settings-for-docker-desktop).
|
||||
|
||||
## Configure and create the KiND cluster
|
||||
|
||||
1. Create a file named `kind-cluster-config.yaml`, and paste the following:
|
||||
```yaml
|
||||
kind: Cluster
|
||||
apiVersion: kind.x-k8s.io/v1alpha4
|
||||
nodes:
|
||||
- role: control-plane
|
||||
kubeadmConfigPatches:
|
||||
- |
|
||||
kind: InitConfiguration
|
||||
nodeRegistration:
|
||||
kubeletExtraArgs:
|
||||
node-labels: "ingress-ready=true"
|
||||
extraPortMappings:
|
||||
- containerPort: 80
|
||||
hostPort: 8081
|
||||
protocol: TCP
|
||||
- containerPort: 443
|
||||
hostPort: 8443
|
||||
protocol: TCP
|
||||
- role: worker
|
||||
- role: worker
|
||||
```
|
||||
|
||||
This is going to request KiND to spin up a kubernetes cluster comprised of a control plane and two worker nodes. It also allows for future setup of ingresses and exposes container ports to the host machine.
|
||||
```yaml
|
||||
kind: Cluster
|
||||
apiVersion: kind.x-k8s.io/v1alpha4
|
||||
nodes:
|
||||
- role: control-plane
|
||||
kubeadmConfigPatches:
|
||||
- |
|
||||
kind: InitConfiguration
|
||||
nodeRegistration:
|
||||
kubeletExtraArgs:
|
||||
node-labels: "ingress-ready=true"
|
||||
extraPortMappings:
|
||||
- containerPort: 80
|
||||
hostPort: 8081
|
||||
protocol: TCP
|
||||
- containerPort: 443
|
||||
hostPort: 8443
|
||||
protocol: TCP
|
||||
- role: worker
|
||||
- role: worker
|
||||
```
|
||||
|
||||
2. Run the `kind create cluster` providing the cluster configuration file:
|
||||
This cluster configuration:
|
||||
- Requests KiND to spin up a Kubernetes cluster comprised of a control plane and two worker nodes.
|
||||
- Allows for future setup of ingresses.
|
||||
- Exposes container ports to the host machine.
|
||||
|
||||
```bash
|
||||
kind create cluster --config kind-cluster-config.yaml
|
||||
```
|
||||
1. Run the `kind create cluster` command, providing the cluster configuration file:
|
||||
|
||||
Wait until the cluster is created, the output should look like this:
|
||||
```bash
|
||||
kind create cluster --config kind-cluster-config.yaml
|
||||
```
|
||||
|
||||
```md
|
||||
Creating cluster "kind" ...
|
||||
✓ Ensuring node image (kindest/node:v1.21.1) 🖼
|
||||
✓ Preparing nodes 📦 📦 📦
|
||||
✓ Writing configuration 📜
|
||||
✓ Starting control-plane 🕹️
|
||||
✓ Installing CNI 🔌
|
||||
✓ Installing StorageClass 💾
|
||||
✓ Joining worker nodes 🚜
|
||||
Set kubectl context to "kind-kind"
|
||||
You can now use your cluster with:
|
||||
**Expected output**
|
||||
|
||||
kubectl cluster-info --context kind-kind
|
||||
```md
|
||||
Creating cluster "kind" ...
|
||||
✓ Ensuring node image (kindest/node:v1.21.1) 🖼
|
||||
✓ Preparing nodes 📦 📦 📦
|
||||
✓ Writing configuration 📜
|
||||
✓ Starting control-plane 🕹️
|
||||
✓ Installing CNI 🔌
|
||||
✓ Installing StorageClass 💾
|
||||
✓ Joining worker nodes 🚜
|
||||
Set kubectl context to "kind-kind"
|
||||
You can now use your cluster with:
|
||||
|
||||
kubectl cluster-info --context kind-kind
|
||||
|
||||
Thanks for using kind! 😊
|
||||
```
|
||||
|
||||
Thanks for using kind! 😊
|
||||
```
|
||||
## Initialize and run Dapr
|
||||
|
||||
## Dapr
|
||||
1. Initialize Dapr in Kubernetes.
|
||||
|
||||
1. Initialize Dapr:
|
||||
```bash
|
||||
dapr init --kubernetes
|
||||
```
|
||||
```bash
|
||||
dapr init --kubernetes
|
||||
```
|
||||
|
||||
Once Dapr finishes initializing its core components are ready to be used on the cluster.
|
||||
Once Dapr finishes initializing, you can use its core components on the cluster.
|
||||
|
||||
To verify the status of these components run:
|
||||
```bash
|
||||
dapr status -k
|
||||
```
|
||||
the output should look like this:
|
||||
1. Verify the status of the Dapr components:
|
||||
|
||||
```md
|
||||
NAME NAMESPACE HEALTHY STATUS REPLICAS VERSION AGE CREATED
|
||||
dapr-sentry dapr-system True Running 1 1.5.1 53s 2021-12-10 09:27.17
|
||||
dapr-operator dapr-system True Running 1 1.5.1 53s 2021-12-10 09:27.17
|
||||
dapr-sidecar-injector dapr-system True Running 1 1.5.1 53s 2021-12-10 09:27.17
|
||||
dapr-dashboard dapr-system True Running 1 0.9.0 53s 2021-12-10 09:27.17
|
||||
dapr-placement-server dapr-system True Running 1 1.5.1 52s 2021-12-10 09:27.18
|
||||
```
|
||||
```bash
|
||||
dapr status -k
|
||||
```
|
||||
|
||||
2. Forward a port to [Dapr dashboard](https://docs.dapr.io/reference/cli/dapr-dashboard/):
|
||||
**Expected output**
|
||||
|
||||
```bash
|
||||
dapr dashboard -k -p 9999
|
||||
```
|
||||
```md
|
||||
NAME NAMESPACE HEALTHY STATUS REPLICAS VERSION AGE CREATED
|
||||
dapr-sentry dapr-system True Running 1 1.5.1 53s 2021-12-10 09:27.17
|
||||
dapr-operator dapr-system True Running 1 1.5.1 53s 2021-12-10 09:27.17
|
||||
dapr-sidecar-injector dapr-system True Running 1 1.5.1 53s 2021-12-10 09:27.17
|
||||
dapr-dashboard dapr-system True Running 1 0.9.0 53s 2021-12-10 09:27.17
|
||||
dapr-placement-server dapr-system True Running 1 1.5.1 52s 2021-12-10 09:27.18
|
||||
```
|
||||
|
||||
So that you can validate that the setup finished successfully by navigating to `http://localhost:9999`.
|
||||
1. Forward a port to [Dapr dashboard](https://docs.dapr.io/reference/cli/dapr-dashboard/):
|
||||
|
||||
## Next steps
|
||||
```bash
|
||||
dapr dashboard -k -p 9999
|
||||
```
|
||||
|
||||
1. Navigate to `http://localhost:9999` to validate a successful setup.
|
||||
|
||||
## Related links
|
||||
- [Try out a Dapr quickstart]({{< ref quickstarts.md >}})
|
||||
|
||||
- Learn how to [deploy Dapr on your cluster]({{< ref kubernetes-deploy.md >}})
|
||||
- [Upgrade Dapr on Kubernetes]({{< ref kubernetes-upgrade.md >}})
|
||||
- [Kubernetes production guidelines]({{< ref kubernetes-production.md >}})
|
|
@ -1,60 +1,63 @@
|
|||
---
|
||||
type: docs
|
||||
title: "Setup a Minikube cluster"
|
||||
title: "Set up a Minikube cluster"
|
||||
linkTitle: "Minikube"
|
||||
weight: 1000
|
||||
description: >
|
||||
How to setup Dapr on a Minikube cluster.
|
||||
How to setup a Minikube cluster
|
||||
---
|
||||
|
||||
# Set up a Minikube cluster
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- [Docker](https://docs.docker.com/install/)
|
||||
- [kubectl](https://kubernetes.io/docs/tasks/tools/)
|
||||
- [Minikube](https://minikube.sigs.k8s.io/docs/start/)
|
||||
- Install:
|
||||
- [Docker](https://docs.docker.com/install/)
|
||||
- [kubectl](https://kubernetes.io/docs/tasks/tools/)
|
||||
- [Minikube](https://minikube.sigs.k8s.io/docs/start/)
|
||||
- For Windows:
|
||||
- Enable Virtualization in BIOS
|
||||
- [Install Hyper-V](https://docs.microsoft.com/virtualization/hyper-v-on-windows/quick-start/enable-hyper-v)
|
||||
|
||||
> Note: For Windows, enable Virtualization in BIOS and [install Hyper-V](https://docs.microsoft.com/virtualization/hyper-v-on-windows/quick-start/enable-hyper-v)
|
||||
{{% alert title="Note" color="primary" %}}
|
||||
See [the official Minikube documentation on drivers](https://minikube.sigs.k8s.io/docs/reference/drivers/) for details on supported drivers and how to install plugins.
|
||||
{{% /alert %}}
|
||||
|
||||
## Start the Minikube cluster
|
||||
|
||||
1. (optional) Set the default VM driver
|
||||
1. If applicable for your project, set the default VM.
|
||||
|
||||
```bash
|
||||
minikube config set vm-driver [driver_name]
|
||||
```
|
||||
```bash
|
||||
minikube config set vm-driver [driver_name]
|
||||
```
|
||||
|
||||
> Note: See [DRIVERS](https://minikube.sigs.k8s.io/docs/reference/drivers/) for details on supported drivers and how to install plugins.
|
||||
1. Start the cluster. If necessary, specify version 1.13.x or newer of Kubernetes with `--kubernetes-version`
|
||||
|
||||
2. Start the cluster
|
||||
Use 1.13.x or newer version of Kubernetes with `--kubernetes-version`
|
||||
```bash
|
||||
minikube start --cpus=4 --memory=4096
|
||||
```
|
||||
|
||||
```bash
|
||||
minikube start --cpus=4 --memory=4096
|
||||
```
|
||||
1. Enable the Minikube dashboard and ingress add-ons.
|
||||
|
||||
3. Enable dashboard and ingress addons
|
||||
```bash
|
||||
# Enable dashboard
|
||||
minikube addons enable dashboard
|
||||
|
||||
# Enable ingress
|
||||
minikube addons enable ingress
|
||||
```
|
||||
|
||||
```bash
|
||||
# Enable dashboard
|
||||
minikube addons enable dashboard
|
||||
## Install Helm v3 (optional)
|
||||
|
||||
# Enable ingress
|
||||
minikube addons enable ingress
|
||||
```
|
||||
If you are using Helm, install the [Helm v3 client](https://helm.sh/docs/intro/install/).
|
||||
|
||||
## (optional) Install Helm v3
|
||||
{{% alert title="Important" color="warning" %}}
|
||||
The latest Dapr Helm chart no longer supports Helm v2. [Migrate from Helm v2 to Helm v3](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/).
|
||||
{{% /alert %}}
|
||||
|
||||
1. [Install Helm v3 client](https://helm.sh/docs/intro/install/)
|
||||
## Troubleshooting
|
||||
|
||||
> **Note:** The latest Dapr helm chart no longer supports Helm v2. Please migrate from helm v2 to helm v3 by following [this guide](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/).
|
||||
The external IP address of load balancer is not shown from `kubectl get svc`.
|
||||
|
||||
### Troubleshooting
|
||||
|
||||
1. The external IP address of load balancer is not shown from `kubectl get svc`
|
||||
|
||||
In Minikube, EXTERNAL-IP in `kubectl get svc` shows `<pending>` state for your service. In this case, you can run `minikube service [service_name]` to open your service without external IP address.
|
||||
In Minikube, `EXTERNAL-IP` in `kubectl get svc` shows `<pending>` state for your service. In this case, you can run `minikube service [service_name]` to open your service without external IP address.
|
||||
|
||||
```bash
|
||||
$ kubectl get svc
|
||||
|
@ -72,3 +75,9 @@ $ minikube service calculator-front-end
|
|||
|-----------|----------------------|-------------|---------------------------|
|
||||
🎉 Opening kubernetes service default/calculator-front-end in default browser...
|
||||
```
|
||||
|
||||
## Related links
|
||||
- [Try out a Dapr quickstart]({{< ref quickstarts.md >}})
|
||||
- Learn how to [deploy Dapr on your cluster]({{< ref kubernetes-deploy.md >}})
|
||||
- [Upgrade Dapr on Kubernetes]({{< ref kubernetes-upgrade.md >}})
|
||||
- [Kubernetes production guidelines]({{< ref kubernetes-production.md >}})
|
|
@ -8,83 +8,93 @@ aliases:
|
|||
- /getting-started/install-dapr-kubernetes/
|
||||
---
|
||||
|
||||
When setting up Kubernetes you can use either the Dapr CLI or Helm.
|
||||
|
||||
For more information on what is deployed to your Kubernetes cluster read the [Kubernetes overview]({{< ref kubernetes-overview.md >}})
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Install [Dapr CLI]({{< ref install-dapr-cli.md >}})
|
||||
- Install [kubectl](https://kubernetes.io/docs/tasks/tools/)
|
||||
- Kubernetes cluster (see below if needed)
|
||||
|
||||
### Create cluster
|
||||
|
||||
You can install Dapr on any Kubernetes cluster. Here are some helpful links:
|
||||
|
||||
- [Setup KiNd Cluster]({{< ref setup-kind.md >}})
|
||||
- [Setup Minikube Cluster]({{< ref setup-minikube.md >}})
|
||||
- [Setup Azure Kubernetes Service Cluster]({{< ref setup-aks.md >}})
|
||||
- [Setup Google Cloud Kubernetes Engine](https://docs.dapr.io/operations/hosting/kubernetes/cluster/setup-gke/)
|
||||
- [Setup Amazon Elastic Kubernetes Service](https://docs.aws.amazon.com/eks/latest/userguide/getting-started.html)
|
||||
When [setting up Dapr on Kubernetes]({{< ref kubernetes-overview.md >}}), you can use either the Dapr CLI or Helm.
|
||||
|
||||
{{% alert title="Hybrid clusters" color="primary" %}}
|
||||
Both the Dapr CLI and the Dapr Helm chart automatically deploy with affinity for nodes with the label `kubernetes.io/os=linux`. You can deploy Dapr to Windows nodes if your application requires it. For more information see [Deploying to a hybrid Linux/Windows Kubernetes cluster]({{<ref kubernetes-hybrid-clusters>}}).
|
||||
Both the Dapr CLI and the Dapr Helm chart automatically deploy with affinity for nodes with the label `kubernetes.io/os=linux`. You can deploy Dapr to Windows nodes if your application requires it. For more information, see [Deploying to a hybrid Linux/Windows Kubernetes cluster]({{< ref kubernetes-hybrid-clusters >}}).
|
||||
{{% /alert %}}
|
||||
|
||||
|
||||
{{< tabs "Dapr CLI" "Helm" >}}
|
||||
<!-- Dapr CLI -->
|
||||
{{% codetab %}}
|
||||
## Install with Dapr CLI
|
||||
|
||||
You can install Dapr to a Kubernetes cluster using the [Dapr CLI]({{< ref install-dapr-cli.md >}}).
|
||||
You can install Dapr on a Kubernetes cluster using the [Dapr CLI]({{< ref install-dapr-cli.md >}}).
|
||||
|
||||
### Install Dapr (from an official Dapr Helm chart)
|
||||
### Prerequisites
|
||||
|
||||
- Install:
|
||||
- [Dapr CLI]({{< ref install-dapr-cli.md >}})
|
||||
- [kubectl](https://kubernetes.io/docs/tasks/tools/)
|
||||
- Create a Kubernetes cluster with Dapr. Here are some helpful links:
|
||||
- [Set up KiNd Cluster]({{< ref setup-kind.md >}})
|
||||
- [Set up Minikube Cluster]({{< ref setup-minikube.md >}})
|
||||
- [Set up Azure Kubernetes Service Cluster]({{< ref setup-aks.md >}})
|
||||
- [Set up GKE cluster]({{< ref setup-gke.md >}})
|
||||
- [Set up Amazon Elastic Kubernetes Service](https://docs.aws.amazon.com/eks/latest/userguide/getting-started.html)
|
||||
|
||||
|
||||
### Installation options
|
||||
|
||||
You can install Dapr from an official Helm chart or a private chart, using a custom namespace, etc.
|
||||
|
||||
#### Install Dapr from an official Dapr Helm chart
|
||||
|
||||
The `-k` flag initializes Dapr on the Kubernetes cluster in your current context.
|
||||
|
||||
{{% alert title="Ensure correct cluster is set" color="warning" %}}
|
||||
Make sure the correct "target" cluster is set. Check `kubectl context (kubectl config get-contexts)` to verify. You can set a different context using `kubectl config use-context <CONTEXT>`.
|
||||
{{% /alert %}}
|
||||
1. Verify the correct "target" cluster is set by checking `kubectl context (kubectl config get-contexts)`.
|
||||
- You can set a different context using `kubectl config use-context <CONTEXT>`.
|
||||
|
||||
Run the following command on your local machine to init Dapr on your cluster:
|
||||
1. Initialize Dapr on your cluster with the following command:
|
||||
|
||||
```bash
|
||||
dapr init -k
|
||||
```
|
||||
```bash
|
||||
dapr init -k
|
||||
```
|
||||
|
||||
```bash
|
||||
⌛ Making the jump to hyperspace...
|
||||
**Expected output**
|
||||
|
||||
```bash
|
||||
⌛ Making the jump to hyperspace...
|
||||
|
||||
✅ Deploying the Dapr control plane to your cluster...
|
||||
✅ Success! Dapr has been installed to namespace dapr-system. To verify, run "dapr status -k" in your terminal. To get started, go here: https://aka.ms/dapr-getting-started
|
||||
```
|
||||
|
||||
1. Run the dashboard:
|
||||
|
||||
✅ Deploying the Dapr control plane to your cluster...
|
||||
✅ Success! Dapr has been installed to namespace dapr-system. To verify, run "dapr status -k" in your terminal. To get started, go here: https://aka.ms/dapr-getting-started
|
||||
```
|
||||
```bash
|
||||
dapr dashboard -k
|
||||
```
|
||||
|
||||
To run the dashboard, run:
|
||||
If you installed Dapr in a **non-default namespace**, run:
|
||||
|
||||
```bash
|
||||
dapr dashboard -k -n <your-namespace>
|
||||
```
|
||||
|
||||
```bash
|
||||
dapr dashboard -k
|
||||
```
|
||||
#### Install Dapr from a private Dapr Helm chart
|
||||
|
||||
If you installed Dapr in a non-default namespace, run:
|
||||
Installing Dapr from a private Helm chart can be helpful for when you:
|
||||
- Need more granular control of the Dapr Helm chart
|
||||
- Have a custom Dapr deployment
|
||||
- Pull Helm charts from trusted registries that are managed and maintained by your organization
|
||||
|
||||
```bash
|
||||
dapr dashboard -k -n <your-namespace>
|
||||
```
|
||||
|
||||
### Install Dapr (a private Dapr Helm chart)
|
||||
There are some scenarios where it's necessary to install Dapr from a private Helm chart, such as:
|
||||
- needing more granular control of the Dapr Helm chart
|
||||
- having a custom Dapr deployment
|
||||
- pulling Helm charts from trusted registries that are managed and maintained by your organization
|
||||
Set the following parameters to allow `dapr init -k` to install Dapr images from the configured Helm repository.
|
||||
|
||||
```
|
||||
export DAPR_HELM_REPO_URL="https://helm.custom-domain.com/dapr/dapr"
|
||||
export DAPR_HELM_REPO_USERNAME="username_xxx"
|
||||
export DAPR_HELM_REPO_PASSWORD="passwd_xxx"
|
||||
```
|
||||
#### Install in high availability mode
|
||||
|
||||
Setting the above parameters will allow `dapr init -k` to install Dapr images from the configured Helm repository.
|
||||
You can run Dapr with three replicas of each control plane pod in the `dapr-system` namespace for [production scenarios]({{< ref kubernetes-production.md >}}).
|
||||
|
||||
### Install in custom namespace
|
||||
```bash
|
||||
dapr init -k --enable-ha=true
|
||||
```
|
||||
|
||||
#### Install in custom namespace
|
||||
|
||||
The default namespace when initializing Dapr is `dapr-system`. You can override this with the `-n` flag.
|
||||
|
||||
|
@ -92,15 +102,7 @@ The default namespace when initializing Dapr is `dapr-system`. You can override
|
|||
dapr init -k -n mynamespace
|
||||
```
|
||||
|
||||
### Install in highly available mode
|
||||
|
||||
You can run Dapr with 3 replicas of each control plane pod in the dapr-system namespace for [production scenarios]({{< ref kubernetes-production.md >}}).
|
||||
|
||||
```bash
|
||||
dapr init -k --enable-ha=true
|
||||
```
|
||||
|
||||
### Disable mTLS
|
||||
#### Disable mTLS
|
||||
|
||||
Dapr is initialized by default with [mTLS]({{< ref "security-concept.md#sidecar-to-sidecar-communication" >}}). You can disable it with:
|
||||
|
||||
|
@ -108,11 +110,9 @@ Dapr is initialized by default with [mTLS]({{< ref "security-concept.md#sidecar-
|
|||
dapr init -k --enable-mtls=false
|
||||
```
|
||||
|
||||
### Wait for the installation to complete
|
||||
#### Wait for the installation to complete
|
||||
|
||||
You can wait for the installation to complete its deployment with the `--wait` flag.
|
||||
|
||||
The default timeout is 300s (5 min), but can be customized with the `--timeout` flag.
|
||||
You can wait for the installation to complete its deployment with the `--wait` flag. The default timeout is 300s (5 min), but can be customized with the `--timeout` flag.
|
||||
|
||||
```bash
|
||||
dapr init -k --wait --timeout 600
|
||||
|
@ -126,18 +126,33 @@ Run the following command on your local machine to uninstall Dapr on your cluste
|
|||
dapr uninstall -k
|
||||
```
|
||||
|
||||
## Install with Helm (advanced)
|
||||
{{% /codetab %}}
|
||||
|
||||
You can install Dapr on Kubernetes using a Helm 3 chart.
|
||||
<!-- Helm -->
|
||||
{{% codetab %}}
|
||||
|
||||
## Install with Helm
|
||||
|
||||
You can install Dapr on Kubernetes using a Helm v3 chart.
|
||||
|
||||
❗**Important:** The latest Dapr Helm chart no longer supports Helm v2. [Migrate from Helm v2 to Helm v3](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/).
|
||||
|
||||
### Prerequisites
|
||||
|
||||
- Install:
|
||||
- [Helm v3](https://helm.sh/docs/intro/install/)
|
||||
- [kubectl](https://kubernetes.io/docs/tasks/tools/)
|
||||
- Create a Kubernetes cluster with Dapr. Here are some helpful links:
|
||||
- [Set up KiNd Cluster]({{< ref setup-kind.md >}})
|
||||
- [Set up Minikube Cluster]({{< ref setup-minikube.md >}})
|
||||
- [Set up Azure Kubernetes Service Cluster]({{< ref setup-aks.md >}})
|
||||
- [Set up GKE cluster]({{< ref setup-gke.md >}})
|
||||
- [Set up Amazon Elastic Kubernetes Service](https://docs.aws.amazon.com/eks/latest/userguide/getting-started.html)
|
||||
|
||||
{{% alert title="Ensure you are on Helm v3" color="primary" %}}
|
||||
The latest Dapr Helm chart no longer supports Helm v2. Please migrate from Helm v2 to Helm v3 by following [this guide](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/).
|
||||
{{% /alert %}}
|
||||
|
||||
### Add and install Dapr Helm chart
|
||||
|
||||
1. Make sure [Helm 3](https://github.com/helm/helm/releases) is installed on your machine
|
||||
1. Add Helm repo and update
|
||||
1. Add the Helm repo and update:
|
||||
|
||||
```bash
|
||||
// Add the official Dapr Helm chart.
|
||||
|
@ -160,7 +175,7 @@ The latest Dapr helm chart no longer supports Helm v2. Please migrate from Helm
|
|||
--wait
|
||||
```
|
||||
|
||||
To install in high availability mode:
|
||||
To install in **high availability** mode:
|
||||
|
||||
```bash
|
||||
helm upgrade --install dapr dapr/dapr \
|
||||
|
@ -173,18 +188,7 @@ The latest Dapr helm chart no longer supports Helm v2. Please migrate from Helm
|
|||
|
||||
See [Guidelines for production ready deployments on Kubernetes]({{< ref kubernetes-production.md >}}) for more information on installing and upgrading Dapr using Helm.
|
||||
|
||||
### Uninstall Dapr on Kubernetes
|
||||
|
||||
```bash
|
||||
helm uninstall dapr --namespace dapr-system
|
||||
```
|
||||
|
||||
### More information
|
||||
|
||||
- Read [this guide]({{< ref kubernetes-production.md >}}) for recommended Helm chart values for production setups
|
||||
- See [this page](https://github.com/dapr/dapr/blob/master/charts/dapr/README.md) for details on Dapr Helm charts.
|
||||
|
||||
## Installing the Dapr dashboard as part of the control plane
|
||||
### (optional) Install the Dapr dashboard as part of the control plane
|
||||
|
||||
If you want to install the Dapr dashboard, use this Helm chart with the additional settings of your choice:
|
||||
|
||||
|
@ -200,9 +204,9 @@ kubectl create namespace dapr-system
|
|||
helm install dapr dapr/dapr-dashboard --namespace dapr-system
|
||||
```
|
||||
|
||||
## Verify installation
|
||||
### Verify installation
|
||||
|
||||
Once the installation is complete, verify that the dapr-operator, dapr-placement, dapr-sidecar-injector and dapr-sentry pods are running in the `dapr-system` namespace:
|
||||
Once the installation is complete, verify that the `dapr-operator`, `dapr-placement`, `dapr-sidecar-injector`, and `dapr-sentry` pods are running in the `dapr-system` namespace:
|
||||
|
||||
```bash
|
||||
kubectl get pods --namespace dapr-system
|
||||
|
@ -217,14 +221,44 @@ dapr-sidecar-injector-8555576b6f-29cqm 1/1 Running 0 40s
|
|||
dapr-sentry-9435776c7f-8f7yd 1/1 Running 0 40s
|
||||
```
|
||||
|
||||
## Using Mariner-based images
|
||||
### Uninstall Dapr on Kubernetes
|
||||
|
||||
When deploying Dapr, whether on Kubernetes or in Docker self-hosted, the default container images that are pulled are based on [*distroless*](https://github.com/GoogleContainerTools/distroless).
|
||||
```bash
|
||||
helm uninstall dapr --namespace dapr-system
|
||||
```
|
||||
|
||||
### More information
|
||||
|
||||
- Read [the Kubernetes production guidelines]({{< ref kubernetes-production.md >}}) for recommended Helm chart values for production setups
|
||||
- [More details on Dapr Helm charts](https://github.com/dapr/dapr/blob/master/charts/dapr/README.md)
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{< /tabs >}}
|
||||
|
||||
### Use Mariner-based images
|
||||
|
||||
The default container images pulled on Kubernetes are based on [*distroless*](https://github.com/GoogleContainerTools/distroless).
|
||||
|
||||
Alternatively, you can use Dapr container images based on Mariner 2 (minimal distroless). [Mariner](https://github.com/microsoft/CBL-Mariner/), officially known as CBL-Mariner, is a free and open-source Linux distribution and container base image maintained by Microsoft. For some Dapr users, leveraging container images based on Mariner can help you meet compliance requirements.
|
||||
|
||||
To use Mariner-based images for Dapr, you need to add `-mariner` to your Docker tags. For example, while `ghcr.io/dapr/dapr:latest` is the Docker image based on *distroless*, `ghcr.io/dapr/dapr:latest-mariner` is based on Mariner. Tags pinned to a specific version are also available, such as `{{% dapr-latest-version short="true" %}}-mariner`.
|
||||
|
||||
{{< tabs "Dapr CLI" "Helm" >}}
|
||||
<!-- Dapr CLI -->
|
||||
{{% codetab %}}
|
||||
|
||||
In the Dapr CLI, you can switch to using Mariner-based images with the `--image-variant` flag.
|
||||
|
||||
```sh
|
||||
dapr init --image-variant mariner
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
<!-- Helm -->
|
||||
{{% codetab %}}
|
||||
|
||||
With Kubernetes and Helm, you can use Mariner-based images by setting the `global.tag` option and adding `-mariner`. For example:
|
||||
|
||||
```sh
|
||||
|
@ -236,6 +270,12 @@ helm upgrade --install dapr dapr/dapr \
|
|||
--wait
|
||||
```
|
||||
|
||||
## Next steps
|
||||
{{% /codetab %}}
|
||||
|
||||
{{< /tabs >}}
|
||||
|
||||
## Related links
|
||||
- [Deploy Dapr with Helm parameters and other details]({{< ref "kubernetes-production.md#deploy-dapr-with-helm" >}})
|
||||
- [Upgrade Dapr on Kubernetes]({{< ref kubernetes-upgrade.md >}})
|
||||
- [Kubernetes production guidelines]({{< ref kubernetes-production.md >}})
|
||||
- [Configure state store & pubsub message broker]({{< ref "getting-started/tutorials/configure-state-pubsub.md" >}})
|
||||
|
|
|
@ -6,24 +6,30 @@ weight: 60000
|
|||
description: "How to run Dapr apps on Kubernetes clusters with Windows nodes"
|
||||
---
|
||||
|
||||
Dapr supports running on Kubernetes clusters with Windows nodes. You can run your Dapr microservices exclusively on Windows, exclusively on Linux, or a combination of both. This is helpful to users who may be doing a piecemeal migration of a legacy application into a Dapr Kubernetes cluster.
|
||||
Dapr supports running your microservices on Kubernetes clusters on:
|
||||
- Windows
|
||||
- Linux
|
||||
- A combination of both
|
||||
|
||||
Kubernetes uses a concept called node affinity so that you can denote whether you want your application to be launched on a Linux node or a Windows node. When deploying to a cluster which has both Windows and Linux nodes, you must provide affinity rules for your applications, otherwise the Kubernetes scheduler might launch your application on the wrong type of node.
|
||||
This is especially helpful during a piecemeal migration of a legacy application into a Dapr Kubernetes cluster.
|
||||
|
||||
## Prerequisites
|
||||
Kubernetes uses a concept called **node affinity** to denote whether you want your application to be launched on a Linux node or a Windows node. When deploying to a cluster which has both Windows and Linux nodes, you must provide affinity rules for your applications, otherwise the Kubernetes scheduler might launch your application on the wrong type of node.
|
||||
|
||||
You will need a Kubernetes cluster with Windows nodes. Many Kubernetes providers support the automatic provisioning of Windows enabled Kubernetes clusters.
|
||||
## Prerequisites
|
||||
|
||||
1. Follow your preferred provider's instructions for setting up a cluster with Windows enabled
|
||||
Before you begin, set up a Kubernetes cluster with Windows nodes. Many Kubernetes providers support the automatic provisioning of Windows enabled Kubernetes clusters.
|
||||
|
||||
- [Setting up Windows on Azure AKS](https://docs.microsoft.com/azure/aks/windows-container-cli)
|
||||
- [Setting up Windows on AWS EKS](https://docs.aws.amazon.com/eks/latest/userguide/windows-support.html)
|
||||
- [Setting up Windows on Google Cloud GKE](https://cloud.google.com/kubernetes-engine/docs/how-to/creating-a-cluster-windows)
|
||||
1. Follow your preferred provider's instructions for setting up a cluster with Windows enabled.
|
||||
|
||||
2. Once you have set up the cluster, you should see that it has both Windows and Linux nodes available
|
||||
- [Setting up Windows on Azure AKS](https://docs.microsoft.com/azure/aks/windows-container-cli)
|
||||
- [Setting up Windows on AWS EKS](https://docs.aws.amazon.com/eks/latest/userguide/windows-support.html)
|
||||
- [Setting up Windows on Google Cloud GKE](https://cloud.google.com/kubernetes-engine/docs/how-to/creating-a-cluster-windows)
|
||||
|
||||
1. Once you have set up the cluster, verify that both Windows and Linux nodes are available.
|
||||
|
||||
```bash
|
||||
kubectl get nodes -o wide
|
||||
|
||||
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
|
||||
aks-nodepool1-11819434-vmss000000 Ready agent 6d v1.17.9 10.240.0.4 <none> Ubuntu 16.04.6 LTS 4.15.0-1092-azure docker://3.0.10+azure
|
||||
aks-nodepool1-11819434-vmss000001 Ready agent 6d v1.17.9 10.240.0.35 <none> Ubuntu 16.04.6 LTS 4.15.0-1092-azure docker://3.0.10+azure
|
||||
|
@ -31,29 +37,31 @@ You will need a Kubernetes cluster with Windows nodes. Many Kubernetes providers
|
|||
akswin000000 Ready agent 6d v1.17.9 10.240.0.66 <none> Windows Server 2019 Datacenter 10.0.17763.1339 docker://19.3.5
|
||||
akswin000001 Ready agent 6d v1.17.9 10.240.0.97 <none> Windows Server 2019 Datacenter 10.0.17763.1339 docker://19.3.5
|
||||
```
|
||||
## Installing the Dapr control plane
|
||||
|
||||
If you are installing using the Dapr CLI or via a Helm chart, simply follow the normal deployment procedures:
|
||||
[Installing Dapr on a Kubernetes cluster]({{< ref "install-dapr-selfhost.md#installing-Dapr-on-a-kubernetes-cluster" >}})
|
||||
## Install the Dapr control plane
|
||||
|
||||
If you are installing using the Dapr CLI or via a Helm chart, simply follow the normal deployment procedures: [Installing Dapr on a Kubernetes cluster]({{< ref "install-dapr-selfhost.md#installing-Dapr-on-a-kubernetes-cluster" >}})
|
||||
|
||||
Affinity will be automatically set for `kubernetes.io/os=linux`. This will be sufficient for most users, as Kubernetes requires at least one Linux node pool.
|
||||
|
||||
> **Note:** Dapr control plane containers are built and tested for both Windows and Linux, however, we generally recommend using the Linux control plane containers. They tend to be smaller and have a much larger user base.
|
||||
{{% alert title="Note" color="primary" %}}
|
||||
Dapr control plane containers are built and tested for both Windows and Linux. However, it's recommended to use the Linux control plane containers, which tend to be smaller and have a much larger user base.
|
||||
|
||||
If you understand the above, but want to deploy the Dapr control plane to Windows, you can do so by setting:
|
||||
|
||||
```
|
||||
```sh
|
||||
helm install dapr dapr/dapr --set global.daprControlPlaneOs=windows
|
||||
```
|
||||
{{% /alert %}}
|
||||
|
||||
## Installing Dapr applications
|
||||
## Install Dapr applications
|
||||
|
||||
### Windows applications
|
||||
In order to launch a Dapr application on Windows, you'll first need to create a Docker container with your application installed. For a step by step guide see [Get started: Prep Windows for containers](https://docs.microsoft.com/virtualization/windowscontainers/quick-start/set-up-environment). Once you have a docker container with your application, create a deployment YAML file with node affinity set to kubernetes.io/os: windows.
|
||||
|
||||
1. Create a deployment YAML
|
||||
1. [Follow the Microsoft documentation to create a Docker Windows container with your application installed](https://learn.microsoft.com/virtualization/windowscontainers/quick-start/set-up-environment?tabs=dockerce).
|
||||
|
||||
1. Once you've created a Docker container with your application, create a deployment YAML file with the node affinity set to `kubernetes.io/os: windows`. In the example `deploy_windows.yaml` deployment file below:
|
||||
|
||||
Here is a sample deployment with nodeAffinity set to "windows". Modify as needed for your application.
|
||||
```yaml
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
|
@ -92,9 +100,8 @@ In order to launch a Dapr application on Windows, you'll first need to create a
|
|||
values:
|
||||
- windows
|
||||
```
|
||||
This deployment yaml will be the same as any other dapr application, with an additional spec.template.spec.affinity section as shown above.
|
||||
|
||||
2. Deploy to your Kubernetes cluster
|
||||
|
||||
1. Deploy the YAML file to your Kubernetes cluster.
|
||||
|
||||
```bash
|
||||
kubectl apply -f deploy_windows.yaml
|
||||
|
@ -102,11 +109,10 @@ In order to launch a Dapr application on Windows, you'll first need to create a
|
|||
|
||||
### Linux applications
|
||||
|
||||
If you already have a Dapr application that runs on Linux, you'll still need to add affinity rules as above, but choose Linux affinity instead.
|
||||
If you already have a Dapr application that runs on Linux, you still need to add affinity rules.
|
||||
|
||||
1. Create a deployment YAML
|
||||
1. Create a deployment YAML file with the node affinity set to `kubernetes.io/os: linux`. In the example `deploy_linux.yaml` deployment file below:
|
||||
|
||||
Here is a sample deployment with nodeAffinity set to "linux". Modify as needed for your application.
|
||||
```yaml
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
|
@ -146,13 +152,17 @@ If you already have a Dapr application that runs on Linux, you'll still need to
|
|||
- linux
|
||||
```
|
||||
|
||||
2. Deploy to your Kubernetes cluster
|
||||
1. Deploy the YAML to your Kubernetes cluster.
|
||||
|
||||
```bash
|
||||
kubectl apply -f deploy_linux.yaml
|
||||
```
|
||||
|
||||
## Cleanup
|
||||
That's it!
|
||||
|
||||
## Clean up
|
||||
|
||||
To remove the deployments from this guide, run the following commands:
|
||||
|
||||
```bash
|
||||
kubectl delete -f deploy_linux.yaml
|
||||
|
|
|
@ -7,19 +7,19 @@ description: "Use Dapr API in a Kubernetes Job context"
|
|||
type: docs
|
||||
---
|
||||
|
||||
# Kubernetes Job
|
||||
The Dapr sidecar is designed to be a long running process. In the context of a [Kubernetes Job](https://kubernetes.io/docs/concepts/workloads/controllers/job/) this behavior can block your job completion.
|
||||
|
||||
The Dapr sidecar is designed to be a long-running process. In the context of a [Kubernetes Job](https://kubernetes.io/docs/concepts/workloads/controllers/job/), this behavior can block your job completion.
|
||||
To address this issue the Dapr sidecar has an endpoint to `Shutdown` the sidecar.
|
||||
To address this issue, the Dapr sidecar has an endpoint to `Shutdown` the sidecar.
|
||||
|
||||
When running a basic [Kubernetes Job](https://kubernetes.io/docs/concepts/workloads/controllers/job/) you will need to call the `/shutdown` endpoint for the sidecar to gracefully stop and the job will be considered `Completed`.
|
||||
When running a basic [Kubernetes Job](https://kubernetes.io/docs/concepts/workloads/controllers/job/), you need to call the `/shutdown` endpoint for the sidecar to gracefully stop and the job to be considered `Completed`.
|
||||
|
||||
When a job is finished without calling `Shutdown`, your job will be in a `NotReady` state with only the `daprd` container running endlessly.
|
||||
When a job is finished without calling `Shutdown`, your job is in a `NotReady` state with only the `daprd` container running endlessly.
|
||||
|
||||
Stopping the Dapr sidecar causes its readiness and liveness probes to fail in your container.
|
||||
|
||||
Stopping the Dapr sidecar will cause its readiness and liveness probes to fail in your container because the Dapr sidecar was shut down.
|
||||
To prevent Kubernetes from trying to restart your job, set your job's `restartPolicy` to `Never`.
|
||||
|
||||
Be sure to use the *POST* HTTP verb when calling the shutdown HTTP API.
|
||||
Be sure to use the *POST* HTTP verb when calling the shutdown HTTP API. For example:
|
||||
|
||||
```yaml
|
||||
apiVersion: batch/v1
|
||||
|
@ -40,7 +40,7 @@ spec:
|
|||
restartPolicy: Never
|
||||
```
|
||||
|
||||
You can also call the `Shutdown` from any of the Dapr SDKs
|
||||
You can also call the `Shutdown` from any of the Dapr SDKs. For example, for the Go SDK:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
@ -63,3 +63,8 @@ func main() {
|
|||
// Job
|
||||
}
|
||||
```
|
||||
|
||||
## Related links
|
||||
|
||||
- [Deploy Dapr on Kubernetes]({{< ref kubernetes-deploy.md >}})
|
||||
- [Upgrade Dapr on Kubernetes]({{< ref kubernetes-upgrade.md >}})
|
|
@ -6,23 +6,30 @@ weight: 10000
|
|||
description: "Overview of how to get Dapr running on your Kubernetes cluster"
|
||||
---
|
||||
|
||||
## Dapr on Kubernetes
|
||||
Dapr can be configured to run on any supported versions of Kubernetes. To achieve this, Dapr begins by deploying the following Kubernetes services, which provide first-class integration to make running applications with Dapr easy.
|
||||
|
||||
Dapr can be configured to run on any supported version of Kubernetes. To achieve this, Dapr begins by deploying the `dapr-sidecar-injector`, `dapr-operator`, `dapr-placement`, and `dapr-sentry` Kubernetes services. These provide first-class integration to make running applications with Dapr easy.
|
||||
- **dapr-operator:** Manages [component]({{< ref components >}}) updates and Kubernetes services endpoints for Dapr (state stores, pub/subs, etc.)
|
||||
- **dapr-sidecar-injector:** Injects Dapr into [annotated](#adding-dapr-to-a-kubernetes-deployment) deployment pods and adds the environment variables `DAPR_HTTP_PORT` and `DAPR_GRPC_PORT` to enable user-defined applications to easily communicate with Dapr without hard-coding Dapr port values.
|
||||
- **dapr-placement:** Used for [actors]({{< ref actors >}}) only. Creates mapping tables that map actor instances to pods
|
||||
- **dapr-sentry:** Manages mTLS between services and acts as a certificate authority. For more information read the [security overview]({{< ref "security-concept.md" >}}).
|
||||
| Kubernetes services | Description |
|
||||
| ------------------- | ----------- |
|
||||
| `dapr-operator` | Manages [component]({{< ref components >}}) updates and Kubernetes services endpoints for Dapr (state stores, pub/subs, etc.) |
|
||||
| `dapr-sidecar-injector` | Injects Dapr into [annotated](#adding-dapr-to-a-kubernetes-deployment) deployment pods and adds the environment variables `DAPR_HTTP_PORT` and `DAPR_GRPC_PORT` to enable user-defined applications to easily communicate with Dapr without hard-coding Dapr port values. |
|
||||
| `dapr-placement` | Used for [actors]({{< ref actors >}}) only. Creates mapping tables that map actor instances to pods |
|
||||
| `dapr-sentry` | Manages mTLS between services and acts as a certificate authority. For more information, read the [security overview]({{< ref "security-concept.md" >}}) |
|
||||
|
||||
<img src="/images/overview-kubernetes.png" width=1000>
|
||||
|
||||
## Supported versions
|
||||
Dapr support for Kubernetes is aligned with [Kubernetes Version Skew Policy](https://kubernetes.io/releases/version-skew-policy).
|
||||
|
||||
## Deploying Dapr to a Kubernetes cluster
|
||||
|
||||
Read [this guide]({{< ref kubernetes-deploy.md >}}) to learn how to deploy Dapr to your Kubernetes cluster.
|
||||
Read [Deploy Dapr on a Kubernetes cluster]({{< ref kubernetes-deploy.md >}}) to learn how to deploy Dapr to your Kubernetes cluster.
|
||||
|
||||
## Adding Dapr to a Kubernetes deployment
|
||||
|
||||
Deploying and running a Dapr enabled application into your Kubernetes cluster is as simple as adding a few annotations to the pods schema. To give your service an `id` and `port` known to Dapr, turn on tracing through configuration and launch the Dapr sidecar container, you annotate your Kubernetes pod like this. For more information check [dapr annotations]({{< ref arguments-annotations-overview.md >}})
|
||||
Deploying and running a Dapr-enabled application into your Kubernetes cluster is as simple as adding a few annotations to the pods schema. For example, in the following example, your Kubernetes pod is annotated to:
|
||||
- Give your service an `id` and `port` known to Dapr
|
||||
- Turn on tracing through configuration
|
||||
- Launch the Dapr sidecar container
|
||||
|
||||
```yml
|
||||
annotations:
|
||||
|
@ -32,20 +39,21 @@ Deploying and running a Dapr enabled application into your Kubernetes cluster is
|
|||
dapr.io/config: "tracing"
|
||||
```
|
||||
|
||||
For more information, check [Dapr annotations]({{< ref arguments-annotations-overview.md >}}).
|
||||
|
||||
## Pulling container images from private registries
|
||||
|
||||
Dapr works seamlessly with any user application container image, regardless of its origin. Simply init Dapr and add the [Dapr annotations]({{< ref arguments-annotations-overview.md >}}) to your Kubernetes definition to add the Dapr sidecar.
|
||||
Dapr works seamlessly with any user application container image, regardless of its origin. Simply [initialize Dapr]({{< ref install-dapr-selfhost.md >}}) and add the [Dapr annotations]({{< ref arguments-annotations-overview.md >}}) to your Kubernetes definition to add the Dapr sidecar.
|
||||
|
||||
The Dapr control-plane and sidecar images come from the [daprio Docker Hub](https://hub.docker.com/u/daprio) container registry, which is a public registry.
|
||||
The Dapr control plane and sidecar images come from the [daprio Docker Hub](https://hub.docker.com/u/daprio) container registry, which is a public registry.
|
||||
|
||||
For information about pulling your application images from a private registry, reference the [Kubernetes documentation](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/). If you are using Azure Container Registry with Azure Kubernetes Service, reference the [AKS documentation](https://docs.microsoft.com/azure/aks/cluster-container-registry-integration).
|
||||
For information about:
|
||||
- Pulling your application images from a private registry, reference the [official Kubernetes documentation](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/).
|
||||
- Using Azure Container Registry with Azure Kubernetes Service, reference the [AKS documentation](https://docs.microsoft.com/azure/aks/cluster-container-registry-integration).
|
||||
|
||||
## Quickstart
|
||||
## Tutorials
|
||||
|
||||
You can see some examples [here](https://github.com/dapr/quickstarts/tree/master/tutorials/hello-kubernetes) in the Kubernetes getting started quickstart.
|
||||
|
||||
## Supported versions
|
||||
Dapr support for Kubernetes is aligned with [Kubernetes Version Skew Policy](https://kubernetes.io/releases/version-skew-policy).
|
||||
[Work through the Hello Kubernetes tutorial](https://github.com/dapr/quickstarts/tree/master/tutorials/hello-kubernetes) to learn more about getting started with Dapr on your Kubernetes cluster.
|
||||
|
||||
## Related links
|
||||
|
||||
|
|
|
@ -3,16 +3,14 @@ type: docs
|
|||
title: "Production guidelines on Kubernetes"
|
||||
linkTitle: "Production guidelines"
|
||||
weight: 40000
|
||||
description: "Recommendations and practices for deploying Dapr to a Kubernetes cluster in a production-ready configuration"
|
||||
description: "Best practices for deploying Dapr to a Kubernetes cluster in a production-ready configuration"
|
||||
---
|
||||
|
||||
## Cluster and capacity requirements
|
||||
|
||||
Dapr support for Kubernetes is aligned with [Kubernetes Version Skew Policy](https://kubernetes.io/releases/version-skew-policy/).
|
||||
|
||||
For a production-ready Kubernetes cluster deployment, we recommend you run a cluster of at least 3 worker nodes to support a highly-available control plane installation.
|
||||
|
||||
Use the following resource settings as a starting point. Requirements will vary depending on cluster size, number of pods, and other factors, so you should perform individual testing to find the right values for your environment:
|
||||
Use the following resource settings as a starting point. Requirements vary depending on cluster size, number of pods, and other factors. Perform individual testing to find the right values for your environment.
|
||||
|
||||
| Deployment | CPU | Memory
|
||||
|-------------|-----|-------
|
||||
|
@ -23,7 +21,7 @@ Use the following resource settings as a starting point. Requirements will vary
|
|||
| **Dashboard** | Limit: 200m, Request: 50m | Limit: 200Mi, Request: 20Mi
|
||||
|
||||
{{% alert title="Note" color="primary" %}}
|
||||
For more info, read the [concept article on CPU and Memory resource units and their meaning](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-units-in-kubernetes).
|
||||
For more information, refer to the Kubernetes documentation on [CPU and Memory resource units and their meaning](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-units-in-kubernetes).
|
||||
{{% /alert %}}
|
||||
|
||||
### Helm
|
||||
|
@ -32,29 +30,26 @@ When installing Dapr using Helm, no default limit/request values are set. Each c
|
|||
|
||||
The [Helm chart readme](https://github.com/dapr/dapr/blob/master/charts/dapr/README.md) has detailed information and examples.
|
||||
|
||||
For local/dev installations, you might simply want to skip configuring the `resources` options.
|
||||
For local/dev installations, you might want to skip configuring the `resources` options.
|
||||
|
||||
### Optional components
|
||||
|
||||
The following Dapr control plane deployments are optional:
|
||||
|
||||
- **Placement**: needed to use Dapr Actors
|
||||
- **Sentry**: needed for mTLS for service to service invocation
|
||||
- **Dashboard**: needed to get an operational view of the cluster
|
||||
- **Placement**: For using Dapr Actors
|
||||
- **Sentry**: For mTLS for service-to-service invocation
|
||||
- **Dashboard**: For an operational view of the cluster
|
||||
|
||||
## Sidecar resource settings
|
||||
|
||||
To set the resource assignments for the Dapr sidecar, see the annotations [here]({{< ref "arguments-annotations-overview.md" >}}).
|
||||
The specific annotations related to resource constraints are:
|
||||
[Set the resource assignments for the Dapr sidecar using the supported annotations]({{< ref "arguments-annotations-overview.md" >}}). The specific annotations related to **resource constraints** are:
|
||||
|
||||
- `dapr.io/sidecar-cpu-limit`
|
||||
- `dapr.io/sidecar-memory-limit`
|
||||
- `dapr.io/sidecar-cpu-request`
|
||||
- `dapr.io/sidecar-memory-request`
|
||||
|
||||
If not set, the Dapr sidecar will run without resource settings, which may lead to issues. For a production-ready setup it is strongly recommended to configure these settings.
|
||||
|
||||
For more details on configuring resource in Kubernetes see [Assign Memory Resources to Containers and Pods](https://kubernetes.io/docs/tasks/configure-pod-container/assign-memory-resource/) and [Assign CPU Resources to Containers and Pods](https://kubernetes.io/docs/tasks/configure-pod-container/assign-cpu-resource/).
|
||||
If not set, the Dapr sidecar runs without resource settings, which may lead to issues. For a production-ready setup, it's strongly recommended to configure these settings.
|
||||
|
||||
Example settings for the Dapr sidecar in a production-ready setup:
|
||||
|
||||
|
@ -62,31 +57,56 @@ Example settings for the Dapr sidecar in a production-ready setup:
|
|||
|-----|--------|
|
||||
| Limit: 300m, Request: 100m | Limit: 1000Mi, Request: 250Mi
|
||||
|
||||
The CPU and memory limits above account for Dapr supporting a high number of I/O bound operations. Use a [monitoring tool]({{< ref observability >}}) to get a baseline for the sidecar (and app) containers and tune these settings based on those baselines.
|
||||
|
||||
For more details on configuring resource in Kubernetes, see the following Kubernetes guides:
|
||||
- [Assign Memory Resources to Containers and Pods](https://kubernetes.io/docs/tasks/configure-pod-container/assign-memory-resource/)
|
||||
- [Assign CPU Resources to Containers and Pods](https://kubernetes.io/docs/tasks/configure-pod-container/assign-cpu-resource/)
|
||||
|
||||
{{% alert title="Note" color="primary" %}}
|
||||
Since Dapr is intended to do much of the I/O heavy lifting for your app, it's expected that the resources given to Dapr enable you to drastically reduce the resource allocations for the application.
|
||||
Since Dapr is intended to do much of the I/O heavy lifting for your app, the resources given to Dapr enable you to drastically reduce the resource allocations for the application.
|
||||
{{% /alert %}}
|
||||
|
||||
The CPU and memory limits above account for the fact that Dapr is intended to support a high number of I/O bound operations. It is strongly recommended that you use a monitoring tool to get a baseline for the sidecar (and app) containers and tune these settings based on those baselines.
|
||||
### Setting soft memory limits on Dapr sidecar
|
||||
|
||||
## Highly-available mode
|
||||
Set soft memory limits on the Dapr sidecar when you've set up memory limits. With soft memory limits, the sidecar garbage collector frees up memory once it exceeds the limit, instead of waiting for the heap to grow to double the amount of memory present after the last collection. Waiting is the default behavior of the [garbage collector](https://tip.golang.org/doc/gc-guide#Memory_limit) used in Go, and can lead to OOM Kill events.
|
||||
|
||||
When deploying Dapr in a production-ready configuration, it is recommend to deploy with a highly available (HA) configuration of the control plane, which creates 3 replicas of each control plane pod in the dapr-system namespace. This configuration allows the Dapr control plane to retain 3 running instances and survive individual node failures and other outages.
|
||||
For example, for an app with app-id `nodeapp` with memory limit set to 1000Mi, you can use the following in your pod annotations:
|
||||
|
||||
For a new Dapr deployment, the HA mode can be set with both the [Dapr CLI]({{< ref "kubernetes-deploy.md#install-in-highly-available-mode" >}}) and with [Helm charts]({{< ref "kubernetes-deploy.md#add-and-install-dapr-helm-chart" >}}).
|
||||
```yaml
|
||||
annotations:
|
||||
dapr.io/enabled: "true"
|
||||
dapr.io/app-id: "nodeapp"
|
||||
# our daprd memory settings
|
||||
dapr.io/sidecar-memory-limit: "1000Mi" # your memory limit
|
||||
dapr.io/env: "GOMEMLIMIT=900MiB" # 90% of your memory limit. Also notice the suffix "MiB" instead of "Mi"
|
||||
```
|
||||
|
||||
For an existing Dapr deployment, enabling the HA mode requires additional steps. Please refer to [this paragraph]({{< ref "#enabling-high-availability-in-an-existing-dapr-deployment" >}}) for more details.
|
||||
In this example, the soft limit has been set to be 90% to leave 5-10% for other services, [as recommended](https://tip.golang.org/doc/gc-guide#Memory_limit).
|
||||
|
||||
## Deploying Dapr with Helm
|
||||
The `GOMEMLIMIT` environment variable [allows certain suffixes for the memory size: `B`, `KiB`, `MiB`, `GiB`, and `TiB`.](https://pkg.go.dev/runtime)
|
||||
|
||||
## High availability mode
|
||||
|
||||
When deploying Dapr in a production-ready configuration, it's best to deploy with a high availability (HA) configuration of the control plane. This creates three replicas of each control plane pod in the `dapr-system` namespace, allowing the Dapr control plane to retain three running instances and survive individual node failures and other outages.
|
||||
|
||||
For a new Dapr deployment, HA mode can be set with both:
|
||||
- The [Dapr CLI]({{< ref "kubernetes-deploy.md#install-in-highly-available-mode" >}}), and
|
||||
- [Helm charts]({{< ref "kubernetes-deploy.md#add-and-install-dapr-helm-chart" >}})
|
||||
|
||||
For an existing Dapr deployment, [you can enable HA mode in a few extra steps]({{< ref "#enabling-high-availability-in-an-existing-dapr-deployment" >}}).
|
||||
|
||||
## Deploy Dapr with Helm
|
||||
|
||||
[Visit the full guide on deploying Dapr with Helm]({{< ref "kubernetes-deploy.md#install-with-helm-advanced" >}}).
|
||||
|
||||
### Parameters file
|
||||
|
||||
Instead of specifying parameters on the command line, it's recommended to create a values file. This file should be checked into source control so that you can track its changes.
|
||||
It's recommended to create a values file, instead of specifying parameters on the command line. Check the values file into source control so that you can track its changes.
|
||||
|
||||
For a full list of all available options you can set in the values file (or by using the `--set` command-line option), see https://github.com/dapr/dapr/blob/master/charts/dapr/README.md.
|
||||
[See a full list of available parameters and settings](https://github.com/dapr/dapr/blob/master/charts/dapr/README.md).
|
||||
|
||||
Instead of using either `helm install` or `helm upgrade` as shown below, you can also run `helm upgrade --install` - this will dynamically determine whether to install or upgrade.
|
||||
The following command runs three replicas of each control plane service in the `dapr-system` namespace.
|
||||
|
||||
```bash
|
||||
# Add/update an official Dapr Helm repo.
|
||||
|
@ -119,84 +139,85 @@ helm install dapr dapr/dapr \
|
|||
kubectl get pods --namespace dapr-system
|
||||
```
|
||||
|
||||
This command will run 3 replicas of each control plane service in the dapr-system namespace.
|
||||
|
||||
{{% alert title="Note" color="primary" %}}
|
||||
The Dapr Helm chart automatically deploys with affinity for nodes with the label `kubernetes.io/os=linux`. You can deploy the Dapr control plane to Windows nodes, but most users should not need to. For more information see [Deploying to a Hybrid Linux/Windows K8s Cluster]({{< ref "kubernetes-hybrid-clusters.md" >}}).
|
||||
|
||||
The example above uses `helm install` and `helm upgrade`. You can also run `helm upgrade --install` to dynamically determine whether to install or upgrade.
|
||||
{{% /alert %}}
|
||||
|
||||
## Upgrading Dapr with Helm
|
||||
The Dapr Helm chart automatically deploys with affinity for nodes with the label `kubernetes.io/os=linux`. You can deploy the Dapr control plane to Windows nodes. For more information, see [Deploying to a Hybrid Linux/Windows K8s Cluster]({{< ref "kubernetes-hybrid-clusters.md" >}}).
|
||||
|
||||
Dapr supports zero-downtime upgrades. The upgrade path includes the following steps:
|
||||
## Upgrade Dapr with Helm
|
||||
|
||||
1. Upgrading a CLI version (optional but recommended)
|
||||
2. Updating the Dapr control plane
|
||||
3. Updating the data plane (Dapr sidecars)
|
||||
Dapr supports zero-downtime upgrades, performed in the following steps.
|
||||
|
||||
### Upgrading the CLI
|
||||
### Upgrade the CLI (recommended)
|
||||
|
||||
To upgrade the Dapr CLI, [download the latest version](https://github.com/dapr/cli/releases) of the CLI and ensure it's in your path.
|
||||
Upgrading the CLI is optional, but recommended.
|
||||
|
||||
### Upgrading the control plane
|
||||
1. [Download the latest version](https://github.com/dapr/cli/releases) of the CLI.
|
||||
1. Verify the Dapr CLI is in your path.
|
||||
|
||||
See [steps to upgrade Dapr on a Kubernetes cluster]({{< ref "kubernetes-upgrade.md#helm" >}}).
|
||||
### Upgrade the control plane
|
||||
|
||||
### Updating the data plane (sidecars)
|
||||
[Upgrade Dapr on a Kubernetes cluster]({{< ref "kubernetes-upgrade.md#helm" >}}).
|
||||
|
||||
The last step is to update pods that are running Dapr to pick up the new version of the Dapr runtime.
|
||||
To do that, simply issue a rollout restart command for any deployment that has the `dapr.io/enabled` annotation:
|
||||
### Update the data plane (sidecars)
|
||||
|
||||
```bash
|
||||
kubectl rollout restart deploy/<Application deployment name>
|
||||
```
|
||||
Update pods that are running Dapr to pick up the new version of the Dapr runtime.
|
||||
|
||||
To see a list of all your Dapr enabled deployments, you can either use the [Dapr Dashboard](https://github.com/dapr/dashboard) or run the following command using the Dapr CLI:
|
||||
1. Issue a rollout restart command for any deployment that has the `dapr.io/enabled` annotation:
|
||||
|
||||
```bash
|
||||
dapr list -k
|
||||
```bash
|
||||
kubectl rollout restart deploy/<Application deployment name>
|
||||
```
|
||||
|
||||
APP ID APP PORT AGE CREATED
|
||||
nodeapp 3000 16h 2020-07-29 17:16.22
|
||||
```
|
||||
1. View a list of all your Dapr enabled deployments via either:
|
||||
- The [Dapr Dashboard](https://github.com/dapr/dashboard)
|
||||
- Running the following command using the Dapr CLI:
|
||||
|
||||
### Enabling high-availability in an existing Dapr deployment
|
||||
```bash
|
||||
dapr list -k
|
||||
|
||||
APP ID APP PORT AGE CREATED
|
||||
nodeapp 3000 16h 2020-07-29 17:16.22
|
||||
```
|
||||
|
||||
### Enable high availability in an existing Dapr deployment
|
||||
|
||||
Enabling HA mode for an existing Dapr deployment requires two steps:
|
||||
|
||||
1. Delete the existing placement stateful set:
|
||||
1. Delete the existing placement stateful set.
|
||||
|
||||
```bash
|
||||
kubectl delete statefulset.apps/dapr-placement-server -n dapr-system
|
||||
```
|
||||
|
||||
1. Issue the upgrade command:
|
||||
You delete the placement stateful set because, in HA mode, the placement service adds [Raft](https://raft.github.io/) for leader election. However, Kubernetes only allows for limited fields in stateful sets to be patched, subsequently failing upgrade of the placement service.
|
||||
|
||||
Deletion of the existing placement stateful set is safe. The agents reconnect and re-register with the newly created placement service, which persists its table in Raft.
|
||||
|
||||
1. Issue the upgrade command.
|
||||
|
||||
```bash
|
||||
helm upgrade dapr ./charts/dapr -n dapr-system --set global.ha.enabled=true
|
||||
```
|
||||
|
||||
You delete the placement stateful set because, in the HA mode, the placement service adds [Raft](https://raft.github.io/) for leader election. However, Kubernetes only allows for limited fields in stateful sets to be patched, subsequently failing upgrade of the placement service.
|
||||
|
||||
Deletion of the existing placement stateful set is safe. The agents will reconnect and re-register with the newly created placement service, which will persist its table in Raft.
|
||||
|
||||
## Recommended security configuration
|
||||
|
||||
When properly configured, Dapr ensures secure communication. It can also make your application more secure with a number of built-in features.
|
||||
When properly configured, Dapr ensures secure communication and can make your application more secure with a number of built-in features.
|
||||
|
||||
It is recommended that a production-ready deployment includes the following settings:
|
||||
Verify your production-ready deployment includes the following settings:
|
||||
|
||||
1. **Mutual Authentication (mTLS)** should be enabled. Note that Dapr has mTLS on by default. For details on how to bring your own certificates, see [here]({{< ref "mtls.md#bringing-your-own-certificates" >}})
|
||||
1. **Mutual Authentication (mTLS)** is enabled. Dapr has mTLS on by default. [Learn more about how to bring your own certificates]({{< ref "mtls.md#bringing-your-own-certificates" >}}).
|
||||
|
||||
2. **App to Dapr API authentication** is enabled. This is the communication between your application and the Dapr sidecar. To secure the Dapr API from unauthorized application access, it is recommended to enable Dapr's token based auth. See [enable API token authentication in Dapr]({{< ref "api-token.md" >}}) for details
|
||||
1. **App to Dapr API authentication** is enabled. This is the communication between your application and the Dapr sidecar. To secure the Dapr API from unauthorized application access, [enable Dapr's token-based authentication]({{< ref "api-token.md" >}}).
|
||||
|
||||
3. **Dapr to App API authentication** is enabled. This is the communication between Dapr and your application. This ensures that Dapr knows that it is communicating with an authorized application. See [Authenticate requests from Dapr using token authentication]({{< ref "app-api-token.md" >}}) for details
|
||||
1. **Dapr to App API authentication** is enabled. This is the communication between Dapr and your application. [Let Dapr know that it is communicating with an authorized application using token authentication]({{< ref "app-api-token.md" >}}).
|
||||
|
||||
4. All component YAMLs should have **secret data configured in a secret store** and not hard-coded in the YAML file. See [here]({{< ref "component-secrets.md" >}}) on how to use secrets with Dapr components
|
||||
1. **Component secret data is configured in a secret store** and not hard-coded in the component YAML file. [Learn how to use secrets with Dapr components]({{< ref "component-secrets.md" >}}).
|
||||
|
||||
5. The Dapr **control plane is installed on a dedicated namespace** such as `dapr-system`.
|
||||
1. The Dapr **control plane is installed on a dedicated namespace**, such as `dapr-system`.
|
||||
|
||||
6. Dapr also supports **scoping components for certain applications**. This is not a required practice, and can be enabled according to your security needs. See [here]({{< ref "component-scopes.md" >}}) for more info.
|
||||
1. Dapr supports **scoping components for certain applications**, which you can enable according to your security needs. This is not a required practice. [Learn more about component scopes]({{< ref "component-scopes.md" >}}).
|
||||
|
||||
## Service account tokens
|
||||
|
||||
|
@ -204,47 +225,55 @@ By default, Kubernetes mounts a volume containing a [Service Account token](http
|
|||
|
||||
When creating a new Pod (or a Deployment, StatefulSet, Job, etc), you can disable auto-mounting the Service Account token by setting `automountServiceAccountToken: false` in your pod's spec.
|
||||
|
||||
It is recommended that you consider deploying your apps with `automountServiceAccountToken: false` to improve the security posture of your pods, unless your apps depend on having a Service Account token. For example, you may need a Service Account token if:
|
||||
It's recommended that you consider deploying your apps with `automountServiceAccountToken: false` to improve the security posture of your pods, unless your apps depend on having a Service Account token. For example, you may need a Service Account token if:
|
||||
|
||||
- You are using Dapr components that interact with the Kubernetes APIs, for example the [Kubernetes secret store]({{< ref "kubernetes-secret-store.md" >}}) or the [Kubernetes Events binding]({{< ref "kubernetes-binding.md" >}}).
|
||||
Note that initializing Dapr components using [component secrets]({{< ref "component-secrets.md" >}}) stored as Kubernetes secrets does **not** require a Service Account token, so you can still set `automountServiceAccountToken: false` in this case. Only calling the Kubernetes secret store at runtime, using the [Secrets management]({{< ref "secrets-overview.md" >}}) building block, is impacted.
|
||||
- Your own application needs to interact with the Kubernetes APIs.
|
||||
- Your application needs to interact with the Kubernetes APIs.
|
||||
- You are using Dapr components that interact with the Kubernetes APIs; for example, the [Kubernetes secret store]({{< ref "kubernetes-secret-store.md" >}}) or the [Kubernetes Events binding]({{< ref "kubernetes-binding.md" >}}).
|
||||
|
||||
Because of the reasons above, Dapr does not set `automountServiceAccountToken: false` automatically for you. However, in all situations where the Service Account is not required by your solution, it is recommended that you set this option in the pods spec.
|
||||
Thus, Dapr does not set `automountServiceAccountToken: false` automatically for you. However, in all situations where the Service Account is not required by your solution, it's recommended that you set this option in the pods spec.
|
||||
|
||||
{{% alert title="Note" color="primary" %}}
|
||||
Initializing Dapr components using [component secrets]({{< ref "component-secrets.md" >}}) stored as Kubernetes secrets does **not** require a Service Account token, so you can still set `automountServiceAccountToken: false` in this case. Only calling the Kubernetes secret store at runtime, using the [Secrets management]({{< ref "secrets-overview.md" >}}) building block, is impacted.
|
||||
{{% /alert %}}
|
||||
|
||||
## Tracing and metrics configuration
|
||||
|
||||
Dapr has tracing and metrics enabled by default. It is *recommended* that you set up distributed tracing and metrics for your applications and the Dapr control plane in production.
|
||||
Tracing and metrics are enabled in Dapr by default. It's recommended that you set up distributed tracing and metrics for your applications and the Dapr control plane in production.
|
||||
|
||||
If you already have your own observability set-up, you can disable tracing and metrics for Dapr.
|
||||
If you already have your own observability setup, you can disable tracing and metrics for Dapr.
|
||||
|
||||
### Tracing
|
||||
|
||||
To configure a tracing backend for Dapr visit [this]({{< ref "setup-tracing.md" >}}) link.
|
||||
[Configure a tracing backend for Dapr]({{< ref "setup-tracing.md" >}}).
|
||||
|
||||
### Metrics
|
||||
|
||||
For metrics, Dapr exposes a Prometheus endpoint listening on port 9090 which can be scraped by Prometheus.
|
||||
For metrics, Dapr exposes a Prometheus endpoint listening on port 9090, which can be scraped by Prometheus.
|
||||
|
||||
To setup Prometheus, Grafana and other monitoring tools with Dapr, visit [this]({{< ref "monitoring" >}}) link.
|
||||
[Set up Prometheus, Grafana, and other monitoring tools with Dapr]({{< ref "observability" >}}).
|
||||
|
||||
## Injector watchdog
|
||||
|
||||
The Dapr Operator service includes an _injector watchdog_ which can be used to detect and remediate situations where your application's pods may be deployed without the Dapr sidecar (the `daprd` container) when they should have been. For example, it can assist with recovering the applications after a total cluster failure.
|
||||
The Dapr Operator service includes an **injector watchdog**, which can be used to detect and remediate situations where your application's pods may be deployed without the Dapr sidecar (the `daprd` container). For example, it can assist with recovering the applications after a total cluster failure.
|
||||
|
||||
The injector watchdog is disabled by default when running Dapr in Kubernetes mode and it is recommended that you consider enabling it with values that are appropriate for your specific situation.
|
||||
The injector watchdog is disabled by default when running Dapr in Kubernetes mode. However, you should consider enabling it with the appropriate values for your specific situation.
|
||||
|
||||
Refer to the documentation for the [Dapr operator]({{< ref operator >}}) service for more details on the injector watchdog and how to enable it.
|
||||
Refer to the [Dapr operator service documentation]({{< ref operator >}}) for more details on the injector watchdog and how to enable it.
|
||||
|
||||
## Configuring seccompProfile for sidecar containers
|
||||
## Configure `seccompProfile` for sidecar containers
|
||||
|
||||
By default, the Dapr sidecar Injector injects a sidecar without any `seccompProfile`. However, to have Dapr sidecar container run successfully in a namespace with [Restricted](https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted) profile, the sidecar container needs to have `securityContext.seccompProfile.Type` to not be `nil`.
|
||||
By default, the Dapr sidecar injector injects a sidecar without any `seccompProfile`. However, for the Dapr sidecar container to run successfully in a namespace with the [Restricted](https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted) profile, the sidecar container needs `securityContext.seccompProfile.Type` to not be `nil`.
|
||||
|
||||
Refer to [this]({{< ref "arguments-annotations-overview.md" >}}) documentation to set appropriate `seccompProfile` on sidecar container according to which profile it is running with.
|
||||
Refer to [the Arguments and Annotations overview]({{< ref "arguments-annotations-overview.md" >}}) to set the appropriate `seccompProfile` on the sidecar container.
|
||||
|
||||
## Best Practices
|
||||
|
||||
Watch this video for a deep dive into the best practices for running Dapr in production with Kubernetes
|
||||
Watch this video for a deep dive into the best practices for running Dapr in production with Kubernetes.
|
||||
|
||||
<div class="embed-responsive embed-responsive-16by9">
|
||||
<iframe width="360" height="315" src="https://www.youtube-nocookie.com/embed/_U9wJqq-H1g" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
|
||||
|
||||
## Related links
|
||||
|
||||
- [Deploy Dapr on Kubernetes]({{< ref kubernetes-deploy.md >}})
|
||||
- [Upgrade Dapr on Kubernetes]({{< ref kubernetes-upgrade.md >}})
|
|
@ -6,34 +6,37 @@ weight: 30000
|
|||
description: "Follow these steps to upgrade Dapr on Kubernetes and ensure a smooth upgrade."
|
||||
---
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- [Dapr CLI]({{< ref install-dapr-cli.md >}})
|
||||
- [Helm 3](https://github.com/helm/helm/releases) (if using Helm)
|
||||
|
||||
## Upgrade existing cluster to {{% dapr-latest-version long="true" %}}
|
||||
There are two ways to upgrade the Dapr control plane on a Kubernetes cluster using either the Dapr CLI or Helm.
|
||||
You can upgrade the Dapr control plane on a Kubernetes cluster using either the Dapr CLI or Helm.
|
||||
|
||||
{{% alert title="Note" color="primary" %}}
|
||||
Refer to the [Dapr version policy]({{< ref "support-release-policy.md#upgrade-paths" >}}) for guidance on which versions of Dapr can be upgraded to which versions.
|
||||
Refer to the [Dapr version policy]({{< ref "support-release-policy.md#upgrade-paths" >}}) for guidance on Dapr's upgrade path.
|
||||
{{% /alert %}}
|
||||
|
||||
### Dapr CLI
|
||||
{{< tabs "Dapr CLI" "Helm" >}}
|
||||
<!-- Dapr CLI -->
|
||||
{{% codetab %}}
|
||||
## Upgrade using the Dapr CLI
|
||||
|
||||
The example below shows how to upgrade to version {{% dapr-latest-version long="true" %}}:
|
||||
You can upgrade Dapr using the [Dapr CLI]({{< ref install-dapr-cli.md >}}).
|
||||
|
||||
```bash
|
||||
dapr upgrade -k --runtime-version={{% dapr-latest-version long="true" %}}
|
||||
```
|
||||
### Prerequisites
|
||||
|
||||
You can provide all the available Helm chart configurations using the Dapr CLI.
|
||||
See [here](https://github.com/dapr/cli#supplying-helm-values) for more info.
|
||||
- [Install the Dapr CLI]({{< ref install-dapr-cli.md >}})
|
||||
- An existing [Kubernetes cluster running with Dapr]({{< ref cluster >}})
|
||||
|
||||
#### Troubleshooting upgrade using the CLI
|
||||
### Upgrade existing cluster to {{% dapr-latest-version long="true" %}}
|
||||
|
||||
```bash
|
||||
dapr upgrade -k --runtime-version={{% dapr-latest-version long="true" %}}
|
||||
```
|
||||
|
||||
[You can provide all the available Helm chart configurations using the Dapr CLI.](https://github.com/dapr/cli#supplying-helm-values)
|
||||
|
||||
### Troubleshoot upgrading via the CLI
|
||||
|
||||
There is a known issue when running upgrades on clusters that previously had a version prior to 1.0.0-rc.2 installed.
|
||||
|
||||
Most users should not encounter this issue, but there are a few upgrade path edge cases that may leave an incompatible CustomResourceDefinition installed on your cluster. The error message for this case looks like this:
|
||||
While this issue is uncommon, a few upgrade path edge cases may leave an incompatible `CustomResourceDefinition` installed on your cluster. If this is your scenario, you may see an error message like the following:
|
||||
|
||||
```
|
||||
❌ Failed to upgrade Dapr: Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply
|
||||
|
@ -41,31 +44,45 @@ The CustomResourceDefinition "configurations.dapr.io" is invalid: spec.preserveU
|
|||
|
||||
```
|
||||
|
||||
To resolve this issue please run the follow command to upgrade the CustomResourceDefinition to a compatible version:
|
||||
#### Solution
|
||||
|
||||
```
|
||||
kubectl replace -f https://raw.githubusercontent.com/dapr/dapr/5a15b3e0f093d2d0938b12f144c7047474a290fe/charts/dapr/crds/configuration.yaml
|
||||
```
|
||||
1. Run the following command to upgrade the `CustomResourceDefinition` to a compatible version:
|
||||
|
||||
Then proceed with the `dapr upgrade --runtime-version {{% dapr-latest-version long="true" %}} -k` command as above.
|
||||
```sh
|
||||
kubectl replace -f https://raw.githubusercontent.com/dapr/dapr/5a15b3e0f093d2d0938b12f144c7047474a290fe/charts/dapr/crds/configuration.yaml
|
||||
```
|
||||
|
||||
### Helm
|
||||
1. Proceed with the `dapr upgrade --runtime-version {{% dapr-latest-version long="true" %}} -k` command.
|
||||
|
||||
From version 1.0.0 onwards, upgrading Dapr using Helm is no longer a disruptive action since existing certificate values will automatically be re-used.
|
||||
{{% /codetab %}}
|
||||
|
||||
1. Upgrade Dapr from 1.0.0 (or newer) to any [NEW VERSION] > 1.0.0:
|
||||
<!-- Helm -->
|
||||
{{% codetab %}}
|
||||
## Upgrade using Helm
|
||||
|
||||
*Helm does not handle upgrading CRDs, so you need to perform that manually. CRDs are backward-compatible and should only be installed forward.*
|
||||
You can upgrade Dapr using a Helm v3 chart.
|
||||
|
||||
>Note: The Dapr version is included in the commands below.
|
||||
❗**Important:** The latest Dapr Helm chart no longer supports Helm v2. [Migrate from Helm v2 to Helm v3](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/).
|
||||
|
||||
For version {{% dapr-latest-version long="true" %}}:
|
||||
### Prerequisites
|
||||
|
||||
- [Install Helm v3](https://github.com/helm/helm/releases)
|
||||
- An existing [Kubernetes cluster running with Dapr]({{< ref cluster >}})
|
||||
|
||||
### Upgrade existing cluster to {{% dapr-latest-version long="true" %}}
|
||||
|
||||
As of version 1.0.0 onwards, existing certificate values will automatically be reused when upgrading Dapr using Helm.
|
||||
|
||||
> **Note** Helm does not handle upgrading resources, so you need to perform that manually. Resources are backward-compatible and should only be installed forward.
|
||||
|
||||
1. Upgrade Dapr to version {{% dapr-latest-version long="true" %}}:
|
||||
|
||||
```bash
|
||||
kubectl replace -f https://raw.githubusercontent.com/dapr/dapr/v{{% dapr-latest-version long="true" %}}/charts/dapr/crds/components.yaml
|
||||
kubectl replace -f https://raw.githubusercontent.com/dapr/dapr/v{{% dapr-latest-version long="true" %}}/charts/dapr/crds/configuration.yaml
|
||||
kubectl replace -f https://raw.githubusercontent.com/dapr/dapr/v{{% dapr-latest-version long="true" %}}/charts/dapr/crds/subscription.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/dapr/dapr/v{{% dapr-latest-version long="true" %}}/charts/dapr/crds/resiliency.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/dapr/dapr/v{{% dapr-latest-version long="true" %}}/charts/dapr/crds/httpendpoints.yaml
|
||||
```
|
||||
|
||||
```bash
|
||||
|
@ -75,9 +92,9 @@ From version 1.0.0 onwards, upgrading Dapr using Helm is no longer a disruptive
|
|||
```bash
|
||||
helm upgrade dapr dapr/dapr --version {{% dapr-latest-version long="true" %}} --namespace dapr-system --wait
|
||||
```
|
||||
*If you're using a values file, remember to add the `--values` option when running the upgrade command.*
|
||||
> If you're using a values file, remember to add the `--values` option when running the upgrade command.
|
||||
|
||||
2. Ensure all pods are running:
|
||||
1. Ensure all pods are running:
|
||||
|
||||
```bash
|
||||
kubectl get pods -n dapr-system -w
|
||||
|
@ -90,20 +107,23 @@ From version 1.0.0 onwards, upgrading Dapr using Helm is no longer a disruptive
|
|||
dapr-sidecar-injector-68f868668f-6xnbt 1/1 Running 0 41s
|
||||
```
|
||||
|
||||
3. Restart your application deployments to update the Dapr runtime:
|
||||
1. Restart your application deployments to update the Dapr runtime:
|
||||
|
||||
```bash
|
||||
kubectl rollout restart deploy/<DEPLOYMENT-NAME>
|
||||
```
|
||||
|
||||
4. All done!
|
||||
{{% /codetab %}}
|
||||
|
||||
#### Upgrading existing Dapr to enable high availability mode
|
||||
|
||||
Enabling HA mode in an existing Dapr deployment requires additional steps. Please refer to [this paragraph]({{< ref "kubernetes-production.md#enabling-high-availability-in-an-existing-dapr-deployment" >}}) for more details.
|
||||
{{< /tabs >}}
|
||||
|
||||
|
||||
## Next steps
|
||||
## Upgrade existing Dapr deployment to enable high availability mode
|
||||
|
||||
[Enable high availability mode in an existing Dapr deployment with a few additional steps.]({{< ref "kubernetes-production.md#enabling-high-availability-in-an-existing-dapr-deployment" >}})
|
||||
|
||||
## Related links
|
||||
|
||||
- [Dapr on Kubernetes]({{< ref kubernetes-overview.md >}})
|
||||
- [Dapr production guidelines]({{< ref kubernetes-production.md >}})
|
||||
- [More on upgrading Dapr with Helm]({{< ref "kubernetes-production.md#upgrade-dapr-with-helm" >}})
|
||||
- [Dapr production guidelines]({{< ref kubernetes-production.md >}})
|
|
@ -6,8 +6,6 @@ weight: 80000
|
|||
description: "Configure the Dapr sidecar to mount Pod Volumes"
|
||||
---
|
||||
|
||||
## Introduction
|
||||
|
||||
The Dapr sidecar can be configured to mount any Kubernetes Volume attached to the application Pod. These Volumes can be accessed by the `daprd` (sidecar) container in _read-only_ or _read-write_ modes. If a Volume is configured to be mounted but it does not exist in the Pod, Dapr logs a warning and ignores it.
|
||||
|
||||
For more information on different types of Volumes, check the [Kubernetes documentation](https://kubernetes.io/docs/concepts/storage/volumes/).
|
||||
|
@ -16,21 +14,30 @@ For more information on different types of Volumes, check the [Kubernetes docume
|
|||
|
||||
You can set the following annotations in your deployment YAML:
|
||||
|
||||
1. **dapr.io/volume-mounts**: for read-only volume mounts
|
||||
1. **dapr.io/volume-mounts-rw**: for read-write volume mounts
|
||||
| Annotation | Description |
|
||||
| ---------- | ----------- |
|
||||
| `dapr.io/volume-mounts` | For read-only volume mounts |
|
||||
| `dapr.io/volume-mounts-rw` | For read-write volume mounts |
|
||||
|
||||
These annotations are comma separated pairs of `volume-name:path/in/container`. Make sure that the corresponding Volumes exist in the Pod spec.
|
||||
These annotations are comma separated pairs of `volume-name:path/in/container`. Verify the corresponding Volumes exist in the Pod spec.
|
||||
|
||||
Within the official container images, Dapr runs as a process with user ID (UID) `65532`. Make sure that folders and files inside the mounted Volume are writable or readable by user `65532` as appropriate.
|
||||
|
||||
Although you can mount a Volume in any folder within the Dapr sidecar container, prevent conflicts and ensure smooth operations going forward by placing all mountpoints within one of these two locations, or in a subfolder within them:
|
||||
Although you can mount a Volume in any folder within the Dapr sidecar container, prevent conflicts and ensure smooth operations going forward by placing all mountpoints within one of the following locations, or in a subfolder within them:
|
||||
|
||||
- `/mnt` is recommended for Volumes containing persistent data that the Dapr sidecar process can read and/or write.
|
||||
- `/tmp` is recommended for Volumes containing temporary data, such as scratch disks.
|
||||
| Location | Description |
|
||||
| -------- | ----------- |
|
||||
| `/mnt` | Recommended for Volumes containing persistent data that the Dapr sidecar process can read and/or write. |
|
||||
| `/tmp` | Recommended for Volumes containing temporary data, such as scratch disks. |
|
||||
|
||||
### Example
|
||||
## Examples
|
||||
|
||||
In the example Deployment resource below, `my-volume1` and `my-volume2` are available inside the sidecar container at `/mnt/sample1` and `/mnt/sample2` respectively, in read-only mode. `my-volume3` is available inside the sidecar container at `/tmp/sample3` in read-write mode.
|
||||
### Basic deployment resource example
|
||||
|
||||
In the example Deployment resource below:
|
||||
- `my-volume1` is available inside the sidecar container at `/mnt/sample1` in read-only mode
|
||||
- `my-volume2` is available inside the sidecar container at `/mnt/sample2` in read-only mode
|
||||
- `my-volume3` is available inside the sidecar container at `/tmp/sample3` in read-write mode
|
||||
|
||||
```yaml
|
||||
apiVersion: apps/v1
|
||||
|
@ -68,59 +75,57 @@ spec:
|
|||
...
|
||||
```
|
||||
|
||||
## Examples
|
||||
|
||||
### Custom secrets storage using local file secret store
|
||||
|
||||
Since any type of Kubernetes Volume can be attached to the sidecar, you can use the local file secret store to read secrets from a variety of places. For example, if you have a Network File Share (NFS) server running at `10.201.202.203`, with secrets stored at `/secrets/stage/secrets.json`, you can use that as a secrets storage.
|
||||
|
||||
1. Configure the application pod to mount the NFS and attach it to the Dapr sidecar.
|
||||
|
||||
```yaml
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: myapp
|
||||
...
|
||||
spec:
|
||||
...
|
||||
template:
|
||||
...
|
||||
annotations:
|
||||
dapr.io/enabled: "true"
|
||||
dapr.io/app-id: "myapp"
|
||||
dapr.io/app-port: "8000"
|
||||
dapr.io/volume-mounts: "nfs-secrets-vol:/mnt/secrets"
|
||||
spec:
|
||||
volumes:
|
||||
- name: nfs-secrets-vol
|
||||
nfs:
|
||||
server: 10.201.202.203
|
||||
path: /secrets/stage
|
||||
...
|
||||
```
|
||||
```yaml
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: myapp
|
||||
...
|
||||
spec:
|
||||
...
|
||||
template:
|
||||
...
|
||||
annotations:
|
||||
dapr.io/enabled: "true"
|
||||
dapr.io/app-id: "myapp"
|
||||
dapr.io/app-port: "8000"
|
||||
dapr.io/volume-mounts: "nfs-secrets-vol:/mnt/secrets"
|
||||
spec:
|
||||
volumes:
|
||||
- name: nfs-secrets-vol
|
||||
nfs:
|
||||
server: 10.201.202.203
|
||||
path: /secrets/stage
|
||||
...
|
||||
```
|
||||
|
||||
2. Point the local file secret store component to the attached file.
|
||||
1. Point the local file secret store component to the attached file.
|
||||
|
||||
```yaml
|
||||
apiVersion: dapr.io/v1alpha1
|
||||
kind: Component
|
||||
metadata:
|
||||
name: local-secret-store
|
||||
spec:
|
||||
type: secretstores.local.file
|
||||
version: v1
|
||||
metadata:
|
||||
- name: secretsFile
|
||||
value: /mnt/secrets/secrets.json
|
||||
```
|
||||
```yaml
|
||||
apiVersion: dapr.io/v1alpha1
|
||||
kind: Component
|
||||
metadata:
|
||||
name: local-secret-store
|
||||
spec:
|
||||
type: secretstores.local.file
|
||||
version: v1
|
||||
metadata:
|
||||
- name: secretsFile
|
||||
value: /mnt/secrets/secrets.json
|
||||
```
|
||||
|
||||
3. Use the secrets.
|
||||
1. Use the secrets.
|
||||
|
||||
```
|
||||
GET http://localhost:<daprPort>/v1.0/secrets/local-secret-store/my-secret
|
||||
```
|
||||
```
|
||||
GET http://localhost:<daprPort>/v1.0/secrets/local-secret-store/my-secret
|
||||
```
|
||||
|
||||
## Related links
|
||||
|
||||
- [Dapr Kubernetes pod annotations spec]({{< ref arguments-annotations-overview.md >}})
|
||||
[Dapr Kubernetes pod annotations spec]({{< ref arguments-annotations-overview.md >}})
|
||||
|
|
|
@ -16,7 +16,7 @@ This article provides guidance on running Dapr with Docker on a Windows/Linux/ma
|
|||
|
||||
## Initialize Dapr environment
|
||||
|
||||
To initialize the Dapr control-plane containers and create a default configuration file, run:
|
||||
To initialize the Dapr control plane containers and create a default configuration file, run:
|
||||
|
||||
```bash
|
||||
dapr init
|
||||
|
|
|
@ -15,7 +15,7 @@ This article provides guidance on running Dapr with Podman on a Windows/Linux/ma
|
|||
|
||||
## Initialize Dapr environment
|
||||
|
||||
To initialize the Dapr control-plane containers and create a default configuration file, run:
|
||||
To initialize the Dapr control plane containers and create a default configuration file, run:
|
||||
|
||||
```bash
|
||||
dapr init --container-runtime podman
|
||||
|
|
|
@ -1,7 +0,0 @@
|
|||
---
|
||||
type: docs
|
||||
title: "Observe your application with Dapr"
|
||||
linkTitle: "Observability"
|
||||
weight: 80
|
||||
description: "How to observe and gain insights into your application"
|
||||
---
|
|
@ -1,132 +0,0 @@
|
|||
---
|
||||
type: docs
|
||||
title: "How-To: Set up Azure Monitor to search logs and collect metrics"
|
||||
linkTitle: "Azure Monitor"
|
||||
weight: 7000
|
||||
description: "Enable Dapr metrics and logs with Azure Monitor for Azure Kubernetes Service (AKS)"
|
||||
---
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- [Azure Kubernetes Service](https://docs.microsoft.com/azure/aks/)
|
||||
- [Enable Azure Monitor For containers in AKS](https://docs.microsoft.com/azure/azure-monitor/insights/container-insights-overview)
|
||||
- [kubectl](https://kubernetes.io/docs/tasks/tools/)
|
||||
- [Helm 3](https://helm.sh/)
|
||||
|
||||
## Enable Prometheus metric scrape using config map
|
||||
|
||||
1. Make sure that omsagents are running
|
||||
|
||||
```bash
|
||||
$ kubectl get pods -n kube-system
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
...
|
||||
omsagent-75qjs 1/1 Running 1 44h
|
||||
omsagent-c7c4t 1/1 Running 0 44h
|
||||
omsagent-rs-74f488997c-dshpx 1/1 Running 1 44h
|
||||
omsagent-smtk7 1/1 Running 1 44h
|
||||
...
|
||||
```
|
||||
|
||||
2. Apply config map to enable Prometheus metrics endpoint scrape.
|
||||
|
||||
You can use [azm-config-map.yaml](/docs/azm-config-map.yaml) to enable prometheus metrics endpoint scrape.
|
||||
|
||||
If you installed Dapr to a different namespace, you need to change the `monitor_kubernetes_pod_namespaces` array values. For example:
|
||||
|
||||
```yaml
|
||||
...
|
||||
prometheus-data-collection-settings: |-
|
||||
[prometheus_data_collection_settings.cluster]
|
||||
interval = "1m"
|
||||
monitor_kubernetes_pods = true
|
||||
monitor_kubernetes_pods_namespaces = ["dapr-system", "default"]
|
||||
[prometheus_data_collection_settings.node]
|
||||
interval = "1m"
|
||||
...
|
||||
```
|
||||
|
||||
Apply config map:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ./azm-config.map.yaml
|
||||
```
|
||||
|
||||
## Install Dapr with JSON formatted logs
|
||||
|
||||
1. Install Dapr with JSON-formatted logs enabled
|
||||
|
||||
```bash
|
||||
helm install dapr dapr/dapr --namespace dapr-system --set global.logAsJson=true
|
||||
```
|
||||
|
||||
2. Enable JSON formatted log in Dapr sidecar and add Prometheus annotations.
|
||||
|
||||
> Note: The OMS Agent scrapes the metrics only if the replicaset has Prometheus annotations.
|
||||
|
||||
Add `dapr.io/log-as-json: "true"` annotation to your deployment yaml.
|
||||
|
||||
Example:
|
||||
```yaml
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: pythonapp
|
||||
namespace: default
|
||||
labels:
|
||||
app: python
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: python
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: python
|
||||
annotations:
|
||||
dapr.io/enabled: "true"
|
||||
dapr.io/app-id: "pythonapp"
|
||||
dapr.io/log-as-json: "true"
|
||||
prometheus.io/scrape: "true"
|
||||
prometheus.io/port: "9090"
|
||||
prometheus.io/path: "/"
|
||||
|
||||
...
|
||||
```
|
||||
|
||||
## Search metrics and logs with Azure Monitor
|
||||
|
||||
1. Go to Azure Monitor
|
||||
|
||||
2. Search Dapr logs
|
||||
|
||||
Here is an example query to parse JSON-formatted logs and query logs from Dapr system processes.
|
||||
|
||||
```
|
||||
ContainerLog
|
||||
| extend parsed=parse_json(LogEntry)
|
||||
| project Time=todatetime(parsed['time']), app_id=parsed['app_id'], scope=parsed['scope'],level=parsed['level'], msg=parsed['msg'], type=parsed['type'], ver=parsed['ver'], instance=parsed['instance']
|
||||
| where level != ""
|
||||
| sort by Time
|
||||
```
|
||||
|
||||
3. Search metrics
|
||||
|
||||
This query queries the `process_resident_memory_bytes` Prometheus metric for Dapr system processes and renders timecharts.
|
||||
|
||||
```
|
||||
InsightsMetrics
|
||||
| where Namespace == "prometheus" and Name == "process_resident_memory_bytes"
|
||||
| extend tags=parse_json(Tags)
|
||||
| project TimeGenerated, Name, Val, app=tostring(tags['app'])
|
||||
| summarize memInBytes=percentile(Val, 99) by bin(TimeGenerated, 1m), app
|
||||
| where app startswith "dapr-"
|
||||
| render timechart
|
||||
```
|
||||
|
||||
# References
|
||||
|
||||
* [Configure scraping of Prometheus metrics with Azure Monitor for containers](https://docs.microsoft.com/azure/azure-monitor/insights/container-insights-prometheus-integration)
|
||||
* [Configure agent data collection for Azure Monitor for containers](https://docs.microsoft.com/azure/azure-monitor/insights/container-insights-agent-config)
|
||||
* [Azure Monitor Query](https://docs.microsoft.com/azure/azure-monitor/log-query/query-language)
|
|
@ -1,7 +0,0 @@
|
|||
---
|
||||
type: docs
|
||||
title: "Tracing"
|
||||
linkTitle: "Tracing"
|
||||
weight: 100
|
||||
description: "How to setup your observability tools to receive application traces"
|
||||
---
|
|
@ -1,72 +0,0 @@
|
|||
---
|
||||
type: docs
|
||||
title: "Using OpenTelemetry Collector to collect traces to send to AppInsights"
|
||||
linkTitle: "Using the OpenTelemetry for Azure AppInsights"
|
||||
weight: 1000
|
||||
description: "How to push trace events to Azure Application Insights, using the OpenTelemetry Collector."
|
||||
---
|
||||
|
||||
Dapr integrates with [OpenTelemetry Collector](https://github.com/open-telemetry/opentelemetry-collector) using the Zipkin API. This guide walks through an example using Dapr to push trace events to Azure Application Insights, using the OpenTelemetry Collector.
|
||||
|
||||
## Requirements
|
||||
|
||||
An installation of Dapr on Kubernetes.
|
||||
|
||||
## How to configure distributed tracing with Application Insights
|
||||
|
||||
### Setup Application Insights
|
||||
|
||||
1. First, you'll need an Azure account. See instructions [here](https://azure.microsoft.com/free/) to apply for a **free** Azure account.
|
||||
2. Follow instructions [here](https://docs.microsoft.com/azure/azure-monitor/app/create-new-resource) to create a new Application Insights resource.
|
||||
3. Get the Application Insights Instrumentation key from your Application Insights page.
|
||||
|
||||
### Run OpenTelemetry Collector to push to your Application Insights instance
|
||||
|
||||
Install the OpenTelemetry Collector to your Kubernetes cluster to push events to your Application Insights instance
|
||||
|
||||
1. Check out the file [open-telemetry-collector-appinsights.yaml](/docs/open-telemetry-collector/open-telemetry-collector-appinsights.yaml) and replace the `<INSTRUMENTATION-KEY>` placeholder with your Application Insights Instrumentation Key.
|
||||
|
||||
2. Apply the configuration with `kubectl apply -f open-telemetry-collector-appinsights.yaml`.
|
||||
|
||||
Next, set up both a Dapr configuration file to turn on tracing and deploy a tracing exporter component that uses the OpenTelemetry Collector.
|
||||
|
||||
1. Create a collector-config.yaml file with this [content](/docs/open-telemetry-collector/collector-config.yaml)
|
||||
|
||||
2. Apply the configuration with `kubectl apply -f collector-config.yaml`.
|
||||
|
||||
### Deploy your app with tracing
|
||||
|
||||
When running in Kubernetes mode, apply the `appconfig` configuration by adding a `dapr.io/config` annotation to the container that you want to participate in the distributed tracing, as shown in the following example:
|
||||
|
||||
```yaml
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
...
|
||||
spec:
|
||||
...
|
||||
template:
|
||||
metadata:
|
||||
...
|
||||
annotations:
|
||||
dapr.io/enabled: "true"
|
||||
dapr.io/app-id: "MyApp"
|
||||
dapr.io/app-port: "8080"
|
||||
dapr.io/config: "appconfig"
|
||||
```
|
||||
|
||||
Some of the quickstarts such as [distributed calculator](https://github.com/dapr/quickstarts/tree/master/tutorials/distributed-calculator) already configure these settings, so if you are using those no additional settings are needed.
|
||||
|
||||
That's it! There's no need to include any SDKs or instrument your application code. Dapr automatically handles the distributed tracing for you.
|
||||
|
||||
> **NOTE**: You can register multiple tracing exporters at the same time, and the tracing logs are forwarded to all registered exporters.
|
||||
|
||||
Deploy and run some applications. After a few minutes, you should see tracing logs appearing in your Application Insights resource. You can also use the **Application Map** to examine the topology of your services, as shown below:
|
||||
|
||||

|
||||
|
||||
> **NOTE**: Only operations going through Dapr API exposed by Dapr sidecar (e.g. service invocation or event publishing) are displayed in Application Map topology.
|
||||
|
||||
## Related links
|
||||
* Try out the [observability quickstart](https://github.com/dapr/quickstarts/tree/master/tutorials/observability/README.md)
|
||||
* How to set [tracing configuration options]({{< ref "configuration-overview.md#tracing" >}})
|
|
@ -1,74 +0,0 @@
|
|||
---
|
||||
type: docs
|
||||
title: "Using OpenTelemetry Collector to collect traces"
|
||||
linkTitle: "Using the OpenTelemetry Collector"
|
||||
weight: 900
|
||||
description: "How to use Dapr to push trace events through the OpenTelemetry Collector."
|
||||
---
|
||||
|
||||
{{% alert title="Note" color="primary" %}}
|
||||
Dapr directly writes traces using the OpenTelemetry (OTEL) protocol as the recommended method. For observability tools that support OTEL protocol, you do not need to use the OpenTelemetry Collector.
|
||||
|
||||
Dapr can also write traces using the Zipkin protocol. Previous to supporting the OTEL protocol, combining the Zipkin protocol with the [OpenTelemetry Collector](https://github.com/open-telemetry/opentelemetry-collector) enabled you to send traces to observability tools such as AWS X-Ray, Google Cloud Operations Suite, and Azure AppInsights. This approach remains for reference purposes only.
|
||||
{{% /alert %}}
|
||||
|
||||

|
||||
|
||||
## Requirements
|
||||
|
||||
1. An installation of Dapr on Kubernetes.
|
||||
|
||||
2. You have already set up your trace backends to receive traces.
|
||||
|
||||
3. Check OpenTelemetry Collector exporters [here](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter) and [here](https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter) to see if your trace backend is supported by the OpenTelemetry Collector. On those linked pages, find the exporter you want to use and read its doc to find out the parameters required.
|
||||
|
||||
## Setting OpenTelemetry Collector
|
||||
|
||||
### Run OpenTelemetry Collector to push to your trace backend
|
||||
|
||||
1. Check out the file [open-telemetry-collector-generic.yaml](/docs/open-telemetry-collector/open-telemetry-collector-generic.yaml) and replace the section marked with `<your-exporter-here>` with the correct settings for your trace exporter. Again, refer to the OpenTelemetry Collector links in the Prerequisites section to determine the correct settings.
|
||||
|
||||
2. Apply the configuration with `kubectl apply -f open-telemetry-collector-generic.yaml`.
|
||||
|
||||
## Set up Dapr to send trace to OpenTelemetry Collector
|
||||
|
||||
### Turn on tracing in Dapr
|
||||
Next, set up both a Dapr configuration file to turn on tracing and deploy a tracing exporter component that uses the OpenTelemetry Collector.
|
||||
|
||||
1. Create a collector-config.yaml file with this [content](/docs/open-telemetry-collector/collector-config.yaml)
|
||||
|
||||
2. Apply the configuration with `kubectl apply -f collector-config.yaml`.
|
||||
|
||||
### Deploy your app with tracing
|
||||
|
||||
When running in Kubernetes mode, apply the `appconfig` configuration by adding a `dapr.io/config` annotation to the container that you want to participate in the distributed tracing, as shown in the following example:
|
||||
|
||||
```yaml
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
...
|
||||
spec:
|
||||
...
|
||||
template:
|
||||
metadata:
|
||||
...
|
||||
annotations:
|
||||
dapr.io/enabled: "true"
|
||||
dapr.io/app-id: "MyApp"
|
||||
dapr.io/app-port: "8080"
|
||||
dapr.io/config: "appconfig"
|
||||
```
|
||||
|
||||
Some of the quickstarts such as [distributed calculator](https://github.com/dapr/quickstarts/tree/master/tutorials/distributed-calculator) already configure these settings, so if you are using those no additional settings are needed.
|
||||
|
||||
That's it! There's no need to include any SDKs or instrument your application code. Dapr automatically handles the distributed tracing for you.
|
||||
|
||||
> **NOTE**: You can register multiple tracing exporters at the same time, and the tracing logs are forwarded to all registered exporters.
|
||||
|
||||
Deploy and run some applications. Wait for the trace to propagate to your tracing backend and view them there.
|
||||
|
||||
## Related links
|
||||
* Try out the [observability quickstart](https://github.com/dapr/quickstarts/tree/master/tutorials/observability/README.md)
|
||||
* How to set [tracing configuration options]({{< ref "configuration-overview.md#tracing" >}})
|
||||
|
|
@ -0,0 +1,18 @@
|
|||
---
|
||||
type: docs
|
||||
title: "Observability"
|
||||
linkTitle: "Observability"
|
||||
weight: 60
|
||||
description: See and measure the message calls to components and between networked services
|
||||
---
|
||||
|
||||
[The following overview video and demo](https://www.youtube.com/live/0y7ne6teHT4?si=3bmNSSyIEIVSF-Ej&t=9931) demonstrates how observability in Dapr works.
|
||||
|
||||
<iframe width="560" height="315" src="https://www.youtube.com/embed/0y7ne6teHT4?si=iURnLk57t2zN-7zP&start=12653" title="YouTube video player" style="padding-bottom:25px;" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" allowfullscreen></iframe>
|
||||
|
||||
{{% alert title="More about Dapr Observability" color="primary" %}}
|
||||
Learn more about how to use Dapr Observability:
|
||||
- Explore observability via any of the supporting [Dapr SDKs]({{< ref sdks >}}).
|
||||
- Review the [Observability API reference documentation]({{< ref health_api.md >}}).
|
||||
- Read the [general overview of the observability concept]({{< ref observability-concept >}}) in Dapr.
|
||||
{{% /alert %}}
|
|
@ -1,7 +1,7 @@
|
|||
---
|
||||
type: docs
|
||||
title: "Logs"
|
||||
linkTitle: "Logs"
|
||||
linkTitle: "Overview"
|
||||
weight: 1000
|
||||
description: "Understand Dapr logging"
|
||||
---
|
|
@ -1,7 +1,7 @@
|
|||
---
|
||||
type: docs
|
||||
title: "Metrics"
|
||||
linkTitle: "View metrics"
|
||||
linkTitle: "Metrics"
|
||||
weight: 300
|
||||
description: "How to view Dapr metrics"
|
||||
---
|
|
@ -0,0 +1,134 @@
|
|||
---
|
||||
type: docs
|
||||
title: "How-To: Set up Azure Monitor to search logs and collect metrics"
|
||||
linkTitle: "Azure Monitor"
|
||||
weight: 7000
|
||||
description: "Enable Dapr metrics and logs with Azure Monitor for Azure Kubernetes Service (AKS)"
|
||||
---
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- [Azure Kubernetes Service](https://docs.microsoft.com/azure/aks/)
|
||||
- [Enable Azure Monitor For containers in AKS](https://docs.microsoft.com/azure/azure-monitor/insights/container-insights-overview)
|
||||
- [kubectl](https://kubernetes.io/docs/tasks/tools/)
|
||||
- [Helm 3](https://helm.sh/)
|
||||
|
||||
## Enable Prometheus metric scrape using config map
|
||||
|
||||
1. Make sure that Azure Monitor Agents (AMA) are running.
|
||||
|
||||
```bash
|
||||
$ kubectl get pods -n kube-system
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
...
|
||||
ama-logs-48kpv 2/2 Running 0 2d13h
|
||||
ama-logs-mx24c 2/2 Running 0 2d13h
|
||||
ama-logs-rs-f9bbb9898-vbt6k 1/1 Running 0 30h
|
||||
ama-logs-sm2mz 2/2 Running 0 2d13h
|
||||
ama-logs-z7p4c 2/2 Running 0 2d13h
|
||||
...
|
||||
```
|
||||
|
||||
1. Apply config map to enable Prometheus metrics endpoint scrape.
|
||||
|
||||
You can use [azm-config-map.yaml](/docs/azm-config-map.yaml) to enable Prometheus metrics endpoint scrape.
|
||||
|
||||
If you installed Dapr to a different namespace, you need to change the `monitor_kubernetes_pods_namespaces` array values. For example:
|
||||
|
||||
```yaml
|
||||
...
|
||||
prometheus-data-collection-settings: |-
|
||||
[prometheus_data_collection_settings.cluster]
|
||||
interval = "1m"
|
||||
monitor_kubernetes_pods = true
|
||||
monitor_kubernetes_pods_namespaces = ["dapr-system", "default"]
|
||||
[prometheus_data_collection_settings.node]
|
||||
interval = "1m"
|
||||
...
|
||||
```
|
||||
|
||||
Apply config map:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ./azm-config-map.yaml
|
||||
```
|
||||
|
||||
## Install Dapr with JSON formatted logs
|
||||
|
||||
1. Install Dapr with JSON-formatted logs enabled.
|
||||
|
||||
```bash
|
||||
helm install dapr dapr/dapr --namespace dapr-system --set global.logAsJson=true
|
||||
```
|
||||
|
||||
1. Enable JSON-formatted logs in the Dapr sidecar and add Prometheus annotations.
|
||||
|
||||
> Note: The Azure Monitor Agents (AMA) only send the metrics if the Prometheus annotations are set.
|
||||
|
||||
Add `dapr.io/log-as-json: "true"` annotation to your deployment yaml.
|
||||
|
||||
Example:
|
||||
|
||||
```yaml
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: pythonapp
|
||||
namespace: default
|
||||
labels:
|
||||
app: python
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: python
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: python
|
||||
annotations:
|
||||
dapr.io/enabled: "true"
|
||||
dapr.io/app-id: "pythonapp"
|
||||
dapr.io/log-as-json: "true"
|
||||
prometheus.io/scrape: "true"
|
||||
prometheus.io/port: "9090"
|
||||
prometheus.io/path: "/"
|
||||
|
||||
...
|
||||
```
|
||||
|
||||
## Search metrics and logs with Azure Monitor
|
||||
|
||||
1. Go to Azure Monitor in the Azure portal.
|
||||
|
||||
1. Search Dapr **Logs**.
|
||||
|
||||
Here is an example query to parse JSON-formatted logs and query logs from Dapr system processes.
|
||||
|
||||
```
|
||||
ContainerLog
|
||||
| extend parsed=parse_json(LogEntry)
|
||||
| project Time=todatetime(parsed['time']), app_id=parsed['app_id'], scope=parsed['scope'],level=parsed['level'], msg=parsed['msg'], type=parsed['type'], ver=parsed['ver'], instance=parsed['instance']
|
||||
| where level != ""
|
||||
| sort by Time
|
||||
```
|
||||
|
||||
1. Search **Metrics**.
|
||||
|
||||
This query retrieves the `process_resident_memory_bytes` Prometheus metrics for Dapr system processes and renders timecharts.
|
||||
|
||||
```
|
||||
InsightsMetrics
|
||||
| where Namespace == "prometheus" and Name == "process_resident_memory_bytes"
|
||||
| extend tags=parse_json(Tags)
|
||||
| project TimeGenerated, Name, Val, app=tostring(tags['app'])
|
||||
| summarize memInBytes=percentile(Val, 99) by bin(TimeGenerated, 1m), app
|
||||
| where app startswith "dapr-"
|
||||
| render timechart
|
||||
```
|
||||
|
||||
## References
|
||||
|
||||
- [Configure scraping of Prometheus metrics with Azure Monitor for containers](https://docs.microsoft.com/azure/azure-monitor/insights/container-insights-prometheus-integration)
|
||||
- [Configure agent data collection for Azure Monitor for containers](https://docs.microsoft.com/azure/azure-monitor/insights/container-insights-agent-config)
|
||||
- [Azure Monitor Query](https://docs.microsoft.com/azure/azure-monitor/log-query/query-language)
|
|
@ -1,7 +1,7 @@
|
|||
---
|
||||
type: docs
|
||||
title: "Configure metrics"
|
||||
linkTitle: "Configure metrics"
|
||||
linkTitle: "Overview"
|
||||
weight: 4000
|
||||
description: "Enable or disable Dapr metrics "
|
||||
---
|
|
@ -0,0 +1,7 @@
|
|||
---
|
||||
type: docs
|
||||
title: "Tracing"
|
||||
linkTitle: "Tracing"
|
||||
weight: 200
|
||||
description: Learn about tracing scenarios and how to use tracing for visibility in your application
|
||||
---
|
|
@ -0,0 +1,83 @@
|
|||
---
|
||||
type: docs
|
||||
title: "Using OpenTelemetry Collector to collect traces to send to App Insights"
|
||||
linkTitle: "Using the OpenTelemetry Collector for Azure App Insights"
|
||||
weight: 1000
|
||||
description: "How to push trace events to Azure Application Insights, using the OpenTelemetry Collector."
|
||||
---
|
||||
|
||||
Dapr integrates with [OpenTelemetry (OTEL) Collector](https://github.com/open-telemetry/opentelemetry-collector) using the Zipkin API. This guide walks through an example using Dapr to push trace events to Azure Application Insights, using the OpenTelemetry Collector.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- [Install Dapr on Kubernetes]({{< ref kubernetes >}})
|
||||
- [Set up an App Insights resource](https://docs.microsoft.com/azure/azure-monitor/app/create-new-resource) and make note of your App Insights instrumentation key.
|
||||
|
||||
## Set up OTEL Collector to push to your App Insights instance
|
||||
|
||||
To push events to your App Insights instance, install the OTEL Collector to your Kubernetes cluster.
|
||||
|
||||
1. Check out the [`open-telemetry-collector-appinsights.yaml`](/docs/open-telemetry-collector/open-telemetry-collector-appinsights.yaml) file.
|
||||
|
||||
1. Replace the `<INSTRUMENTATION-KEY>` placeholder with your App Insights instrumentation key.
|
||||
|
||||
1. Apply the configuration with:
|
||||
|
||||
```sh
|
||||
kubectl apply -f open-telemetry-collector-appinsights.yaml
|
||||
```
|
||||
|
||||
## Set up Dapr to send trace to OTEL Collector
|
||||
|
||||
Set up a Dapr configuration file to turn on tracing and deploy a tracing exporter component that uses the OpenTelemetry Collector.
|
||||
|
||||
1. Use this [`collector-config.yaml`](/docs/open-telemetry-collector/collector-config.yaml) file to create your own configuration.
|
||||
|
||||
1. Apply the configuration with:
|
||||
|
||||
```sh
|
||||
kubectl apply -f collector-config.yaml
|
||||
```
|
||||
|
||||
## Deploy your app with tracing
|
||||
|
||||
Apply the `appconfig` configuration by adding a `dapr.io/config` annotation to the container that you want to participate in the distributed tracing, as shown in the following example:
|
||||
|
||||
```yaml
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
...
|
||||
spec:
|
||||
...
|
||||
template:
|
||||
metadata:
|
||||
...
|
||||
annotations:
|
||||
dapr.io/enabled: "true"
|
||||
dapr.io/app-id: "MyApp"
|
||||
dapr.io/app-port: "8080"
|
||||
dapr.io/config: "appconfig"
|
||||
```
|
||||
|
||||
{{% alert title="Note" color="primary" %}}
|
||||
If you are using one of the Dapr tutorials, such as [distributed calculator](https://github.com/dapr/quickstarts/tree/master/tutorials/distributed-calculator), the `appconfig` configuration is already configured, so no additional settings are needed.
|
||||
{{% /alert %}}
|
||||
|
||||
You can register multiple tracing exporters at the same time, and the tracing logs are forwarded to all registered exporters.
|
||||
|
||||
That's it! There's no need to include any SDKs or instrument your application code. Dapr automatically handles the distributed tracing for you.
|
||||
|
||||
## View traces
|
||||
|
||||
Deploy and run some applications. After a few minutes, you should see tracing logs appearing in your App Insights resource. You can also use the **Application Map** to examine the topology of your services, as shown below:
|
||||
|
||||

|
||||
|
||||
{{% alert title="Note" color="primary" %}}
|
||||
Only operations going through Dapr API exposed by Dapr sidecar (for example, service invocation or event publishing) are displayed in Application Map topology.
|
||||
{{% /alert %}}
|
||||
|
||||
## Related links
|
||||
- Try out the [observability quickstart](https://github.com/dapr/quickstarts/tree/master/tutorials/observability/README.md)
|
||||
- Learn how to set [tracing configuration options]({{< ref "configuration-overview.md#tracing" >}})
|
|
@ -0,0 +1,83 @@
|
|||
---
|
||||
type: docs
|
||||
title: "Using OpenTelemetry Collector to collect traces"
|
||||
linkTitle: "Using the OpenTelemetry Collector"
|
||||
weight: 900
|
||||
description: "How to use Dapr to push trace events through the OpenTelemetry Collector."
|
||||
---
|
||||
|
||||
Dapr directly writes traces using the OpenTelemetry (OTEL) protocol as the **recommended** method. For observability tools that support OTEL protocol, it is recommended to use the OpenTelemetry Collector, as it allows your application to quickly offload data and includes features, such as retries, batching, and encryption. For more information, read the Open Telemetry [documentation](https://opentelemetry.io/docs/collector/#when-to-use-a-collector).
|
||||
|
||||
Dapr can also write traces using the Zipkin protocol. Before OTEL protocol support was added, the Zipkin protocol was used with the [OpenTelemetry Collector](https://github.com/open-telemetry/opentelemetry-collector) to send traces to observability tools such as AWS X-Ray, Google Cloud Operations Suite, and Azure Monitor. Both protocol approaches are valid, however OTEL is the recommended choice.
|
||||
|
||||

|
||||
|
||||
## Prerequisites
|
||||
|
||||
- [Install Dapr on Kubernetes]({{< ref kubernetes >}})
|
||||
- Verify your trace backends are already set up to receive traces
|
||||
- Review your OTEL Collector exporter's required parameters:
|
||||
- [`opentelemetry-collector-contrib/exporter`](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter)
|
||||
- [`opentelemetry-collector/exporter`](https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter)
|
||||
|
||||
## Set up OTEL Collector to push to your trace backend
|
||||
|
||||
1. Check out the [`open-telemetry-collector-generic.yaml`](/docs/open-telemetry-collector/open-telemetry-collector-generic.yaml).
|
||||
|
||||
1. Replace the `<your-exporter-here>` section with the correct settings for your trace exporter.
|
||||
- Refer to the OTEL Collector links in the [prerequisites section]({{< ref "#prerequisites" >}}) to determine the correct settings.
|
||||
|
||||
1. Apply the configuration with:
|
||||
|
||||
```sh
|
||||
kubectl apply -f open-telemetry-collector-generic.yaml
|
||||
```
|
||||
|
||||
## Set up Dapr to send traces to OTEL Collector
|
||||
|
||||
Set up a Dapr configuration file to turn on tracing and deploy a tracing exporter component that uses the OpenTelemetry Collector.
|
||||
|
||||
1. Use this [`collector-config.yaml`](/docs/open-telemetry-collector/collector-config.yaml) file to create your own configuration.
|
||||
|
||||
1. Apply the configuration with:
|
||||
|
||||
```sh
|
||||
kubectl apply -f collector-config.yaml
|
||||
```
|
||||
|
||||
## Deploy your app with tracing
|
||||
|
||||
Apply the `appconfig` configuration by adding a `dapr.io/config` annotation to the container that you want to participate in the distributed tracing, as shown in the following example:
|
||||
|
||||
```yaml
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
...
|
||||
spec:
|
||||
...
|
||||
template:
|
||||
metadata:
|
||||
...
|
||||
annotations:
|
||||
dapr.io/enabled: "true"
|
||||
dapr.io/app-id: "MyApp"
|
||||
dapr.io/app-port: "8080"
|
||||
dapr.io/config: "appconfig"
|
||||
```
|
||||
|
||||
{{% alert title="Note" color="primary" %}}
|
||||
If you are using one of the Dapr tutorials, such as [distributed calculator](https://github.com/dapr/quickstarts/tree/master/tutorials/distributed-calculator), the `appconfig` configuration is already configured, so no additional settings are needed.
|
||||
{{% /alert %}}
|
||||
|
||||
You can register multiple tracing exporters at the same time, and the tracing logs are forwarded to all registered exporters.
|
||||
|
||||
That's it! There's no need to include any SDKs or instrument your application code. Dapr automatically handles the distributed tracing for you.
|
||||
|
||||
## View traces
|
||||
|
||||
Deploy and run some applications. Wait for the trace to propagate to your tracing backend and view them there.
|
||||
|
||||
## Related links
|
||||
- Try out the [observability quickstart](https://github.com/dapr/quickstarts/tree/master/tutorials/observability/README.md)
|
||||
- Learn how to set [tracing configuration options]({{< ref "configuration-overview.md#tracing" >}})
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue