mirror of https://github.com/dapr/docs.git
commit 6ac9deebc2
@ -0,0 +1,60 @@
# ------------------------------------------------------------
# Copyright (c) Microsoft Corporation and Dapr Contributors.
# Licensed under the MIT License.
# ------------------------------------------------------------

# This script automerges PRs in Dapr.

import os

from github import Github

g = Github(os.getenv("GITHUB_TOKEN"))
repo = g.get_repo(os.getenv("GITHUB_REPOSITORY"))
maintainers = [m.strip() for m in os.getenv("MAINTAINERS").split(',')]


def fetch_pulls(mergeable_state):
    # Return open PRs in the given mergeable state that carry the 'automerge' label.
    return [pr for pr in repo.get_pulls(state='open', sort='created')
            if pr.mergeable_state == mergeable_state
            and 'automerge' in [l.name for l in pr.labels]]


def is_approved(pr):
    # A PR counts as approved when at least one maintainer with write or
    # admin permission has left an APPROVED review.
    approvers = [r.user.login for r in pr.get_reviews()
                 if r.state == 'APPROVED' and r.user.login in maintainers]
    return any(repo.get_collaborator_permission(a) in ('admin', 'write')
               for a in approvers)


# First, find a PR that can be merged.
pulls = fetch_pulls('clean')
print(f"Detected {len(pulls)} open pull requests in {repo.name} to be automerged.")

merged = False
for pr in pulls:
    if is_approved(pr):
        # Merge only one PR per run.
        print(f"Merging PR {pr.html_url}")
        try:
            pr.merge(merge_method='squash')
            merged = True
            break
        except Exception:
            print(f"Failed to merge PR {pr.html_url}")

if len(pulls) > 0 and not merged:
    print("No PR was automerged.")

# Now, update all PRs that are behind.
pulls = fetch_pulls('behind')
print(f"Detected {len(pulls)} open pull requests in {repo.name} to be updated.")
for pr in pulls:
    if is_approved(pr):
        # Update all PRs since there is no guarantee they will all pass.
        print(f"Updating PR {pr.html_url}")
        try:
            pr.update_branch()
        except Exception:
            print(f"Failed to update PR {pr.html_url}")

# Finally, report PRs labeled for automerge that have merge conflicts.
pulls = fetch_pulls('dirty')
print(f"Detected {len(pulls)} open pull requests in {repo.name} labeled for automerge but in a dirty state.")
for pr in pulls:
    print(f"PR is in dirty state: {pr.html_url}")

print("Done.")
@ -0,0 +1,26 @@
# ------------------------------------------------------------
# Copyright (c) Microsoft Corporation and Dapr Contributors.
# Licensed under the MIT License.
# ------------------------------------------------------------

name: dapr-automerge

on:
  schedule:
    - cron: '*/10 * * * *'
  workflow_dispatch:

jobs:
  automerge:
    if: github.repository_owner == 'dapr'
    name: Automerge and update PRs.
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repo
        uses: actions/checkout@v2
      - name: Install dependencies
        run: pip install PyGithub
      - name: Automerge and update
        env:
          MAINTAINERS: AaronCrawfis,orizohar,msfussell
          GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
        run: python ./.github/scripts/automerge.py
Binary file not shown.

@ -1,20 +0,0 @@
# Dapr presentations

Here you can find previous Dapr presentations, as well as a PowerPoint deck and guidance on how you can give your own Dapr presentation.

## Previous Dapr presentations

| Presentation | Recording | Deck |
|--------------|-----------|------|
| Ignite 2019: Mark Russinovich Presents the Future of Cloud Native Applications | [Link](https://www.youtube.com/watch?v=LAUDVk8PaCY) | [Link](./PastPresentations/2019IgniteCloudNativeApps.pdf)
| Azure Community Live: Build microservice applications using DAPR with Mark Fussell | [Link](https://www.youtube.com/watch?v=CgqI7nen-Ng) | N/A

There are other Dapr resources on the [community](https://github.com/dapr/dapr#community) page.

## Giving a Dapr presentation

- Begin by downloading the [Dapr Presentation Deck](./Dapr%20Presentation%20Deck.pptx). This contains slides and diagrams needed to give a Dapr presentation.
- Next, review the [Docs](../README.md) to make sure you understand the [concepts](../concepts) and [best-practices](../best-practices).
- Use the Dapr [quickstarts](https://github.com/dapr/quickstarts) repo and [samples](https://github.com/dapr/samples) repo to show demos of how to use Dapr.
@ -41,22 +41,22 @@ The [daprdocs](./daprdocs) directory contains the hugo project, markdown files,
```sh
git clone https://github.com/dapr/docs.git
```
3. Change to the daprdocs directory:
```sh
cd ./docs/daprdocs
```
4. Update submodules:
```sh
git submodule update --init --recursive
```
5. Install npm packages:
```sh
npm install
```

## Run local server
1. Make sure you're still in the `daprdocs` directory
2. Run:
```sh
hugo server --disableFastRender
```
@ -6,14 +6,14 @@ weight: 200
description: "Modular best practices accessible over standard HTTP or gRPC APIs"
---

A [building block]({{< ref building-blocks >}}) is an HTTP or gRPC API that can be called from your code and uses one or more Dapr components.

Building blocks address common challenges in building resilient microservices applications and codify best practices and patterns. Dapr consists of a set of building blocks, with extensibility to add new building blocks.
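
For example, here is a minimal sketch of calling the state management building block over the Dapr HTTP API, assuming a local sidecar on the default port 3500 and a configured state store component named `statestore`:

```python
import requests

DAPR_URL = "http://localhost:3500/v1.0"

# Save a value through the state management building block.
requests.post(f"{DAPR_URL}/state/statestore",
              json=[{"key": "order", "value": {"id": 42}}])

# Read it back. The underlying store (Redis, Cosmos DB, etc.) is an
# implementation detail handled by the configured component.
resp = requests.get(f"{DAPR_URL}/state/statestore/order")
print(resp.json())
```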

The diagram below shows how building blocks expose a public API that is called from your code, using components to implement the building blocks' capability.

<img src="/images/concepts-building-blocks.png" width=250>

The following are the building blocks provided by Dapr:

<img src="/images/building_blocks.png" width=1000>

@ -7,7 +7,7 @@ description: "Modular functionality used by building blocks and applications"
---

Dapr uses a modular design where functionality is delivered as a component. Each component has an interface definition. All of the components are pluggable so that you can swap out one component with the same interface for another. The [components contrib repo](https://github.com/dapr/components-contrib) is where you can contribute implementations for the component interfaces and extend Dapr's capabilities.

A building block can use any combination of components. For example the [actors]({{<ref "actors-overview.md">}}) building block and the [state management]({{<ref "state-management-overview.md">}}) building block both use [state components](https://github.com/dapr/components-contrib/tree/master/state). As another example, the [Pub/Sub]({{<ref "pubsub-overview.md">}}) building block uses [Pub/Sub components](https://github.com/dapr/components-contrib/tree/master/pubsub).

You can get a list of the components available in the current hosting environment using the `dapr components` CLI command.
@ -31,7 +31,7 @@ The Dapr runtime SDKs have language specific actor frameworks. The .NET SDK for

### Does Dapr have any SDKs if I want to work with a particular programming language or framework?

To make using Dapr more natural for different languages, it includes [language specific SDKs]({{<ref sdks>}}) for Go, Java, JavaScript, .NET, Python, PHP, Rust and C++.

These SDKs expose the functionality in the Dapr building blocks, such as saving state, publishing an event or creating an actor, through a typed language API rather than calling the http/gRPC API. This enables you to write a combination of stateless and stateful functions and actors all in the language of your choice. And because these SDKs share the Dapr runtime, you get cross-language actor and functions support.
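
For instance, a short sketch with the Python SDK (assuming `pip install dapr`, a running sidecar, and an illustrative store name `statestore`):

```python
from dapr.clients import DaprClient

# Save and read state through typed SDK calls instead of hand-rolling
# HTTP requests against the sidecar.
with DaprClient() as client:
    client.save_state(store_name="statestore", key="order", value="pending")
    state = client.get_state(store_name="statestore", key="order")
    print(state.data)  # b'pending'
```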
@ -9,15 +9,15 @@ description: >

When building an application, understanding how the system is behaving is an important part of operating it - this includes having the ability to observe the internal calls of an application, gauging its performance and becoming aware of problems as soon as they occur. This is challenging for any system, but even more so for a distributed system comprised of multiple microservices, where a flow made of several calls may start in one microservice and continue in another. Observability is critical in production environments, but also useful during development to understand bottlenecks, improve performance and perform basic debugging across the span of microservices.

While some data points about an application can be gathered from the underlying infrastructure (e.g. memory consumption, CPU usage), other meaningful information must be collected from an "application aware" layer - one that can show how an important series of calls is executed across microservices. This usually means a developer must add some code to instrument an application for this purpose. Often, instrumentation code is simply meant to send collected data such as traces and metrics to an external monitoring tool or service that can help store, visualize and analyze all this information.

Having to maintain this code, which is not part of the core logic of the application, is another burden on the developer, sometimes requiring understanding of monitoring tools' APIs, using additional SDKs etc. This instrumentation may also add to the portability challenges of an application, which may require different instrumentation depending on where the application is deployed. For example, different cloud providers offer different monitoring solutions and an on-prem deployment might require an on-prem solution.

## Observability for your application with Dapr
When building an application which leverages Dapr building blocks to perform service-to-service calls and pub/sub messaging, Dapr offers an advantage with respect to [distributed tracing]({{<ref tracing>}}): because this inter-service communication flows through the Dapr sidecar, the sidecar is in a unique position to offload the burden of application-level instrumentation.

### Distributed tracing
Dapr can be [configured to emit tracing data]({{<ref setup-tracing.md>}}), and because Dapr does so using widely adopted protocols such as the [Zipkin](https://zipkin.io) protocol, it can be easily integrated with multiple [monitoring backends]({{<ref supported-tracing-backends>}}).

<img src="/images/observability-tracing.png" width=1000 alt="Distributed tracing with Dapr">
@ -41,4 +41,4 @@ Dapr generates [logs]({{<ref "logs.md">}}) to provide visibility into sidecar op
Metrics are the series of measured values and counts that are collected and stored over time. [Dapr metrics]({{<ref "metrics">}}) provide monitoring capabilities to understand the behavior of the Dapr sidecar and system services. For example, the metrics between a Dapr sidecar and the user application show call latency, traffic failures, error rates of requests etc. Dapr [system services metrics](https://github.com/dapr/dapr/blob/master/docs/development/dapr-metrics.md) show sidecar injection failures, health of the system services including CPU usage, number of actor placements made etc.

### Health checks
The Dapr sidecar exposes an HTTP endpoint for [health checks]({{<ref sidecar-health.md>}}). With this API, user code or hosting environments can probe the Dapr sidecar to determine its status and identify issues with sidecar readiness.
@ -38,7 +38,7 @@ Each of these building blocks is independent, meaning that you can use one, some
| [**Publish and subscribe**]({{<ref "pubsub-overview.md">}}) | Publishing events and subscribing to topics between services enables event-driven architectures to simplify horizontal scalability and make them resilient to failure. Dapr provides an at-least-once message delivery guarantee. |
| [**Resource bindings**]({{<ref "bindings-overview.md">}}) | Resource bindings with triggers build further on event-driven architectures for scale and resiliency by receiving and sending events to and from any external source such as databases, queues, file systems, etc. |
| [**Actors**]({{<ref "actors-overview.md">}}) | A pattern for stateful and stateless objects that makes concurrency simple with method and state encapsulation. Dapr provides many capabilities in its actor runtime, including concurrency, state, life-cycle management for actor activation/deactivation, and timers and reminders to wake up actors. |
| [**Observability**]({{<ref "observability-concept.md">}}) | Dapr emits metrics, logs, and traces to debug and monitor both Dapr and user applications. Dapr supports distributed tracing to easily diagnose inter-service calls in production using the W3C Trace Context standard and Open Telemetry to send to different monitoring tools. |
| [**Secrets**]({{<ref "secrets-overview.md">}}) | Dapr provides secrets management and integrates with public cloud and local secret stores to retrieve the secrets for use in application code. |

## Sidecar architecture
@ -53,9 +53,9 @@ Dapr can be hosted in multiple environments, including self-hosted on a Windows/

### Self-hosted

In [self-hosted mode]({{< ref self-hosted-overview.md >}}) Dapr runs as a separate sidecar process which your service code can call via HTTP or gRPC. Each running service has a Dapr runtime process (or sidecar) which is configured to use state stores, pub/sub, binding components and the other building blocks.

You can use the [Dapr CLI](https://github.com/dapr/cli#launch-dapr-and-your-app) to run a Dapr-enabled application on your local machine. Try this out with the [getting started samples]({{< ref getting-started >}}).

<img src="/images/overview_standalone.png" width=1000 alt="Architecture diagram of Dapr in self-hosted mode">
@ -63,7 +63,7 @@ You can use the [Dapr CLI](https://github.com/dapr/cli#launch-dapr-and-your-app)

In container hosting environments such as Kubernetes, Dapr runs as a sidecar container with the application container in the same pod.

The `dapr-sidecar-injector` and `dapr-operator` services provide first class integration to launch Dapr as a sidecar container in the same pod as the service container and provide notifications of Dapr component updates provisioned into the cluster.

The `dapr-sentry` service is a certificate authority that enables mutual TLS between Dapr sidecar instances for secure data encryption. For more information on the `Sentry` service read the [security overview]({{< ref "security-concept.md#dapr-to-dapr-communication" >}}).
@ -98,7 +98,7 @@ Dapr can be used from any developer framework. Here are some that have been inte
| Language | Frameworks | Description |
|----------|------------|-------------|
| [.NET]({{< ref dotnet >}}) | [ASP.NET]({{< ref dotnet-aspnet.md >}}) | Brings stateful routing controllers that respond to pub/sub events from other services. Can also take advantage of [ASP.NET Core gRPC Services](https://docs.microsoft.com/en-us/aspnet/core/grpc/). |
| [Java](https://github.com/dapr/java-sdk) | [Spring Boot](https://spring.io/) | |
| [Python]({{< ref python >}}) | [Flask]({{< ref python-flask.md >}}) | |
| [Javascript](https://github.com/dapr/js-sdk) | [Express](http://expressjs.com/) | |
| [PHP]({{< ref php >}}) | | You can serve with Apache, Nginx, or Caddyserver. |
@ -12,7 +12,7 @@ This article addresses multiple security considerations when using Dapr in a dis
Several of the areas above are addressed through encryption of data in transit. One of the security mechanisms that Dapr employs for encrypting data in transit is [mutual authentication TLS](https://en.wikipedia.org/wiki/Mutual_authentication) or mTLS. mTLS offers a few key features for network traffic inside your application:

- Two-way authentication - the client proving its identity to the server, and vice-versa
- An encrypted channel for all in-flight communication, after two-way authentication is established

Mutual TLS is useful in almost all scenarios, but especially so for systems subject to regulations such as [HIPAA](https://en.wikipedia.org/wiki/Health_Insurance_Portability_and_Accountability_Act) and [PCI](https://en.wikipedia.org/wiki/Payment_Card_Industry_Data_Security_Standard).
@ -60,7 +60,7 @@ In addition to automatic mTLS between Dapr sidecars, Dapr offers mandatory mTLS

When mTLS is enabled, Sentry writes the root and issuer certificates to a Kubernetes secret that is scoped to the namespace where the control plane is installed. In self hosted mode, Sentry writes the certificates to a configurable filesystem path.

In Kubernetes, when the Dapr system services start, they automatically mount the secret containing the root and issuer certs and use those to secure the gRPC server that is used by the Dapr sidecar.

In self hosted mode, each system service can be mounted to a filesystem path to get the credentials.
@ -10,7 +10,7 @@ description: >
Dapr uses a sidecar architecture, running as a separate process alongside the application, and includes features such as service invocation, network security and distributed tracing. This often raises the question - how does Dapr compare to service mesh solutions such as Linkerd, Istio and Open Service Mesh (OSM)?

## How Dapr and service meshes compare
While Dapr and service meshes do offer some overlapping capabilities, **Dapr is not a service mesh**, where a service mesh is defined as a *networking* service mesh. Unlike a service mesh, which is focused on networking concerns, Dapr is focused on providing building blocks that make it easier for developers to build applications as microservices. Dapr is developer-centric, versus service meshes being infrastructure-centric.

In most cases, developers do not need to be aware that the application they are building will be deployed in an environment which includes a service mesh, since a service mesh intercepts network traffic. Service meshes are mostly managed and deployed by system operators. However, Dapr building block APIs are intended to be used by developers explicitly in their code.
@ -29,7 +29,7 @@ The illustration below captures the overlapping features and unique capabilities
<img src="/images/service-mesh.png" width=1000>

## Using Dapr with a service mesh
Dapr does work with service meshes. In the case where both are deployed together, both Dapr and service mesh sidecars are running in the application environment. In this case, it is recommended to configure only Dapr or only the service mesh to perform mTLS encryption and distributed tracing.

Watch these recordings from the Dapr community calls showing presentations on running Dapr together with different service meshes:
- General overview and a demo of [Dapr and Linkerd](https://youtu.be/xxU68ewRmz8?t=142)
@ -38,7 +38,7 @@ Watch these recordings from the Dapr community calls showing presentations on ru
## When to choose using Dapr, a service mesh or both
Should you be using Dapr, a service mesh or both? The answer depends on your requirements. If, for example, you are looking to use Dapr for one or more building blocks such as state management or pub/sub, and considering using a service mesh just for network security or observability, you may find that Dapr is a good fit and a service mesh is not required.

Typically you would use a service mesh with Dapr where there is a corporate policy that traffic on the network must be encrypted for all applications. For example, you may be using Dapr in only part of your application, and other services and processes that are not using Dapr in your application also need encrypted traffic. In this scenario a service mesh is the better option, and most likely you should use mTLS and distributed tracing on the service mesh and disable this on Dapr.

If you need traffic splitting for A/B testing scenarios you would benefit from using a service mesh, since Dapr does not provide these capabilities.
@ -7,11 +7,11 @@ description: >
Guidelines for contributing to the Dapr Docs
---

This guide contains information about contributions to the [Dapr docs repository](https://github.com/dapr/docs). Please review the guidelines below before making a contribution to the Dapr docs. This guide assumes you have already reviewed the [general guidance]({{< ref contributing-overview>}}) which applies to any Dapr project contributions.

Dapr docs are published to [docs.dapr.io](https://docs.dapr.io). Therefore, any contribution must ensure docs can be compiled and published correctly.

## Prerequisites
The Dapr docs are built using [Hugo](https://gohugo.io/) with the [Docsy](https://docsy.dev) theme. To verify docs are built correctly before submitting a contribution, you should set up your local environment to build and display the docs locally.

Fork the [docs repository](https://github.com/dapr/docs) to work on any changes.
@ -30,14 +30,14 @@ For example, if you are fixing a typo, adding notes, or clarifying a point, make
These conventions should be followed throughout all Dapr documentation to ensure a consistent experience across all docs.

- **Casing** - Use upper case only at the start of a sentence or for proper nouns, including names of technologies (Dapr, Redis, Kubernetes etc.).
- **Headers and titles** - Headers and titles must be descriptive and clear; use sentence casing, i.e. apply the above casing guidance to headers and titles too.
- **Use simple sentences** - Easy-to-read sentences mean the reader can quickly use the guidance you share.
- **Avoid the first person** - Use second person "you", "your" instead of "I", "we", "our".
- **Assume a new developer audience** - Some obvious steps can seem hard. E.g. "Now set an environment variable Dapr to a value X." It is better to give the reader the explicit command to do this, rather than having them figure this out.
- **Use present tense** - Avoid sentences like "this command will install redis", which implies the action is in the future. Instead use "This command installs redis", which is in the present tense.

## Contributing a new docs page
- Make sure the documentation you are writing is in the correct place in the hierarchy.
- Avoid creating new sections where possible; there is a good chance a proper place in the docs hierarchy already exists.
- Make sure to include a complete [Hugo front-matter](#front-matter).
|
|||
```
|
||||
|
||||
### Tabbed content
|
||||
Tabs are made possible through [Hugo shortcodes](https://gohugo.io/content-management/shortcodes/).
|
||||
Tabs are made possible through [Hugo shortcodes](https://gohugo.io/content-management/shortcodes/).
|
||||
|
||||
The overall format is:
|
||||
```
|
||||
|
|
|
@ -7,8 +7,8 @@ description: >
General guidance for contributing to any of the Dapr project repositories
---

Thank you for your interest in Dapr!
This document provides the guidelines for how to contribute to the [Dapr project](https://github.com/dapr) through issues and pull-requests. Contributions can also come in additional ways such as engaging with the community in community calls, commenting on issues or pull requests and more.

See the [Dapr community repository](https://github.com/dapr/community) for more information on community engagement and community membership.
@ -0,0 +1,30 @@
---
type: docs
title: "Giving a presentation on Dapr"
linkTitle: "Presentations"
weight: 1500
description: How to give a presentation on Dapr and examples
---

We welcome community members giving presentations on Dapr and spreading the word about all the awesome Dapr features! We offer a template PowerPoint file to get started.

{{< button text="Download the Dapr Presentation Deck" link="/presentations/dapr-slidedeck.zip" >}}

## Giving a Dapr presentation

- Begin by downloading the [Dapr Presentation Deck](/presentations/dapr-slidedeck.zip). This contains slides and diagrams needed to give a Dapr presentation.
- Next, review the docs to make sure you understand the [concepts]({{< ref concepts >}}).
- Use the Dapr [quickstarts](https://github.com/dapr/quickstarts) repo and [samples](https://github.com/dapr/samples) repo to show demos of how to use Dapr.

## Previous Dapr presentations

| Presentation | Recording | Deck |
|--------------|-----------|------|
| Ignite 2019: Mark Russinovich Presents the Future of Cloud Native Applications | [Link](https://www.youtube.com/watch?v=LAUDVk8PaCY) | [Link](/presentations/2019IgniteCloudNativeApps.pdf)
| Azure Community Live: Build microservice applications using DAPR with Mark Fussell | [Link](https://www.youtube.com/watch?v=CgqI7nen-Ng) | N/A
| Ready 2020: Mark Russinovich Presents Cloud Native Applications | [Link](https://youtu.be/eJCu6a-x9uo?t=1614) | [Link](/presentations/2020ReadyCloudNativeApps.pdf)
| Ignite 2021: Mark Russinovich Presents Dapr v1.0 Release | [Link](https://youtu.be/69PrhWQorEM?t=3789) | N/A

## Additional resources

There are other Dapr resources on the [community](https://github.com/dapr/community) repo.
@ -8,7 +8,7 @@ aliases:
- "/developing-applications/building-blocks/actors/actors-background"
---

## Introduction
The [actor pattern](https://en.wikipedia.org/wiki/Actor_model) describes actors as the lowest-level "unit of computation". In other words, you write your code in a self-contained unit (called an actor) that receives messages and processes them one at a time, without any kind of concurrency or threading.

While your code processes a message, it can send one or more messages to other actors, or create new actors. An underlying runtime manages how, when and where each actor runs, and also routes messages between actors.
|
|||
|
||||
You can provide any data for the actor method in the request body, and the response for the request would be in the response body which is the data from actor call.
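
For example, a minimal sketch of invoking an actor method over HTTP (the actor type, id and method names here are hypothetical, and the sidecar is assumed on the default port 3500):

```python
import requests

# Invoke the (hypothetical) "SetGreeting" method on actor "actor-1" of type
# "MyActorType"; the request body carries the method's input data.
url = "http://localhost:3500/v1.0/actors/MyActorType/actor-1/method/SetGreeting"
resp = requests.post(url, json={"greeting": "hello"})
print(resp.status_code, resp.text)  # the method's return value, if any
```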

Refer to [Dapr Actor Features]({{< ref howto-actors.md >}}) for more details.

### Concurrency
@ -46,7 +46,7 @@ In order to invoke an output binding:

Read the [Use output bindings to interface with external resources]({{< ref howto-bindings.md >}}) page to get started with output bindings.
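
As a quick illustration, a sketch of invoking an output binding over the Dapr HTTP API, assuming a configured binding component named `myqueue` (the payload and metadata values are illustrative):

```python
import requests

# Send data to the external resource behind the "myqueue" output binding.
requests.post(
    "http://localhost:3500/v1.0/bindings/myqueue",
    json={
        "operation": "create",               # an operation the binding supports
        "data": {"orderId": 42},             # payload for the external resource
        "metadata": {"ttlInSeconds": "60"},  # optional, binding-specific metadata
    },
)
```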

## Next Steps
* Follow these guides on:
    * [How-To: Trigger a service from different resources with input bindings]({{< ref howto-triggers.md >}})
    * [How-To: Use output bindings to interface with external resources]({{< ref howto-bindings.md >}})
@ -93,4 +93,4 @@ You can check [here]({{< ref supported-bindings >}}) which operations are suppor

- [Binding API]({{< ref bindings_api.md >}})
- [Binding components]({{< ref bindings >}})
- [Binding detailed specifications]({{< ref supported-bindings >}})
@ -9,9 +9,9 @@ description: Dapr sidecar health checks.
Dapr provides a way to determine its health using an HTTP `/healthz` endpoint.
With this endpoint, the Dapr process, or sidecar, can be probed for its health and hence its readiness and liveness determined. See the [health API]({{< ref health_api.md >}}).
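
A minimal sketch of such a probe, assuming the sidecar's HTTP endpoint on the default port 3500:

```python
import requests

# Probe the sidecar's health API; Dapr returns 204 No Content when healthy.
resp = requests.get("http://localhost:3500/v1.0/healthz")
print("healthy" if resp.status_code == 204 else f"unhealthy: {resp.status_code}")
```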

The Dapr `/healthz` endpoint can be used by health probes from the application hosting platform. This topic describes how Dapr integrates with probes from different hosting platforms.

As a user, when deploying Dapr to a hosting platform (for example Kubernetes), the Dapr health endpoint is automatically configured for you. There is nothing you need to configure.

Note: Dapr actors also have a health API endpoint where Dapr probes the application for a response to a signal from Dapr that the actor application is healthy and running. See [actor health API]({{< ref "actors_api.md#health-check" >}})
@ -24,7 +24,7 @@ For example, liveness probes could catch a deadlock, where an application is run

The kubelet uses readiness probes to know when a container is ready to start accepting traffic. A pod is considered ready when all of its containers are ready. One use of this readiness signal is to control which Pods are used as backends for Kubernetes services. When a pod is not ready, it is removed from Kubernetes service load balancers.

When integrating with Kubernetes, the Dapr sidecar is injected with a Kubernetes probe configuration telling it to use the Dapr healthz endpoint. This is done by the `Sidecar Injector` system service. The integration with the kubelet is shown in the diagram below.

<img src="/images/security-mTLS-dapr-system-services.png" width=600>
@ -98,7 +98,7 @@ f.SpanContextToRequest(traceContext, req)
traceContext := span.SpanContext()
traceContextBinary := propagation.Binary(traceContext)
```

You can then pass the trace context through [gRPC metadata](https://google.golang.org/grpc/metadata) via the `grpc-trace-bin` header.

```go
@ -156,7 +156,7 @@ CORS(app)
@app.route('/dsstatus', methods=['POST'])
def ds_subscriber():
    print(request.json, flush=True)
    return json.dumps({'success':True}), 200, {'ContentType':'application/json'}

app.run()
```
@ -232,7 +232,7 @@ dapr --app-id app1 --app-port 3000 run -- php -S 0.0.0.0:3000 app1.php

{{< /tabs >}}

### Programmatic subscriptions

To subscribe to topics, start a web server in the programming language of your choice and listen on the following `GET` endpoint: `/dapr/subscribe`.
The Dapr instance calls into your app at startup and expects a JSON response for the topic subscriptions with:
@ -265,7 +265,7 @@ def subscribe():
@app.route('/dsstatus', methods=['POST'])
def ds_subscriber():
    print(request.json, flush=True)
    return json.dumps({'success':True}), 200, {'ContentType':'application/json'}

app.run()
```
After creating `app1.py` ensure flask and flask_cors are installed:
@ -296,7 +296,7 @@ app.get('/dapr/subscribe', (req, res) => {
        {
            pubsubname: "pubsub",
            topic: "deathStarStatus",
            route: "dsstatus"
        }
    ]);
})
@ -355,10 +355,10 @@ The `/dsstatus` endpoint matches the `route` defined in the subscriptions and th

To publish a message to a topic, you need to run an instance of a Dapr sidecar to use the pubsub Redis component. You can use the default Redis component installed into your local environment.

Start an instance of Dapr with an app-id called `testpubsub`:

```bash
dapr run --app-id testpubsub --dapr-http-port 3500
```
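
With that sidecar running, here is a minimal sketch of publishing to the `deathStarStatus` topic over its HTTP API, assuming the default `pubsub` component name:

```python
import requests

# Publish a JSON payload to the "deathStarStatus" topic via the sidecar
# started above (listening on HTTP port 3500).
requests.post(
    "http://localhost:3500/v1.0/publish/pubsub/deathStarStatus",
    json={"status": "completed"},
)
```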

{{< tabs "Dapr CLI" "HTTP API (Bash)" "HTTP API (PowerShell)">}}
@ -402,7 +402,7 @@ In order to tell Dapr that a message was processed successfully, return a `200 O
@app.route('/dsstatus', methods=['POST'])
def ds_subscriber():
    print(request.json, flush=True)
    return json.dumps({'success':True}), 200, {'ContentType':'application/json'}
```
{{% /codetab %}}
@ -421,7 +421,7 @@ app.post('/dsstatus', (req, res) => {
{{< tabs Node PHP>}}

{{% codetab %}}
If you prefer publishing a topic using code, here is an example.

```javascript
const express = require('express');
@ -3,7 +3,7 @@ type: docs
title: "Message Time-to-Live (TTL)"
linkTitle: "Message TTL"
weight: 6000
description: "Use time-to-live in Pub/Sub messages."
---

## Introduction
@ -10,7 +10,7 @@ description: "Overview of the Pub/Sub building block"
|
|||
|
||||
The [publish/subscribe pattern](https://en.wikipedia.org/wiki/Publish%E2%80%93subscribe_pattern) allows microservices to communicate with each other using messages. The **producer or publisher** sends messages to a **topic** without knowledge of what application will receive them. This involves writing them to an input channel. Similarly, a **consumer or subscriber** subscribes to the topic and receive its messages without any knowledge of what service produced these messages. This involves receiving messages from an output channel. An intermediary message broker is responsible for copying each message from an input channel to an output channels for all subscribers interested in that message. This pattern is especially useful when you need to decouple microservices from one another.
|
||||
|
||||
The publish/subscribe API in Dapr provides an at-least-once guarantee and integrates with various message brokers and queuing systems. The specific implementation used by your service is pluggable and configured as a Dapr pub/sub component at runtime. This approach removes the dependency from your service and, as a result, makes your service more portable and flexible to changes.
|
||||
The publish/subscribe API in Dapr provides an at-least-once guarantee and integrates with various message brokers and queuing systems. The specific implementation used by your service is pluggable and configured as a Dapr pub/sub component at runtime. This approach removes the dependency from your service and, as a result, makes your service more portable and flexible to changes.
|
||||
|
||||
The complete list of Dapr pub/sub components is [here]({{< ref supported-pubsub >}}).
|
||||
|
||||
|
@ -62,13 +62,13 @@ The following example shows an XML content in CloudEvent v1.0 serialized as JSON
}
```

### Message subscription

Dapr applications can subscribe to published topics. Dapr allows two methods by which your applications can subscribe to topics:

- **Declarative**, where a subscription is defined in an external file,
- **Programmatic**, where a subscription is defined in the user code.

Both declarative and programmatic approaches support the same features. The declarative approach removes the Dapr dependency from your code and allows for existing applications to subscribe to topics, without having to change code. The programmatic approach implements the subscription in your code.

For more information read [How-To: Publish a message and subscribe to a topic]({{< ref howto-publish-subscribe >}}).
@ -89,7 +89,7 @@ The burden of dealing with concepts like consumer groups and multiple applicatio
<img src="/images/pubsub-overview-pattern-competing-consumers.png" width=1000>
<br></br>

Similarly, if two different applications (different app-IDs) subscribe to the same topic, Dapr delivers each message to *only one instance of **each** application*.

### Topic scoping
@ -3,7 +3,7 @@ type: docs
title: "Scope Pub/Sub topic access"
linkTitle: "Scope topic access"
weight: 5000
description: "Use scopes to limit Pub/Sub topics to specific applications"
---

## Introduction
@ -34,7 +34,7 @@ To use this topic scoping three metadata properties can be set for a pub/sub com
- If `allowedTopics` is not set (default behavior), all topics are valid. `subscriptionScopes` and `publishingScopes` still take place if present.
- `publishingScopes` or `subscriptionScopes` can be used in conjunction with `allowedTopics` to add granular limitations.

These metadata properties can be used for all pub/sub components. The following examples use Redis as the pub/sub component.

## Example 1: Scope topic access
@ -6,7 +6,7 @@ weight: 2000
description: "Use the secret store building block to securely retrieve a secret"
---

This article provides guidance on using Dapr's secrets API in your code to leverage the [secrets store building block]({{<ref secrets-overview>}}). The secrets API allows you to easily retrieve secrets in your application code from a configured secret store.
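
As a preview of what that looks like, a minimal sketch of fetching a secret over the Dapr HTTP API (the store name `my-secret-store` and key `db-password` are illustrative, and a sidecar is assumed on the default port 3500):

```python
import requests

# Retrieve the "db-password" secret from the configured secret store.
resp = requests.get(
    "http://localhost:3500/v1.0/secrets/my-secret-store/db-password"
)
print(resp.json())  # a JSON map of secret name(s) to value(s)
```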

## Set up a secret store
@ -77,7 +77,7 @@ func main() {

	res, err := http.Get(url)
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()
@ -30,11 +30,11 @@ For example, the diagram below shows an application requesting the secret called

<img src="/images/secrets-overview-cloud-stores.png" width=600>

Applications can use the secrets API to access secrets from a Kubernetes secret store. In the example below, the application retrieves the same secret "mysecret" from a Kubernetes secret store.

<img src="/images/secrets-overview-kubernetes-store.png" width=600>

In Azure, Dapr can be configured to use Managed Identities to authenticate with Azure Key Vault in order to retrieve secrets. In the example below, an Azure Kubernetes Service (AKS) cluster is configured to use managed identities. Then Dapr uses [pod identities](https://docs.microsoft.com/en-us/azure/aks/operator-best-practices-identity#use-pod-identities) to retrieve secrets from Azure Key Vault on behalf of the application.

<img src="/images/secrets-overview-azure-aks-keyvault.png" width=600>
@ -46,7 +46,7 @@ For detailed API information read [Secrets API]({{< ref secrets_api.md >}}).

## Referencing secret stores in Dapr components

When configuring Dapr components such as state stores, it is often required to include credentials in component files. Instead of doing that, you can place the credentials within a Dapr supported secret store and reference the secret within the Dapr component. This is the preferred approach and a recommended best practice, especially in production environments.

For more information read [referencing secret stores in components]({{< ref component-secrets.md >}}).
@ -9,7 +9,7 @@ type: docs

You can read [guidance on setting up secret store components]({{< ref setup-secret-store >}}) to configure a secret store for an application. Once configured, by default *any* secret defined within that store is accessible from the Dapr application.

To limit the secrets to which the Dapr application has access, you can define secret scopes by adding a secret scope policy to the application configuration with restrictive permissions. Follow [these instructions]({{< ref configuration-concept.md >}}) to define an application configuration.

The secret scoping policy applies to any [secret store]({{< ref supported-secret-stores.md >}}), whether that is a local secret store, a Kubernetes secret store or a public cloud secret store. For details on how to set up [secret stores]({{< ref setup-secret-store.md >}}) read [How To: Retrieve a secret]({{< ref howto-secrets.md >}}).
@ -34,13 +34,13 @@ spec:
  defaultAccess: deny
```

For applications that need to be denied access to the Kubernetes secret store, follow [these instructions]({{< ref kubernetes-overview.md >}}), and add the following annotation to the application pod.

```yaml
dapr.io/config: appconfig
```

With this defined, the application no longer has access to any secrets in the Kubernetes secret store.

## Scenario 2: Allow access to only certain secrets in a secret store
@ -140,6 +140,6 @@ The example above showed you how to directly invoke a different service running
For more information on tracing and logs see the [observability]({{< ref observability-concept.md >}}) article.

## Related Links

* [Service invocation overview]({{< ref service-invocation-overview.md >}})
* [Service invocation API specification]({{< ref service_invocation_api.md >}})
@ -8,7 +8,7 @@ description: "Overview of the service invocation building block"
|
|||
|
||||
## Introduction
|
||||
|
||||
Using service invocation, your application can reliably and securely communicate with other applications using the standard [gRPC](https://grpc.io) or [HTTP](https://www.w3.org/Protocols/) protocols.
|
||||
Using service invocation, your application can reliably and securely communicate with other applications using the standard [gRPC](https://grpc.io) or [HTTP](https://www.w3.org/Protocols/) protocols.
|
||||
|
||||
In many environments with multiple services that need to communicate with each other, developers often ask themselves the following questions:
|
||||
|
||||
|
@ -21,15 +21,15 @@ Dapr addresses these challenges by providing a service invocation API that acts

Dapr uses a sidecar architecture. To invoke an application using Dapr, you use the `invoke` API on any Dapr instance. The sidecar programming model encourages each application to talk to its own instance of Dapr. The Dapr instances discover and communicate with one another.

### Service invocation

The diagram below is an overview of how Dapr's service invocation works.

<img src="/images/service-invocation-overview.png" width=800 alt="Diagram showing the steps of service invocation">

1. Service A makes an HTTP or gRPC call targeting Service B. The call goes to the local Dapr sidecar.
2. Dapr discovers Service B's location using the [name resolution component](https://github.com/dapr/components-contrib/tree/master/nameresolution) which is running on the given [hosting platform]({{< ref "hosting" >}}).
3. Dapr forwards the message to Service B's Dapr sidecar.

**Note**: All calls between Dapr sidecars go over gRPC for performance. Only calls between services and Dapr sidecars can be either HTTP or gRPC.
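
For instance, step 1 from Service A's point of view might look like this sketch over HTTP, assuming Service B runs with the app id `nodeapp` and exposes a hypothetical `neworder` method, with Service A's sidecar on port 3500:

```python
import requests

# Service A calls Service B ("nodeapp") through its own local sidecar;
# Dapr handles discovery, mTLS and forwarding to Service B's sidecar.
resp = requests.post(
    "http://localhost:3500/v1.0/invoke/nodeapp/method/neworder",
    json={"orderId": 42},
)
print(resp.status_code, resp.text)
```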
@ -43,7 +43,7 @@ Service invocation provides several features to make it easy for you to call met

### Namespaces scoping

Service invocation supports calls across namespaces. On all supported hosting platforms, Dapr app IDs conform to a valid FQDN format that includes the target namespace.

For example, the following string contains the app ID `nodeapp` in addition to the namespace the app runs in, `production`.
@ -69,8 +69,8 @@ Applications can control which other applications are allowed to call them and w

For more information read the [access control allow lists for service invocation]({{< ref invoke-allowlist.md >}}) article.

#### Example service invocation security
The diagram below is an example deployment on a Kubernetes cluster with a Daprized `Ingress` service that calls onto `Service A` using service invocation with mTLS encryption and applies access control policies. `Service A` then calls onto `Service B` also using service invocation and mTLS. Each service is running in a different namespace for added isolation.

<img src="/images/service-invocation-security.png" width=800>
@ -84,7 +84,7 @@ Errors that cause retries are:
* Authentication errors due to a renewing certificate on the calling/callee Dapr sidecars.

Per-call retries are performed with a backoff interval of 1 second, up to a threshold of 3 times.
Connection establishment via gRPC to the target sidecar has a timeout of 5 seconds.
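
Conceptually, that policy behaves like this sketch (illustrative only; the real retry logic lives inside the Dapr runtime, and the function and exception names here are hypothetical):

```python
import time

def invoke_with_dapr_like_retries(call, attempts=3, backoff_seconds=1):
    # Model of the per-call policy described above: up to 3 attempts,
    # with a 1 second backoff between them.
    for attempt in range(attempts):
        try:
            return call()
        except ConnectionError:
            if attempt == attempts - 1:
                raise
            time.sleep(backoff_seconds)
```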

### Pluggable service discovery
@ -107,7 +107,7 @@ By default, all calls between applications are traced and metrics are gathered t

### Service invocation API

The API for service invocation can be found in the [service invocation API reference]({{< ref service_invocation_api.md >}}) which describes how to invoke a method on another service.

## Example
Following the above call sequence, suppose you have the applications as described in the [hello world quickstart](https://github.com/dapr/quickstarts/blob/master/hello-world/README.md), where a python app invokes a node.js app. In such a scenario, the python app would be "Service A", and a Node.js app would be "Service B".
@ -165,7 +165,7 @@ $app = \Dapr\App::create();
$app->run(function(\Dapr\State\StateManager $stateManager, \Psr\Log\LoggerInterface $logger) {
    $stateManager->save_state(store_name: 'statestore', item: new \Dapr\State\StateItem(
        key: 'myFirstKey',
        value: 'myFirstValue'
    ));
    $logger->alert('State has been stored');
@ -277,20 +277,20 @@ $app = \Dapr\App::create();
$app->run(function(\Dapr\State\StateManager $stateManager, \Psr\Log\LoggerInterface $logger) {
    $stateManager->save_state(store_name: 'statestore', item: new \Dapr\State\StateItem(
        key: 'myFirstKey',
        value: 'myFirstValue'
    ));
    $logger->alert('State has been stored');

    $data = $stateManager->load_state(store_name: 'statestore', key: 'myFirstKey')->value;
    $logger->alert("Got value: {data}", ['data' => $data]);

    $stateManager->delete_keys(store_name: 'statestore', keys: ['myFirstKey']);
    $data = $stateManager->load_state(store_name: 'statestore', key: 'myFirstKey')->value;
    $logger->alert("Got value after delete: {data}", ['data' => $data]);
});
```

Now run it with:

```bash
dapr --app-id myapp run -- php state-example.php
@ -394,7 +394,7 @@ You're up and running! Both Dapr and your app logs will appear here.

{{% codetab %}}

To batch load and save state with PHP, just create a "Plain Ole' PHP Object" (POPO) and annotate it with the StateStore annotation.

Update the `state-example.php` file:
@ -97,7 +97,7 @@ Direct queries of the state store are not governed by Dapr concurrency control,

### State management API

The API for state management can be found in the [state management API reference]({{< ref state_api.md >}}) which describes how to retrieve, save and delete state values by providing keys.

## Next steps

* Follow these guides on:
@ -34,7 +34,7 @@ For versions [2020.1](https://www.jetbrains.com/help/idea/2020.1/tuning-the-ide.

```powershell
%USERPROFILE%\AppData\Roaming\JetBrains\IntelliJIdea2020.1\tools\
```

{{% /codetab %}}
@ -48,7 +48,7 @@ For versions [2020.1](https://www.jetbrains.com/help/idea/2020.1/tuning-the-ide.

{{% codetab %}}
```shell
~/Library/Application Support/JetBrains/IntelliJIdea2020.1/tools/
```
{{% /codetab %}}
@ -45,7 +45,7 @@ Dapr has pre-built Docker remote containers for each of the language SDKs. You c

<br /><img src="/images/vscode-remotecontainers-addcontainer.png" alt="Screenshot of adding a remote container" width="700">
3. Type `dapr` to filter the list to available Dapr remote containers and choose the language container that matches your application. Note you may need to select `Show All Definitions...`
<br /><img src="/images/vscode-remotecontainers-daprcontainers.png" alt="Screenshot of adding a Dapr container" width="700">
4. Follow the prompts to rebuild your application in the container.
<br /><img src="/images/vscode-remotecontainers-reopen.png" alt="Screenshot of reopening an application in the dev container" width="700">

#### Example
@ -8,7 +8,7 @@ weight: 2000

Dapr, with its modular building-block approach, along with the 10+ different [pub/sub components]({{< ref pubsub >}}), makes it easy to write message processing applications. Since Dapr can run in many environments (e.g. VM, bare-metal, Cloud, or Edge) the autoscaling of Dapr applications is managed by the hosting layer.

For Kubernetes, Dapr integrates with [KEDA](https://github.com/kedacore/keda), an event driven autoscaler for Kubernetes. Many of Dapr's pub/sub components overlap with the scalers provided by [KEDA](https://github.com/kedacore/keda), so it's easy to configure your Dapr deployment on Kubernetes to autoscale based on back pressure using KEDA.

This how-to walks through the configuration of a scalable Dapr application, along with the back pressure on a Kafka topic; however, you can apply this approach to any [pub/sub components]({{< ref pubsub >}}) offered by Dapr.
@ -60,7 +60,7 @@ kubectl -n kafka exec -it kafka-client -- kafka-topics \

  --if-not-exists
```

## Deploy a Dapr Pub/Sub component

Next, we'll deploy the Dapr Kafka pub/sub component for Kubernetes. Paste the following YAML into a file named `kafka-pubsub.yaml`:
@ -81,9 +81,9 @@ spec:

    value: autoscaling-subscriber
```

The above YAML defines the pub/sub component that your application subscribes to, using the `demo-topic` we created above. If you used the Kafka Helm install instructions above you can leave the `brokers` value as is. Otherwise, change this to the connection string to your Kafka brokers.

Also notice the `autoscaling-subscriber` value set for `consumerID`, which is used later to make sure that KEDA and your deployment use the same [Kafka partition offset](http://cloudurable.com/blog/kafka-architecture-topics/index.html#:~:text=Kafka%20continually%20appended%20to%20partitions,fit%20on%20a%20single%20server.).

Now, deploy the component to the cluster:
@ -93,7 +93,7 @@ kubectl apply -f kafka-pubsub.yaml

## Deploy KEDA autoscaler for Kafka

Next, we will deploy the KEDA scaling object that monitors the lag on the specified Kafka topic and configures the Kubernetes Horizontal Pod Autoscaler (HPA) to scale your Dapr deployment in and out.

Paste the following into a file named `kafka_scaler.yaml`, and configure your Dapr deployment in the required place:
@ -127,7 +127,7 @@ A few things to review here in the above file:

* Similarly the `bootstrapServers` should be set to the same broker connection string used in the `kafka-pubsub.yaml` file
* The `consumerGroup` should be set to the same value as the `consumerID` in the `kafka-pubsub.yaml` file

> Note: setting the connection string, topic, and consumer group to the *same* values for both the Dapr service subscription and the KEDA scaler configuration is critical to ensure the autoscaling works correctly.
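To make the relationship concrete, the trigger section of such a scaler might look like the following sketch; the broker address and `lagThreshold` are illustrative assumptions, while the topic and consumer group echo the values above:

```yaml
triggers:
- type: kafka
  metadata:
    topic: demo-topic                       # same topic as the pub/sub component
    bootstrapServers: my-cluster-kafka-bootstrap.kafka.svc.cluster.local:9092  # assumed broker address
    consumerGroup: autoscaling-subscriber   # must match the consumerID above
    lagThreshold: "5"                       # illustrative scaling threshold
```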
Next, deploy the KEDA scaler to Kubernetes:
@ -14,16 +14,16 @@ All Dapr components using various AWS services (DynamoDB, SQS, S3, etc) use a st

None of the following attributes are required, since the AWS SDK may be configured using the default provider chain described in the link above. It's important to test the component configuration and inspect the log output from the Dapr runtime to ensure that components initialize correctly.

- `region`: Which AWS region to connect to. In some situations (when running Dapr in self-hosted mode, for example) this flag can be provided by the environment variable `AWS_REGION`. Since Dapr sidecar injection doesn't allow configuring environment variables on the Dapr sidecar, it is recommended to always set the `region` attribute in the component spec.
- `endpoint`: The endpoint is normally handled internally by the AWS SDK. However, in some situations it might make sense to set it locally - for example if developing against [DynamoDB Local](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DynamoDBLocal.html).
- `accessKey`: AWS Access key id.
- `secretKey`: AWS Secret access key. Use together with `accessKey` to explicitly specify credentials.
- `sessionToken`: AWS Session token. Used together with `accessKey` and `secretKey`. When using a regular IAM user's access key and secret, a session token is normally not required.
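As a sketch of how these attributes are supplied in practice, they appear as metadata entries in a component manifest; the component name and the placeholder values below are assumptions for illustration:

```yaml
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: my-aws-component        # placeholder name
spec:
  type: state.aws.dynamodb      # any AWS-backed component takes the same attributes
  version: v1
  metadata:
  - name: region
    value: us-east-1
  - name: accessKey
    value: "<AWS access key id>"
  - name: secretKey
    value: "<AWS secret access key>"
```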
## Alternatives to explicitly specifying credentials in component manifest files

In production scenarios, it is recommended to use a solution such as [Kiam](https://github.com/uswitch/kiam) or [Kube2iam](https://github.com/jtblin/kube2iam). If running on AWS EKS, you can [link an IAM role to a Kubernetes service account](https://docs.aws.amazon.com/eks/latest/userguide/create-service-account-iam-policy-and-role.html), which your pod can use.

All of these solutions solve the same problem: they allow the Dapr runtime process (or sidecar) to retrieve credentials dynamically, so that explicit credentials aren't needed. This provides several benefits, such as automated key rotation, and avoiding having to manage secrets.

Both Kiam and Kube2IAM work by intercepting calls to the [instance metadata service](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html).
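For the EKS option, the link between the IAM role and the pod is a service account annotation. A minimal sketch, where the service account name, account ID and role name are placeholders:

```yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: my-dapr-app   # placeholder service account name
  annotations:
    # placeholder role ARN; the role must trust the cluster's OIDC provider
    eks.amazonaws.com/role-arn: arn:aws:iam::123456789012:role/my-dapr-role
```

Pods running under this service account receive temporary credentials automatically, so the component manifest needs no keys at all.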
@ -31,16 +31,16 @@ Both Kiam and Kube2IAM work by intercepting calls to the [instance metadata serv

If running Dapr directly on an AWS EC2 instance in stand-alone mode, instance profiles can be used. Simply configure an IAM role and [attach it to the instance profile](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_instance-profiles.html) for the EC2 instance, and Dapr should be able to authenticate to AWS without specifying credentials in the Dapr component manifest.

## Authenticating to AWS when running Dapr locally in stand-alone mode

When running Dapr (or the Dapr runtime directly) in stand-alone mode, you have the option of injecting environment variables into the process like this (on Linux/MacOS):

```bash
FOO=bar daprd --app-id myapp
```

If you have [configured named AWS profiles](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-profiles.html) locally, you can tell Dapr (or the Dapr runtime) which profile to use by specifying the "AWS_PROFILE" environment variable:
```bash
AWS_PROFILE=myprofile dapr run...
```

or

```bash
AWS_PROFILE=myprofile daprd...
```
@ -55,7 +55,7 @@ If using AwsHelper, start Dapr like this:

```bash
AWS_PROFILE=myprofile awshelper dapr run...
```

or

```bash
AWS_PROFILE=myprofile awshelper daprd...
```
@ -82,7 +82,7 @@ import (

	// just for this demo
	ctx := context.Background()
	data := []byte("ping")

	// create the client
	client, err := dapr.NewClient()
	if err != nil {
@ -0,0 +1,29 @@

---
type: docs
title: "Running Dapr and Open Service Mesh together"
linkTitle: "Open Service Mesh"
weight: 4000
description: "Learn how to run both Open Service Mesh and Dapr on the same Kubernetes cluster"
---

## Overview

[Open Service Mesh (OSM)](https://openservicemesh.io/) is a lightweight, extensible, cloud native service mesh that allows users to uniformly manage, secure, and get out-of-the-box observability features for highly dynamic microservice environments.

{{< button text="Learn more" link="https://openservicemesh.io/" >}}

## Dapr integration

Users are able to leverage both OSM SMI traffic policies and Dapr capabilities on the same Kubernetes cluster. Visit [this guide](https://docs.openservicemesh.io/docs/integrations/demo_dapr/) to get started.

{{< button text="Deploy OSM and Dapr" link="https://docs.openservicemesh.io/docs/integrations/demo_dapr/" >}}

## Example

Watch the OSM team present the OSM and Dapr integration in the 05/18/2021 community call:

<iframe width="560" height="315" src="https://www.youtube.com/embed/LSYyTL0nS8Y?start=1916" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>

## Additional resources

- [Dapr and service meshes]({{< ref service-mesh.md >}})
@ -6,4 +6,224 @@ description: "Learn how to build workflows using Dapr Workflows and Logic Apps"

weight: 4000
---

To enable developers to easily build workflow applications that use Dapr’s capabilities, including diagnostics and multi-language support, you can use Dapr Workflows. Dapr integrates with workflow engines such as the Logic Apps runtime. For more information read [cloud-native workflows using Dapr and Logic Apps](https://cloudblogs.microsoft.com/opensource/2020/05/26/announcing-cloud-native-workflows-dapr-logic-apps/) and visit the [Dapr workflow](https://github.com/dapr/workflows) repo to try out the samples.

Dapr Workflows is a lightweight host that allows developers to run cloud-native workflows locally, on-premises or in any cloud environment using the [Azure Logic Apps](https://docs.microsoft.com/en-us/azure/logic-apps/logic-apps-overview) workflow engine and Dapr.

## Benefits

By using a workflow engine, business logic can be defined in a declarative, no-code fashion so application code doesn't need to change when a workflow changes. Dapr Workflows allows you to use workflows in a distributed application along with these added benefits:

- **Run workflows anywhere**: on your local machine, on-premises, on Kubernetes or in the cloud
- **Built-in observability**: tracing, metrics and mTLS through Dapr
- **gRPC and HTTP endpoints** for your workflows
- Kick off workflows based on **Dapr bindings** events
- Orchestrate complex workflows by **calling back to Dapr** to save state, publish a message and more

<img src="/images/workflows-diagram.png" width=500 alt="Diagram of Dapr Workflows">
## How it works

Dapr Workflows hosts a gRPC server that implements the Dapr Client API.

This allows users to start workflows using gRPC and HTTP endpoints through Dapr, or start a workflow asynchronously using Dapr bindings.
Once a workflow request comes in, Dapr Workflows uses the Logic Apps SDK to execute the workflow.
## Supported workflow features

### Supported actions and triggers

- [HTTP](https://docs.microsoft.com/en-us/azure/connectors/connectors-native-http)
- [Schedule](https://docs.microsoft.com/en-us/azure/logic-apps/concepts-schedule-automated-recurring-tasks-workflows)
- [Request / Response](https://docs.microsoft.com/en-us/azure/connectors/connectors-native-reqres)

### Supported control workflows

- [All control workflows](https://docs.microsoft.com/en-us/azure/connectors/apis-list#control-workflow)

### Supported data manipulation

- [All data operations](https://docs.microsoft.com/en-us/azure/connectors/apis-list#manage-or-manipulate-data)

### Not supported

- [Managed connectors](https://docs.microsoft.com/en-us/azure/connectors/apis-list#managed-connectors)

## Example

Dapr Workflows can be used as the orchestrator for many otherwise complex activities. For example, invoking an external endpoint, saving the data to a state store, publishing the result to a different app or invoking a binding can all be done by calling back into Dapr from the workflow itself.

This is due to the fact that Dapr runs as a sidecar next to the workflow host, just as if it was any other app.

Examine [workflow2.json](/code/workflow.json) as an example of a workflow that does the following:

1. Calls into Azure Functions to get a JSON response
2. Saves the result to a Dapr state store
3. Sends the result to a Dapr binding
4. Returns the result to the caller

Since Dapr supports many pluggable state stores and bindings, the workflow becomes portable between different environments (cloud, edge or on-premises) without the user changing the code - *because there is no code involved*.
## Get started

Prerequisites:

1. Install the [Dapr CLI]({{< ref install-dapr-cli.md >}})
2. [Azure blob storage account](https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-create-account-block-blob?tabs=azure-portal)

### Self-hosted

1. Make sure you have the Dapr runtime initialized:

   ```bash
   dapr init
   ```

1. Set up the environment variables containing the Azure Storage Account credentials:

   {{< tabs Windows "macOS/Linux" >}}

   {{% codetab %}}
   ```bash
   set STORAGE_ACCOUNT_KEY=<YOUR-STORAGE-ACCOUNT-KEY>
   set STORAGE_ACCOUNT_NAME=<YOUR-STORAGE-ACCOUNT-NAME>
   ```
   {{% /codetab %}}

   {{% codetab %}}
   ```bash
   export STORAGE_ACCOUNT_KEY=<YOUR-STORAGE-ACCOUNT-KEY>
   export STORAGE_ACCOUNT_NAME=<YOUR-STORAGE-ACCOUNT-NAME>
   ```
   {{% /codetab %}}

   {{< /tabs >}}

1. Move to the workflows directory and run the sample runtime:

   ```bash
   cd src/Dapr.Workflows

   dapr run --app-id workflows --protocol grpc --port 3500 --app-port 50003 -- dotnet run --workflows-path ../../samples
   ```

1. Invoke a workflow:

   ```bash
   curl http://localhost:3500/v1.0/invoke/workflows/method/workflow1

   {"value":"Hello from Logic App workflow running with Dapr!"}
   ```
### Kubernetes

1. Make sure you have a running Kubernetes cluster and `kubectl` in your path.

1. Once you have the Dapr CLI installed, run:

   ```bash
   dapr init --kubernetes
   ```

1. Wait until the Dapr pods have the status `Running`.

1. Create a Config Map for the workflow:

   ```bash
   kubectl create configmap workflows --from-file ./samples/workflow1.json
   ```

1. Create a secret containing the Azure Storage Account credentials. Replace the account name and key values below with the actual credentials:

   ```bash
   kubectl create secret generic dapr-workflows --from-literal=accountName=<YOUR-STORAGE-ACCOUNT-NAME> --from-literal=accountKey=<YOUR-STORAGE-ACCOUNT-KEY>
   ```

1. Deploy Dapr Workflows:

   ```bash
   kubectl apply -f deploy/deploy.yaml
   ```

1. Create a port-forward to the dapr workflows container:

   ```bash
   kubectl port-forward deploy/dapr-workflows-host 3500:3500
   ```

1. Invoke Logic Apps through Dapr:

   ```bash
   curl http://localhost:3500/v1.0/invoke/workflows/method/workflow1

   {"value":"Hello from Logic App workflow running with Dapr!"}
   ```
## Invoking workflows using Dapr bindings

1. First, create any [Dapr binding]({{< ref components-reference >}}) of your choice. See [this]({{< ref howto-triggers >}}) How-To tutorial.

   In order for Dapr Workflows to be able to start a workflow from a Dapr binding event, simply name the binding with the name of the workflow you want it to trigger.

   Here's an example of a Kafka binding that will trigger a workflow named `workflow1`:

   ```yaml
   apiVersion: dapr.io/v1alpha1
   kind: Component
   metadata:
     name: workflow1
   spec:
     type: bindings.kafka
     metadata:
     - name: topics
       value: topic1
     - name: brokers
       value: localhost:9092
     - name: consumerGroup
       value: group1
     - name: authRequired
       value: "false"
   ```

1. Next, apply the Dapr component:

   {{< tabs Self-hosted Kubernetes >}}

   {{% codetab %}}
   Place the binding yaml file above in a `components` directory at the root of your application.
   {{% /codetab %}}

   {{% codetab %}}
   ```bash
   kubectl apply -f my_binding.yaml
   ```
   {{% /codetab %}}

   {{< /tabs >}}
1. Once an event is sent to the bindings component, check the logs of Dapr Workflows to see the output.

   {{< tabs Self-hosted Kubernetes >}}

   {{% codetab %}}
   In standalone mode, the output will be printed to the local terminal.
   {{% /codetab %}}

   {{% codetab %}}
   On Kubernetes, run the following command:

   ```bash
   kubectl logs -l app=dapr-workflows-host -c host
   ```
   {{% /codetab %}}

   {{< /tabs >}}
## Example

Watch an example from the Dapr community call:

<iframe width="560" height="315" src="https://www.youtube.com/embed/7fP-0Ixmi-w?start=116" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>

## Additional resources

- [Blog announcement](https://cloudblogs.microsoft.com/opensource/2020/05/26/announcing-cloud-native-workflows-dapr-logic-apps/)
- [Repo](https://github.com/dapr/workflows)
@ -73,7 +73,7 @@ func GetHandler(metadata Metadata) fasthttp.RequestHandler {

## Adding new middleware components

Your middleware component can be contributed to the [components-contrib repository](https://github.com/dapr/components-contrib/tree/master/middleware).

After the components-contrib change has been accepted, submit another pull request against the [Dapr runtime repository](https://github.com/dapr/dapr) to register the new middleware type. You'll need to modify the **[runtime.WithHTTPMiddleware](https://github.com/dapr/dapr/blob/f4d50b1369e416a8f7b93e3e226c4360307d1313/cmd/daprd/main.go#L394-L424)** method in [cmd/daprd/main.go](https://github.com/dapr/dapr/blob/master/cmd/daprd/main.go) to register your middleware with Dapr's runtime.
@ -34,7 +34,7 @@ spec:

  - name: authHeaderName
    value: "authorization"
  - name: forceHTTPS
    value: "false"
```

## Spec metadata fields

| Field | Details | Example |
@ -198,7 +198,7 @@ type Result struct {

	// Whether to allow or deny the incoming request
	allow bool
	// Overrides denied response status code; Optional
	status_code int
	// Sets headers on allowed request or denied response; Optional
	additional_headers map[string]string
}
@ -37,8 +37,8 @@ The Dapr SDKs are the easiest way for you to get Dapr into your application. Cho

| [Go](https://github.com/dapr/go-sdk) | Stable | ✔ | ✔ | |
| [PHP]({{< ref php >}}) | Stable | ✔ | ✔ | ✔ |
| [C++](https://github.com/dapr/cpp-sdk) | In development | ✔ | |
| [Rust](https://github.com/dapr/rust-sdk) | In development | ✔ | | |
| [Javascript](https://github.com/dapr/js-sdk) | In development | ✔ | |

## Further reading
@ -49,7 +49,7 @@ You can use [Helm](https://helm.sh/) to quickly create a Redis instance in our K

2. Run `kubectl get pods` to see the Redis containers now running in your cluster:

```bash
$ kubectl get pods
NAME             READY   STATUS    RESTARTS   AGE
redis-master-0   1/1     Running   0          69s
redis-slave-0    1/1     Running   0          69s
@ -125,7 +125,7 @@ spec:

    secretKeyRef:
      name: redis
      key: redis-password
```

This example uses the Kubernetes secret that was created when setting up a cluster with the above instructions.
@ -5,9 +5,9 @@ linkTitle: "Define a component"

weight: 40
---

In the [previous step]({{<ref get-started-api.md>}}) you called the Dapr HTTP API to store and retrieve a state from a Redis backed state store. Dapr knew to use the Redis instance that was configured locally on your machine through default component definition files that were created when Dapr was initialized.

When building an app, you most likely would create your own component file definitions depending on the building block and specific component that you'd like to use.

As an example of how to define custom components for your application, you will now create a component definition file to interact with the [secrets building block]({{< ref secrets >}}).
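As a sketch of where this step is heading, a local file based secret store component can be declared like this; the component name and secrets file path are assumptions for illustration:

```yaml
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: my-secret-store        # assumed component name
spec:
  type: secretstores.local.file
  version: v1
  metadata:
  - name: secretsFile
    value: my-secrets.json     # assumed path to a local JSON secrets file
```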
@ -72,13 +72,13 @@ The output should look like this:

```md
         __
    ____/ /___ _____  _____
   / __  / __ '/ __ \/ ___/
  / /_/ / /_/ / /_/ / /
  \__,_/\__,_/ .___/_/
            /_/

===============================
Distributed Application Runtime
@ -10,14 +10,14 @@ description: "The component certification lifecycle from submission to productio

Dapr uses a modular design where functionality is delivered as a component. Each component has an interface definition. All of the components are pluggable so that in ideal scenarios, you can swap out one component with the same interface for another. Each component that is used in production needs to maintain a certain set of technical requirements that ensure the functional compatibility and robustness of the component.

In general a component needs to be:
- compliant with the defined Dapr interfaces
- functionally correct and robust
- well documented and maintained

To make sure a component conforms to the standards set by Dapr, there are a set of tests run against a component in a Dapr maintainers managed environment. Once the tests pass consistently, the maturity level can be determined for a component.

## Certification levels

The levels are as follows:
- [Alpha](#alpha)
@ -48,15 +48,15 @@ All components start at the Alpha stage.

- A GA component has a maintainer in the Dapr community or the Dapr maintainers
- The component is well documented, tested and maintained across multiple versions of components-contrib repo

## Conformance tests

Each component in the [components-contrib](https://github.com/dapr/components-contrib) repository needs to adhere to a set of interfaces defined by Dapr. Conformance tests are tests that are run on these component definitions with their associated backing services such that the component is tested to be conformant with the Dapr interface specifications and behavior.

The conformance tests are defined for the following building blocks:

- State store
- Secret store
- Bindings
- Pub/Sub

To understand more about them see the readme [here](https://github.com/dapr/components-contrib/blob/master/tests/conformance/README.md).
@ -39,7 +39,7 @@ spec:

| spec.type | Y | The type of the component | `state.redis`
| spec.version | Y | The version of the component | `v1`
| spec.initTimeout | N | The timeout duration for the initialization of the component. Default is 30s | `5m`, `1h`, `20s`
| spec.ignoreErrors | N | Tells the Dapr sidecar to continue initialization if the component fails to load. Default is false | `false`
| **spec.metadata** | - | **A key/value pair of component specific configuration. See your component definition for fields**|

### Special metadata values
@ -72,9 +72,9 @@ In this example, the Redis component is only accessible to Dapr instances runnin

{{< /tabs >}}

## Using namespaces with service invocation

When using service invocation to call an application in a namespace, you have to qualify it with the namespace. For example, calling the `ping` method on `myapp`, which is scoped to the `production` namespace, would look like this:

```bash
https://localhost:3500/v1.0/invoke/myapp.production/method/ping
```
@ -98,7 +98,7 @@ Read [Pub/Sub and namespaces]({{< ref "component-scopes.md" >}}) for more inform

Developers and operators might want to limit access for one database to a certain application, or a specific set of applications.
To achieve this, Dapr allows you to specify `scopes` on the component YAML. Application scopes added to a component limit the use of that component to only the applications with the specified IDs.

The following example shows how to give access to two Dapr enabled apps, with the app IDs of `app1` and `app2`, to the Redis component named `statestore`, which itself is in the `production` namespace:

```yaml
apiVersion: dapr.io/v1alpha1
@ -123,6 +123,6 @@ scopes:

## Related links

- [Configure Pub/Sub components with multiple namespaces]({{< ref "pubsub-namespaces.md" >}})
- [Use secret scoping]({{< ref "secrets-scopes.md" >}})
- [Limit the secrets that can be read from secret stores]({{< ref "secret-scope.md" >}})
@ -60,11 +60,11 @@ auth:

  secretStore: <SECRET_STORE_NAME>
```

`SECRET_STORE_NAME` is the name of the configured [secret store component]({{< ref supported-secret-stores >}}). When running in Kubernetes and using a Kubernetes secret store, the field `auth.SecretStore` defaults to `kubernetes` and can be left empty.

The above component definition tells Dapr to extract a secret named `redis-secret` from the defined secret store and assign the value of the `redis-password` key in the secret to the `redisPassword` field in the Component.
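A minimal sketch of that pattern inside a component spec; the surrounding component definition is assumed:

```yaml
spec:
  metadata:
  - name: redisPassword        # component field to populate
    secretKeyRef:
      name: redis-secret       # secret to look up in the secret store
      key: redis-password      # key within that secret
auth:
  secretStore: <SECRET_STORE_NAME>
```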

## Example

### Referencing a Kubernetes secret
@ -103,7 +103,7 @@ Dapr can restrict access to secrets in a secret store using its configuration. R

## Kubernetes permissions

### Default namespace

When running in Kubernetes, Dapr, during installation, defines a default Role and RoleBinding for secrets access from the Kubernetes secret store in the `default` namespace. For Dapr enabled apps that fetch secrets from the `default` namespace, a secret can be defined and referenced in components as shown in the example above.
@ -146,5 +146,5 @@ In production scenario to limit Dapr's access to certain secret resources alone,

## Related links

- [Use secret scoping]({{< ref "secrets-scopes.md" >}})
- [Limit the secrets that can be read from secret stores]({{< ref "secret-scope.md" >}})
@ -55,7 +55,7 @@ spec:

    value: <integer>
```

## Apply the configuration

Once you have created the component's YAML file, follow these instructions to apply it based on your hosting environment:
@ -4,11 +4,11 @@ title: "Pub/Sub brokers"

linkTitle: "Pub/sub brokers"
description: "Guidance on setting up different message brokers for Dapr Pub/Sub"
weight: 2000
aliases:
- "/operations/components/setup-pubsub/setup-pubsub-overview/"
---

Dapr integrates with pub/sub message buses to provide applications with the ability to create event-driven, loosely coupled architectures where producers send events to consumers via topics.

Dapr supports the configuration of multiple, named, pub/sub components *per application*. Each pub/sub component has a name and this name is used when publishing a message topic. Read the [API reference]({{< ref pubsub_api.md >}}) for details on how to publish and subscribe to topics.
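For example, publishing through the Dapr HTTP API names the pub/sub component directly in the URL; in this sketch the component name `pubsub`, the topic `deathStarStatus`, and the payload are assumptions:

```bash
curl -X POST http://localhost:3500/v1.0/publish/pubsub/deathStarStatus \
  -H "Content-Type: application/json" \
  -d '{"status": "completed"}'
```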
@ -123,6 +123,6 @@ kubectl delete namespace namespace-b

## Related links

- [Scope components to one or more applications]({{< ref "component-scopes.md" >}})
- [Use secret scoping]({{< ref "secrets-scopes.md" >}})
- [Limit the secrets that can be read from secret stores]({{< ref "secret-scope.md" >}})
@ -4,7 +4,7 @@ title: "Secret store components"

linkTitle: "Secret stores"
description: "Guidance on setting up different secret store components"
weight: 3000
aliases:
- "/operations/components/setup-state-store/secret-stores-overview/"
---
@ -55,7 +55,7 @@ spec:

    value: "[aws_session_token]"
```

## Apply the configuration

Once you have created the component's YAML file, follow these instructions to apply it based on your hosting environment:
@ -4,7 +4,7 @@ title: "State stores components"

linkTitle: "State stores"
description: "Guidance on setting up different state stores for Dapr state management"
weight: 1000
aliases:
- "/operations/components/setup-state-store/setup-state-store-overview/"
---
@ -45,7 +45,7 @@ The following tables lists the different properties for access control, policies

| Property | Type | Description |
|----------|--------|-------------|
| name | string | Path name of the operations allowed on the called app. Wildcard "\*" can be used under a path to match
| httpVerb | list | List specific http verbs that can be used by the calling app. Wildcard "\*" can be used to match any http verb. Unused for grpc invocation
| action | string | Access modifier. Accepted values "allow" (default) or "deny"
@ -192,12 +192,12 @@ spec:

  namespace: "ns2"
```

## Hello world examples

These examples show how to apply access control to the [hello world](https://github.com/dapr/quickstarts#quickstarts) quickstart samples where a python app invokes a node.js app.
Access control lists rely on the Dapr [Sentry service]({{< ref "security-concept.md" >}}) to generate the TLS certificates with a SPIFFE id for authentication, which means the Sentry service either has to be running locally or deployed to your hosting environment such as a Kubernetes cluster.

The nodeappconfig example below shows how to **deny** access to the `neworder` method from the `pythonapp`, where the python app is in the `myDomain` trust domain and `default` namespace. The nodeapp is in the `public` trust domain.

**nodeappconfig.yaml**

```yaml
@ -220,9 +220,9 @@ spec:

      - name: /neworder
        httpVerb: ['POST']
        action: deny
```

**pythonappconfig.yaml**

```yaml
apiVersion: dapr.io/v1alpha1
@ -235,7 +235,7 @@ spec:

  accessControl:
    defaultAction: allow
    trustDomain: "myDomain"
```

### Self-hosted mode

This example uses the [hello world](https://github.com/dapr/quickstarts/tree/master/hello-world/README.md) quickstart.
@ -247,7 +247,7 @@ The following steps run the Sentry service locally with mTLS enabled, set up nec

2. In a command prompt, set these environment variables:

{{< tabs "Linux/MacOS" Windows >}}

{{% codetab %}}
```bash
export DAPR_TRUST_ANCHORS=`cat $HOME/.dapr/certs/ca.crt`
@ -266,10 +266,10 @@ The following steps run the Sentry service locally with mTLS enabled, set up nec

$env:NAMESPACE="default"
```
{{% /codetab %}}

{{< /tabs >}}

3. Run daprd to launch a Dapr sidecar for the node.js app with mTLS enabled, referencing the local Sentry service:

```bash
@ -285,7 +285,7 @@ The following steps run the Sentry service locally with mTLS enabled, set up nec

5. In another command prompt, set these environment variables:

{{< tabs "Linux/MacOS" Windows >}}

{{% codetab %}}
```bash
export DAPR_TRUST_ANCHORS=`cat $HOME/.dapr/certs/ca.crt`
@ -294,7 +294,7 @@ The following steps run the Sentry service locally with mTLS enabled, set up nec

export NAMESPACE=default
```
{{% /codetab %}}

{{% codetab %}}
```powershell
$env:DAPR_TRUST_ANCHORS=$(Get-Content $env:USERPROFILE\.dapr\certs\ca.crt)
@ -325,7 +325,7 @@ This example uses the [hello kubernetes](https://github.com/dapr/quickstarts/tre

You can create and apply the above configuration files `nodeappconfig.yaml` and `pythonappconfig.yaml` as described in the [configuration]({{< ref "configuration-concept.md" >}}) to the Kubernetes deployments.

For example, below is how the pythonapp is deployed to Kubernetes in the default namespace with this pythonappconfig configuration file.
Do the same for the nodeapp deployment and then look at the logs for the pythonapp to see the calls fail due to the **deny** operation action set in the nodeappconfig file. Change this action to **allow** and re-deploy the apps and you should then see this call succeed.

```yaml
|

      containers:
      - name: python
        image: dapriosamples/hello-k8s-python:edge
```
@ -69,13 +69,13 @@ spec:

        defaultAccess: deny
```

For applications that need to be denied access to the Kubernetes secret store, follow [these instructions]({{< ref kubernetes-overview >}}), and add the following annotation to the application pod.

```yaml
dapr.io/config: appconfig
```

With this defined, the application no longer has access to the Kubernetes secret store.

### Scenario 2 : Allow access to only certain secrets in a secret store
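A sketch of the kind of configuration this scenario uses, following the secret scoping schema; the store name and secret names are placeholders:

```yaml
apiVersion: dapr.io/v1alpha1
kind: Configuration
metadata:
  name: appconfig
spec:
  secrets:
    scopes:
      - storeName: vault                        # placeholder secret store name
        defaultAccess: deny
        allowedSecrets: ["secret1", "secret2"]  # placeholder secret names
```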
@ -34,5 +34,5 @@ The following table shows all the supported pod Spec annotations supported by Da

| `dapr.io/sidecar-readiness-probe-timeout-seconds` | Number of seconds after which the sidecar readiness probe times out. Read more [here](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes). Default is `3`
| `dapr.io/sidecar-readiness-probe-period-seconds` | How often (in seconds) to perform the sidecar readiness probe. Read more [here](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes). Default is `6`
| `dapr.io/sidecar-readiness-probe-threshold` | When the sidecar readiness probe fails, Kubernetes will try N times before giving up. In this case, the Pod will be marked Unready. Read more about `failureThreshold` [here](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes). Default is `3`
| `dapr.io/http-max-request-size` | Increases the max size of the request body for the http and grpc servers, in MB, to handle uploading of big files. Default is `4` MB
| `dapr.io/env` | List of environment variables to be injected into the sidecar. Strings consisting of key=value pairs separated by a comma.
@ -83,8 +83,8 @@ dapr init -k --enable-mtls=false

### Wait for the installation to complete

You can wait for the installation to complete its deployment with the `--wait` flag.

The default timeout is 300s (5 min), but can be customized with the `--timeout` flag.

```bash
@ -129,7 +129,7 @@ The latest Dapr helm chart no longer supports Helm v2. Please migrate from Helm

```

To install in high availability mode:

```bash
helm upgrade --install dapr dapr/dapr \
  --version=1.1.2 \
@ -138,8 +138,8 @@ The latest Dapr helm chart no longer supports Helm v2. Please migrate from Helm

  --set global.ha.enabled=true \
  --wait
```

See [Guidelines for production ready deployments on Kubernetes]({{<ref kubernetes-production.md>}}) for more information on installing and upgrading Dapr using Helm.

### Uninstall Dapr on Kubernetes
@ -6,13 +6,13 @@ weight: 60000

description: "How to run Dapr apps on Kubernetes clusters with windows nodes"
---

Dapr supports running on Kubernetes clusters with Windows nodes. You can run your Dapr microservices exclusively on Windows, exclusively on Linux, or a combination of both. This is helpful to users who may be doing a piecemeal migration of a legacy application into a Dapr Kubernetes cluster.

Kubernetes uses a concept called node affinity so that you can denote whether you want your application to be launched on a Linux node or a Windows node. When deploying to a cluster which has both Windows and Linux nodes, you must provide affinity rules for your applications, otherwise the Kubernetes scheduler might launch your application on the wrong type of node. The sketch below shows the simplest form of such a rule.
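A minimal sketch using the standard `kubernetes.io/os` node label; a `nodeSelector` is the simplest form of node affinity:

```yaml
spec:
  nodeSelector:
    kubernetes.io/os: windows   # use "linux" for Linux-only workloads
```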
## Pre-requisites

You will need a Kubernetes cluster with Windows nodes. Many Kubernetes providers support the automatic provisioning of Windows enabled Kubernetes clusters.

1. Follow your preferred provider's instructions for setting up a cluster with Windows enabled
@ -61,7 +61,7 @@ The CPU and memory limits above account for the fact that Dapr is intended to a

When deploying Dapr in a production-ready configuration, it's recommended to deploy with a highly available (HA) configuration of the control plane, which creates 3 replicas of each control plane pod in the dapr-system namespace. This configuration allows for the Dapr control plane to survive node failures and other outages.

HA mode can be enabled with both the [Dapr CLI]({{< ref "kubernetes-deploy.md#install-in-highly-available-mode" >}}) and with [Helm charts]({{< ref "kubernetes-deploy.md#add-and-install-dapr-helm-chart" >}}).
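With the Dapr CLI, for instance, HA mode is a single flag; a sketch, with the linked page as the authoritative reference:

```bash
dapr init -k --enable-ha=true
```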
## Deploying Dapr with Helm
@ -9,7 +9,7 @@ description: "Follow these steps to upgrade Dapr on Kubernetes and ensure a smoo

## Prerequisites

- [Dapr CLI]({{< ref install-dapr-cli.md >}})
- [Helm 3](https://github.com/helm/helm/releases) (if using Helm)

## Upgrade existing cluster to 1.2.0

There are two ways to upgrade the Dapr control plane on a Kubernetes cluster: using either the Dapr CLI or Helm.
@ -54,7 +54,7 @@ From version 1.0.0 onwards, upgrading Dapr using Helm is no longer a disruptive

```bash
helm repo update
```

```bash
helm upgrade dapr dapr/dapr --version [NEW VERSION] --namespace dapr-system --wait
```
@ -64,7 +64,7 @@ From version 1.0.0 onwards, upgrading Dapr using Helm is no longer a disruptive

```bash
kubectl get pods -n dapr-system -w

NAME                              READY   STATUS    RESTARTS   AGE
dapr-dashboard-69f5c5c867-mqhg4   1/1     Running   0          42s
dapr-operator-5cdd6b7f9c-9sl7g    1/1     Running   0          41s
@ -29,11 +29,11 @@ See [this sample](https://github.com/dapr/samples/tree/master/hello-dapr-slim) f

## Enabling state management or pub/sub

See configuring Redis in self-hosted mode [without docker](https://redis.io/topics/quickstart) to enable a local state store or pub/sub broker for messaging.

## Enabling actors

The placement service must be run locally to enable actor placement. Also, a [transactional state store that supports ETags]({{< ref "supported-state-stores.md" >}}) must be enabled to use actors, for example, [Redis configured in self-hosted mode](https://redis.io/topics/quickstart).

By default for Linux/MacOS the `placement` binary is installed in `/$HOME/.dapr/bin` or for Windows at `%USERPROFILE%\.dapr\bin`.
@ -57,7 +57,7 @@ helm install elasticsearch elastic/elasticsearch -n dapr-monitoring --set persis

5. Validation

Ensure that Elasticsearch and Kibana are running in your Kubernetes cluster.

```bash
kubectl get pods -n dapr-monitoring
NAME                 READY   STATUS    RESTARTS   AGE
@ -135,7 +135,7 @@ spec:

## Search logs

> Note: Elasticsearch takes some time to index the logs that Fluentd sends.

1. Port-forward to svc/kibana-kibana
@ -6,7 +6,7 @@ weight: 3000

description: "Understand Dapr logging"
---

Dapr produces structured logs to stdout, either as plain text or JSON formatted. By default, all Dapr processes (runtime and system services) write to console out in plain text. To enable JSON formatted logs, you need to add the `--log-as-json` command flag when running Dapr processes.

If you want to use a search engine such as Elasticsearch or Azure Monitor to search the logs, it is recommended to use JSON-formatted logs which the log collector and search engine can parse using the built-in JSON parser.
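For example, when launching the runtime directly in self-hosted mode the flag is passed on the command line; the app ID here is a placeholder:

```bash
daprd --app-id myapp --log-as-json
```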
@ -56,7 +56,7 @@ You can enable JSON formatted logs for Dapr system services by adding `--set glo

helm install dapr dapr/dapr --namespace dapr-system --set global.logAsJson=true
```

### Enable JSON formatted log for Dapr sidecars

You can enable JSON-formatted logs in Dapr sidecars activated by the Dapr sidecar-injector service by adding the `dapr.io/log-as-json: "true"` annotation to the deployment, as sketched below.
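A sketch of the relevant pod template annotations; the surrounding deployment manifest and the app ID are assumed:

```yaml
annotations:
  dapr.io/enabled: "true"
  dapr.io/app-id: "myapp"       # placeholder app ID
  dapr.io/log-as-json: "true"
```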
@ -101,7 +101,7 @@ spec:

2. Search Dapr logs

   Here is an example query to parse JSON formatted logs and query logs from Dapr system processes.

```
ContainerLog
|

| where Namespace == "prometheus" and Name == "process_resident_memory_bytes"
| extend tags=parse_json(Tags)
| project TimeGenerated, Name, Val, app=tostring(tags['app'])
| summarize memInBytes=percentile(Val, 99) by bin(TimeGenerated, 1m), app
| where app startswith "dapr-"
| render timechart
```
@ -45,19 +45,19 @@ The `grafana-actor-dashboard.json` template shows Dapr Sidecar status, actor inv

```

1. Install the chart:

   ```bash
   helm install grafana grafana/grafana -n dapr-monitoring
   ```

   {{% alert title="Note" color="primary" %}}
   If you are a Minikube user or want to disable persistent volume for development purposes, you can disable it by using the following command instead:

   ```bash
   helm install grafana grafana/grafana -n dapr-monitoring --set persistence.enabled=false
   ```
   {{% /alert %}}

1. Retrieve the admin password for Grafana login:
|

   You will get a password similar to `cj3m0OfBNx8SLzUlTx91dEECgzRlYJb60D2evof1%`. Remove the `%` character from the password to get `cj3m0OfBNx8SLzUlTx91dEECgzRlYJb60D2evof1` as the admin password.

1. Validate that Grafana is running in your cluster:

```bash
kubectl get pods -n dapr-monitoring

NAME                                                READY   STATUS    RESTARTS   AGE
dapr-prom-kube-state-metrics-9849d6cc6-t94p8        1/1     Running   0          4m58s
dapr-prom-prometheus-alertmanager-749cc46f6-9b5t8   2/2     Running   0          4m58s
@ -80,7 +80,7 @@ The `grafana-actor-dashboard.json` template shows Dapr Sidecar status, actor inv
   dapr-prom-prometheus-node-exporter-bjp9f            1/1     Running   0          4m58s
   dapr-prom-prometheus-pushgateway-688665d597-h4xx2   1/1     Running   0          4m58s
   dapr-prom-prometheus-server-694fd8d7c-q5d59         2/2     Running   0          4m58s
   grafana-c49889cff-x56vj                             1/1     Running   0          5m10s
   ```
### Configure Prometheus as data source
@ -120,7 +120,7 @@ First you need to connect Prometheus as a data source to Grafana.
```bash
kubectl get svc -n dapr-monitoring

NAME                                 TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)             AGE
dapr-prom-kube-state-metrics         ClusterIP   10.0.174.177   <none>        8080/TCP            7d9h
dapr-prom-prometheus-alertmanager    ClusterIP   10.0.255.199   <none>        80/TCP              7d9h
@ -131,13 +131,13 @@ First you need to connect Prometheus as a data source to Grafana.
elasticsearch-master-headless        ClusterIP   None           <none>        9200/TCP,9300/TCP   7d10h
grafana                              ClusterIP   10.0.15.229    <none>        80/TCP              5d5h
kibana-kibana                        ClusterIP   10.0.188.224   <none>        5601/TCP            7d10h
```

In this guide the server name is `dapr-prom-prometheus-server` and the namespace is `dapr-monitoring`, so the HTTP URL will be `http://dapr-prom-prometheus-server.dapr-monitoring`.

1. Fill in the following settings:

   - Name: `Dapr`
   - HTTP URL: `http://dapr-prom-prometheus-server.dapr-monitoring`
   - Default: On
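To reach the Grafana UI from your workstation while entering these settings, a port-forward to the `grafana` service shown in the listing above works; the local port here is arbitrary:

```bash
# Forward the in-cluster Grafana service (port 80 per the listing above) to localhost:8080
kubectl port-forward svc/grafana 8080:80 -n dapr-monitoring
```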
@ -12,9 +12,9 @@ Dapr will be exporting trace in the OpenTelemetry format when OpenTelemetry is G
## Requirements

1. An installation of Dapr on Kubernetes.

2. Your trace backend is already set up to receive traces.

3. Check OpenTelemetry Collector exporters [here](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter) and [here](https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter) to see if your trace backend is supported by the OpenTelemetry Collector. On those linked pages, find the exporter you want to use and read its doc to find out the parameters required.
@ -23,11 +23,11 @@ Dapr will be exporting trace in the OpenTelemetry format when OpenTelemetry is G
### Run OpenTelemetry Collector to push to your trace backend

1. Check out the file [open-telemetry-collector-generic.yaml](/docs/open-telemetry-collector/open-telemetry-collector-generic.yaml) and replace the section marked with `<your-exporter-here>` with the correct settings for your trace exporter. Again, refer to the OpenTelemetry Collector links in the Prerequisites section to determine the correct settings.

2. Apply the configuration with `kubectl apply -f open-telemetry-collector-generic.yaml`.

## Set up Dapr to send trace to OpenTelemetry Collector

### Turn on tracing in Dapr

Next, set up both a Dapr configuration file to turn on tracing and deploy a tracing exporter component that uses the OpenTelemetry Collector.
@ -20,10 +20,10 @@ Jaeger image published to DockerHub:
```bash
docker run -d --name jaeger \
  -e COLLECTOR_ZIPKIN_HOST_PORT=:9412 \
  -p 16686:16686 \
  -p 9412:9412 \
  jaegertracing/all-in-one:1.22
```
@ -74,7 +74,7 @@ spec:
  ingress:
    enabled: false
  allInOne:
    image: jaegertracing/all-in-one:1.22
    options:
      query:
        base-path: /jaeger
@ -14,7 +14,7 @@ description: "Set-up New Relic for distributed tracing"
Dapr natively captures metrics and traces that can be sent directly to New Relic. The easiest way to export these is by configuring Dapr to send the traces to [New Relic's Trace API](https://docs.newrelic.com/docs/distributed-tracing/trace-api/report-zipkin-format-traces-trace-api/) using the Zipkin trace format.

In order for the integration to send data to New Relic [Telemetry Data Platform](https://newrelic.com/platform/telemetry-data-platform), you need a [New Relic Insights Insert API key](https://docs.newrelic.com/docs/apis/get-started/intro-apis/types-new-relic-api-keys#insights-insert-key).

```yaml
apiVersion: dapr.io/v1alpha1
@ -39,7 +39,7 @@ New Relic Distributed Tracing details
## (optional) New Relic Instrumentation

In order for the integrations to send data to New Relic Telemetry Data Platform, you either need a [New Relic license key](https://docs.newrelic.com/docs/accounts/accounts-billing/account-setup/new-relic-license-key) or [New Relic Insights Insert API key](https://docs.newrelic.com/docs/apis/get-started/intro-apis/types-new-relic-api-keys#insights-insert-key).

### OpenTelemetry instrumentation
@ -5,7 +5,7 @@ linkTitle: "Actors activation performance"
weight: 20000
description: ""
---
This article provides performance benchmarks and resource utilization for actors in Dapr on Kubernetes.

## System overview
@ -5,7 +5,7 @@ linkTitle: "Service invocation performance"
weight: 10000
description: ""
---
This article provides service invocation API performance benchmarks and resource utilization for the components needed to run Dapr in different hosting environments.

## System overview
@ -6,59 +6,59 @@ weight: 3000
description: "Require every incoming API request for Dapr to include an authentication token before allowing that request to pass through"
---

By default, Dapr relies on the network boundary to limit access to its public API. If you plan on exposing the Dapr API outside of that boundary, or if your deployment demands an additional level of security, consider enabling token authentication for Dapr APIs. This will cause Dapr to require every incoming gRPC and HTTP request for its APIs to include an authentication token, before allowing that request to pass through.

## Create a token

Dapr uses [JWT](https://jwt.io/) tokens for API authentication.

> Note, while Dapr itself is actually not the JWT token issuer in this implementation, being explicit about the use of the JWT standard enables federated implementations in the future (e.g. OAuth2).

To configure API authentication, start by generating your token using any JWT-compatible tool (e.g. https://jwt.io/) and your secret.

> Note, the secret is only necessary to generate the token, and Dapr doesn't need to know about or store it.
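If you prefer the command line over a web tool, an HS256 token can be assembled with standard shell utilities; a minimal sketch, where the header, claims and secret are all illustrative:

```bash
# Assemble an HS256-signed JWT from the shell (claims and secret are illustrative)
SECRET="my-signing-secret"
b64url() { openssl base64 -A | tr '+/' '-_' | tr -d '='; }
header=$(printf '%s' '{"alg":"HS256","typ":"JWT"}' | b64url)
payload=$(printf '%s' '{"iss":"example.local"}' | b64url)
signature=$(printf '%s.%s' "$header" "$payload" | openssl dgst -sha256 -hmac "$SECRET" -binary | b64url)
echo "${header}.${payload}.${signature}"
```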
## Configure API token authentication in Dapr

The token authentication configuration is slightly different for Kubernetes and self-hosted Dapr deployments:

### Self-hosted

In the self-hosted scenario, Dapr looks for the presence of the `DAPR_API_TOKEN` environment variable. If that environment variable is set when the `daprd` process launches, Dapr enforces authentication on its public APIs:

```shell
export DAPR_API_TOKEN=<token>
```

To rotate the configured token, simply set the `DAPR_API_TOKEN` environment variable to the new value and restart the `daprd` process.
### Kubernetes

In a Kubernetes deployment, Dapr leverages the Kubernetes secrets store to hold the JWT token. To configure Dapr APIs authentication, start by creating a new secret:

```shell
kubectl create secret generic dapr-api-token --from-literal=token=<token>
```

> Note, the above secret needs to be created in each namespace in which you want to enable Dapr token authentication.

To indicate to Dapr to use that secret to secure its public APIs, add an annotation to your Deployment template spec:

```yaml
annotations:
  dapr.io/enabled: "true"
  dapr.io/api-token-secret: "dapr-api-token" # name of the Kubernetes secret
```

When deployed, the Dapr sidecar injector automatically creates a secret reference and injects the actual value into the `DAPR_API_TOKEN` environment variable.
## Rotate a token

### Self-hosted

To rotate the configured token in self-hosted, simply set the `DAPR_API_TOKEN` environment variable to the new value and restart the `daprd` process.

### Kubernetes

To rotate the configured token in Kubernetes, update the previously created secret with the new token in each namespace. You can do that using the `kubectl patch` command (a sketch of that alternative follows below), but the easiest way to update it in each namespace is by using a manifest:
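A sketch of the `kubectl patch` alternative mentioned above (the token value is a placeholder):

```bash
# Update the token secret in place in a given namespace (token value is a placeholder)
kubectl patch secret dapr-api-token --namespace <namespace-name> \
  --patch '{"stringData":{"token":"<new-token>"}}'
```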
@ -78,16 +78,16 @@ And then apply it to each namespace:
kubectl apply --file token-secret.yaml --namespace <namespace-name>
```

To tell Dapr to start using the new token, trigger a rolling upgrade to each one of your deployments:

```shell
kubectl rollout restart deployment/<deployment-name> --namespace <namespace-name>
```

> Note, assuming your service is configured with more than one replica, the key rotation process does not result in any downtime.
## Adding JWT token to client API invocations

Once token authentication is configured in Dapr, all clients invoking the Dapr API will have to append the JWT token to every request:
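For HTTP clients the token travels in the `dapr-api-token` request header; a sketch of a state API call carrying it, where the store name and key are illustrative:

```bash
# Call the Dapr HTTP API with the token attached (store name and key are illustrative)
curl -H "dapr-api-token: <token>" http://localhost:3500/v1.0/state/statestore/mykey
```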
@ -11,55 +11,55 @@ To enable the application to authenticate requests that are arriving from the Da
## Create a token

Dapr uses [JWT](https://jwt.io/) tokens for API authentication.

> Note, while Dapr itself is actually not the JWT token issuer in this implementation, being explicit about the use of the JWT standard enables federated implementations in the future (e.g. OAuth2).

To configure API authentication, start by generating your token using any JWT-compatible tool (e.g. https://jwt.io/) and your secret.

> Note, the secret is only necessary to generate the token, and Dapr doesn't need to know about or store it.
## Configure app API token authentication in Dapr

The token authentication configuration is slightly different for Kubernetes and self-hosted Dapr deployments:

### Self-hosted

In the self-hosted scenario, Dapr looks for the presence of the `APP_API_TOKEN` environment variable. If that environment variable is set when the `daprd` process launches, Dapr includes the token when calling an app:

```shell
export APP_API_TOKEN=<token>
```

To rotate the configured token, simply set the `APP_API_TOKEN` environment variable to the new value and restart the `daprd` process.
### Kubernetes

In a Kubernetes deployment, Dapr leverages the Kubernetes secrets store to hold the JWT token. Start by creating a new secret:

```shell
kubectl create secret generic app-api-token --from-literal=token=<token>
```

> Note, the above secret needs to be created in each namespace in which you want to enable app token authentication.

To indicate to Dapr to use the token in the secret when sending requests to the app, add an annotation to your Deployment template spec:

```yaml
annotations:
  dapr.io/enabled: "true"
  dapr.io/app-token-secret: "app-api-token" # name of the Kubernetes secret
```

When deployed, the Dapr sidecar injector automatically creates a secret reference and injects the actual value into the `APP_API_TOKEN` environment variable.
## Rotate a token

### Self-hosted

To rotate the configured token in self-hosted, simply set the `APP_API_TOKEN` environment variable to the new value and restart the `daprd` process.

### Kubernetes

To rotate the configured token in Kubernetes, update the previously created secret with the new token in each namespace. You can do that using the `kubectl patch` command (the sketch shown earlier for `dapr-api-token` applies equally to `app-api-token`), but the easiest way to update it in each namespace is by using a manifest:
@ -79,13 +79,13 @@ And then apply it to each namespace:
kubectl apply --file token-secret.yaml --namespace <namespace-name>
```

To tell Dapr to start using the new token, trigger a rolling upgrade to each one of your deployments:

```shell
kubectl rollout restart deployment/<deployment-name> --namespace <namespace-name>
```

> Note, assuming your service is configured with more than one replica, the key rotation process does not result in any downtime.
## Authenticating requests from Dapr
@ -91,23 +91,54 @@ kubectl logs --selector=app=dapr-sentry --namespace <DAPR_NAMESPACE>
### Bringing your own certificates

Using Helm, you can provide the PEM-encoded root cert, issuer cert and private key that will be populated into the Kubernetes secret used by Sentry.

_Note: This example uses the OpenSSL command-line tool, which is a widely distributed package, easily installed on Linux via the package manager. On Windows, OpenSSL can be installed [using chocolatey](https://community.chocolatey.org/packages/openssl). On macOS, it can be installed using brew: `brew install openssl`_

Create config files for generating the certificates; this is necessary for generating v3 certificates with the SAN (Subject Alt Name) extension fields. First save the following to a file named `root.conf`:
```ini
[req]
distinguished_name = req_distinguished_name
x509_extensions = v3_req
prompt = no
[req_distinguished_name]
C = US
ST = VA
L = Daprville
O = dapr.io/sentry
OU = dapr.io/sentry
CN = cluster.local
[v3_req]
basicConstraints = critical, CA:true
keyUsage = critical, digitalSignature, cRLSign, keyCertSign
extendedKeyUsage = serverAuth, clientAuth
subjectAltName = @alt_names
[alt_names]
DNS.1 = cluster.local
```
Repeat this for `issuer.conf`: paste the same contents into the file, but add `pathlen:0` to the end of the basicConstraints line, as shown below:

```ini
basicConstraints = critical, CA:true, pathlen:0
```
Run the following to generate the root cert and key:
```bash
openssl ecparam -genkey -name prime256v1 | openssl ec -out root.key
openssl req -new -nodes -sha256 -key root.key -out root.csr -config root.conf -extensions v3_req
openssl x509 -req -sha256 -days 365 -in root.csr -signkey root.key -outform PEM -out root.pem -extfile root.conf -extensions v3_req
```
Next, run the following to generate the issuer cert and key:
```bash
openssl ecparam -genkey -name prime256v1 | openssl ec -out issuer.key
openssl req -new -sha256 -key issuer.key -out issuer.csr -config issuer.conf -extensions v3_req
openssl x509 -req -in issuer.csr -CA root.pem -CAkey root.key -CAcreateserial -outform PEM -out issuer.pem -days 365 -sha256 -extfile issuer.conf -extensions v3_req
```
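Before handing the files to Helm, it can be worth confirming that the chain is valid; a quick check with standard OpenSSL commands:

```bash
# Confirm the issuer certificate chains back to the root
openssl verify -CAfile root.pem issuer.pem

# Optionally inspect the issuer certificate's extensions and validity period
openssl x509 -in issuer.pem -text -noout
```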
Install Helm and pass the root cert, issuer cert and issuer key to Sentry via configuration:
@ -115,9 +146,9 @@ Install Helm and pass the root cert, issuer cert and issuer key to Sentry via co
kubectl create ns dapr-system

helm install \
  --set-file dapr_sentry.tls.issuer.certPEM=issuer.pem \
  --set-file dapr_sentry.tls.issuer.keyPEM=issuer.key \
  --set-file dapr_sentry.tls.root.certPEM=root.pem \
  --namespace dapr-system \
  dapr \
  dapr/dapr
@ -72,8 +72,8 @@ spec:
  - name: authHeaderName
    value: "<header name under which the secret token is saved>"
  # forceHTTPS:
  # This key is used to set HTTPS schema on redirect to your API method
  # after Dapr successfully received Access Token from Identity Provider.
  # By default, Dapr will use HTTP on this redirect.
  - name: forceHTTPS
    value: "<set to true if you invoke an API method through Dapr from https origin>"
@ -12,7 +12,7 @@ This topic details the supported versions of Dapr releases, the upgrade policies
Dapr releases use `MAJOR.MINOR.PATCH` versioning. For example, 1.0.0.

* A `PATCH` version is incremented for bug and security hot fixes.
* A `MINOR` version is updated as part of the regular release cadence, including new features, bug and security fixes.
* A `MAJOR` version is updated when there’s a non-backward compatible change to the runtime, such as an API change. A `MAJOR` release can also occur when there is a significant addition or change of functionality that needs to be differentiated from the previous version.

A supported release means:
@ -20,14 +20,14 @@ A supported release means;
- A hotfix patch is released if the release has a critical issue such as a mainline broken scenario or a security issue. Each of these is reviewed on a case-by-case basis.
- Issues are investigated for the supported releases. If a release is no longer supported, you need to upgrade to a newer release and determine if the issue is still relevant.

From the 1.0.0 release onwards, two (2) versions of Dapr are supported; the current and previous versions. Typically these are `MINOR` release updates. This means that there is a rolling window that moves forward for supported releases, and it is your operational responsibility to remain up to date with these supported versions. If you have an older version of Dapr you may have to do intermediate upgrades to get to a supported version.

There will be at least 6 weeks between major.minor version releases, giving users a 12 week (3 month) rolling window for upgrading.

Patch support is for supported versions (current and previous).

## Supported versions

The table below shows the versions of Dapr releases that have been tested together and form a "packaged" release. Any other combinations of releases are not supported.

| Release date | Runtime | CLI | SDKs | Dashboard | Status |
|--------------------|:--------:|:--------|---------|---------|---------|
@ -46,20 +46,20 @@ General guidance on upgrading can be found for [self hosted mode]({{<ref self-ho
| Current Runtime version | Must upgrade through | Target Runtime version |
|--------------------------|-----------------------|------------------------- |
| 0.11                     | N/A                   | 1.0.1                    |
|                          | 1.0.1                 | 1.1.2                    |
| 1.0-rc1 to 1.0-rc4       | N/A                   | 1.0.1                    |
| 1.0.0 or 1.0.1           | N/A                   | 1.1.2                    |
| 1.1.0 or 1.1.1           | N/A                   | 1.1.2                    |
## Feature and deprecations

There is a process for announcing feature deprecations. Deprecations are applied two (2) releases after the release in which they were announced. For example, Feature X is announced to be deprecated in the 1.0.0 release notes and will then be removed in 1.2.0.

Deprecations appear in release notes under a section named “Deprecations”, which indicates:
- The point in the future the now-deprecated feature will no longer be supported. For example release x.y.z. This is at least two (2) releases prior.
- Any steps the user must take to modify their code, operations, etc. if applicable.

After announcing a future breaking change, the change will happen in 2 releases or 6 months, whichever is greater. Deprecated features should respond with a warning but do nothing otherwise.
### Announced deprecations

| Feature | Deprecation announcement | Removal |
@ -7,10 +7,10 @@ description: "Dapr's versioning policies"
---

## Introduction
Dapr is designed for future changes in the runtime, APIs and components with versioning schemes. This topic describes the versioning schemes and strategies for APIs, manifests such as components, and GitHub repositories.

## Versioning
Versioning is the process of assigning either unique version names or unique version numbers to unique states of computer software.
- Versioning provides compatibility, explicit change control and handling changes, in particular breaking changes.
- Dapr strives to be backwards compatible. If a breaking change is needed it’ll be [announced in advance]({{< ref "support-release-policy#feature-and-deprecations" >}}).
- Deprecation of features is done over multiple releases, with both new and deprecated features working side-by-side.
@ -19,7 +19,7 @@ Versioning is the process of assigning either unique version names or unique ver
Versioning refers to the following Dapr repos: dapr, CLI, stable language SDKs, dashboard, components-contrib, quickstarts, helm-charts and documentation.

Dapr has the following versioning schemes:
- Dapr `HTTP API` versioned with `MAJOR.MINOR`
- Dapr `GRPC API` with `MAJOR`
- Releases (GitHub repositories including dapr, CLI, SDKs and Helm Chart) with `MAJOR.MINOR.PATCH`
- Documentation and Quickstarts repositories are versioned with the Dapr runtime repository versioning.
@ -28,32 +28,32 @@ Dapr has the following versioning schemes:
Note that the Dapr APIs, binaries releases (runtime, CLI, SDKs) and components are all independent from one another.

## Dapr HTTP API
The Dapr HTTP API is versioned according to these [REST API guidelines](https://github.com/microsoft/api-guidelines/blob/vNext/Guidelines.md#71-url-structure).

Based on these guidelines:
- A `MAJOR` version of the API is incremented when a deprecation is expected of the older version. Any such deprecation will be communicated and an upgrade path made available.
- A `MINOR` version *may* be incremented for any other changes. For example a change to the JSON schema of the message sent to the API.
The definition of a breaking change to the API can be viewed [here](https://github.com/microsoft/api-guidelines/blob/vNext/Guidelines.md#123-definition-of-a-breaking-change).
- Experimental APIs include an “alpha” suffix to denote their alpha status. For example v1.0alpha, v2.0alpha, etc.

## Dapr runtime
Dapr releases use `MAJOR.MINOR.PATCH` versioning. For example 1.0.0. Read [Supported releases]({{< ref support-release-policy.md >}}) for more on the versioning of releases.

## Helm Charts
Helm charts in the [helm-charts repo](https://github.com/dapr/helm-charts) are versioned with the Dapr runtime. The Helm charts are used in the [Kubernetes deployment]({{< ref "kubernetes-deploy#install-with-helm-advanced" >}})

## Language SDKs, CLI and dashboard
The Dapr language SDKs, CLI and dashboard are versioned independently from the Dapr runtime and can be released at different schedules. See this [table]({{< ref "support-release-policy#supported-versions" >}}) to show the compatibility between versions of the SDKs, CLI, dashboard and runtime. Each new release on the runtime lists the corresponding supported SDKs, CLI and Dashboard.

SDK, CLI and dashboard versioning follows a `MAJOR.MINOR.PATCH` format. A major version is incremented when there’s a non-backwards compatible change in an SDK (for example, changing a parameter on a client method). A minor version is updated for new features and bug fixes, and the patch version is incremented in case of bug or security hot fixes.

Samples and examples in SDKs are versioned with that repo.

## Components
Components are implemented in the components-contrib repository and follow a `MAJOR` versioning scheme. The version for components adheres to major versions (vX), as patches and non-breaking changes are added to the latest major version. The version is incremented when there’s a non-backwards compatible change in a component interface, for example, changing an existing method in the State Store interface.

The [components-contrib](https://github.com/dapr/components-contrib/) repo release is a flat version across all components inside. That is, a version for the components-contrib repo release is made up of all the schemas for the components inside it. A new version of Dapr does not mean there is a new release of components-contrib if there are no component changes.

Note: Components have a production usage lifecycle status: Alpha, Beta and GA (stable). These statuses are not related to their versioning. The tables of supported components show both their versions and their status.
* List of [state store components]({{< ref supported-state-stores.md >}})
@ -63,7 +63,7 @@ Note: Components have a production usage lifecycle status: Alpha, Beta and GA (s
For more information on component versioning read [Version 2 and beyond of a component](https://github.com/dapr/components-contrib/blob/master/docs/developing-component.md#version-2-and-beyond-of-a-component)

### Component schemas

Versioning for component YAMLs comes in two forms:
- Versioning for the component manifest. The `apiVersion`
@ -88,16 +88,16 @@ spec:
```

### Component manifest version
The Component YAML manifest is versioned with `dapr.io/v1alpha1`.

### Component implementation version
The version for a component implementation is determined by the `.spec.version` field, as can be seen in the example above. The `.spec.version` field is mandatory in a schema instance and the component fails to load if it is not present. For the release of Dapr 1.0.0 all components are marked as `v1`. The component implementation version is incremented only for non-backward compatible changes.

### Component deprecations
Deprecations of components will be announced two (2) releases ahead. Deprecation of a component results in a major version update of the component version. After 2 releases, the component is unregistered from the Dapr runtime, and trying to load it will throw a fatal exception.

## Quickstarts and Samples
Quickstarts in the [Quickstarts repo](https://github.com/dapr/quickstarts) are versioned with the runtime, where a table of corresponding versions is on the front page of the samples repo. Users should only use Quickstarts corresponding to the version of the runtime being run.

Samples in the [Samples repo](https://github.com/dapr/samples) are each versioned on a case-by-case basis depending on the sample maintainer. Samples that become very out of date with the runtime releases (many versions behind) or have not been maintained for more than 1 year will be removed.
@ -64,9 +64,9 @@ In order to further diagnose any issue, check the logs of the Dapr sidecar injec
*Note: If you installed Dapr to a different namespace, replace dapr-system above with the desired namespace*

## My pod is in CrashLoopBackoff or another failed state due to the daprd sidecar

If the Dapr sidecar (`daprd`) is taking too long to initialize, this might be surfaced as a failing health check by Kubernetes.

If your pod is in a failed state you should check this:
@ -209,8 +209,8 @@ export DAPR_HOST_IP=127.0.0.1
This is usually due to one of the following issues:

- You may have defined the `NAMESPACE` environment variable locally or deployed your components into a different namespace in Kubernetes. Check which namespace your app and the components are deployed to. Read [scoping components to one or more applications]({{< ref "component-scopes.md" >}}) for more information.
- You may not have provided a `--components-path` with the Dapr `run` commands or not placed your components into the default components folder for your OS. Read [define a component]({{< ref "get-started-component.md" >}}) for more information; a sketch of the flag's usage follows this list.
- You may have a syntax issue in the component YAML file. Check your component YAML with the component [YAML samples]({{< ref "components.md" >}}).
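As a quick illustration of the `--components-path` point above, a `dapr run` invocation pointing at an explicit components folder might look like this; the app ID, path and command are placeholders:

```bash
# Point the sidecar at an explicit components folder (app ID, path and command are illustrative)
dapr run --app-id myapp --components-path ./components -- node app.js
```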
## Service invocation is failing and my Dapr service is missing an appId (macOS)
@ -38,7 +38,7 @@ actorType | The actor type.
actorId | The actor ID.
method | The name of the method to invoke.

> Note, all URL parameters are case-sensitive.

#### Examples
@ -50,7 +50,7 @@ curl -X POST http://localhost:3500/v1.0/actors/stormtrooper/50/method/shoot \
```

Example of invoking a method on an actor that takes parameters: You can provide the method parameters and values in the body of the request, for example in curl using `-d "{\"param\":\"value\"}"`

```shell
curl -X POST http://localhost:3500/v1.0/actors/x-wing/33/method/fly \
@ -196,7 +196,7 @@ A `dueTime` of 0 means to fire immediately. The following body means to fire im
}
```

To configure the reminder to fire once only, the period should be set to an empty string. The following specifies a `dueTime` of 3 seconds with a period of empty string, which means the reminder will fire in 3 seconds and then never fire again.

```json
{
  "dueTime":"0h0m3s0ms",
@ -502,7 +502,7 @@ curl -X DELETE http://localhost:3000/actors/stormtrooper/50 \
### Invoke actor method

Invokes a method for an actor with the specified methodName, where parameters to the method are passed in the body of the request message and return values are provided in the body of the response message. If the actor is not already running, the app side should [activate](#activating-an-actor) it.

#### HTTP Request
@ -531,7 +531,7 @@ methodName | The name of the method to invoke.
#### Examples

Example of invoking a method for an actor: The example calls the performAction method on the actor type stormtrooper that has actorId of 50.

```shell
curl -X POST http://localhost:3000/actors/stormtrooper/50/method/performAction \
@ -540,7 +540,7 @@ curl -X POST http://localhost:3000/actors/stormtrooper/50/method/performAction \
### Invoke reminder

Invokes a reminder for an actor with the specified reminderName. If the actor is not already running, the app side should [activate](#activating-an-actor) it.

#### HTTP Request
@ -569,7 +569,7 @@ reminderName | The name of the reminder to invoke.
#### Examples

Example of invoking a reminder for an actor: The example calls the checkRebels reminder method on the actor type stormtrooper that has actorId of 50.

```shell
curl -X POST http://localhost:3000/actors/stormtrooper/50/method/remind/checkRebels \
@ -578,7 +578,7 @@ curl -X POST http://localhost:3000/actors/stormtrooper/50/method/remind/checkReb
### Invoke timer

Invokes a timer for an actor with the specified timerName. If the actor is not already running, the app side should [activate](#activating-an-actor) it.

#### HTTP Request
@ -607,7 +607,7 @@ timerName | The name of the timer to invoke.
#### Examples

Example of invoking a timer for an actor: The example calls the checkRebels timer method on the actor type stormtrooper that has actorId of 50.

```shell
curl -X POST http://localhost:3000/actors/stormtrooper/50/method/timer/checkRebels \
@ -87,7 +87,7 @@ def incoming():
### Binding endpoints

Bindings are discovered from component yaml files. Dapr calls this endpoint on startup to ensure that the app can handle this call. If the app doesn't have the endpoint, Dapr ignores it.

#### HTTP Request

```
@ -108,12 +108,12 @@ Parameter | Description
appPort | the application port
name | the name of the binding

> Note, all URL parameters are case-sensitive.

### Binding payload

In order to deliver binding inputs, a POST call is made to user code with the name of the binding as the URL path.

#### HTTP Request

```
@ -148,7 +148,7 @@ If `concurrency` is not set, it is sent out sequential (the example below shows
{
  "storeName": "stateStore",
  "state": stateDataToStore,

  "to": ["storage", "queue"],
  "concurrency": "parallel",
  "data": jsonObject,
@ -63,7 +63,7 @@ version | string | Component version.
### Examples

Note: This example is based on the Actor sample provided in the [Dapr SDK for Python](https://github.com/dapr/python-sdk/tree/master/examples/demo_actor).

```shell
curl http://localhost:3500/v1.0/metadata
@ -143,7 +143,7 @@ Note: This example is based on the Actor sample provided in the [Dapr SDK for Py
Add a custom attribute to the metadata endpoint:

```shell
curl -X PUT -H "Content-Type: text/plain" --data "myDemoAttributeValue" http://localhost:3500/v1.0/metadata/myDemoAttribute
```

Get the metadata information to confirm your custom attribute was added:
@ -168,4 +168,4 @@ Dapr Pub/Sub adheres to version 1.0 of CloudEvents.
## Related links

* [How to publish to and consume topics]({{< ref howto-publish-subscribe.md >}})
* [Sample for pub/sub](https://github.com/dapr/quickstarts/tree/master/pub-sub)
@ -57,7 +57,7 @@ If a secret store has support for multiple keys in a secret, a JSON payload is r
In case of a secret store that only has name/value semantics, a JSON payload is returned with the name of the secret as the field and the value of the secret as the value.

##### Response with multiple keys in a secret (e.g. Kubernetes):

```shell
curl http://localhost:3500/v1.0/secrets/kubernetes/db-secret
@ -134,7 +134,7 @@ secret-store-name | the name of the secret store to get the secret from
The returned response is a JSON containing the secrets. The JSON object will contain the secret names as fields and a map of secret keys and values as the field value.

##### Response with multiple secrets and multiple key / values in a secret (e.g. Kubernetes):

```shell
curl http://localhost:3500/v1.0/secrets/kubernetes/bulk
@ -100,7 +100,7 @@ For a Node app this would look like:
app.post('/add', (req, res) => {
  let args = req.body;
  const [operandOne, operandTwo] = [Number(args['arg1']), Number(args['arg2'])];

  let result = operandOne + operandTwo;
  res.send(result.toString());
});
@ -110,7 +110,7 @@ app.listen(port, () => console.log(`Listening on port ${port}!`));
> The response from the remote endpoint will be returned in the response body.

In case your service listens on a more nested path (e.g. `/api/v1/add`), Dapr implements a full reverse proxy so you can append all the necessary path fragments to your request URL like this:

`http://localhost:3500/v1.0/invoke/mathService/method/api/v1/add`
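Concretely, an invocation of that nested route through the sidecar might look like the following sketch; the payload matches the Node `add` example above:

```bash
# Invoke the nested /api/v1/add route on mathService through the Dapr sidecar
curl -X POST http://localhost:3500/v1.0/invoke/mathService/method/api/v1/add \
  -H "Content-Type: application/json" \
  -d '{"arg1": 10, "arg2": 23}'
```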
@ -79,7 +79,7 @@ etag | (optional) state ETag
metadata | (optional) additional key-value pairs to be passed to the state store
options | (optional) state operation options, see [state operation options](#optional-behaviors)

> **ETag format:** Dapr runtime treats ETags as opaque strings. The exact ETag format is defined by the corresponding data store.

### HTTP Response
@ -157,7 +157,7 @@ ETag | ETag of returned value
#### Response Body
JSON-encoded value

### Example

```shell
curl http://localhost:3500/v1.0/state/starwars/planet \
@ -211,7 +211,7 @@ Code | Description
#### Response Body
An array of JSON-encoded values

### Example

```shell
curl http://localhost:3500/v1.0/state/myRedisStore/bulk \
@ -430,7 +430,7 @@ Dapr assumes data stores are eventually consistent by default. A state should:
* For write requests, the state store should asynchronously replicate updates to the configured quorum after acknowledging the update request.

#### Strong Consistency

When a strong consistency hint is attached, a state store should:

* For read requests, the state store should return the most up-to-date data consistently across replicas.
@ -475,13 +475,13 @@ curl -X POST http://localhost:3500/v1.0/state/statestore \
Get the object to find the ETag that was set automatically by the statestore:

```shell
curl http://localhost:3500/v1.0/state/statestore/sampleData -v
* Connected to localhost (127.0.0.1) port 3500 (#0)
> GET /v1.0/state/statestore/sampleData HTTP/1.1
> Host: localhost:3500
> User-Agent: curl/7.64.1
> Accept: */*
>
< HTTP/1.1 200 OK
< Server: fasthttp
< Date: Sun, 14 Feb 2021 04:51:50 GMT
@ -489,7 +489,7 @@ curl http://localhost:3500/v1.0/state/statestore/sampleData -v
< Content-Length: 3
< Etag: 1
< Traceparent: 00-3452582897d134dc9793a244025256b1-b58d8d773e4d661d-01
<
* Connection #0 to host localhost left intact
"1"* Closing connection 0
```
@ -10,13 +10,13 @@ The Dapr CLI allows you to setup Dapr on your local dev machine or on a Kubernet
```bash

         __
    ____/ /___ _____  _____
   / __  / __ '/ __ \/ ___/
  / /_/ / /_/ / /_/ / /
  \__,_/\__,_/ .___/_/
            /_/

===============================
Distributed Application Runtime
@ -23,7 +23,7 @@ dapr configurations [flags]
| Name | Environment Variable | Default | Description
| --- | --- | --- | --- |
| `--kubernetes`, `-k` | | `false` | List all Dapr configurations in a Kubernetes cluster
| `--name`, `-n` | | | The configuration name to be printed (optional)
| `--output`, `-o` | | `list`| Output format (options: json or yaml or list)
| `--help`, `-h` | | | Print this help message |
@ -48,8 +48,8 @@ dapr init -k
### Initialize Dapr in Kubernetes and wait for the installation to complete

You can wait for the installation to complete its deployment with the `--wait` flag.

The default timeout is 300s (5 min), but can be customized with the `--timeout` flag.

```bash
dapr init -k --wait --timeout 600
@ -32,7 +32,7 @@ dapr invoke [flags]
## Examples

### Invoke a sample method on target app with POST Verb

```bash
dapr invoke --app-id target --method sample --data '{"key":"value"}'
```
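For comparison, the same method can be invoked with a different HTTP verb via the `--verb` flag; a sketch with illustrative names:

```bash
# Invoke the same method with GET instead of the default POST (names are illustrative)
dapr invoke --app-id target --method sample --verb GET
```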
@ -29,6 +29,6 @@ dapr mtls export [flags]
## Examples

### Export certificates to a local folder

```bash
dapr mtls export -o ./certs
```
@ -32,7 +32,7 @@ dapr run [flags] [command]
| `--config`, `-c` | | `Linux & Mac: $HOME/.dapr/config.yaml`, `Windows: %USERPROFILE%\.dapr\config.yaml` | Dapr configuration file |
| `--dapr-grpc-port` | | `50001` | The gRPC port for Dapr to listen on |
| `--dapr-http-port` | | `3500` | The HTTP port for Dapr to listen on |
| `--enable-profiling` | | `false` | Enable `pprof` profiling via an HTTP endpoint |
| `--help`, `-h` | | | Print this help message |
| `--image` | | | The image to build the code in. Input is: `repository/image` |
| `--log-level` | | `info` | The log verbosity. Valid values are: `debug`, `info`, `warn`, `error`, `fatal`, or `panic` |
@ -4,7 +4,7 @@ title: "Bindings component specs"
linkTitle: "Bindings"
weight: 3000
description: The supported external bindings that interface with Dapr
aliases:
- "/operations/components/setup-bindings/supported-bindings/"
no_list: true
---
@ -25,7 +25,7 @@ Table captions:
| Name | Input<br>Binding | Output<br>Binding | Status | Component version | Since runtime version |
|------|:----------------:|:-----------------:|--------|-------- | ---------|
| [Apple Push Notifications (APN)]({{< ref apns.md >}}) | | ✅ | Alpha | v1 | 1.0 |
| [Cron (Scheduler)]({{< ref cron.md >}}) | ✅ | ✅ | Alpha | v1 | 1.0 |
| [HTTP]({{< ref http.md >}}) | | ✅ | GA | v1 | 1.0 |
| [InfluxDB]({{< ref influxdb.md >}}) | | ✅ | Alpha | v1 | 1.0 |
@ -69,7 +69,7 @@ Table captions:
### Microsoft Azure

| Name | Input<br>Binding | Output<br>Binding | Status | Component version | Since |
|------|:----------------:|:-----------------:|--------| --------- | ---------- |
| [Azure Blob Storage]({{< ref blobstorage.md >}}) | | ✅ | Alpha | v1 | 1.0 |
| [Azure CosmosDB]({{< ref cosmosdb.md >}}) | | ✅ | Alpha | v1 | 1.0 |
@ -3,7 +3,7 @@ type: docs
title: "Alibaba Cloud Object Storage Service binding spec"
linkTitle: "Alibaba Cloud Object Storage"
description: "Detailed documentation on the Alibaba Cloud Object Storage binding component"
aliases:
- "/operations/components/setup-bindings/supported-bindings/alicloudoss/"
---
@ -44,7 +44,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr
| `accessKey` | Y | Output | Access key credential. |
| `bucket` | Y | Output | Name of the storage bucket. |

## Binding support

This component supports **output binding** with the following operations:
- `create`: [Create object](#create-object)
@ -3,7 +3,7 @@ type: docs
title: "Apple Push Notification Service binding spec"
linkTitle: "Apple Push Notification Service"
description: "Detailed documentation on the Apple Push Notification Service binding component"
aliases:
- "/operations/components/setup-bindings/supported-bindings/apns/"
---
@ -42,8 +42,8 @@ spec:
| private-key | Y | Output | A PKCS #8-formatted private key. It is intended that the private key is stored in the secret store and not exposed directly in the configuration. See [here](#private-key) for more details | `"pem file"` |

### Private key

The APNS binding needs a cryptographic private key in order to generate authentication tokens for the APNS service.
The private key can be generated from the Apple Developer Portal and is provided as a PKCS #8 file with the private key stored in PEM format.
The private key should be stored in the Dapr secret store and not stored directly in the binding's configuration file.
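As a hedged illustration (the secret name and key file name below are hypothetical, not from this page), the key could be placed in a Kubernetes secret that a Dapr secret store can then reference:

```bash
# Store the PKCS #8 private key in a Kubernetes secret so the binding
# configuration can reference it instead of embedding the key inline.
kubectl create secret generic apns-binding-key \
  --from-file=private-key=./AuthKey_ABC123.p8
```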

A sample configuration file for the APNS binding is shown below:
@ -89,8 +89,8 @@ This component supports **output binding** with the following operations:
## Push notification format

The APNS binding is a pass-through wrapper over the Apple Push Notification Service. The APNS binding will send the request directly to the APNS service without any translation.
It is therefore important to understand the payload for push notifications expected by the APNS service.
The payload format is documented [here](https://developer.apple.com/documentation/usernotifications/setting_up_a_remote_notification_server/generating_a_remote_notification).

### Request format
@ -3,7 +3,7 @@ type: docs
title: "Azure Blob Storage binding spec"
linkTitle: "Azure Blob Storage"
description: "Detailed documentation on the Azure Blob Storage binding component"
aliases:
- "/operations/components/setup-bindings/supported-bindings/blobstorage/"
---
@ -45,7 +45,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr
| storageAccessKey | Y | Output | The Blob Storage access key | `"access-key"` |
| container | Y | Output | The name of the Blob Storage container to write to | `"myexamplecontainer"` |
| decodeBase64 | N | Output | Configuration to decode base64 file content before saving to Blob Storage (in case of saving a file with binary content). `"true"` is the only allowed positive value. Other positive variations like `"True"` are not acceptable. Defaults to `"false"` | `"true"`, `"false"` |
| getBlobRetryCount | N | Output | Specifies the maximum number of HTTP GET requests that will be made while reading from a RetryReader. Defaults to `"10"` | `"1"`, `"2"` |

## Binding support
@ -73,7 +73,7 @@ To perform a create blob operation, invoke the Azure Blob Storage binding with a

##### Save text to a randomly generated UUID blob

{{< tabs Windows Linux >}}
{{% codetab %}}
On Windows, use the command prompt (PowerShell has a different escaping mechanism):
@ -185,14 +185,14 @@ To perform a get blob operation, invoke the Azure Blob Storage binding with a `P
{{% codetab %}}
```bash
curl -d '{ \"operation\": \"get\", \"metadata\": { \"blobName\": \"myblob\" }}' http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
```
{{% /codetab %}}

{{% codetab %}}
```bash
curl -d '{ "operation": "get", "metadata": { "blobName": "myblob" }}' \
  http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
```
{{% /codetab %}}
@ -223,14 +223,14 @@ To perform a delete blob operation, invoke the Azure Blob Storage binding with a
{{% codetab %}}
```bash
curl -d '{ \"operation\": \"delete\", \"metadata\": { \"blobName\": \"myblob\" }}' http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
```
{{% /codetab %}}

{{% codetab %}}
```bash
curl -d '{ "operation": "delete", "metadata": { "blobName": "myblob" }}' \
  http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
```
{{% /codetab %}}
@ -242,14 +242,14 @@ To perform a delete blob operation, invoke the Azure Blob Storage binding with a
{{% codetab %}}
```bash
curl -d '{ \"operation\": \"delete\", \"metadata\": { \"blobName\": \"myblob\", \"DeleteSnapshotOptions\": \"only\" }}' http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
```
{{% /codetab %}}

{{% codetab %}}
```bash
curl -d '{ "operation": "delete", "metadata": { "blobName": "myblob", "DeleteSnapshotOptions": "only" }}' \
  http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
```
{{% /codetab %}}
@ -261,14 +261,14 @@ To perform a delete blob operation, invoke the Azure Blob Storage binding with a
{{% codetab %}}
```bash
curl -d '{ \"operation\": \"delete\", \"metadata\": { \"blobName\": \"myblob\", \"DeleteSnapshotOptions\": \"include\" }}' http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
```
{{% /codetab %}}

{{% codetab %}}
```bash
curl -d '{ "operation": "delete", "metadata": { "blobName": "myblob", "DeleteSnapshotOptions": "include" }}' \
  http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
```
{{% /codetab %}}
@ -3,7 +3,7 @@ type: docs
title: "Azure CosmosDB binding spec"
linkTitle: "Azure CosmosDB"
description: "Detailed documentation on the Azure CosmosDB binding component"
aliases:
- "/operations/components/setup-bindings/supported-bindings/cosmosdb/"
---
@ -42,11 +42,11 @@ The above example uses secrets as plain strings. It is recommended to use a secr
| Field | Required | Binding support | Details | Example |
|--------------------|:--------:|--------|---------|---------|
| url | Y | Output | The CosmosDB URL | `"https://******.documents.azure.com:443/"` |
| masterKey | Y | Output | The CosmosDB account master key | `"master-key"` |
| database | Y | Output | The name of the CosmosDB database | `"OrderDb"` |
| collection | Y | Output | The name of the container inside the database | `"Orders"` |
| partitionKey | Y | Output | The name of the partitionKey to extract from the payload; it is used to partition documents in the container | `"OrderId"`, `"message"` |

For more information see [Azure Cosmos DB resource model](https://docs.microsoft.com/en-us/azure/cosmos-db/account-databases-containers-items).
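Because the partition key is extracted from the payload, a document sent through the `create` operation must contain the configured `partitionKey` field; a hedged sketch (the document fields and placeholders are illustrative):

```bash
# Create a document; the "OrderId" field matches the configured partitionKey.
curl -d '{ "operation": "create", "data": { "id": "1", "OrderId": "42" } }' \
  http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
```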
@ -3,7 +3,7 @@ type: docs
title: "Cron binding spec"
linkTitle: "Cron"
description: "Detailed documentation on the cron binding component"
aliases:
- "/operations/components/setup-bindings/supported-bindings/cron/"
---
@ -23,7 +23,7 @@ spec:
  version: v1
  metadata:
  - name: schedule
    value: "@every 15m" # valid cron schedule
```

## Spec metadata fields
@ -32,9 +32,9 @@ spec:
|--------------------|:--------:|-------|--------|---------|
| schedule | Y | Input/Output | The valid cron schedule to use. See [this](#schedule-format) for more details | `"@every 15m"` |

### Schedule Format

The Dapr cron binding supports the following format:

| Character | Descriptor | Acceptable values |
|:---------:|-------------------|-----------------------------------------------|
@ -45,7 +45,7 @@ The Dapr cron binding supports following formats:
| 5 | Month | 1 to 12, or * |
| 6 | Day of the week | 0 to 7 (where 0 and 7 represent Sunday), or * |

For example:

* `30 * * * * *` - at second 30 of every minute
* `0 15 * * * *` - at minute 15 of every hour
@ -57,11 +57,11 @@ For example:
|
|||
For ease of use, the Dapr cron binding also supports few shortcuts:
|
||||
|
||||
* `@every 15s` where `s` is seconds, `m` minutes, and `h` hours
|
||||
* `@daily` or `@hourly` which runs at that period from the time the binding is initialized
|
||||
* `@daily` or `@hourly` which runs at that period from the time the binding is initialized
|
||||
|
||||
## Binding support
|
||||
|
||||
This component supports both **input and output** binding interfaces.
|
||||
This component supports both **input and output** binding interfaces.
|
||||
|
||||
This component supports **output binding** with the following operations:
|
||||
|
||||
|
|
|
@ -3,7 +3,7 @@ type: docs
title: "AWS DynamoDB binding spec"
linkTitle: "AWS DynamoDB"
description: "Detailed documentation on the AWS DynamoDB binding component"
aliases:
- "/operations/components/setup-bindings/supported-bindings/dynamodb/"
---
@ -44,7 +44,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr
| Field | Required | Binding support | Details | Example |
|--------------------|:--------:|------------|-----|---------|
| table | Y | Output | The DynamoDB table name | `"items"` |
| region | Y | Output | The specific AWS region the AWS DynamoDB instance is deployed in | `"us-east-1"` |
| accessKey | Y | Output | The AWS Access Key to access this resource | `"key"` |
| secretKey | Y | Output | The AWS Secret Access Key to access this resource | `"secretAccessKey"` |
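A hedged sketch of writing an item through the output binding's `create` operation; the item fields are illustrative and would need to match the table's key schema:

```bash
# Write an item to the configured DynamoDB table.
curl -d '{ "operation": "create", "data": { "id": "1", "value": "hello" } }' \
  http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
```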
@ -3,7 +3,7 @@ type: docs
title: "Azure Event Grid binding spec"
linkTitle: "Azure Event Grid"
description: "Detailed documentation on the Azure Event Grid binding component"
aliases:
- "/operations/components/setup-bindings/supported-bindings/eventgrid/"
---
@ -32,7 +32,7 @@ spec:
  - name: clientSecret
    value: "[ClientSecret]"
  - name: subscriberEndpoint
    value: "[SubscriberEndpoint]"
  - name: handshakePort
    value: [HandshakePort]
  - name: scope
@ -60,13 +60,13 @@ The above example uses secrets as plain strings. It is recommended to use a secr
| clientId | Y | Input | The client id that should be used by the binding to create or update the Event Grid Event Subscription | `"clientId"` |
| clientSecret | Y | Input | The client secret that should be used by the binding to create or update the Event Grid Event Subscription | `"clientSecret"` |
| subscriberEndpoint | Y | Input | The HTTPS endpoint at which Event Grid will handshake and send Cloud Events. If you aren't re-writing URLs on ingress, it should be of the form `https://[YOUR HOSTNAME]/api/events`. If testing on your local machine, you can use something like [ngrok](https://ngrok.com) to create a public endpoint. | `"https://[YOUR HOSTNAME]/api/events"` |
| handshakePort | Y | Input | The container port that the input binding will listen on for handshakes and events | `"9000"` |
| scope | Y | Input | The identifier of the resource to which the event subscription needs to be created or updated. See [here](#scope) for more details | `"/subscriptions/{subscriptionId}/"` |
| eventSubscriptionName | N | Input | The name of the event subscription. Event subscription names must be between 3 and 64 characters long and should use alphanumeric letters only | `"name"` |
| accessKey | Y | Output | The Access Key to be used for publishing an Event Grid Event to a custom topic | `"accessKey"` |
| topicEndpoint | Y | Output | The topic endpoint to which this output binding should publish events | `"topic-endpoint"` |
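For local testing, a public endpoint for the handshake port can be obtained with a tunneling tool; a minimal sketch, assuming ngrok is installed and the handshake port is `9000`:

```bash
# Expose the local handshake port so Event Grid can reach it.
ngrok http 9000
```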

### Scope
Scope is the identifier of the resource to which the event subscription needs to be created or updated. The scope can be a subscription, a resource group, a top-level resource belonging to a resource provider namespace, or an Event Grid topic. For example:
- `'/subscriptions/{subscriptionId}/'` for a subscription
@ -76,7 +76,7 @@ Scope is the identifier of the resource to which the event subscription needs to
> Values in braces {} should be replaced with actual values.

## Binding support

This component supports both **input and output** binding interfaces.

This component supports **output binding** with the following operations:
@ -246,7 +246,7 @@ $ kubectl logs nginx-nginx-ingress-controller-649df94867-fp6mg nginx-ingress-con
$ kubectl delete pod nginx-nginx-ingress-controller-649df94867-fp6mg

# Check the logs again - it should start returning 200
# .."OPTIONS /api/events HTTP/1.1" 200..
```

## Related links
@ -3,7 +3,7 @@ type: docs
title: "Azure Event Hubs binding spec"
linkTitle: "Azure Event Hubs"
description: "Detailed documentation on the Azure Event Hubs binding component"
aliases:
- "/operations/components/setup-bindings/supported-bindings/eventhubs/"
---
@ -28,11 +28,11 @@ spec:
  - name: consumerGroup # EventHubs consumer group
    value: "group1"
  - name: storageAccountName # Azure Storage Account Name
    value: "accountName"
  - name: storageAccountKey # Azure Storage Account Key
    value: "accountKey"
  - name: storageContainerName # Azure Storage Container Name
    value: "containerName"
  - name: partitionID # (Optional) PartitionID to send and receive events
    value: 0
```
@ -48,7 +48,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr
| connectionString | Y | Output | The [EventHubs connection string](https://docs.microsoft.com/en-us/azure/event-hubs/authorize-access-shared-access-signature). Note that this is the EventHub itself and not the EventHubs namespace. Make sure to use the child EventHub shared access policy connection string | `"Endpoint=sb://****"` |
| consumerGroup | Y | Output | The name of an [EventHubs Consumer Group](https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-features#consumer-groups) to listen on | `"group1"` |
| storageAccountName | Y | Output | The name of the Azure Storage account to persist checkpoint data on | `"accountName"` |
| storageAccountKey | Y | Output | The account key for the Azure Storage account to persist checkpoint data on | `"accountKey"` |
| storageContainerName | Y | Output | The name of the container in the Azure Storage account to persist checkpoint data on | `"containerName"` |
| partitionID | N | Output | ID of the partition to send and receive events | `0` |
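A hedged sketch of publishing an event through the output binding's `create` operation; the binding name placeholder and payload are illustrative:

```bash
# Publish an event to the configured Event Hub.
curl -d '{ "operation": "create", "data": "Hello Event Hubs" }' \
  http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
```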