mirror of https://github.com/dapr/docs.git
Merge branch 'v1.11' into workflow-review
This commit is contained in:
commit
db2cdd1626
|
@ -1,13 +1,13 @@
|
|||
name: Azure Static Web App Versioned
|
||||
name: Azure Static Web App v1.9
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- v1.10
|
||||
- v1.11
|
||||
pull_request:
|
||||
types: [opened, synchronize, reopened, closed]
|
||||
branches:
|
||||
- v1.10
|
||||
- v1.11
|
||||
|
||||
jobs:
|
||||
build_and_deploy_job:
|
||||
|
@ -15,7 +15,7 @@ jobs:
|
|||
runs-on: ubuntu-latest
|
||||
name: Build and Deploy Job
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
submodules: recursive
|
||||
fetch-depth: 0
|
||||
|
@ -23,19 +23,22 @@ jobs:
|
|||
run: cd daprdocs && git submodule update --init --recursive && sudo npm install -D --save autoprefixer && sudo npm install -D --save postcss-cli
|
||||
- name: Build And Deploy
|
||||
id: builddeploy
|
||||
uses: Azure/static-web-apps-deploy@v1
|
||||
uses: Azure/static-web-apps-deploy@v0.0.1-preview
|
||||
env:
|
||||
HUGO_ENV: production
|
||||
HUGO_VERSION: "0.100.2"
|
||||
with:
|
||||
azure_static_web_apps_api_token: ${{ secrets.AZURE_STATIC_WEB_APPS_API_TOKEN_PROUD_BAY_0E9E0E81E }}
|
||||
skip_deploy_on_missing_secrets: true
|
||||
azure_static_web_apps_api_token: ${{ secrets.AZURE_STATIC_WEB_APPS_V1_11 }}
|
||||
repo_token: ${{ secrets.GITHUB_TOKEN }} # Used for Github integrations (i.e. PR comments)
|
||||
skip_deploy_on_missing_secrets: true
|
||||
action: "upload"
|
||||
app_location: "/daprdocs"
|
||||
###### Repository/Build Configurations - These values can be configured to match your app requirements. ######
|
||||
# For more information regarding Static Web App workflow configurations, please visit: https://aka.ms/swaworkflowconfig
|
||||
app_location: "/daprdocs" # App source code path
|
||||
api_location: "api" # Api source code path - optional
|
||||
output_location: "public" # Built app content directory - optional
|
||||
app_build_command: "git config --global --add safe.directory /github/workspace && hugo"
|
||||
output_location: "public"
|
||||
skip_api_build: true
|
||||
###### End of Repository/Build Configurations ######
|
||||
|
||||
close_pull_request_job:
|
||||
if: github.event_name == 'pull_request' && github.event.action == 'closed'
|
||||
|
@ -44,7 +47,8 @@ jobs:
|
|||
steps:
|
||||
- name: Close Pull Request
|
||||
id: closepullrequest
|
||||
uses: Azure/static-web-apps-deploy@v1
|
||||
uses: Azure/static-web-apps-deploy@v0.0.1-preview
|
||||
with:
|
||||
azure_static_web_apps_api_token: ${{ secrets.AZURE_STATIC_WEB_APPS_V1_10 }}
|
||||
azure_static_web_apps_api_token: ${{ secrets.AZURE_STATIC_WEB_APPS_V1_11 }}
|
||||
skip_deploy_on_missing_secrets: true
|
||||
action: "close"
|
|
@ -26,3 +26,6 @@
|
|||
[submodule "sdkdocs/pluggable-components/dotnet"]
|
||||
path = sdkdocs/pluggable-components/dotnet
|
||||
url = https://github.com/dapr-sandbox/components-dotnet-sdk
|
||||
[submodule "sdkdocs/pluggable-components/go"]
|
||||
path = sdkdocs/pluggable-components/go
|
||||
url = https://github.com/dapr-sandbox/components-go-sdk
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
# Site Configuration
|
||||
baseURL = "https://docs.dapr.io"
|
||||
baseURL = "https://v1-11.docs.dapr.io"
|
||||
title = "Dapr Docs"
|
||||
theme = "docsy"
|
||||
disableFastRender = true
|
||||
|
@ -71,6 +71,10 @@ id = "G-60C6Q1ETC1"
|
|||
source = "../sdkdocs/pluggable-components/dotnet/daprdocs/content/en/dotnet-sdk-docs"
|
||||
target = "content/developing-applications/develop-components/pluggable-components/pluggable-components-sdks/pluggable-components-dotnet"
|
||||
lang = "en"
|
||||
[[module.mounts]]
|
||||
source = "../sdkdocs/pluggable-components/go/daprdocs/content/en/go-sdk-docs"
|
||||
target = "content/developing-applications/develop-components/pluggable-components/pluggable-components-sdks/pluggable-components-go"
|
||||
lang = "en"
|
||||
[[module.mounts]]
|
||||
source = "../sdkdocs/dotnet/daprdocs/content/en/dotnet-sdk-contributing"
|
||||
target = "content/contributing/sdk-contrib/"
|
||||
|
@ -164,23 +168,20 @@ offlineSearch = false
|
|||
github_repo = "https://github.com/dapr/docs"
|
||||
github_project_repo = "https://github.com/dapr/dapr"
|
||||
github_subdir = "daprdocs"
|
||||
github_branch = "v1.10"
|
||||
github_branch = "v1.11"
|
||||
|
||||
# Versioning
|
||||
version_menu = "v1.10 (latest)"
|
||||
version = "v1.10"
|
||||
version_menu = "v1.11 (preview)"
|
||||
version = "v1.11"
|
||||
archived_version = false
|
||||
url_latest_version = "https://docs.dapr.io"
|
||||
|
||||
[[params.versions]]
|
||||
version = "v1.11 (preview)"
|
||||
url = "https://v1-11.docs.dapr.io"
|
||||
[[params.versions]]
|
||||
version = "v1.10 (latest)"
|
||||
url = "#"
|
||||
[[params.versions]]
|
||||
version = "v1.9"
|
||||
url = "https://v1-9.docs.dapr.io"
|
||||
version = "v1.10 (latest)"
|
||||
url = "https://docs.dapr.io"
|
||||
[[params.versions]]
|
||||
version = "v1.9"
|
||||
url = "https://v1-9.docs.dapr.io"
|
||||
|
|
|
@ -27,6 +27,6 @@ The following are the building blocks provided by Dapr:
|
|||
| [**Actors**]({{< ref "actors-overview.md" >}}) | `/v1.0/actors` | An actor is an isolated, independent unit of compute and state with single-threaded execution. Dapr provides an actor implementation based on the virtual actor pattern which provides a single-threaded programming model and where actors are garbage collected when not in use.
|
||||
| [**Observability**]({{< ref "observability-concept.md" >}}) | `N/A` | Dapr system components and runtime emit metrics, logs, and traces to debug, operate and monitor Dapr system services, components and user applications.
|
||||
| [**Secrets**]({{< ref "secrets-overview.md" >}}) | `/v1.0/secrets` | Dapr provides a secrets building block API and integrates with secret stores such as public cloud stores, local stores and Kubernetes to store the secrets. Services can call the secrets API to retrieve secrets, for example to get a connection string to a database.
|
||||
| [**Configuration**]({{< ref "configuration-api-overview.md" >}}) | `/v1.0-alpha1/configuration` | The Configuration API enables you to retrieve and subscribe to application configuration items for supported configuration stores. This enables an application to retrieve specific configuration information, for example, at start up or when configuration changes are made in the store.
|
||||
| [**Configuration**]({{< ref "configuration-api-overview.md" >}}) | `/v1.0/configuration` | The Configuration API enables you to retrieve and subscribe to application configuration items for supported configuration stores. This enables an application to retrieve specific configuration information, for example, at start up or when configuration changes are made in the store.
|
||||
| [**Distributed lock**]({{< ref "distributed-lock-api-overview.md" >}}) | `/v1.0-alpha1/lock` | The distributed lock API enables you to take a lock on a resource so that multiple instances of an application can access the resource without conflicts and provide consistency guarantees.
|
||||
| [**Workflows**]({{< ref "workflow-overview.md" >}}) | `/v1.0-alpha1/workflow` | The Workflow API enables you to define long running, persistent processes or data flows that span multiple microservices using Dapr workflows or workflow components. The Workflow API can be combined with other Dapr API building blocks. For example, a workflow can call another service with service invocation or retrieve secrets, providing flexibility and portability.
|
|
@ -36,7 +36,7 @@ Each of these building block APIs is independent, meaning that you can use one,
|
|||
| Building Block | Description |
|
||||
|----------------|-------------|
|
||||
| [**Service-to-service invocation**]({{< ref "service-invocation-overview.md" >}}) | Resilient service-to-service invocation enables method calls, including retries, on remote services, wherever they are located in the supported hosting environment.
|
||||
| [**State management**]({{< ref "state-management-overview.md" >}}) | With state management for storing and querying key/value pairs, long-running, highly available, stateful services can be easily written alongside stateless services in your application. The state store is pluggable and examples include AWS DynamoDB, Azure CosmosDB, Azure SQL Server, GCP Firebase, PostgreSQL or Redis, among others.
|
||||
| [**State management**]({{< ref "state-management-overview.md" >}}) | With state management for storing and querying key/value pairs, long-running, highly available, stateful services can be easily written alongside stateless services in your application. The state store is pluggable and examples include AWS DynamoDB, Azure Cosmos DB, Azure SQL Server, GCP Firebase, PostgreSQL or Redis, among others.
|
||||
| [**Publish and subscribe**]({{< ref "pubsub-overview.md" >}}) | Publishing events and subscribing to topics between services enables event-driven architectures to simplify horizontal scalability and make them resilient to failure. Dapr provides at-least-once message delivery guarantee, message TTL, consumer groups and other advance features.
|
||||
| [**Resource bindings**]({{< ref "bindings-overview.md" >}}) | Resource bindings with triggers builds further on event-driven architectures for scale and resiliency by receiving and sending events to and from any external source such as databases, queues, file systems, etc.
|
||||
| [**Actors**]({{< ref "actors-overview.md" >}}) | A pattern for stateful and stateless objects that makes concurrency simple, with method and state encapsulation. Dapr provides many capabilities in its actor runtime, including concurrency, state, and life-cycle management for actor activation/deactivation, and timers and reminders to wake up actors.
|
||||
|
|
|
@ -34,7 +34,6 @@ Dapr does work with service meshes. In the case where both are deployed together
|
|||
Watch these recordings from the Dapr community calls showing presentations on running Dapr together with different service meshes:
|
||||
- General overview and a demo of [Dapr and Linkerd](https://youtu.be/xxU68ewRmz8?t=142)
|
||||
- Demo of running [Dapr and Istio](https://youtu.be/ngIDOQApx8g?t=335)
|
||||
- Learn more about [running Dapr with Open Service Mesh (OSM)]({{<ref open-service-mesh>}}).
|
||||
|
||||
## When to use Dapr or a service mesh or both
|
||||
Should you be using Dapr, a service mesh, or both? The answer depends on your requirements. If, for example, you are looking to use Dapr for one or more building blocks such as state management or pub/sub, and you are considering using a service mesh just for network security or observability, you may find that Dapr is a good fit and that a service mesh is not required.
|
||||
|
|
|
@ -16,6 +16,7 @@ This page details all of the common terms you may come across in the Dapr docs.
|
|||
| Configuration | A YAML file declaring all of the settings for Dapr sidecars or the Dapr control plane. This is where you can configure control plane mTLS settings, or the tracing and middleware settings for an application instance. | [Dapr configuration]({{< ref configuration-concept.md >}})
|
||||
| Dapr | Distributed Application Runtime. | [Dapr overview]({{< ref overview.md >}})
|
||||
| Dapr control plane | A collection of services that are part of a Dapr installation on a hosting platform such as a Kubernetes cluster. This allows Dapr-enabled applications to run on the platform and handles Dapr capabilities such as actor placement, Dapr sidecar injection, or certificate issuance/rollover. | [Self-hosted overview]({{< ref self-hosted-overview >}})<br />[Kubernetes overview]({{< ref kubernetes-overview >}})
|
||||
| HTTPEndpoint | HTTPEndpoint is a Dapr resource use to identify non-Dapr endpoints to invoke via the service invocation API. | [Service invocation API]({{< ref service_invocation_api.md >}})
|
||||
| Self-hosted | Windows/macOS/Linux machine(s) where you can run your applications with Dapr. Dapr provides the capability to run on machines in "self-hosted" mode. | [Self-hosted mode]({{< ref self-hosted-overview.md >}})
|
||||
| Service | A running application or binary. This can refer to your application or to a Dapr application.
|
||||
| Sidecar | A program that runs alongside your application as a separate process or container. | [Sidecar pattern](https://docs.microsoft.com/azure/architecture/patterns/sidecar)
|
||||
|
|
|
@ -260,7 +260,7 @@ You need Microsoft employee access to create a new Azure Static Web App.
|
|||
- One saying your request was received.
|
||||
- One saying the request was completed.
|
||||
1. Back in the Azure Portal, click **Add**. You may need to click a couple times to account for DNS delay.
|
||||
1. An SSL is now generated for you and the DNS record is saved. This may take 2-3 minutes.
|
||||
1. A TLS certificate is now generated for you and the DNS record is saved. This may take 2-3 minutes.
|
||||
1. Navigate to `https://v1-2.docs.dapr.io` and verify a blank website loads correctly.
|
||||
|
||||
### Configure future website branch
|
||||
|
|
|
@ -86,6 +86,10 @@ The Dapr actor runtime provides a simple turn-based access model for accessing a
|
|||
- [Learn more about actor reentrancy]({{< ref "actor-reentrancy.md" >}})
|
||||
- [Learn more about the turn-based access model]({{< ref "actors-features-concepts.md#turn-based-access" >}})
|
||||
|
||||
### State
|
||||
|
||||
Transactional state stores can be used to store actor state. To specify which state store to use for actors, specify value of property `actorStateStore` as `true` in the state store component's metadata section. Actors state is stored with a specific scheme in transactional state stores, allowing for consistent querying. Only a single state store component can be used as the state store for all actors. Read the [state API reference]({{< ref state_api.md >}}) and the [actors API reference]({{< ref actors_api.md >}}) to learn more about state stores for actors.
|
||||
|
||||
### Actor timers and reminders
|
||||
|
||||
Actors can schedule periodic work on themselves by registering either timers or reminders.
|
||||
|
@ -105,4 +109,4 @@ This distinction allows users to trade off between light-weight but stateless ti
|
|||
## Related links
|
||||
|
||||
- [Actors API reference]({{< ref actors_api.md >}})
|
||||
- Refer to the [Dapr SDK documentation and examples]({{< ref "developing-applications/sdks/#sdk-languages" >}}).
|
||||
- Refer to the [Dapr SDK documentation and examples]({{< ref "developing-applications/sdks/#sdk-languages" >}}).
|
||||
|
|
|
@ -8,7 +8,7 @@ aliases:
|
|||
- "/developing-applications/building-blocks/actors/actors-background"
|
||||
---
|
||||
|
||||
[Actor reminders]({{< ref "howto-actors-partitioning.md#actor-reminders" >}}) are persisted and continue to be triggered after sidecar restarts. Applications with multiple reminders registered can experience the following issues:
|
||||
[Actor reminders]({{< ref "actors-timers-reminders.md#actor-reminders" >}}) are persisted and continue to be triggered after sidecar restarts. Applications with multiple reminders registered can experience the following issues:
|
||||
|
||||
- Low throughput on reminders registration and de-registration
|
||||
- Limited number of reminders registered based on the single record size limit on the state store
|
||||
|
|
|
@ -24,6 +24,10 @@ For example, with bindings, your microservice can respond to incoming Twilio/SMS
|
|||
Bindings are developed independently of Dapr runtime. You can [view and contribute to the bindings](https://github.com/dapr/components-contrib/tree/master/bindings).
|
||||
{{% /alert %}}
|
||||
|
||||
{{% alert title="Note" color="primary" %}}
|
||||
If you are using the HTTP Binding, then it is preferable to use [service invocation]({{< ref service_invocation_api.md >}}) instead. Read [How-To: Invoke Non-Dapr Endpoints using HTTP]({{< ref "howto-invoke-non-dapr-endpoints.md" >}}) for more information.
|
||||
{{% /alert %}}
|
||||
|
||||
## Input bindings
|
||||
|
||||
With input bindings, you can trigger your application when an event from an external resource occurs. An optional payload and metadata may be sent with the request.
|
||||
|
|
|
@ -6,6 +6,7 @@ description: "Invoke external systems with output bindings"
|
|||
weight: 300
|
||||
---
|
||||
|
||||
|
||||
With output bindings, you can invoke external resources. An optional payload and metadata can be sent with the invocation request.
|
||||
|
||||
<img src="/images/howto-bindings/kafka-output-binding.png" width=1000 alt="Diagram showing bindings of example service">
|
||||
|
|
|
@ -40,6 +40,11 @@ Want to put the Dapr configuration API to the test? Walk through the following q
|
|||
|
||||
Want to skip the quickstarts? Not a problem. You can try out the configuration building block directly in your application to read and manage configuration data. After [Dapr is installed]({{< ref "getting-started/_index.md" >}}), you can begin using the configuration API starting with [the configuration how-to guide]({{< ref howto-manage-configuration.md >}}).
|
||||
|
||||
## Watch the demo
|
||||
|
||||
Watch [this demo of using the Dapr Configuration building block](https://youtu.be/tNq-n1XQuLA?t=496)
|
||||
|
||||
<iframe width="560" height="315" src="https://www.youtube-nocookie.com/embed/tNq-n1XQuLA?start=496" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" allowfullscreen></iframe>
|
||||
|
||||
## Next steps
|
||||
Follow these guides on:
|
||||
|
|
|
@ -67,9 +67,11 @@ spec:
|
|||
```
|
||||
|
||||
## Retrieve Configuration Items
|
||||
### Get configuration items using Dapr SDKs
|
||||
### Get configuration items
|
||||
|
||||
{{< tabs ".NET" Java Python>}}
|
||||
The following example shows how to get a saved configuration item using the Dapr Configuration API.
|
||||
|
||||
{{< tabs ".NET" Java Python Go Javascript "HTTP API (BASH)" "HTTP API (Powershell)">}}
|
||||
|
||||
{{% codetab %}}
|
||||
|
||||
|
@ -87,7 +89,6 @@ namespace ConfigurationApi
|
|||
{
|
||||
private static readonly string CONFIG_STORE_NAME = "configstore";
|
||||
|
||||
[Obsolete]
|
||||
public static async Task Main(string[] args)
|
||||
{
|
||||
using var client = new DaprClientBuilder().Build();
|
||||
|
@ -105,7 +106,7 @@ namespace ConfigurationApi
|
|||
```java
|
||||
//dependencies
|
||||
import io.dapr.client.DaprClientBuilder;
|
||||
import io.dapr.client.DaprPreviewClient;
|
||||
import io.dapr.client.DaprClient;
|
||||
import io.dapr.client.domain.ConfigurationItem;
|
||||
import io.dapr.client.domain.GetConfigurationRequest;
|
||||
import io.dapr.client.domain.SubscribeConfigurationRequest;
|
||||
|
@ -116,7 +117,7 @@ import reactor.core.publisher.Mono;
|
|||
private static final String CONFIG_STORE_NAME = "configstore";
|
||||
|
||||
public static void main(String[] args) throws Exception {
|
||||
try (DaprPreviewClient client = (new DaprClientBuilder()).buildPreviewClient()) {
|
||||
try (DaprClient client = (new DaprClientBuilder()).build()) {
|
||||
List<String> keys = new ArrayList<>();
|
||||
keys.add("orderId1");
|
||||
keys.add("orderId2");
|
||||
|
@ -150,79 +151,31 @@ with DaprClient() as d:
|
|||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{< /tabs >}}
|
||||
|
||||
### Get configuration items using gRPC API
|
||||
|
||||
Using your [favorite language](https://grpc.io/docs/languages/), create a Dapr gRPC client from the [Dapr proto](https://github.com/dapr/dapr/blob/master/dapr/proto/runtime/v1/dapr.proto). The following examples show Java, C#, Python and Javascript clients.
|
||||
|
||||
{{< tabs Java Dotnet Python Javascript >}}
|
||||
|
||||
{{% codetab %}}
|
||||
|
||||
```java
|
||||
```go
|
||||
package main
|
||||
|
||||
Dapr.ServiceBlockingStub stub = Dapr.newBlockingStub(channel);
|
||||
stub.GetConfigurationAlpha1(new GetConfigurationRequest{ StoreName = "redisconfigstore", Keys = new String[]{"myconfig"} });
|
||||
```
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
{{% /codetab %}}
|
||||
dapr "github.com/dapr/go-sdk/client"
|
||||
)
|
||||
|
||||
{{% codetab %}}
|
||||
|
||||
```csharp
|
||||
|
||||
var call = client.GetConfigurationAlpha1(new GetConfigurationRequest { StoreName = "redisconfigstore", Keys = new String[]{"myconfig"} });
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{% codetab %}}
|
||||
|
||||
```python
|
||||
response = stub.GetConfigurationAlpha1(request={ StoreName: 'redisconfigstore', Keys = ['myconfig'] })
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{% codetab %}}
|
||||
|
||||
```javascript
|
||||
client.GetConfigurationAlpha1({ StoreName: 'redisconfigstore', Keys = ['myconfig'] })
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{< /tabs >}}
|
||||
|
||||
### Watch configuration items using Dapr SDKs
|
||||
|
||||
{{< tabs "Dotnet Extension" "Dotnet Client">}}
|
||||
{{% codetab %}}
|
||||
|
||||
```csharp
|
||||
[Obsolete("Configuration API is an Alpha API. Obsolete will be removed when the API is no longer Alpha")]
|
||||
public static void Main(string[] args)
|
||||
{
|
||||
CreateHostBuilder(args).Build().Run();
|
||||
}
|
||||
|
||||
public static IHostBuilder CreateHostBuilder(string[] args)
|
||||
{
|
||||
var client = new DaprClientBuilder().Build();
|
||||
return Host.CreateDefaultBuilder(args)
|
||||
.ConfigureAppConfiguration(config =>
|
||||
{
|
||||
// Get the initial value from the configuration component.
|
||||
config.AddDaprConfigurationStore("redisconfig", new List<string>() { "withdrawVersion" }, client, TimeSpan.FromSeconds(20));
|
||||
|
||||
// Watch the keys in the configuration component and update it in local configurations.
|
||||
config.AddStreamingDaprConfigurationStore("redisconfig", new List<string>() { "withdrawVersion", "source" }, client, TimeSpan.FromSeconds(20));
|
||||
})
|
||||
.ConfigureWebHostDefaults(webBuilder =>
|
||||
{
|
||||
webBuilder.UseStartup<Startup>();
|
||||
});
|
||||
func main() {
|
||||
ctx := context.Background()
|
||||
client, err := dapr.NewClient()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
items, err := client.GetConfigurationItems(ctx, "configstore", ["orderId1","orderId2"])
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
for key, item := range items {
|
||||
fmt.Printf("get config: key = %s value = %s version = %s",key,(*item).Value, (*item).Version)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
|
@ -230,106 +183,502 @@ public static IHostBuilder CreateHostBuilder(string[] args)
|
|||
|
||||
{{% codetab %}}
|
||||
|
||||
```js
|
||||
import { CommunicationProtocolEnum, DaprClient } from "@dapr/dapr";
|
||||
|
||||
// JS SDK does not support Configuration API over HTTP protocol yet
|
||||
const protocol = CommunicationProtocolEnum.GRPC;
|
||||
const host = process.env.DAPR_HOST ?? "localhost";
|
||||
const port = process.env.DAPR_GRPC_PORT ?? 3500;
|
||||
|
||||
const DAPR_CONFIGURATION_STORE = "configstore";
|
||||
const CONFIGURATION_ITEMS = ["orderId1", "orderId2"];
|
||||
|
||||
async function main() {
|
||||
const client = new DaprClient(host, port, protocol);
|
||||
// Get config items from the config store
|
||||
try {
|
||||
const config = await client.configuration.get(DAPR_CONFIGURATION_STORE, CONFIGURATION_ITEMS);
|
||||
Object.keys(config.items).forEach((key) => {
|
||||
console.log("Configuration for " + key + ":", JSON.stringify(config.items[key]));
|
||||
});
|
||||
} catch (error) {
|
||||
console.log("Could not get config item, err:" + error);
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
main().catch((e) => console.error(e));
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{% codetab %}}
|
||||
|
||||
Launch a dapr sidecar:
|
||||
|
||||
```bash
|
||||
dapr run --app-id orderprocessing --dapr-http-port 3601
|
||||
```
|
||||
|
||||
In a separate terminal, get the configuration item saved earlier:
|
||||
|
||||
```bash
|
||||
curl http://localhost:3601/v1.0/configuration/configstore?key=orderId1
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{% codetab %}}
|
||||
|
||||
Launch a Dapr sidecar:
|
||||
|
||||
```bash
|
||||
dapr run --app-id orderprocessing --dapr-http-port 3601
|
||||
```
|
||||
|
||||
In a separate terminal, get the configuration item saved earlier:
|
||||
|
||||
```powershell
|
||||
Invoke-RestMethod -Uri 'http://localhost:3601/v1.0/configuration/configstore?key=orderId1'
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{< /tabs >}}
|
||||
|
||||
|
||||
### Subscribe to configuration item updates
|
||||
|
||||
Below are code examples that leverage SDKs to subscribe to keys `[orderId1, orderId2]` using `configstore` store component.
|
||||
|
||||
{{< tabs ".NET" "ASP.NET Core" Java Python Go Javascript>}}
|
||||
|
||||
{{% codetab %}}
|
||||
|
||||
```csharp
|
||||
public IDictionary<string, string> Data { get; set; } = new Dictionary<string, string>();
|
||||
public string Id { get; set; } = string.Empty;
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.Threading.Tasks;
|
||||
using Dapr.Client;
|
||||
|
||||
public async Task WatchConfiguration(DaprClient daprClient, string store, IReadOnlyList<string> keys, Dictionary<string, string> metadata, CancellationToken token = default)
|
||||
const string DAPR_CONFIGURATION_STORE = "configstore";
|
||||
var CONFIGURATION_KEYS = new List<string> { "orderId1", "orderId2" };
|
||||
var client = new DaprClientBuilder().Build();
|
||||
|
||||
// Subscribe for configuration changes
|
||||
SubscribeConfigurationResponse subscribe = await client.SubscribeConfiguration(DAPR_CONFIGURATION_STORE, CONFIGURATION_ITEMS);
|
||||
|
||||
// Print configuration changes
|
||||
await foreach (var items in subscribe.Source)
|
||||
{
|
||||
// Initialize the gRPC Stream that will provide configuration updates.
|
||||
var subscribeConfigurationResponse = await daprClient.SubscribeConfiguration(store, keys, metadata, token);
|
||||
// First invocation when app subscribes to config changes only returns subscription id
|
||||
if (items.Keys.Count == 0)
|
||||
{
|
||||
Console.WriteLine("App subscribed to config changes with subscription id: " + subscribe.Id);
|
||||
subscriptionId = subscribe.Id;
|
||||
continue;
|
||||
}
|
||||
var cfg = System.Text.Json.JsonSerializer.Serialize(items);
|
||||
Console.WriteLine("Configuration update " + cfg);
|
||||
}
|
||||
```
|
||||
|
||||
// The response contains a data source which is an IAsyncEnumerable, so it can be iterated through via an awaited foreach.
|
||||
await foreach (var items in subscribeConfigurationResponse.Source.WithCancellation(token))
|
||||
Navigate to the directory containing the above code, then run the following command to launch both a Dapr sidecar and the subscriber application:
|
||||
|
||||
```bash
|
||||
dapr run --app-id orderprocessing -- dotnet run
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{% codetab %}}
|
||||
|
||||
```csharp
|
||||
using System;
|
||||
using Microsoft.AspNetCore.Hosting;
|
||||
using Microsoft.Extensions.Hosting;
|
||||
using Dapr.Client;
|
||||
using Dapr.Extensions.Configuration;
|
||||
using System.Collections.Generic;
|
||||
using System.Threading;
|
||||
|
||||
namespace ConfigurationApi
|
||||
{
|
||||
public class Program
|
||||
{
|
||||
// Each iteration from the stream can contain all the keys that were queried for, so it must be individually iterated through.
|
||||
var data = new Dictionary<string, string>(Data);
|
||||
foreach (var item in items)
|
||||
public static void Main(string[] args)
|
||||
{
|
||||
// The Id in the response is used to unsubscribe.
|
||||
Id = subscribeConfigurationResponse.Id;
|
||||
data[item.Key] = item.Value;
|
||||
Console.WriteLine("Starting application.");
|
||||
CreateHostBuilder(args).Build().Run();
|
||||
Console.WriteLine("Closing application.");
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Creates WebHost Builder.
|
||||
/// </summary>
|
||||
/// <param name="args">Arguments.</param>
|
||||
/// <returns>Returns IHostbuilder.</returns>
|
||||
public static IHostBuilder CreateHostBuilder(string[] args)
|
||||
{
|
||||
var client = new DaprClientBuilder().Build();
|
||||
return Host.CreateDefaultBuilder(args)
|
||||
.ConfigureAppConfiguration(config =>
|
||||
{
|
||||
// Get the initial value and continue to watch it for changes.
|
||||
config.AddDaprConfigurationStore("configstore", new List<string>() { "orderId1","orderId2" }, client, TimeSpan.FromSeconds(20));
|
||||
config.AddStreamingDaprConfigurationStore("configstore", new List<string>() { "orderId1","orderId2" }, client, TimeSpan.FromSeconds(20));
|
||||
|
||||
})
|
||||
.ConfigureWebHostDefaults(webBuilder =>
|
||||
{
|
||||
webBuilder.UseStartup<Startup>();
|
||||
});
|
||||
}
|
||||
Data = data;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Navigate to the directory containing the above code, then run the following command to launch both a Dapr sidecar and the subscriber application:
|
||||
|
||||
```bash
|
||||
dapr run --app-id orderprocessing -- dotnet run
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
{{< /tabs >}}
|
||||
|
||||
### Watch configuration items using gRPC API
|
||||
{{% codetab %}}
|
||||
|
||||
Create a Dapr gRPC client from the [Dapr proto](https://github.com/dapr/dapr/blob/master/dapr/proto/runtime/v1/dapr.proto) using your [preferred language](https://grpc.io/docs/languages/). Use the `SubscribeConfigurationAlpha1` proto method on your client stub to start subscribing to events. The method accepts the following request object:
|
||||
```java
|
||||
import io.dapr.client.DaprClientBuilder;
|
||||
import io.dapr.client.DaprClient;
|
||||
import io.dapr.client.domain.ConfigurationItem;
|
||||
import io.dapr.client.domain.GetConfigurationRequest;
|
||||
import io.dapr.client.domain.SubscribeConfigurationRequest;
|
||||
import reactor.core.publisher.Flux;
|
||||
import reactor.core.publisher.Mono;
|
||||
|
||||
```proto
|
||||
message SubscribeConfigurationRequest {
|
||||
// The name of configuration store.
|
||||
string store_name = 1;
|
||||
//code
|
||||
private static final String CONFIG_STORE_NAME = "configstore";
|
||||
private static String subscriptionId = null;
|
||||
|
||||
// Optional. The key of the configuration item to fetch.
|
||||
// If set, only query for the specified configuration items.
|
||||
// Empty list means fetch all.
|
||||
repeated string keys = 2;
|
||||
public static void main(String[] args) throws Exception {
|
||||
try (DaprClient client = (new DaprClientBuilder()).build()) {
|
||||
// Subscribe for config changes
|
||||
List<String> keys = new ArrayList<>();
|
||||
keys.add("orderId1");
|
||||
keys.add("orderId2");
|
||||
Flux<SubscribeConfigurationResponse> subscription = client.subscribeConfiguration(DAPR_CONFIGURATON_STORE,keys);
|
||||
|
||||
// The metadata which will be sent to configuration store components.
|
||||
map<string,string> metadata = 3;
|
||||
// Read config changes for 20 seconds
|
||||
subscription.subscribe((response) -> {
|
||||
// First ever response contains the subscription id
|
||||
if (response.getItems() == null || response.getItems().isEmpty()) {
|
||||
subscriptionId = response.getSubscriptionId();
|
||||
System.out.println("App subscribed to config changes with subscription id: " + subscriptionId);
|
||||
} else {
|
||||
response.getItems().forEach((k, v) -> {
|
||||
System.out.println("Configuration update for " + k + ": {'value':'" + v.getValue() + "'}");
|
||||
});
|
||||
}
|
||||
});
|
||||
Thread.sleep(20000);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Using this method, you can subscribe to changes in specific keys for a given configuration store. gRPC streaming varies widely based on language - see the [gRPC examples here](https://grpc.io/docs/languages/) for usage.
|
||||
Navigate to the directory containing the above code, then run the following command to launch both a Dapr sidecar and the subscriber application:
|
||||
|
||||
Below are the examples in sdks:
|
||||
```bash
|
||||
dapr run --app-id orderprocessing -- -- mvn spring-boot:run
|
||||
|
||||
{{< tabs Python>}}
|
||||
{{% /codetab %}}
|
||||
|
||||
{{% codetab %}}
|
||||
|
||||
```python
|
||||
#dependencies
|
||||
from dapr.clients import DaprClient
|
||||
#code
|
||||
|
||||
def handler(id: str, resp: "ConfigurationResponse"):
    """Callback invoked by the Dapr SDK for every configuration update.

    Args:
        id: The subscription ID this update belongs to.
        resp: The update payload; ``resp.items`` maps each changed key to an
            item exposing ``value``, ``version`` and ``metadata``.
    """
    # The annotation is quoted so this snippet does not raise NameError when
    # ConfigurationResponse is not imported (the dependency list above only
    # imports DaprClient).
    for key in resp.items:
        # flush=True so updates appear immediately when stdout is a pipe
        # (e.g. under `dapr run`).
        print(f"Subscribed item received key={key} value={resp.items[key].value} "
              f"version={resp.items[key].version} "
              f"metadata={resp.items[key].metadata}", flush=True)
|
||||
|
||||
def executeConfiguration():
    """Subscribe to two configuration keys and print the subscription ID.

    Opens a Dapr client, subscribes to changes of 'orderId1'/'orderId2' in
    the 'configurationstore' store (updates are delivered to `handler`), then
    keeps the process alive for 20 seconds so updates can arrive.
    """
    # Local import: the snippet's dependency list above only imports
    # DaprClient, so `sleep` would otherwise be undefined.
    from time import sleep

    with DaprClient() as d:
        store_name = 'configurationstore'
        keys = ['orderId1', 'orderId2']
        # Renamed from `id` to avoid shadowing the builtin.
        subscription_id = d.subscribe_configuration(store_name=store_name, keys=keys,
                                                    handler=handler, config_metadata={})
        print("Subscription ID is", subscription_id, flush=True)
        # Keep the subscription open long enough to observe updates.
        sleep(20)

executeConfiguration()
|
||||
```
|
||||
|
||||
Navigate to the directory containing the above code, then run the following command to launch both a Dapr sidecar and the subscriber application:
|
||||
|
||||
```bash
|
||||
dapr run --app-id orderprocessing -- python3 OrderProcessingService.py
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{% codetab %}}
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
dapr "github.com/dapr/go-sdk/client"
|
||||
)
|
||||
|
||||
func main() {
|
||||
ctx := context.Background()
|
||||
client, err := dapr.NewClient()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
subscribeID, err := client.SubscribeConfigurationItems(ctx, "configstore", []string{"orderId1", "orderId2"}, func(id string, items map[string]*dapr.ConfigurationItem) {
|
||||
for k, v := range items {
|
||||
fmt.Printf("get updated config key = %s, value = %s version = %s \n", k, v.Value, v.Version)
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
time.Sleep(20*time.Second)
|
||||
}
|
||||
```
|
||||
|
||||
Navigate to the directory containing the above code, then run the following command to launch both a Dapr sidecar and the subscriber application:
|
||||
|
||||
```bash
|
||||
dapr run --app-id orderprocessing -- go run main.go
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{% codetab %}}
|
||||
|
||||
```js
|
||||
import { CommunicationProtocolEnum, DaprClient } from "@dapr/dapr";
|
||||
|
||||
// JS SDK does not support Configuration API over HTTP protocol yet
|
||||
const protocol = CommunicationProtocolEnum.GRPC;
|
||||
const host = process.env.DAPR_HOST ?? "localhost";
|
||||
const port = process.env.DAPR_GRPC_PORT ?? 3500;
|
||||
|
||||
const DAPR_CONFIGURATION_STORE = "configstore";
|
||||
const CONFIGURATION_ITEMS = ["orderId1", "orderId2"];
|
||||
|
||||
// Subscribe to configuration updates, then stop and exit after 20 seconds.
async function main() {
  const client = new DaprClient(host, port, protocol);

  // Handler invoked by the SDK for every configuration change.
  const onConfigUpdate = (config) => {
    console.log("Configuration update", JSON.stringify(config.items));
  };

  try {
    const subscription = await client.configuration.subscribeWithKeys(
      DAPR_CONFIGURATION_STORE,
      CONFIGURATION_ITEMS,
      onConfigUpdate
    );

    // After 20 seconds, close the subscription stream and exit cleanly.
    setTimeout(() => {
      subscription.stop();
      console.log("App unsubscribed to config changes");
      process.exit(0);
    }, 20000);
  } catch (error) {
    console.log("Error subscribing to config updates, err:" + error);
    process.exit(1);
  }
}

main().catch((e) => console.error(e));
|
||||
```
|
||||
|
||||
Navigate to the directory containing the above code, then run the following command to launch both a Dapr sidecar and the subscriber application:
|
||||
|
||||
```bash
|
||||
dapr run --app-id orderprocessing --app-protocol grpc --dapr-grpc-port 3500 -- node index.js
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{< /tabs >}}
|
||||
|
||||
|
||||
### Unsubscribe from configuration item updates
|
||||
|
||||
After you've subscribed to watch configuration items, you will receive updates for all of the subscribed keys. To stop receiving updates, you need to explicitly call the unsubscribe API.
|
||||
|
||||
Following are the code examples showing how you can unsubscribe to configuration updates using unsubscribe API.
|
||||
|
||||
{{< tabs ".NET" Java Python Go Javascript "HTTP API (BASH)" "HTTP API (Powershell)">}}
|
||||
|
||||
{{% codetab %}}
|
||||
```csharp
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.Threading.Tasks;
|
||||
using Dapr.Client;
|
||||
|
||||
const string DAPR_CONFIGURATION_STORE = "configstore";
|
||||
var client = new DaprClientBuilder().Build();
|
||||
|
||||
// Unsubscribe to config updates and exit the app
|
||||
/// <summary>
/// Stops receiving configuration updates for the given subscription and,
/// on success, exits the process.
/// </summary>
/// <param name="subscriptionId">The ID returned when the subscription was created.</param>
async Task unsubscribe(string subscriptionId)
{
    try
    {
        await client.UnsubscribeConfiguration(DAPR_CONFIGURATION_STORE, subscriptionId);
        Console.WriteLine("App unsubscribed from config changes");
        Environment.Exit(0);
    }
    catch (Exception err)
    {
        // Report the failure but keep the process running.
        Console.WriteLine("Error unsubscribing from config updates: " + err.Message);
    }
}
|
||||
```
|
||||
{{% /codetab %}}
|
||||
|
||||
{{% codetab %}}
|
||||
```java
|
||||
import io.dapr.client.DaprClientBuilder;
|
||||
import io.dapr.client.DaprClient;
|
||||
import io.dapr.client.domain.ConfigurationItem;
|
||||
import io.dapr.client.domain.GetConfigurationRequest;
|
||||
import io.dapr.client.domain.SubscribeConfigurationRequest;
|
||||
import reactor.core.publisher.Flux;
|
||||
import reactor.core.publisher.Mono;
|
||||
|
||||
//code
|
||||
private static final String CONFIG_STORE_NAME = "configstore";
|
||||
private static String subscriptionId = null;
|
||||
|
||||
public static void main(String[] args) throws Exception {
|
||||
try (DaprClient client = (new DaprClientBuilder()).build()) {
|
||||
// Unsubscribe from config changes
|
||||
UnsubscribeConfigurationResponse unsubscribe = client
|
||||
.unsubscribeConfiguration(subscriptionId, DAPR_CONFIGURATON_STORE).block();
|
||||
if (unsubscribe.getIsUnsubscribed()) {
|
||||
System.out.println("App unsubscribed to config changes");
|
||||
} else {
|
||||
System.out.println("Error unsubscribing to config updates, err:" + unsubscribe.getMessage());
|
||||
}
|
||||
} catch (Exception e) {
|
||||
System.out.println("Error unsubscribing to config updates," + e.getMessage());
|
||||
System.exit(1);
|
||||
}
|
||||
}
|
||||
```
|
||||
{{% /codetab %}}
|
||||
|
||||
{{% codetab %}}
|
||||
```python
|
||||
#dependencies
|
||||
import asyncio
|
||||
import time
|
||||
import logging
|
||||
from dapr.clients import DaprClient
|
||||
#code
|
||||
async def executeConfiguration():
    """Subscribe to the 'orderId' key and print any items returned.

    NOTE(review): `subscribe_configuration` is awaited here, so this snippet
    assumes the async client API — confirm against the SDK version in use.
    """
    with DaprClient() as d:
        CONFIG_STORE_NAME = 'configstore'
        key = 'orderId'
        # Subscribe to configuration by key.
        configuration = await d.subscribe_configuration(store_name=CONFIG_STORE_NAME, keys=[key], config_metadata={})
        # PEP 8: compare to None with `is not`, not `!=`.
        if configuration is not None:
            items = configuration.get_items()
            for item in items:
                print(f"Subscribe key={item.key} value={item.value} version={item.version}", flush=True)
        else:
            print("Nothing yet")

asyncio.run(executeConfiguration())
|
||||
```
|
||||
subscriptionID = ""
|
||||
|
||||
```bash
|
||||
dapr run --app-id orderprocessing --resources-path components/ -- python3 OrderProcessingService.py
|
||||
with DaprClient() as d:
|
||||
isSuccess = d.unsubscribe_configuration(store_name='configstore', id=subscriptionID)
|
||||
print(f"Unsubscribed successfully? {isSuccess}", flush=True)
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{< /tabs >}}
|
||||
{{% codetab %}}
|
||||
```go
|
||||
package main
|
||||
|
||||
#### Stop watching configuration items
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
After you've subscribed to watch configuration items, the gRPC-server stream starts. Since this stream thread does not close itself, you have to explicitly call the `UnSubscribeConfigurationRequest` API to unsubscribe. This method accepts the following request object:
|
||||
dapr "github.com/dapr/go-sdk/client"
|
||||
)
|
||||
|
||||
```proto
|
||||
// UnSubscribeConfigurationRequest is the message to stop watching the key-value configuration.
|
||||
message UnSubscribeConfigurationRequest {
|
||||
// The name of configuration store.
|
||||
string store_name = 1;
|
||||
// Optional. The keys of the configuration item to stop watching.
|
||||
// Store_name and keys should match previous SubscribeConfigurationRequest's keys and store_name.
|
||||
// Once invoked, the subscription that is watching update for the key-value event is stopped
|
||||
repeated string keys = 2;
|
||||
var DAPR_CONFIGURATION_STORE = "configstore"
|
||||
var subscriptionID = ""
|
||||
|
||||
func main() {
|
||||
client, err := dapr.NewClient()
|
||||
if err != nil {
|
||||
log.Panic(err)
|
||||
}
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
if err := client.UnsubscribeConfigurationItems(ctx, DAPR_CONFIGURATION_STORE , subscriptionID); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
```
|
||||
{{% /codetab %}}
|
||||
|
||||
Using this unsubscribe method, you can stop watching configuration update events. Dapr locates the subscription stream based on the `store_name` and any optional keys supplied and closes it.
|
||||
{{% codetab %}}
|
||||
```js
|
||||
import { CommunicationProtocolEnum, DaprClient } from "@dapr/dapr";
|
||||
|
||||
// JS SDK does not support Configuration API over HTTP protocol yet
|
||||
const protocol = CommunicationProtocolEnum.GRPC;
|
||||
const host = process.env.DAPR_HOST ?? "localhost";
|
||||
const port = process.env.DAPR_GRPC_PORT ?? 3500;
|
||||
|
||||
const DAPR_CONFIGURATION_STORE = "configstore";
|
||||
const CONFIGURATION_ITEMS = ["orderId1", "orderId2"];
|
||||
|
||||
async function main() {
|
||||
const client = new DaprClient(host, port, protocol);
|
||||
|
||||
try {
|
||||
const stream = await client.configuration.subscribeWithKeys(
|
||||
DAPR_CONFIGURATION_STORE,
|
||||
CONFIGURATION_ITEMS,
|
||||
(config) => {
|
||||
console.log("Configuration update", JSON.stringify(config.items));
|
||||
}
|
||||
);
|
||||
setTimeout(() => {
|
||||
// Unsubscribe to config updates
|
||||
stream.stop();
|
||||
console.log("App unsubscribed to config changes");
|
||||
process.exit(0);
|
||||
}, 20000);
|
||||
} catch (error) {
|
||||
console.log("Error subscribing to config updates, err:" + error);
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
main().catch((e) => console.error(e));
|
||||
```
|
||||
{{% /codetab %}}
|
||||
|
||||
{{% codetab %}}
|
||||
```bash
|
||||
curl 'http://localhost:<DAPR_HTTP_PORT>/v1.0/configuration/configstore/<subscription-id>/unsubscribe'
|
||||
```
|
||||
{{% /codetab %}}
|
||||
|
||||
{{% codetab %}}
|
||||
```powershell
|
||||
Invoke-RestMethod -Uri 'http://localhost:<DAPR_HTTP_PORT>/v1.0/configuration/configstore/<subscription-id>/unsubscribe'
|
||||
```
|
||||
{{% /codetab %}}
|
||||
|
||||
## Next steps
|
||||
|
||||
* Read [configuration API overview]({{< ref configuration-api-overview.md >}})
|
||||
* Read [configuration API overview]({{< ref configuration-api-overview.md >}})
|
||||
|
|
|
@ -26,9 +26,9 @@ App health checks are disabled by default.
|
|||
|
||||
### App health checks vs platform-level health checks
|
||||
|
||||
App health checks in Dapr are meant to be complementary to, and not replace, any platform-level health checks, like [liveness probes](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) when running on Kubernetes.
|
||||
App health checks in Dapr are meant to be complementary to, and not replace, any platform-level health checks, like [liveness probes](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) when running on Kubernetes.
|
||||
|
||||
Platform-level health checks (or liveness probes) generally ensure that the application is running, and cause the platform to restart the application in case of failures. For Kubernetes, a failing App Health check won't remove a pod from service discovery. This remains the responsibility of the Kubernetes liveness probe, _not_ Dapr.
|
||||
Platform-level health checks (or liveness probes) generally ensure that the application is running, and cause the platform to restart the application in case of failures.
|
||||
Unlike platform-level health checks, Dapr's app health checks focus on pausing work to an application that is currently unable to accept it, but is expected to be able to resume accepting work *eventually*. Goals include:
|
||||
|
||||
- Not bringing more load to an application that is already overloaded.
|
||||
|
@ -36,11 +36,9 @@ Unlike platform-level health checks, Dapr's app health checks focus on pausing w
|
|||
|
||||
In this regard, Dapr's app health checks are "softer", waiting for an application to be able to process work, rather than terminating the running process in a "hard" way.
|
||||
|
||||
## Configuring app health checks
|
||||
> Note that for Kubernetes, a failing App Health check won't remove a pod from service discovery: this remains the responsibility of the Kubernetes liveness probe, _not_ Dapr.
|
||||
|
||||
{{% alert title="Note" color="primary" %}}
|
||||
App health checks are currently a **preview feature** and require the `AppHealthCheck` feature flag to be enabled. Refer to the documentation for [enabling preview features]({{<ref support-preview-features>}}).
|
||||
{{% /alert %}}
|
||||
## Configuring app health checks
|
||||
|
||||
App health checks are disabled by default, but can be enabled with either:
|
||||
|
||||
|
@ -54,21 +52,21 @@ The full list of options are listed in this table:
|
|||
| CLI flags | Kubernetes deployment annotation | Description | Default value |
|
||||
| ----------------------------- | ----------------------------------- | ----------- | ------------- |
|
||||
| `--enable-app-health-check` | `dapr.io/enable-app-health-check` | Boolean that enables the health checks | Disabled |
|
||||
| `--app-health-check-path` | `dapr.io/app-health-check-path` | Path that Dapr invokes for health probes when the app channel is HTTP (this value is ignored if the app channel is using gRPC) | `/health` |
|
||||
| `--app-health-check-path` | `dapr.io/app-health-check-path` | Path that Dapr invokes for health probes when the app channel is HTTP (this value is ignored if the app channel is using gRPC) | `/healthz` |
|
||||
| `--app-health-probe-interval` | `dapr.io/app-health-probe-interval` | Number of *seconds* between each health probe | `5` |
|
||||
| `--app-health-probe-timeout` | `dapr.io/app-health-probe-timeout` | Timeout in *milliseconds* for health probe requests | `500` |
|
||||
| `--app-health-threshold` | `dapr.io/app-health-threshold` | Max number of consecutive failures before the app is considered unhealthy | `3` |
|
||||
|
||||
> See the [full Dapr arguments and annotations reference]({{<ref arguments-annotations-overview>}}) for all options and how to enable them.
|
||||
|
||||
Additionally, app health checks are impacted by the protocol used for the app channel, which is configured with the `--app-protocol` flag (self-hosted) or the `dapr.io/app-protocol` annotation (Kubernetes); supported values are `http` (default) or `grpc`.
|
||||
Additionally, app health checks are impacted by the protocol used for the app channel, which is configured with the `--app-protocol` flag (self-hosted) or the `dapr.io/app-protocol` annotation (Kubernetes); supported values are `http` (default), `grpc`, `https`, `grpcs`, and `h2c` (HTTP/2 Cleartext).
|
||||
|
||||
### Health check paths
|
||||
|
||||
When using HTTP for `app-protocol`, Dapr performs health probes by making an HTTP call to the path specified in `app-health-check-path`, which is `/health` by default.
|
||||
When using HTTP (including `http`, `https`, and `h2c`) for `app-protocol`, Dapr performs health probes by making an HTTP call to the path specified in `app-health-check-path`, which is `/health` by default.
|
||||
For your app to be considered healthy, the response must have an HTTP status code in the 200-299 range. Any other status code is considered a failure. Dapr is only concerned with the status code of the response, and ignores any response header or body.
|
||||
|
||||
When using gRPC for the app channel, Dapr invokes the method `/dapr.proto.runtime.v1.AppCallbackHealthCheck/HealthCheck` in your application. Most likely, you will use a Dapr SDK to implement the handler for this method.
|
||||
When using gRPC for the app channel (`app-protocol` set to `grpc` or `grpcs`), Dapr invokes the method `/dapr.proto.runtime.v1.AppCallbackHealthCheck/HealthCheck` in your application. Most likely, you will use a Dapr SDK to implement the handler for this method.
|
||||
|
||||
While responding to a health probe request, your app *may* decide to perform additional internal health checks to determine if it's ready to process work from the Dapr runtime. However, this is not required; it's a choice that depends on your application's needs.
|
||||
|
||||
|
@ -88,8 +86,6 @@ Thresholds only apply to failures. A single successful response is enough for Da
|
|||
|
||||
## Example
|
||||
|
||||
Because app health checks are currently a preview feature, make sure to enable the `AppHealthCheck` feature flag. Refer to the documentation for [enabling preview features]({{<ref support-preview-features>}}) before following the examples below.
|
||||
|
||||
{{< tabs "Self-Hosted (CLI)" Kubernetes >}}
|
||||
|
||||
{{% codetab %}}
|
||||
|
|
|
@ -19,7 +19,7 @@ This article demonstrates how to deploy services each with an unique application
|
|||
|
||||
Dapr allows you to assign a global, unique ID for your app. This ID encapsulates the state for your application, regardless of the number of instances it may have.
|
||||
|
||||
{{< tabs Dotnet Java Python Go Javascript Kubernetes>}}
|
||||
{{< tabs Dotnet Java Python Go JavaScript Kubernetes>}}
|
||||
|
||||
{{% codetab %}}
|
||||
|
||||
|
@ -31,13 +31,13 @@ dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-g
|
|||
|
||||
```
|
||||
|
||||
If your app uses an SSL connection, you can tell Dapr to invoke your app over an insecure SSL connection:
|
||||
If your app uses TLS, you can tell Dapr to invoke your app over a TLS connection by setting `--app-protocol https`:
|
||||
|
||||
```bash
|
||||
|
||||
dapr run --app-id checkout --app-port 6002 --dapr-http-port 3602 --dapr-grpc-port 60002 --app-ssl dotnet run
|
||||
dapr run --app-id checkout --app-port 6002 --dapr-http-port 3602 --dapr-grpc-port 60002 --app-protocol https dotnet run
|
||||
|
||||
dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-grpc-port 60001 --app-ssl dotnet run
|
||||
dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-grpc-port 60001 --app-protocol https dotnet run
|
||||
|
||||
```
|
||||
|
||||
|
@ -53,13 +53,13 @@ dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-g
|
|||
|
||||
```
|
||||
|
||||
If your app uses an SSL connection, you can tell Dapr to invoke your app over an insecure SSL connection:
|
||||
If your app uses TLS, you can tell Dapr to invoke your app over a TLS connection by setting `--app-protocol https`:
|
||||
|
||||
```bash
|
||||
|
||||
dapr run --app-id checkout --app-port 6002 --dapr-http-port 3602 --dapr-grpc-port 60002 --app-ssl mvn spring-boot:run
|
||||
dapr run --app-id checkout --app-port 6002 --dapr-http-port 3602 --dapr-grpc-port 60002 --app-protocol https mvn spring-boot:run
|
||||
|
||||
dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-grpc-port 60001 --app-ssl mvn spring-boot:run
|
||||
dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-grpc-port 60001 --app-protocol https mvn spring-boot:run
|
||||
|
||||
```
|
||||
|
||||
|
@ -75,13 +75,13 @@ dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-g
|
|||
|
||||
```
|
||||
|
||||
If your app uses an SSL connection, you can tell Dapr to invoke your app over an insecure SSL connection:
|
||||
If your app uses TLS, you can tell Dapr to invoke your app over a TLS connection by setting `--app-protocol https`:
|
||||
|
||||
```bash
|
||||
|
||||
dapr run --app-id checkout --app-port 6002 --dapr-http-port 3602 --dapr-grpc-port 60002 --app-ssl -- python3 CheckoutService.py
|
||||
dapr run --app-id checkout --app-port 6002 --dapr-http-port 3602 --dapr-grpc-port 60002 --app-protocol https -- python3 CheckoutService.py
|
||||
|
||||
dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-grpc-port 60001 --app-ssl -- python3 OrderProcessingService.py
|
||||
dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-grpc-port 60001 --app-protocol https -- python3 OrderProcessingService.py
|
||||
|
||||
```
|
||||
|
||||
|
@ -97,13 +97,13 @@ dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-g
|
|||
|
||||
```
|
||||
|
||||
If your app uses an SSL connection, you can tell Dapr to invoke your app over an insecure SSL connection:
|
||||
If your app uses TLS, you can tell Dapr to invoke your app over a TLS connection by setting `--app-protocol https`:
|
||||
|
||||
```bash
|
||||
|
||||
dapr run --app-id checkout --app-port 6002 --dapr-http-port 3602 --dapr-grpc-port 60002 --app-ssl go run CheckoutService.go
|
||||
dapr run --app-id checkout --app-port 6002 --dapr-http-port 3602 --dapr-grpc-port 60002 --app-protocol https go run CheckoutService.go
|
||||
|
||||
dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-grpc-port 60001 --app-ssl go run OrderProcessingService.go
|
||||
dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-grpc-port 60001 --app-protocol https go run OrderProcessingService.go
|
||||
|
||||
```
|
||||
|
||||
|
@ -119,13 +119,13 @@ dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-g
|
|||
|
||||
```
|
||||
|
||||
If your app uses an SSL connection, you can tell Dapr to invoke your app over an insecure SSL connection:
|
||||
If your app uses TLS, you can tell Dapr to invoke your app over a TLS connection by setting `--app-protocol https`:
|
||||
|
||||
```bash
|
||||
|
||||
dapr run --app-id checkout --app-port 6002 --dapr-http-port 3602 --dapr-grpc-port 60002 --app-ssl npm start
|
||||
dapr run --app-id checkout --app-port 6002 --dapr-http-port 3602 --dapr-grpc-port 60002 --app-protocol https npm start
|
||||
|
||||
dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-grpc-port 60001 --app-ssl npm start
|
||||
dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-grpc-port 60001 --app-protocol https npm start
|
||||
|
||||
```
|
||||
|
||||
|
@ -161,7 +161,7 @@ spec:
|
|||
...
|
||||
```
|
||||
|
||||
*If your app uses an SSL connection, you can tell Dapr to invoke your app over an insecure SSL connection with the `app-ssl: "true"` annotation (full list [here]({{< ref arguments-annotations-overview.md >}}))*
|
||||
If your app uses a TLS connection, you can tell Dapr to invoke your app over TLS with the `app-protocol: "https"` annotation (full list [here]({{< ref arguments-annotations-overview.md >}})). Note that Dapr does not validate TLS certificates presented by the app.
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
|
|
|
@ -0,0 +1,92 @@
|
|||
---
|
||||
type: docs
|
||||
title: "How-To: Invoke Non-Dapr Endpoints using HTTP"
|
||||
linkTitle: "How-To: Invoke Non-Dapr Endpoints"
|
||||
description: "Call Non-Dapr endpoints from Dapr applications using service invocation"
|
||||
weight: 2000
|
||||
---
|
||||
|
||||
This article demonstrates how to call a non-Dapr endpoint using Dapr over HTTP.
|
||||
|
||||
Using Dapr's service invocation API, you can communicate with endpoints that either use or do not use Dapr. Using Dapr to call endpoints that do not use Dapr not only provides a consistent API, but also the following [Dapr service invocation]({{< ref service-invocation-overview.md >}}) benefits:
|
||||
|
||||
- Ability to apply resiliency policies
|
||||
- Call observability with tracing & metrics
|
||||
- Security access control through scoping
|
||||
- Ability to utilize middleware pipeline components
|
||||
- Service discovery
|
||||
- Authentication through the use of headers
|
||||
|
||||
## HTTP service invocation to external services or non-Dapr endpoints
|
||||
Sometimes you need to call a non-Dapr HTTP endpoint. For example:
|
||||
- You may choose to only use Dapr in part of your overall application, including brownfield development
|
||||
- You may not have access to the code to migrate an existing application to use Dapr
|
||||
- You need to call an external HTTP service.
|
||||
|
||||
By defining an `HTTPEndpoint` resource, you declaratively define a way to interact with a non-Dapr endpoint. You then use the service invocation URL to invoke non-Dapr endpoints. Alternatively, you can place a non-Dapr Fully Qualified Domain Name (FQDN) endpoint URL directly into the service invocation URL.
|
||||
|
||||
### Order of precedence between HttpEndpoint, FQDN URL, and appId
|
||||
When using service invocation, the Dapr runtime follows a precedence order:
|
||||
|
||||
1. Is this a named `HTTPEndpoint` resource?
|
||||
2. Is this an FQDN URL with an`http://` or `https://` prefix?
|
||||
3. Is this an `appID`?
|
||||
|
||||
## Service invocation and non-Dapr HTTP endpoint
|
||||
The diagram below is an overview of how Dapr's service invocation works when invoking non-Dapr endpoints.
|
||||
|
||||
<img src="/images/service-invocation-overview-non-dapr-endpoint.png" width=800 alt="Diagram showing the steps of service invocation to non-Dapr endpoints">
|
||||
|
||||
1. Service A makes an HTTP call targeting Service B, a non-Dapr endpoint. The call goes to the local Dapr sidecar.
|
||||
2. Dapr discovers Service B's location using the `HTTPEndpoint` or FQDN URL.
|
||||
3. Dapr forwards the message to Service B.
|
||||
4. Service B runs its business logic code.
|
||||
5. Service B sends a response to Service A's Dapr sidecar.
|
||||
6. Service A receives the response.
|
||||
|
||||
## Using an HTTPEndpoint resource or FQDN URL for non-Dapr endpoints
|
||||
There are two ways to invoke a non-Dapr endpoint when communicating either to Dapr applications or non-Dapr applications. A Dapr application can invoke a non-Dapr endpoint by providing one of the following:
|
||||
|
||||
- A named `HTTPEndpoint` resource, including defining an `HTTPEndpoint` resource type. See the [HTTPEndpoint reference]({{< ref httpendpoints-reference.md >}}) guide for an example.
|
||||
|
||||
```sh
|
||||
localhost:3500/v1.0/invoke/<HTTPEndpoint-name>/method/<my-method>
|
||||
```
|
||||
|
||||
For example, with an `HTTPEndpoint` resource called "palpatine" and a method called "Order66", this would be:
|
||||
```sh
|
||||
curl http://localhost:3500/v1.0/invoke/palpatine/method/order66
|
||||
```
|
||||
|
||||
- An FQDN URL to the non-Dapr endpoint.
|
||||
|
||||
```sh
|
||||
localhost:3500/v1.0/invoke/<URL>/method/<my-method>
|
||||
```
|
||||
|
||||
For example, with an FQDN resource called `https://darthsidious.starwars`, this would be:
|
||||
```sh
|
||||
curl http://localhost:3500/v1.0/invoke/https://darthsidious.starwars/method/order66
|
||||
```
|
||||
|
||||
### Using appId when calling Dapr enabled applications
|
||||
AppIDs are always used to call Dapr applications with the `appID` and `my-method`. Read the [How-To: Invoke services using HTTP]({{< ref howto-invoke-discover-services.md >}}) guide for more information. For example:
|
||||
|
||||
```sh
|
||||
localhost:3500/v1.0/invoke/<appID>/method/<my-method>
|
||||
```
|
||||
```sh
|
||||
curl http://localhost:3602/v1.0/invoke/orderprocessor/method/checkout
|
||||
```
|
||||
|
||||
## Related Links
|
||||
|
||||
- [HTTPEndpoint reference]({{< ref httpendpoints-reference.md >}})
|
||||
- [Service invocation overview]({{< ref service-invocation-overview.md >}})
|
||||
- [Service invocation API specification]({{< ref service_invocation_api.md >}})
|
||||
|
||||
## Community call demo
|
||||
Watch this [video](https://youtu.be/BEXJgLsO4hA?t=364) on how to use service invocation to call non-Dapr endpoints.
|
||||
<div class="embed-responsive embed-responsive-16by9">
|
||||
<iframe width="560" height="315" src="https://www.youtube-nocookie.com/embed/BEXJgLsO4hA?t=364" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
|
||||
</div>
|
|
@ -220,10 +220,11 @@ spec:
|
|||
dapr.io/app-port: "50051"
|
||||
...
|
||||
```
|
||||
*If your app uses an SSL connection, you can tell Dapr to invoke your app over an insecure SSL connection with the `app-ssl: "true"` annotation (full list [here]({{< ref arguments-annotations-overview.md >}}))*
|
||||
|
||||
The `dapr.io/app-protocol: "grpc"` annotation tells Dapr to invoke the app using gRPC.
|
||||
|
||||
If your app uses a TLS connection, you can tell Dapr to invoke your app over TLS with the `app-protocol: "grpcs"` annotation (full list [here]({{< ref arguments-annotations-overview.md >}})). Note that Dapr does not validate TLS certificates presented by the app.
|
||||
|
||||
### Namespaces
|
||||
|
||||
When running on [namespace supported platforms]({{< ref "service_invocation_api.md#namespace-supported-platforms" >}}), you include the namespace of the target app in the app ID: `myApp.production`
|
||||
|
|
|
@ -25,7 +25,7 @@ Dapr uses a sidecar architecture. To invoke an application using Dapr:
|
|||
- Each application communicates with its own instance of Dapr.
|
||||
- The Dapr instances discover and communicate with each other.
|
||||
|
||||
The diagram below is an overview of how Dapr's service invocation works.
|
||||
The diagram below is an overview of how Dapr's service invocation works between two Dapr-ized applications.
|
||||
|
||||
<img src="/images/service-invocation-overview.png" width=800 alt="Diagram showing the steps of service invocation">
|
||||
|
||||
|
@ -38,8 +38,10 @@ The diagram below is an overview of how Dapr's service invocation works.
|
|||
6. Dapr forwards the response to Service A's Dapr sidecar.
|
||||
7. Service A receives the response.
|
||||
|
||||
You can also call non-Dapr HTTP endpoints using the service invocation API. For example, you may only use Dapr in part of an overall application, may not have access to the code to migrate an existing application to use Dapr, or simply need to call an external HTTP service. Read ["How-To: Invoke Non-Dapr Endpoints using HTTP"]({{< ref howto-invoke-non-dapr-endpoints.md >}}) for more information.
|
||||
|
||||
## Features
|
||||
Service invocation provides several features to make it easy for you to call methods between applications.
|
||||
Service invocation provides several features to make it easy for you to call methods between applications or to call external HTTP endpoints.
|
||||
|
||||
### HTTP and gRPC service invocation
|
||||
- **HTTP**: If you're already using HTTP protocols in your application, using the Dapr HTTP header might be the easiest way to get started. You don't need to change your existing endpoint URLs; just add the `dapr-app-id` header and you're ready to go. For more information, see [Invoke Services using HTTP]({{< ref howto-invoke-discover-services.md >}}).
|
||||
|
|
|
@ -93,6 +93,9 @@ You can group write, update, and delete operations into a request, which are the
|
|||
|
||||
Transactional state stores can be used to store actor state. To specify which state store to use for actors, specify value of property `actorStateStore` as `true` in the state store component's metadata section. Actor state is stored with a specific scheme in transactional state stores, allowing for consistent querying. Only a single state store component can be used as the state store for all actors. Read the [state API reference]({{< ref state_api.md >}}) and the [actors API reference]({{< ref actors_api.md >}}) to learn more about state stores for actors.
|
||||
|
||||
#### Time to Live (TTL) on actor state
|
||||
You should always set the TTL metadata field (`ttlInSeconds`), or the equivalent API call in your chosen SDK when saving actor state to ensure that state is eventually removed. Read [actors overview]({{< ref actors-overview.md >}}) for more information.
|
||||
|
||||
### State encryption
|
||||
|
||||
Dapr supports automatic client encryption of application state with support for key rotations. This is supported on all Dapr state stores. For more info, read the [How-To: Encrypt application state]({{< ref howto-encrypt-state.md >}}) topic.
|
||||
|
@ -178,4 +181,4 @@ Want to skip the quickstarts? Not a problem. You can try out the state managemen
|
|||
- [How-To: Build a stateful service]({{< ref howto-stateful-service.md >}})
|
||||
- Review the list of [state store components]({{< ref supported-state-stores.md >}})
|
||||
- Read the [state management API reference]({{< ref state_api.md >}})
|
||||
- Read the [actors API reference]({{< ref actors_api.md >}})
|
||||
- Read the [actors API reference]({{< ref actors_api.md >}})
|
||||
|
|
|
@ -28,19 +28,88 @@ The Dapr sidecar doesn’t load any workflow definitions. Rather, the sidecar si
|
|||
|
||||
## Write the workflow activities
|
||||
|
||||
Define the workflow activities you'd like your workflow to perform. Activities are a class definition and can take inputs and outputs. Activities also participate in dependency injection, like binding to a Dapr client.
|
||||
[Workflow activities]({{< ref "workflow-features-concepts.md#workflow-activites" >}}) are the basic unit of work in a workflow and are the tasks that get orchestrated in the business process.
|
||||
|
||||
{{< tabs ".NET" >}}
|
||||
|
||||
{{% codetab %}}
|
||||
|
||||
Continuing the ASP.NET order processing example, the `OrderProcessingWorkflow` class is derived from a base class called `Workflow` with input and output parameter types.
|
||||
Define the workflow activities you'd like your workflow to perform. Activities are a class definition and can take inputs and outputs. Activities also participate in dependency injection, like binding to a Dapr client.
|
||||
|
||||
It also includes a `RunAsync` method that does the heavy lifting of the workflow and calls the workflow activities. The activities called in the example are:
|
||||
The activities called in the example below are:
|
||||
- `NotifyActivity`: Receive notification of a new order.
|
||||
- `ReserveInventoryActivity`: Check for sufficient inventory to meet the new order.
|
||||
- `ProcessPaymentActivity`: Process payment for the order. Includes `NotifyActivity` to send notification of successful order.
|
||||
|
||||
### NotifyActivity
|
||||
|
||||
```csharp
|
||||
public class NotifyActivity : WorkflowActivity<Notification, object>
|
||||
{
|
||||
//...
|
||||
|
||||
public NotifyActivity(ILoggerFactory loggerFactory)
|
||||
{
|
||||
this.logger = loggerFactory.CreateLogger<NotifyActivity>();
|
||||
}
|
||||
|
||||
//...
|
||||
}
|
||||
```
|
||||
|
||||
[See the full `NotifyActivity.cs` workflow activity example.](https://github.com/dapr/dotnet-sdk/blob/master/examples/Workflow/WorkflowConsoleApp/Activities/NotifyActivity.cs)
|
||||
|
||||
### ReserveInventoryActivity
|
||||
|
||||
```csharp
|
||||
public class ReserveInventoryActivity : WorkflowActivity<InventoryRequest, InventoryResult>
|
||||
{
|
||||
//...
|
||||
|
||||
public ReserveInventoryActivity(ILoggerFactory loggerFactory, DaprClient client)
|
||||
{
|
||||
this.logger = loggerFactory.CreateLogger<ReserveInventoryActivity>();
|
||||
this.client = client;
|
||||
}
|
||||
|
||||
//...
|
||||
|
||||
}
|
||||
```
|
||||
[See the full `ReserveInventoryActivity.cs` workflow activity example.](https://github.com/dapr/dotnet-sdk/blob/master/examples/Workflow/WorkflowConsoleApp/Activities/ReserveInventoryActivity.cs)
|
||||
|
||||
### ProcessPaymentActivity
|
||||
|
||||
```csharp
|
||||
public class ProcessPaymentActivity : WorkflowActivity<PaymentRequest, object>
|
||||
{
|
||||
//...
|
||||
public ProcessPaymentActivity(ILoggerFactory loggerFactory)
|
||||
{
|
||||
this.logger = loggerFactory.CreateLogger<ProcessPaymentActivity>();
|
||||
}
|
||||
|
||||
//...
|
||||
|
||||
}
|
||||
```
|
||||
|
||||
[See the full `ProcessPaymentActivity.cs` workflow activity example.](https://github.com/dapr/dotnet-sdk/blob/master/examples/Workflow/WorkflowConsoleApp/Activities/ProcessPaymentActivity.cs)
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{< /tabs >}}
|
||||
|
||||
## Write the workflow
|
||||
|
||||
Next, register and call the activities in a workflow.
|
||||
|
||||
{{< tabs ".NET" >}}
|
||||
|
||||
{{% codetab %}}
|
||||
|
||||
The `OrderProcessingWorkflow` class is derived from a base class called `Workflow` with input and output parameter types. It also includes a `RunAsync` method that does the heavy lifting of the workflow and calls the workflow activities.
|
||||
|
||||
```csharp
|
||||
class OrderProcessingWorkflow : Workflow<OrderPayload, OrderResult>
|
||||
{
|
||||
|
@ -73,19 +142,21 @@ It also includes a `RunAsync` method that does the heavy lifting of the workflow
|
|||
}
|
||||
```
|
||||
|
||||
[See the full workflow example in `OrderProcessingWorkflow.cs`.](https://github.com/dapr/dotnet-sdk/blob/master/examples/Workflow/WorkflowConsoleApp/Workflows/OrderProcessingWorkflow.cs)
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{< /tabs >}}
|
||||
|
||||
## Write the workflow
|
||||
## Write the application
|
||||
|
||||
Compose the workflow activities into a workflow.
|
||||
Finally, compose the application using the workflow.
|
||||
|
||||
{{< tabs ".NET" >}}
|
||||
|
||||
{{% codetab %}}
|
||||
|
||||
[In the following example](https://github.com/dapr/dotnet-sdk/blob/master/examples/Workflow/WorkflowConsoleApp/Program.cs), for a basic ASP.NET order processing application using the .NET SDK, your project code would include:
|
||||
[In the following `Program.cs` example](https://github.com/dapr/dotnet-sdk/blob/master/examples/Workflow/WorkflowConsoleApp/Program.cs), for a basic ASP.NET order processing application using the .NET SDK, your project code would include:
|
||||
|
||||
- A NuGet package called `Dapr.Workflow` to receive the .NET SDK capabilities
|
||||
- A builder with an extension method called `AddDaprWorkflow`
|
||||
|
|
|
@ -21,16 +21,27 @@ string workflowComponent = "dapr";
|
|||
string workflowName = "OrderProcessingWorkflow";
|
||||
OrderPayload input = new OrderPayload("Paperclips", 99.95);
|
||||
Dictionary<string, string> workflowOptions; // This is an optional parameter
|
||||
CancellationToken cts = CancellationToken.None;
|
||||
|
||||
// Start the workflow. This returns back a "WorkflowReference" which contains the instanceID for the particular workflow instance.
|
||||
WorkflowReference startResponse = await daprClient.StartWorkflowAsync(orderId, workflowComponent, workflowName, input, workflowOptions, cts);
|
||||
// Start the workflow. This returns back a "StartWorkflowResponse" which contains the instance ID for the particular workflow instance.
|
||||
StartWorkflowResponse startResponse = await daprClient.StartWorkflowAsync(orderId, workflowComponent, workflowName, input, workflowOptions);
|
||||
|
||||
// Get information on the workflow. This response will contain information such as the status of the workflow, when it started, and more!
|
||||
// Get information on the workflow. This response contains information such as the status of the workflow, when it started, and more!
|
||||
GetWorkflowResponse getResponse = await daprClient.GetWorkflowAsync(orderId, workflowComponent, workflowName);
|
||||
|
||||
// Terminate the workflow
|
||||
await daprClient.TerminateWorkflowAsync(instanceId, workflowComponent);
|
||||
await daprClient.TerminateWorkflowAsync(orderId, workflowComponent);
|
||||
|
||||
// Raise an event (an incoming purchase order) that your workflow will wait for. This returns the item waiting to be purchased.
|
||||
await daprClient.RaiseWorkflowEventAsync(orderId, workflowComponent, workflowName, input);
|
||||
|
||||
// Pause
|
||||
await daprClient.PauseWorkflowAsync(orderId, workflowComponent);
|
||||
|
||||
// Resume
|
||||
await daprClient.ResumeWorkflowAsync(orderId, workflowComponent);
|
||||
|
||||
// Purge
|
||||
await daprClient.PurgeWorkflowAsync(orderId, workflowComponent);
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
|
@ -44,24 +55,60 @@ Manage your workflow using HTTP calls. The example below plugs in the properties
|
|||
|
||||
To start your workflow with an ID `12345678`, run:
|
||||
|
||||
```bash
|
||||
POST http://localhost:3500/v1.0-alpha1/workflows/dapr/OrderProcessingWorkflow/12345678/start
|
||||
```http
|
||||
POST http://localhost:3500/v1.0-alpha1/workflows/dapr/OrderProcessingWorkflow/start?instanceID=12345678
|
||||
```
|
||||
|
||||
Note that workflow instance IDs can only contain alphanumeric characters, underscores, and dashes.
|
||||
|
||||
### Terminate workflow
|
||||
|
||||
To terminate your workflow with an ID `12345678`, run:
|
||||
|
||||
```bash
|
||||
```http
|
||||
POST http://localhost:3500/v1.0-alpha1/workflows/dapr/12345678/terminate
|
||||
```
|
||||
|
||||
### Raise an event
|
||||
|
||||
For workflow components that support subscribing to external events, such as the Dapr Workflow engine, you can use the following "raise event" API to deliver a named event to a specific workflow instance.
|
||||
|
||||
```http
|
||||
POST http://localhost:3500/v1.0-alpha1/workflows/<workflowComponentName>/<instanceID>/raiseEvent/<eventName>
|
||||
```
|
||||
|
||||
> An `eventName` can be any arbitrarily-chosen name for the event.
|
||||
|
||||
### Pause or resume a workflow
|
||||
|
||||
To plan for down-time, wait for inputs, and more, you can pause and then resume a workflow. To pause a workflow with an ID `12345678` until triggered to resume, run:
|
||||
|
||||
```http
|
||||
POST http://localhost:3500/v1.0-alpha1/workflows/dapr/12345678/pause
|
||||
```
|
||||
|
||||
To resume a workflow with an ID `12345678`, run:
|
||||
|
||||
```http
|
||||
POST http://localhost:3500/v1.0-alpha1/workflows/dapr/12345678/resume
|
||||
```
|
||||
|
||||
### Purge a workflow
|
||||
|
||||
The purge API can be used to permanently delete workflow metadata from the underlying state store, including any stored inputs, outputs, and workflow history records. This is often useful for implementing data retention policies and for freeing resources.
|
||||
|
||||
Only workflow instances in the COMPLETED, FAILED, or TERMINATED state can be purged. If the workflow is in any other state, calling purge returns an error.
|
||||
|
||||
```http
|
||||
POST http://localhost:3500/v1.0-alpha1/workflows/dapr/12345678/purge
|
||||
```
|
||||
|
||||
### Get information about a workflow
|
||||
|
||||
To fetch workflow information (outputs and inputs) with an ID `12345678`, run:
|
||||
|
||||
```bash
|
||||
GET http://localhost:3500/v1.0-alpha1/workflows/dapr/OrderProcessingWorkflow/12345678
|
||||
```http
|
||||
GET http://localhost:3500/v1.0-alpha1/workflows/dapr/12345678
|
||||
```
|
||||
|
||||
Learn more about these HTTP calls in the [workflow API reference guide]({{< ref workflow_api.md >}}).
|
||||
|
|
|
@ -52,8 +52,10 @@ Each workflow instance managed by the engine is represented as one or more spans
|
|||
|
||||
There are two types of actors that are internally registered within the Dapr sidecar in support of the workflow engine:
|
||||
|
||||
- `dapr.internal.wfengine.workflow`
|
||||
- `dapr.internal.wfengine.activity`
|
||||
- `dapr.internal.{namespace}.{appID}.workflow`
|
||||
- `dapr.internal.{namespace}.{appID}.activity`
|
||||
|
||||
The `{namespace}` value is the Dapr namespace and defaults to `default` if no namespace is configured. The `{appID}` value is the app's ID. For example, if you have a workflow app named "wfapp", then the type of the workflow actor would be `dapr.internal.default.wfapp.workflow` and the type of the activity actor would be `dapr.internal.default.wfapp.activity`.
|
||||
|
||||
The following diagram demonstrates how internal workflow actors operate in a Kubernetes scenario:
|
||||
|
||||
|
@ -61,11 +63,13 @@ The following diagram demonstrates how internal workflow actors operate in a Kub
|
|||
|
||||
Just like user-defined actors, internal workflow actors are distributed across the cluster by the actor placement service. They also maintain their own state and make use of reminders. However, unlike actors that live in application code, these _internal_ actors are embedded into the Dapr sidecar. Application code is completely unaware that these actors exist.
|
||||
|
||||
There are two types of actors registered by the Dapr sidecar for workflow: the _workflow_ actor and the _activity_ actor. The next sections will go into more details on each.
|
||||
{{% alert title="Note" color="primary" %}}
|
||||
The internal workflow actor types are only registered after an app has registered a workflow using a Dapr Workflow SDK. If an app never registers a workflow, then the internal workflow actors are never registered.
|
||||
{{% /alert %}}
|
||||
|
||||
### Workflow actors
|
||||
|
||||
A new instance of the `dapr.internal.wfengine.workflow` actor is activated for every workflow instance that gets created. The ID of the _workflow_ actor is the ID of the workflow. This internal actor stores the state of the workflow as it progresses and determines the node on which the workflow code executes via the actor placement service.
|
||||
Workflow actors are responsible for managing the state and placement of all workflows running in the app. A new instance of the workflow actor is activated for every workflow instance that gets created. The ID of the workflow actor is the ID of the workflow. This internal actor stores the state of the workflow as it progresses and determines the node on which the workflow code executes via the actor placement service.
|
||||
|
||||
Each workflow actor saves its state using the following keys in the configured state store:
|
||||
|
||||
|
@ -94,17 +98,13 @@ To summarize:
|
|||
|
||||
### Activity actors
|
||||
|
||||
A new instance of the `dapr.internal.wfengine.activity` actor is activated for every activity task that gets scheduled by a workflow. The ID of the _activity_ actor is the ID of the workflow combined with a sequence number (sequence numbers start with 0). For example, if a workflow has an ID of `876bf371` and is the third activity to be scheduled by the workflow, its ID will be `876bf371#2` where `2` is the sequence number.
|
||||
Activity actors are responsible for managing the state and placement of all workflow activity invocations. A new instance of the activity actor is activated for every activity task that gets scheduled by a workflow. The ID of the activity actor is the ID of the workflow combined with a sequence number (sequence numbers start with 0). For example, if a workflow has an ID of `876bf371` and is the third activity to be scheduled by the workflow, its ID will be `876bf371::2` where `2` is the sequence number.
|
||||
|
||||
Each activity actor stores a single key into the state store:
|
||||
|
||||
| Key | Description |
|
||||
| --- | ----------- |
|
||||
| `activityreq-N` | The key contains the activity invocation payload, which includes the serialized activity input data. The `N` value is a 64-bit unsigned integer that represents the _generation_ of the workflow, a concept which is outside the scope of this documentation. |
|
||||
|
||||
{{% alert title="Warning" color="warning" %}}
|
||||
In the [Alpha release of the Dapr Workflow engine]({{< ref support-preview-features.md >}}), activity actor state will remain in the state store even after the activity task has completed. Scheduling a large number of workflow activities could result in unbounded storage usage. In a future release, data retention policies will be introduced that can automatically purge the state store of completed activity state.
|
||||
{{% /alert %}}
|
||||
| `activityState` | The key contains the activity invocation payload, which includes the serialized activity input data. This key is deleted automatically after the activity invocation has completed. |
|
||||
|
||||
The following diagram illustrates the typical lifecycle of an activity actor.
|
||||
|
||||
|
|
|
@ -105,6 +105,36 @@ Dapr Workflows allow you to schedule reminder-like durable delays for any time r
|
|||
Some APIs in the workflow authoring SDK may internally schedule durable timers to implement internal timeout behavior.
|
||||
{{% /alert %}}
|
||||
|
||||
## Retry policies
|
||||
|
||||
Workflows support durable retry policies for activities and child workflows. Workflow retry policies are separate and distinct from [Dapr resiliency policies]({{< ref "resiliency-overview.md" >}}) in the following ways.
|
||||
|
||||
- Workflow retry policies are configured by the workflow author in code, whereas Dapr Resiliency policies are configured by the application operator in YAML.
|
||||
- Workflow retry policies are durable and maintain their state across application restarts, whereas Dapr Resiliency policies are not durable and must be re-applied after application restarts.
|
||||
- Workflow retry policies are triggered by unhandled errors/exceptions in activities and child workflows, whereas Dapr Resiliency policies are triggered by operation timeouts and connectivity faults.
|
||||
|
||||
Retries are internally implemented using durable timers. This means that workflows can be safely unloaded from memory while waiting for a retry to fire, conserving system resources. This also means that delays between retries can be arbitrarily long, including minutes, hours, or even days.
|
||||
|
||||
{{% alert title="Note" color="primary" %}}
|
||||
The actions performed by a retry policy are saved into a workflow's history. Care must be taken not to change the behavior of a retry policy after a workflow has already been executed. Otherwise, the workflow may behave unexpectedly when replayed. See the notes on [updating workflow code]({{< ref "#updating-workflow-code" >}}) for more information.
|
||||
{{% /alert %}}
|
||||
|
||||
It's possible to use both workflow retry policies and Dapr Resiliency policies together. For example, if a workflow activity uses a Dapr client to invoke a service, the Dapr client uses the configured resiliency policy. See [Quickstart: Service-to-service resiliency]({{< ref "#resiliency-serviceinvo-quickstart" >}}) for more information with an example. However, if the activity itself fails for any reason, including exhausting the retries on the resiliency policy, then the workflow's resiliency policy kicks in.
|
||||
|
||||
{{% alert title="Note" color="primary" %}}
|
||||
Using workflow retry policies and resiliency policies together can result in unexpected behavior. For example, if a workflow activity exhausts its configured retry policy, the workflow engine will still retry the activity according to the workflow retry policy. This can result in the activity being retried more times than expected.
|
||||
{{% /alert %}}
|
||||
|
||||
Because workflow retry policies are configured in code, the exact developer experience may vary depending on the version of the workflow SDK. In general, workflow retry policies can be configured with the following parameters.
|
||||
|
||||
| Parameter | Description |
|
||||
| --- | --- |
|
||||
| **Maximum number of attempts** | The maximum number of times to execute the activity or child workflow. |
|
||||
| **First retry interval** | The amount of time to wait before the first retry. |
|
||||
| **Backoff coefficient** | The factor by which the wait time between retries is multiplied after each subsequent retry. |
|
||||
| **Maximum retry interval** | The maximum amount of time to wait before each subsequent retry. |
|
||||
| **Retry timeout** | The overall timeout for retries, regardless of any configured max number of attempts. |
|
||||
|
||||
## External events
|
||||
|
||||
Sometimes workflows will need to wait for events that are raised by external systems. For example, an approval workflow may require a human to explicitly approve an order request within an order processing workflow if the total cost exceeds some threshold. Another example is a trivia game orchestration workflow that pauses while waiting for all participants to submit their answers to trivia questions. These mid-execution inputs are referred to as _external events_.
|
||||
|
|
|
@ -31,25 +31,27 @@ Dapr Workflow solves these complexities by allowing you to implement the task ch
|
|||
|
||||
```csharp
|
||||
// Exponential backoff retry policy that survives long outages
|
||||
var retryPolicy = TaskOptions.FromRetryPolicy(new RetryPolicy(
|
||||
maxNumberOfAttempts: 10,
|
||||
firstRetryInterval: TimeSpan.FromMinutes(1),
|
||||
backoffCoefficient: 2.0,
|
||||
maxRetryInterval: TimeSpan.FromHours(1)));
|
||||
var retryOptions = new WorkflowTaskOptions
|
||||
{
|
||||
RetryPolicy = new WorkflowRetryPolicy(
|
||||
firstRetryInterval: TimeSpan.FromMinutes(1),
|
||||
backoffCoefficient: 2.0,
|
||||
maxRetryInterval: TimeSpan.FromHours(1),
|
||||
maxNumberOfAttempts: 10),
|
||||
};
|
||||
|
||||
// Task failures are surfaced as ordinary .NET exceptions
|
||||
try
|
||||
{
|
||||
var result1 = await context.CallActivityAsync<string>("Step1", wfInput, retryPolicy);
|
||||
var result2 = await context.CallActivityAsync<byte[]>("Step2", result1, retryPolicy);
|
||||
var result3 = await context.CallActivityAsync<long[]>("Step3", result2, retryPolicy);
|
||||
var result4 = await context.CallActivityAsync<Guid[]>("Step4", result3, retryPolicy);
|
||||
var result1 = await context.CallActivityAsync<string>("Step1", wfInput, retryOptions);
|
||||
var result2 = await context.CallActivityAsync<byte[]>("Step2", result1, retryOptions);
|
||||
var result3 = await context.CallActivityAsync<long[]>("Step3", result2, retryOptions);
|
||||
var result4 = await context.CallActivityAsync<Guid[]>("Step4", result3, retryOptions);
|
||||
return string.Join(", ", result4);
|
||||
}
|
||||
catch (TaskFailedException)
|
||||
catch (TaskFailedException) // Task failures are surfaced as TaskFailedException
|
||||
{
|
||||
// Retries expired - apply custom compensation logic
|
||||
await context.CallActivityAsync<long[]>("MyCompensation", options: retryPolicy);
|
||||
await context.CallActivityAsync<long[]>("MyCompensation", options: retryOptions);
|
||||
throw;
|
||||
}
|
||||
```
|
||||
|
@ -78,7 +80,7 @@ In the fan-out/fan-in design pattern, you execute multiple tasks simultaneously
|
|||
|
||||
<img src="/images/workflow-overview/workflows-fanin-fanout.png" width=800 alt="Diagram showing how the fan-out/fan-in workflow pattern works">
|
||||
|
||||
In addition to the challenges mentioned in [the previous pattern]({{< ref "workflow-overview.md#task-chaining" >}}), there are several important questions to consider when implementing the fan-out/fan-in pattern manually:
|
||||
In addition to the challenges mentioned in [the previous pattern]({{< ref "workflow-patterns.md#task-chaining" >}}), there are several important questions to consider when implementing the fan-out/fan-in pattern manually:
|
||||
|
||||
- How do you control the degree of parallelism?
|
||||
- How do you know when to trigger subsequent aggregation steps?
|
||||
|
@ -143,35 +145,33 @@ The Dapr workflow HTTP API supports the asynchronous request-reply pattern out-o
|
|||
The following `curl` commands illustrate how the workflow APIs support this pattern.
|
||||
|
||||
```bash
|
||||
curl -X POST http://localhost:3500/v1.0-alpha1/workflows/dapr/OrderProcessingWorkflow/12345678/start -d '{"input":{"Name":"Paperclips","Quantity":1,"TotalCost":9.95}}'
|
||||
curl -X POST http://localhost:3500/v1.0-alpha1/workflows/dapr/OrderProcessingWorkflow/start?instanceID=12345678 -d '{"Name":"Paperclips","Quantity":1,"TotalCost":9.95}'
|
||||
```
|
||||
|
||||
The previous command will result in the following response JSON:
|
||||
|
||||
```json
|
||||
{"instance_id":"12345678"}
|
||||
{"instanceID":"12345678"}
|
||||
```
|
||||
|
||||
The HTTP client can then construct the status query URL using the workflow instance ID and poll it repeatedly until it sees the "COMPLETED", "FAILED", or "TERMINATED" status in the payload.
|
||||
|
||||
```bash
|
||||
curl http://localhost:3500/v1.0-alpha1/workflows/dapr/OrderProcessingWorkflow/12345678
|
||||
curl http://localhost:3500/v1.0-alpha1/workflows/dapr/12345678
|
||||
```
|
||||
|
||||
The following is an example of what an in-progress workflow status might look like.
|
||||
|
||||
```json
|
||||
{
|
||||
"WFInfo": {
|
||||
"instance_id": "12345678"
|
||||
},
|
||||
"start_time": "2023-02-05T00:32:05Z",
|
||||
"metadata": {
|
||||
"instanceID": "12345678",
|
||||
"workflowName": "OrderProcessingWorkflow",
|
||||
"createdAt": "2023-05-03T23:22:11.143069826Z",
|
||||
"lastUpdatedAt": "2023-05-03T23:22:22.460025267Z",
|
||||
"runtimeStatus": "RUNNING",
|
||||
"properties": {
|
||||
"dapr.workflow.custom_status": "",
|
||||
"dapr.workflow.input": "{\"Name\":\"Paperclips\",\"Quantity\":1,\"TotalCost\":9.95}",
|
||||
"dapr.workflow.last_updated": "2023-02-05T00:32:18Z",
|
||||
"dapr.workflow.name": "OrderProcessingWorkflow",
|
||||
"dapr.workflow.runtime_status": "RUNNING"
|
||||
"dapr.workflow.input": "{\"Name\":\"Paperclips\",\"Quantity\":1,\"TotalCost\":9.95}"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
@ -182,17 +182,15 @@ If the workflow has completed, the status might look as follows.
|
|||
|
||||
```json
|
||||
{
|
||||
"WFInfo": {
|
||||
"instance_id": "12345678"
|
||||
},
|
||||
"start_time": "2023-02-05T00:32:05Z",
|
||||
"metadata": {
|
||||
"instanceID": "12345678",
|
||||
"workflowName": "OrderProcessingWorkflow",
|
||||
"createdAt": "2023-05-03T23:30:11.381146313Z",
|
||||
"lastUpdatedAt": "2023-05-03T23:30:52.923870615Z",
|
||||
"runtimeStatus": "COMPLETED",
|
||||
"properties": {
|
||||
"dapr.workflow.custom_status": "",
|
||||
"dapr.workflow.input": "{\"Name\":\"Paperclips\",\"Quantity\":1,\"TotalCost\":9.95}",
|
||||
"dapr.workflow.last_updated": "2023-02-05T00:32:23Z",
|
||||
"dapr.workflow.name": "OrderProcessingWorkflow",
|
||||
"dapr.workflow.output": "{\"Processed\":true}",
|
||||
"dapr.workflow.runtime_status": "COMPLETED"
|
||||
"dapr.workflow.output": "{\"Processed\":true}"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
@ -266,6 +264,96 @@ A workflow implementing the monitor pattern can loop forever or it can terminate
|
|||
This pattern can also be expressed using actors and reminders. The difference is that this workflow is expressed as a single function with inputs and state stored in local variables. Workflows can also execute a sequence of actions with stronger reliability guarantees, if necessary.
|
||||
{{% /alert %}}
|
||||
|
||||
## External system interaction
|
||||
|
||||
In some cases, a workflow may need to pause and wait for an external system to perform some action. For example, a workflow may need to pause and wait for a payment to be received. In this case, a payment system might publish an event to a pub/sub topic on receipt of a payment, and a listener on that topic can raise an event to the workflow using the [raise event workflow API]({{< ref "howto-manage-workflow.md#raise-an-event" >}}).
|
||||
|
||||
Another very common scenario is when a workflow needs to pause and wait for a human, for example when approving a purchase order. Dapr Workflow supports this event pattern via the [external events]({{< ref "workflow-features-concepts.md#external-events" >}}) feature.
|
||||
|
||||
Here's an example workflow for a purchase order involving a human:
|
||||
|
||||
1. A workflow is triggered when a purchase order is received.
|
||||
1. A rule in the workflow determines that a human needs to perform some action. For example, the purchase order cost exceeds a certain auto-approval threshold.
|
||||
1. The workflow sends a notification requesting a human action. For example, it sends an email with an approval link to a designated approver.
|
||||
1. The workflow pauses and waits for the human to either approve or reject the order by clicking on a link.
|
||||
1. If the approval isn't received within the specified time, the workflow resumes and performs some compensation logic, such as canceling the order.
|
||||
|
||||
The following diagram illustrates this flow.
|
||||
|
||||
<img src="/images/workflow-overview/workflow-human-interaction-pattern.png" width=600 alt="Diagram showing how the external system interaction pattern works with a human involved"/>
|
||||
|
||||
The following example code shows how this pattern can be implemented using Dapr Workflow.
|
||||
|
||||
{{< tabs ".NET" >}}
|
||||
|
||||
{{% codetab %}}
|
||||
|
||||
```csharp
|
||||
public override async Task<OrderResult> RunAsync(WorkflowContext context, OrderPayload order)
|
||||
{
|
||||
// ...(other steps)...
|
||||
|
||||
// Require orders over a certain threshold to be approved
|
||||
if (order.TotalCost > OrderApprovalThreshold)
|
||||
{
|
||||
try
|
||||
{
|
||||
// Request human approval for this order
|
||||
await context.CallActivityAsync(nameof(RequestApprovalActivity), order);
|
||||
|
||||
// Pause and wait for a human to approve the order
|
||||
ApprovalResult approvalResult = await context.WaitForExternalEventAsync<ApprovalResult>(
|
||||
eventName: "ManagerApproval",
|
||||
timeout: TimeSpan.FromDays(3));
|
||||
if (approvalResult == ApprovalResult.Rejected)
|
||||
{
|
||||
// The order was rejected, end the workflow here
|
||||
return new OrderResult(Processed: false);
|
||||
}
|
||||
}
|
||||
catch (TaskCanceledException)
|
||||
{
|
||||
// An approval timeout results in automatic order cancellation
|
||||
return new OrderResult(Processed: false);
|
||||
}
|
||||
}
|
||||
|
||||
// ...(other steps)...
|
||||
|
||||
// End the workflow with a success result
|
||||
return new OrderResult(Processed: true);
|
||||
}
|
||||
```
|
||||
|
||||
{{% alert title="Note" color="primary" %}}
|
||||
In the example above, `RequestApprovalActivity` is the name of a workflow activity to invoke and `ApprovalResult` is an enumeration defined by the workflow app. For brevity, these definitions were left out of the example code.
|
||||
{{% /alert %}}
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{< /tabs >}}
|
||||
|
||||
The code that delivers the event to resume the workflow execution is external to the workflow. Workflow events can be delivered to a waiting workflow instance using the [raise event]({{< ref "howto-manage-workflow.md#raise-an-event" >}}) workflow management API, as shown in the following example:
|
||||
|
||||
{{< tabs ".NET" >}}
|
||||
|
||||
{{% codetab %}}
|
||||
|
||||
```csharp
|
||||
// Raise the workflow event to the waiting workflow
|
||||
await daprClient.RaiseWorkflowEventAsync(
|
||||
instanceId: orderId,
|
||||
workflowComponent: "dapr",
|
||||
eventName: "ManagerApproval",
|
||||
eventData: ApprovalResult.Approved);
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{< /tabs >}}
|
||||
|
||||
External events don't have to be directly triggered by humans. They can also be triggered by other systems. For example, a workflow may need to pause and wait for a payment to be received. In this case, a payment system might publish an event to a pub/sub topic on receipt of a payment, and a listener on that topic can raise an event to the workflow using the raise event workflow API.
|
||||
|
||||
## Next steps
|
||||
|
||||
{{< button text="Workflow architecture >>" page="workflow-architecture.md" >}}
|
||||
|
|
|
@ -14,4 +14,5 @@ The Dapr SDKs are the easiest way for you to create pluggable components. Choose
|
|||
|
||||
| Language | Status |
|
||||
|----------|:------:|
|
||||
| [Go]({{< ref pluggable-components-go >}}) | In development |
|
||||
| [.NET]({{< ref pluggable-components-dotnet >}}) | In development |
|
||||
|
|
|
@ -8,43 +8,58 @@ aliases:
|
|||
- /developing-applications/integrations/authenticating/authenticating-aws/
|
||||
---
|
||||
|
||||
All Dapr components using various AWS services (DynamoDB, SQS, S3, etc) use a standardized set of attributes for configuration. See [how the AWS SDK (which Dapr uses) handles credentials](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials).
|
||||
All Dapr components using various AWS services (DynamoDB, SQS, S3, etc) use a standardized set of attributes for configuration via the AWS SDK. [Learn more about how the AWS SDK handles credentials](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials).
|
||||
|
||||
None of the following attributes are required, since you can configure the AWS SDK using the default provider chain, described in the link above. Test the component configuration and inspect the log output from the Dapr runtime to ensure that components initialize correctly.
|
||||
Since you can configure the AWS SDK using the default provider chain, all of the following attributes are optional. Test the component configuration and inspect the log output from the Dapr runtime to ensure that components initialize correctly.
|
||||
|
||||
| Attribute | Description |
|
||||
| --------- | ----------- |
|
||||
| `region` | Which AWS region to connect to. In some situations (when running Dapr in self-hosted mode, for example) this flag can be provided by the environment variable `AWS_REGION`. Since Dapr sidecar injection doesn't allow configuring environment variables on the Dapr sidecar, it is recommended to always set the `region` attribute in the component spec. |
|
||||
| `region` | Which AWS region to connect to. In some situations (when running Dapr in self-hosted mode, for example), this flag can be provided by the environment variable `AWS_REGION`. Since Dapr sidecar injection doesn't allow configuring environment variables on the Dapr sidecar, it is recommended to always set the `region` attribute in the component spec. |
|
||||
| `endpoint` | The endpoint is normally handled internally by the AWS SDK. However, in some situations it might make sense to set it locally - for example if developing against [DynamoDB Local](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DynamoDBLocal.html). |
|
||||
| `accessKey` | AWS Access key id. |
|
||||
| `secretKey` | AWS Secret access key. Use together with `accessKey` to explicitly specify credentials. |
|
||||
| `sessionToken` | AWS Session token. Used together with `accessKey` and `secretKey`. When using a regular IAM user's access key and secret, a session token is normally not required. |
|
||||
|
||||
{{% alert title="Important" color="warning" %}}
|
||||
When running the Dapr sidecar (daprd) with your application on EKS (AWS Kubernetes), if you're using a node/pod that has already been attached to an IAM policy defining access to AWS resources, you **must not** provide AWS access-key, secret-key, and tokens in the definition of the component spec you're using.
|
||||
You **must not** provide AWS access-key, secret-key, and tokens in the definition of the component spec you're using:
|
||||
- When running the Dapr sidecar (`daprd`) with your application on EKS (AWS Kubernetes)
|
||||
- If using a node/pod that has already been attached to an IAM policy defining access to AWS resources
|
||||
{{% /alert %}}
|
||||
|
||||
## Alternatives to explicitly specifying credentials in component manifest files
|
||||
|
||||
In production scenarios, it is recommended to use a solution such as [Kiam](https://github.com/uswitch/kiam) or [Kube2iam](https://github.com/jtblin/kube2iam). If running on AWS EKS, you can [link an IAM role to a Kubernetes service account](https://docs.aws.amazon.com/eks/latest/userguide/create-service-account-iam-policy-and-role.html), which your pod can use.
|
||||
In production scenarios, it is recommended to use a solution such as:
|
||||
- [Kiam](https://github.com/uswitch/kiam)
|
||||
- [Kube2iam](https://github.com/jtblin/kube2iam)
|
||||
|
||||
If running on AWS EKS, you can [link an IAM role to a Kubernetes service account](https://docs.aws.amazon.com/eks/latest/userguide/create-service-account-iam-policy-and-role.html), which your pod can use.
|
||||
|
||||
All of these solutions solve the same problem: They allow the Dapr runtime process (or sidecar) to retrieve credentials dynamically, so that explicit credentials aren't needed. This provides several benefits, such as automated key rotation, and avoiding having to manage secrets.
|
||||
|
||||
Both Kiam and Kube2IAM work by intercepting calls to the [instance metadata service](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html).
|
||||
|
||||
## Using instance role/profile when running in stand-alone mode on AWS EC2
|
||||
### Use an instance profile when running in stand-alone mode on AWS EC2
|
||||
|
||||
If running Dapr directly on an AWS EC2 instance in stand-alone mode, instance profiles can be used. Simply configure an IAM role and [attach it to the instance profile](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_instance-profiles.html) for the EC2 instance, and Dapr should be able to authenticate to AWS without specifying credentials in the Dapr component manifest.
|
||||
If running Dapr directly on an AWS EC2 instance in stand-alone mode, you can use instance profiles.
|
||||
|
||||
## Authenticating to AWS when running dapr locally in stand-alone mode
|
||||
1. Configure an IAM role.
|
||||
1. [Attach it to the instance profile](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_instance-profiles.html) for the EC2 instance.
|
||||
|
||||
When running Dapr (or the Dapr runtime directly) in stand-alone mode, you have the option of injecting environment variables into the process like this (on Linux/MacOS):
|
||||
Dapr then authenticates to AWS without specifying credentials in the Dapr component manifest.
|
||||
|
||||
### Authenticate to AWS when running dapr locally in stand-alone mode
|
||||
|
||||
{{< tabs "Linux/MacOS" "Windows" >}}
|
||||
<!-- linux -->
|
||||
{{% codetab %}}
|
||||
|
||||
When running Dapr (or the Dapr runtime directly) in stand-alone mode, you can inject environment variables into the process, like the following example:
|
||||
|
||||
```bash
|
||||
FOO=bar daprd --app-id myapp
|
||||
```
|
||||
|
||||
If you have [configured named AWS profiles](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html) locally , you can tell Dapr (or the Dapr runtime) which profile to use by specifying the "AWS_PROFILE" environment variable:
|
||||
If you have [configured named AWS profiles](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html) locally, you can tell Dapr (or the Dapr runtime) which profile to use by specifying the "AWS_PROFILE" environment variable:
|
||||
|
||||
```bash
|
||||
AWS_PROFILE=myprofile dapr run...
|
||||
|
@ -58,11 +73,27 @@ AWS_PROFILE=myprofile daprd...
|
|||
|
||||
You can use any of the [supported environment variables](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html#envvars-list) to configure Dapr in this manner.
|
||||
|
||||
On Windows, the environment variable needs to be set before starting the `dapr` or `daprd` command; doing it inline as shown above is not supported.
|
||||
{{% /codetab %}}
|
||||
|
||||
## Authenticating to AWS if using AWS SSO based profiles
|
||||
<!-- windows -->
|
||||
{{% codetab %}}
|
||||
|
||||
If you authenticate to AWS using [AWS SSO](https://aws.amazon.com/single-sign-on/), some AWS SDKs (including the Go SDK) don't yet support this natively. There are several utilities you can use to "bridge the gap" between AWS SSO-based credentials, and "legacy" credentials, such as [AwsHelper](https://pypi.org/project/awshelper/) or [aws-sso-util](https://github.com/benkehoe/aws-sso-util).
|
||||
On Windows, the environment variable needs to be set before starting the `dapr` or `daprd` command, doing it inline (like in Linux/MacOS) is not supported.
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{< /tabs >}}
|
||||
|
||||
|
||||
### Authenticate to AWS if using AWS SSO based profiles
|
||||
|
||||
If you authenticate to AWS using [AWS SSO](https://aws.amazon.com/single-sign-on/), some AWS SDKs (including the Go SDK) don't yet support this natively. There are several utilities you can use to "bridge the gap" between AWS SSO-based credentials and "legacy" credentials, such as:
|
||||
- [AwsHelper](https://pypi.org/project/awshelper/)
|
||||
- [aws-sso-util](https://github.com/benkehoe/aws-sso-util)
|
||||
|
||||
{{< tabs "Linux/MacOS" "Windows" >}}
|
||||
<!-- linux -->
|
||||
{{% codetab %}}
|
||||
|
||||
If using AwsHelper, start Dapr like this:
|
||||
|
||||
|
@ -75,7 +106,21 @@ or
|
|||
```bash
|
||||
AWS_PROFILE=myprofile awshelper daprd...
|
||||
```
|
||||
{{% /codetab %}}
|
||||
|
||||
On Windows, the environment variable needs to be set before starting the `awshelper` command; doing it inline as shown above is not supported.
|
||||
<!-- windows -->
|
||||
{{% codetab %}}
|
||||
|
||||
On Windows, the environment variable needs to be set before starting the `awshelper` command; doing it inline (like in Linux/MacOS) is not supported.
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{< /tabs >}}
|
||||
|
||||
## Next steps
|
||||
|
||||
{{< button text="Refer to AWS component specs >>" page="components-reference" >}}
|
||||
|
||||
## Related links
|
||||
|
||||
For more information, see [how the AWS SDK (which Dapr uses) handles credentials](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials).
|
||||
|
|
|
@ -2,6 +2,6 @@
|
|||
type: docs
|
||||
title: "Integrations with Azure"
|
||||
linkTitle: "Azure"
|
||||
weight: 1500
|
||||
weight: 1000
|
||||
description: "Dapr integrations with Azure services"
|
||||
---
|
|
@ -1,401 +0,0 @@
|
|||
---
|
||||
type: docs
|
||||
title: "Authenticating to Azure"
|
||||
linkTitle: "Authenticating to Azure"
|
||||
description: "How to authenticate Azure components using Azure AD and/or Managed Identities"
|
||||
aliases:
|
||||
- "/operations/components/setup-secret-store/supported-secret-stores/azure-keyvault-managed-identity/"
|
||||
- "/reference/components-reference/supported-secret-stores/azure-keyvault-managed-identity/"
|
||||
weight: 1000
|
||||
---
|
||||
|
||||
## Common Azure authentication layer
|
||||
|
||||
Certain Azure components for Dapr offer support for the *common Azure authentication layer*, which enables applications to access data stored in Azure resources by authenticating with Azure AD. Thanks to this, administrators can leverage all the benefits of fine-tuned permissions with RBAC (Role-Based Access Control), and applications running on certain Azure services such as Azure VMs, Azure Kubernetes Service, or many Azure platform services can leverage [Managed Service Identities (MSI)](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview).
|
||||
|
||||
Some Azure components offer alternative authentication methods, such as systems based on "master keys" or "shared keys". Whenever possible, it is recommended that you authenticate your Dapr components using Azure AD for increased security and ease of management, as well as for the ability to leverage MSI if your app is running on supported Azure services.
|
||||
|
||||
> Currently, only a subset of Azure components for Dapr offer support for this authentication method. Over time, support will be expanded to all other Azure components for Dapr. You can track the progress of the work, component-by-component, on [this issue](https://github.com/dapr/components-contrib/issues/1103).
|
||||
|
||||
### About authentication with Azure AD
|
||||
|
||||
Azure AD is Azure's identity and access management (IAM) solution, which is used to authenticate and authorize users and services.
|
||||
|
||||
Azure AD is built on top of open standards such as OAuth 2.0, which allows services (applications) to obtain access tokens to make requests to Azure services, including Azure Storage, Azure Key Vault, Cosmos DB, etc. In the Azure terminology, an application is also called a "Service Principal".
|
||||
|
||||
Many of the services listed above also support authentication using other systems, such as "master keys" or "shared keys". Although those are always valid methods to authenticate your application (and Dapr continues to support them, as explained in each component's reference page), using Azure AD when possible offers various benefits, including:
|
||||
|
||||
- The ability to leverage Managed Service Identities, which allow your application to authenticate with Azure AD, and obtain an access token to make requests to Azure services, without the need to use any credential. When your application is running on a supported Azure service (including, but not limited to, Azure VMs, Azure Kubernetes Service, Azure Web Apps, etc), an identity for your application can be assigned at the infrastructure level.
|
||||
This way, your code does not have to deal with credentials of any kind, removing the challenge of safely managing credentials, allowing greater separation of concerns between development and operations teams and reducing the number of people with access to credentials, and lastly simplifying operational aspects–especially when multiple environments are used.
|
||||
- Using RBAC (Role-Based Access Control) with supported services (such as Azure Storage and Cosmos DB), permissions given to an application can be fine-tuned, for example allowing restricting access to a subset of data or making it read-only.
|
||||
- Better auditing for access.
|
||||
- Ability to authenticate using certificates (optional).
|
||||
|
||||
## Credentials metadata fields
|
||||
|
||||
To authenticate with Azure AD, you will need to add the following credentials as values in the metadata for your Dapr component (read the next section for how to create them). There are multiple options depending on the way you have chosen to pass the credentials to your Dapr service.
|
||||
|
||||
**Authenticating using client credentials:**
|
||||
|
||||
| Field | Required | Details | Example |
|
||||
|---------------------|----------|--------------------------------------|----------------------------------------------|
|
||||
| `azureTenantId` | Y | ID of the Azure AD tenant | `"cd4b2887-304c-47e1-b4d5-65447fdd542b"` |
|
||||
| `azureClientId` | Y | Client ID (application ID) | `"c7dd251f-811f-4ba2-a905-acd4d3f8f08b"` |
|
||||
| `azureClientSecret` | Y | Client secret (application password) | `"Ecy3XG7zVZK3/vl/a2NSB+a1zXLa8RnMum/IgD0E"` |
|
||||
|
||||
When running on Kubernetes, you can also use references to Kubernetes secrets for any or all of the values above.
|
||||
|
||||
**Authenticating using a PFX certificate:**
|
||||
|
||||
| Field | Required | Details | Example |
|
||||
|--------|--------|--------|--------|
|
||||
| `azureTenantId` | Y | ID of the Azure AD tenant | `"cd4b2887-304c-47e1-b4d5-65447fdd542b"` |
|
||||
| `azureClientId` | Y | Client ID (application ID) | `"c7dd251f-811f-4ba2-a905-acd4d3f8f08b"` |
|
||||
| `azureCertificate` | One of `azureCertificate` and `azureCertificateFile` | Certificate and private key (in PFX/PKCS#12 format) | `"-----BEGIN PRIVATE KEY-----\n MIIEvgI... \n -----END PRIVATE KEY----- \n -----BEGIN CERTIFICATE----- \n MIICoTC... \n -----END CERTIFICATE-----` |
|
||||
| `azureCertificateFile` | One of `azureCertificate` and `azureCertificateFile` | Path to the PFX/PKCS#12 file containing the certificate and private key | `"/path/to/file.pem"` |
|
||||
| `azureCertificatePassword` | N | Password for the certificate if encrypted | `"password"` |
|
||||
|
||||
When running on Kubernetes, you can also use references to Kubernetes secrets for any or all of the values above.
|
||||
|
||||
**Authenticating with Managed Service Identities (MSI):**
|
||||
|
||||
| Field | Required | Details | Example |
|
||||
|-----------------|----------|----------------------------|------------------------------------------|
|
||||
| `azureClientId` | N | Client ID (application ID) | `"c7dd251f-811f-4ba2-a905-acd4d3f8f08b"` |
|
||||
|
||||
Using MSI you're not required to specify any value, although you may optionally pass `azureClientId` if needed.
|
||||
|
||||
### Aliases
|
||||
|
||||
For backwards-compatibility reasons, the following values in the metadata are supported as aliases, although their use is discouraged.
|
||||
|
||||
| Metadata key | Aliases (supported but deprecated) |
|
||||
|----------------------------|------------------------------------|
|
||||
| `azureTenantId` | `spnTenantId`, `tenantId` |
|
||||
| `azureClientId` | `spnClientId`, `clientId` |
|
||||
| `azureClientSecret` | `spnClientSecret`, `clientSecret` |
|
||||
| `azureCertificate` | `spnCertificate` |
|
||||
| `azureCertificateFile` | `spnCertificateFile` |
|
||||
| `azureCertificatePassword` | `spnCertificatePassword` |
|
||||
|
||||
## Generating a new Azure AD application and Service Principal
|
||||
|
||||
To start, create a new Azure AD application, which will also be used as Service Principal.
|
||||
|
||||
Prerequisites:
|
||||
|
||||
- Azure Subscription
|
||||
- [Azure CLI](https://docs.microsoft.com/cli/azure/install-azure-cli)
|
||||
- [jq](https://stedolan.github.io/jq/download/)
|
||||
- OpenSSL (included by default on all Linux and macOS systems, as well as on WSL)
|
||||
- The scripts below are optimized for a bash or zsh shell
|
||||
|
||||
> If you haven't already, start by logging in to Azure using the Azure CLI:
|
||||
>
|
||||
> ```sh
|
||||
> # Log in Azure
|
||||
> az login
|
||||
> # Set your default subscription
|
||||
> az account set -s [your subscription id]
|
||||
> ```
|
||||
|
||||
### Creating an Azure AD application
|
||||
|
||||
First, create the Azure AD application with:
|
||||
|
||||
```sh
|
||||
# Friendly name for the application / Service Principal
|
||||
APP_NAME="dapr-application"
|
||||
|
||||
# Create the app
|
||||
APP_ID=$(az ad app create --display-name "${APP_NAME}" | jq -r .appId)
|
||||
```
|
||||
|
||||
{{< tabs "Client secret" "Certificate">}}
|
||||
|
||||
{{% codetab %}}
|
||||
|
||||
To create a **client secret**, then run this command. This will generate a random password based on the base64 charset and 40-characters long. Additionally, it will make the password valid for 2 years, before it will need to be rotated:
|
||||
|
||||
```sh
|
||||
az ad app credential reset \
|
||||
--id "${APP_ID}" \
|
||||
--years 2
|
||||
```
|
||||
|
||||
The output of the command above will be similar to this:
|
||||
|
||||
```json
|
||||
{
|
||||
"appId": "c7dd251f-811f-4ba2-a905-acd4d3f8f08b",
|
||||
"password": "Ecy3XG7zVZK3/vl/a2NSB+a1zXLa8RnMum/IgD0E",
|
||||
"tenant": "cd4b2887-304c-47e1-b4d5-65447fdd542b"
|
||||
}
|
||||
```
|
||||
|
||||
Take note of the values above, which you'll need to use in your Dapr components' metadata, to allow Dapr to authenticate with Azure:
|
||||
|
||||
- `appId` is the value for `azureClientId`
|
||||
- `password` is the value for `azureClientSecret` (this was randomly-generated)
|
||||
- `tenant` is the value for `azureTenantId`
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{% codetab %}}
|
||||
If you'd rather use a **PFX (PKCS#12) certificate**, run this command which will create a self-signed certificate:
|
||||
|
||||
```sh
|
||||
az ad app credential reset \
|
||||
--id "${APP_ID}" \
|
||||
--create-cert
|
||||
```
|
||||
|
||||
> Note: self-signed certificates are recommended for development only. For production, you should use certificates signed by a CA and imported with the `--cert` flag.
|
||||
|
||||
The output of the command above should look like:
|
||||
|
||||
```json
|
||||
{
|
||||
"appId": "c7dd251f-811f-4ba2-a905-acd4d3f8f08b",
|
||||
"fileWithCertAndPrivateKey": "/Users/alessandro/tmpgtdgibk4.pem",
|
||||
"password": null,
|
||||
"tenant": "cd4b2887-304c-47e1-b4d5-65447fdd542b"
|
||||
}
|
||||
```
|
||||
|
||||
Take note of the values above, which you'll need to use in your Dapr components' metadata:
|
||||
|
||||
- `appId` is the value for `azureClientId`
|
||||
- `tenant` is the value for `azureTenantId`
|
||||
- The self-signed PFX certificate and private key are written in the file at the path specified in `fileWithCertAndPrivateKey`.
|
||||
Use the contents of that file as `azureCertificate` (or write it to a file on the server and use `azureCertificateFile`)
|
||||
|
||||
> While the generated file has the `.pem` extension, it contains a certificate and private key encoded as PFX (PKCS#12).
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{< /tabs >}}
|
||||
|
||||
### Creating a Service Principal
|
||||
|
||||
Once you have created an Azure AD application, create a Service Principal for that application, which will allow us to grant it access to Azure resources. Run:
|
||||
|
||||
```sh
|
||||
SERVICE_PRINCIPAL_ID=$(az ad sp create \
|
||||
--id "${APP_ID}" \
|
||||
| jq -r .id)
|
||||
echo "Service Principal ID: ${SERVICE_PRINCIPAL_ID}"
|
||||
```
|
||||
|
||||
The output will be similar to:
|
||||
|
||||
```text
|
||||
Service Principal ID: 1d0ccf05-5427-4b5e-8eb4-005ac5f9f163
|
||||
```
|
||||
|
||||
Note that the value above is the ID of the **Service Principal** which is different from the ID of application in Azure AD (client ID)! The former is defined within an Azure tenant and is used to grant access to Azure resources to an application. The client ID instead is used by your application to authenticate. To sum things up:
|
||||
|
||||
- You'll use the client ID in Dapr manifests to configure authentication with Azure services
|
||||
- You'll use the Service Principal ID to grant permissions to an application to access Azure resources
|
||||
|
||||
Keep in mind that the Service Principal that was just created does not have access to any Azure resource by default. Access will need to be granted to each resource as needed, as documented in the docs for the components.
|
||||
|
||||
> Note: this step is different from the [official documentation](https://docs.microsoft.com/cli/azure/create-an-azure-service-principal-azure-cli) as the short-hand commands included there create a Service Principal that has broad read-write access to all Azure resources in your subscription.
|
||||
> Not only would doing that grant our Service Principal more access than you are likely going to desire, but this also applies only to the Azure management plane (Azure Resource Manager, or ARM), which is irrelevant for Dapr anyway (all Azure components are designed to interact with the data plane of various services, and not ARM).
|
||||
|
||||
### Example usage in a Dapr component
|
||||
|
||||
In this example, you will set up an Azure Key Vault secret store component that uses Azure AD to authenticate.
|
||||
|
||||
{{< tabs "Self-Hosted" "Kubernetes">}}
|
||||
|
||||
{{% codetab %}}
|
||||
|
||||
To use a **client secret**, create a file called `azurekeyvault.yaml` in the components directory, filling in with the details from the above setup process:
|
||||
|
||||
```yaml
|
||||
apiVersion: dapr.io/v1alpha1
|
||||
kind: Component
|
||||
metadata:
|
||||
name: azurekeyvault
|
||||
namespace: default
|
||||
spec:
|
||||
type: secretstores.azure.keyvault
|
||||
version: v1
|
||||
metadata:
|
||||
- name: vaultName
|
||||
value: "[your_keyvault_name]"
|
||||
- name: azureTenantId
|
||||
value: "[your_tenant_id]"
|
||||
- name: azureClientId
|
||||
value: "[your_client_id]"
|
||||
- name: azureClientSecret
|
||||
value : "[your_client_secret]"
|
||||
```
|
||||
|
||||
If you want to use a **certificate** saved on the local disk, instead, use:
|
||||
|
||||
```yaml
|
||||
apiVersion: dapr.io/v1alpha1
|
||||
kind: Component
|
||||
metadata:
|
||||
name: azurekeyvault
|
||||
namespace: default
|
||||
spec:
|
||||
type: secretstores.azure.keyvault
|
||||
version: v1
|
||||
metadata:
|
||||
- name: vaultName
|
||||
value: "[your_keyvault_name]"
|
||||
- name: azureTenantId
|
||||
value: "[your_tenant_id]"
|
||||
- name: azureClientId
|
||||
value: "[your_client_id]"
|
||||
- name: azureCertificateFile
|
||||
value : "[pfx_certificate_file_fully_qualified_local_path]"
|
||||
```
|
||||
{{% /codetab %}}
|
||||
|
||||
{{% codetab %}}
|
||||
In Kubernetes, you store the client secret or the certificate into the Kubernetes Secret Store and then refer to those in the YAML file.
|
||||
|
||||
To use a **client secret**:
|
||||
|
||||
1. Create a Kubernetes secret using the following command:
|
||||
|
||||
```bash
|
||||
kubectl create secret generic [your_k8s_secret_name] --from-literal=[your_k8s_secret_key]=[your_client_secret]
|
||||
```
|
||||
|
||||
- `[your_client_secret]` is the application's client secret as generated above
|
||||
- `[your_k8s_secret_name]` is secret name in the Kubernetes secret store
|
||||
- `[your_k8s_secret_key]` is secret key in the Kubernetes secret store
|
||||
|
||||
2. Create an `azurekeyvault.yaml` component file.
|
||||
|
||||
The component yaml refers to the Kubernetes secretstore using `auth` property and `secretKeyRef` refers to the client secret stored in the Kubernetes secret store.
|
||||
|
||||
```yaml
|
||||
apiVersion: dapr.io/v1alpha1
|
||||
kind: Component
|
||||
metadata:
|
||||
name: azurekeyvault
|
||||
namespace: default
|
||||
spec:
|
||||
type: secretstores.azure.keyvault
|
||||
version: v1
|
||||
metadata:
|
||||
- name: vaultName
|
||||
value: "[your_keyvault_name]"
|
||||
- name: azureTenantId
|
||||
value: "[your_tenant_id]"
|
||||
- name: azureClientId
|
||||
value: "[your_client_id]"
|
||||
- name: azureClientSecret
|
||||
secretKeyRef:
|
||||
name: "[your_k8s_secret_name]"
|
||||
key: "[your_k8s_secret_key]"
|
||||
auth:
|
||||
secretStore: kubernetes
|
||||
```
|
||||
|
||||
3. Apply the `azurekeyvault.yaml` component:
|
||||
|
||||
```bash
|
||||
kubectl apply -f azurekeyvault.yaml
|
||||
```
|
||||
|
||||
To use a **certificate**:
|
||||
|
||||
1. Create a Kubernetes secret using the following command:
|
||||
|
||||
```bash
|
||||
kubectl create secret generic [your_k8s_secret_name] --from-file=[your_k8s_secret_key]=[pfx_certificate_file_fully_qualified_local_path]
|
||||
```
|
||||
|
||||
- `[pfx_certificate_file_fully_qualified_local_path]` is the path to the PFX file you obtained earlier
|
||||
- `[your_k8s_secret_name]` is secret name in the Kubernetes secret store
|
||||
- `[your_k8s_secret_key]` is secret key in the Kubernetes secret store
|
||||
|
||||
2. Create an `azurekeyvault.yaml` component file.
|
||||
|
||||
The component yaml refers to the Kubernetes secretstore using `auth` property and `secretKeyRef` refers to the certificate stored in the Kubernetes secret store.
|
||||
|
||||
```yaml
|
||||
apiVersion: dapr.io/v1alpha1
|
||||
kind: Component
|
||||
metadata:
|
||||
name: azurekeyvault
|
||||
namespace: default
|
||||
spec:
|
||||
type: secretstores.azure.keyvault
|
||||
version: v1
|
||||
metadata:
|
||||
- name: vaultName
|
||||
value: "[your_keyvault_name]"
|
||||
- name: azureTenantId
|
||||
value: "[your_tenant_id]"
|
||||
- name: azureClientId
|
||||
value: "[your_client_id]"
|
||||
- name: azureCertificate
|
||||
secretKeyRef:
|
||||
name: "[your_k8s_secret_name]"
|
||||
key: "[your_k8s_secret_key]"
|
||||
auth:
|
||||
secretStore: kubernetes
|
||||
```
|
||||
|
||||
3. Apply the `azurekeyvault.yaml` component:
|
||||
|
||||
```bash
|
||||
kubectl apply -f azurekeyvault.yaml
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{< /tabs >}}
|
||||
|
||||
## Using Managed Service Identities
|
||||
|
||||
Using MSI, authentication happens automatically by virtue of your application running on top of an Azure service that has an assigned identity. For example, when you create an Azure VM or an Azure Kubernetes Service cluster and choose to enable a managed identity for that, an Azure AD application is created for you and automatically assigned to the service. Your Dapr services can then leverage that identity to authenticate with Azure AD, transparently and without you having to specify any credential.
|
||||
|
||||
To get started with managed identities, first you need to assign an identity to a new or existing Azure resource. The instructions depend on the service in use. Below are links to the official documentation:
|
||||
|
||||
- [Azure Kubernetes Service (AKS)](https://docs.microsoft.com/azure/aks/use-managed-identity)
|
||||
- [Azure App Service](https://docs.microsoft.com/azure/app-service/overview-managed-identity) (including Azure Web Apps and Azure Functions)
|
||||
- [Azure Virtual Machines (VM)](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-cli-windows-vm)
|
||||
- [Azure Virtual Machines Scale Sets (VMSS)](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-cli-windows-vmss)
|
||||
- [Azure Container Instance (ACI)](https://docs.microsoft.com/azure/container-instances/container-instances-managed-identity)
|
||||
|
||||
Other Azure application services may offer support for MSI; please check the documentation for those services to understand how to configure them.
|
||||
|
||||
After assigning a managed identity to your Azure resource, you will have credentials such as:
|
||||
|
||||
```json
|
||||
{
|
||||
"principalId": "<object-id>",
|
||||
"tenantId": "<tenant-id>",
|
||||
"type": "SystemAssigned",
|
||||
"userAssignedIdentities": null
|
||||
}
|
||||
```
|
||||
|
||||
From the list above, take note of **`principalId`** which is the ID of the Service Principal that was created. You'll need that to grant access to Azure resources to your Service Principal.
|
||||
|
||||
## Support for other Azure environments
|
||||
|
||||
By default, Dapr components are configured to interact with Azure resources in the "public cloud". If your application is deployed to another cloud, such as Azure China, Azure Government, or Azure Germany, you can enable that for supported components by setting the `azureEnvironment` metadata property to one of the supported values:
|
||||
|
||||
- Azure public cloud (default): `"AZUREPUBLICCLOUD"`
|
||||
- Azure China: `"AZURECHINACLOUD"`
|
||||
- Azure Government: `"AZUREUSGOVERNMENTCLOUD"`
|
||||
- Azure Germany: `"AZUREGERMANCLOUD"`
|
||||
|
||||
## References
|
||||
|
||||
- [Azure AD app credential: Azure CLI reference](https://docs.microsoft.com/cli/azure/ad/app/credential)
|
||||
- [Azure Managed Service Identity (MSI) overview](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview)
|
||||
- [Secrets building block]({{< ref secrets >}})
|
||||
- [How-To: Retrieve a secret]({{< ref "howto-secrets.md" >}})
|
||||
- [How-To: Reference secrets in Dapr components]({{< ref component-secrets.md >}})
|
||||
- [Secrets API reference]({{< ref secrets_api.md >}})
|
|
@ -6,6 +6,11 @@ description: "Publish APIs for Dapr services and components through Azure API Ma
|
|||
weight: 2000
|
||||
---
|
||||
|
||||
Azure API Management (APIM) is a way to create consistent and modern API gateways for back-end services, including those built with Dapr. Dapr support can be enabled in self-hosted API Management gateways to allow them to forward requests to Dapr services, send messages to Dapr Pub/Sub topics, or trigger Dapr output bindings. For more information, read the guide on [API Management Dapr Integration policies](https://docs.microsoft.com/azure/api-management/api-management-dapr-policies) and try out the [Dapr & Azure API Management Integration Demo](https://github.com/dapr/samples/tree/master/dapr-apim-integration).
|
||||
[Azure API Management](https://learn.microsoft.com/azure/api-management/api-management-key-concepts) is a way to create consistent and modern API gateways for back-end services, including those built with Dapr. You can enable Dapr support in self-hosted API Management gateways to allow them to:
|
||||
- Forward requests to Dapr services
|
||||
- Send messages to Dapr Pub/Sub topics
|
||||
- Trigger Dapr output bindings
|
||||
|
||||
{{< button text="Learn more" link="https://docs.microsoft.com/azure/api-management/api-management-dapr-policies" >}}
|
||||
Try out the [Dapr & Azure API Management Integration sample](https://github.com/dapr/samples/tree/master/dapr-apim-integration).
|
||||
|
||||
{{< button text="Learn more about Dapr integration policies" link="https://docs.microsoft.com/azure/api-management/api-management-dapr-policies" >}}
|
||||
|
|
|
@ -0,0 +1,7 @@
|
|||
---
|
||||
type: docs
|
||||
title: "Authenticate to Azure"
|
||||
linkTitle: "Authenticate to Azure"
|
||||
weight: 1600
|
||||
description: "Learn about authenticating Azure components using Azure Active Directory or Managed Service Identities"
|
||||
---
|
|
@ -0,0 +1,273 @@
|
|||
---
|
||||
type: docs
|
||||
title: "Authenticating to Azure"
|
||||
linkTitle: "Overview"
|
||||
description: "How to authenticate Azure components using Azure AD and/or Managed Identities"
|
||||
aliases:
|
||||
- "/operations/components/setup-secret-store/supported-secret-stores/azure-keyvault-managed-identity/"
|
||||
- "/reference/components-reference/supported-secret-stores/azure-keyvault-managed-identity/"
|
||||
weight: 10000
|
||||
---
|
||||
|
||||
Certain Azure components for Dapr offer support for the *common Azure authentication layer*, which enables applications to access data stored in Azure resources by authenticating with Azure Active Directory (Azure AD). Thanks to this:
|
||||
- Administrators can leverage all the benefits of fine-tuned permissions with Role-Based Access Control (RBAC).
|
||||
- Applications running on Azure services such as Azure Container Apps, Azure Kubernetes Service, Azure VMs, or any other Azure platform services can leverage [Managed Service Identities (MSI)](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview).
|
||||
|
||||
|
||||
## About authentication with Azure AD
|
||||
|
||||
Azure AD is Azure's identity and access management (IAM) solution, which is used to authenticate and authorize users and services.
|
||||
|
||||
Azure AD is built on top of open standards such as OAuth 2.0, which allows services (applications) to obtain access tokens to make requests to Azure services, including Azure Storage, Azure Key Vault, Cosmos DB, etc.
|
||||
|
||||
> In Azure terminology, an application is also called a "Service Principal".
|
||||
|
||||
Some Azure components offer alternative authentication methods, such as systems based on "master keys" or "shared keys". Although both master keys and shared keys are valid and supported by Dapr, you should authenticate your Dapr components using Azure AD. Using Azure AD offers benefits like the following.
|
||||
|
||||
### Managed Service Identities
|
||||
|
||||
With Managed Service Identities (MSI), your application can authenticate with Azure AD and obtain an access token to make requests to Azure services. When your application is running on a supported Azure service, an identity for your application can be assigned at the infrastructure level.
|
||||
|
||||
When using MSI, your code doesn't have to deal with credentials, which:
|
||||
- Removes the challenge of managing credentials safely
|
||||
- Allows greater separation of concerns between development and operations teams
|
||||
- Reduces the number of people with access to credentials
|
||||
- Simplifies operational aspects–especially when multiple environments are used
|
||||
|
||||
### Role-based Access Control
|
||||
|
||||
When using Role-Based Access Control (RBAC) with supported services, permissions given to an application can be fine-tuned. For example, you can restrict access to a subset of data or make it read-only.
|
||||
|
||||
### Auditing
|
||||
|
||||
Using Azure AD provides an improved auditing experience for access.
|
||||
|
||||
### (Optional) Authenticate using certificates
|
||||
|
||||
While Azure AD allows you to use MSI or RBAC, you still have the option to authenticate using certificates.
|
||||
|
||||
## Support for other Azure environments
|
||||
|
||||
By default, Dapr components are configured to interact with Azure resources in the "public cloud". If your application is deployed to another cloud, such as Azure China, Azure Government, or Azure Germany, you can enable that for supported components by setting the `azureEnvironment` metadata property to one of the supported values:
|
||||
|
||||
- Azure public cloud (default): `"AZUREPUBLICCLOUD"`
|
||||
- Azure China: `"AZURECHINACLOUD"`
|
||||
- Azure Government: `"AZUREUSGOVERNMENTCLOUD"`
|
||||
- Azure Germany: `"AZUREGERMANCLOUD"`
|
||||
|
||||
## Credentials metadata fields
|
||||
|
||||
To authenticate with Azure AD, you will need to add the following credentials as values in the metadata for your [Dapr component]({{< ref "#example-usage-in-a-dapr-component" >}}).
|
||||
|
||||
### Metadata options
|
||||
|
||||
Depending on how you've passed credentials to your Dapr services, you have multiple metadata options.
|
||||
|
||||
#### Authenticating using client credentials
|
||||
|
||||
| Field | Required | Details | Example |
|
||||
|---------------------|----------|--------------------------------------|----------------------------------------------|
|
||||
| `azureTenantId` | Y | ID of the Azure AD tenant | `"cd4b2887-304c-47e1-b4d5-65447fdd542b"` |
|
||||
| `azureClientId` | Y | Client ID (application ID) | `"c7dd251f-811f-4ba2-a905-acd4d3f8f08b"` |
|
||||
| `azureClientSecret` | Y | Client secret (application password) | `"Ecy3XG7zVZK3/vl/a2NSB+a1zXLa8RnMum/IgD0E"` |
|
||||
|
||||
When running on Kubernetes, you can also use references to Kubernetes secrets for any or all of the values above.
|
||||
|
||||
#### Authenticating using a PFX certificate
|
||||
|
||||
| Field | Required | Details | Example |
|
||||
|--------|--------|--------|--------|
|
||||
| `azureTenantId` | Y | ID of the Azure AD tenant | `"cd4b2887-304c-47e1-b4d5-65447fdd542b"` |
|
||||
| `azureClientId` | Y | Client ID (application ID) | `"c7dd251f-811f-4ba2-a905-acd4d3f8f08b"` |
|
||||
| `azureCertificate` | One of `azureCertificate` and `azureCertificateFile` | Certificate and private key (in PFX/PKCS#12 format) | `"-----BEGIN PRIVATE KEY-----\n MIIEvgI... \n -----END PRIVATE KEY----- \n -----BEGIN CERTIFICATE----- \n MIICoTC... \n -----END CERTIFICATE-----"` |
|
||||
| `azureCertificateFile` | One of `azureCertificate` and `azureCertificateFile` | Path to the PFX/PKCS#12 file containing the certificate and private key | `"/path/to/file.pem"` |
|
||||
| `azureCertificatePassword` | N | Password for the certificate if encrypted | `"password"` |
|
||||
|
||||
When running on Kubernetes, you can also use references to Kubernetes secrets for any or all of the values above.
|
||||
|
||||
#### Authenticating with Managed Service Identities (MSI)
|
||||
|
||||
| Field | Required | Details | Example |
|
||||
|-----------------|----------|----------------------------|------------------------------------------|
|
||||
| `azureClientId` | N | Client ID (application ID) | `"c7dd251f-811f-4ba2-a905-acd4d3f8f08b"` |
|
||||
|
||||
Using MSI, you're not required to specify any value, although you may pass `azureClientId` if needed.
|
||||
|
||||
### Aliases
|
||||
|
||||
For backwards-compatibility reasons, the following values in the metadata are supported as aliases. Their use is discouraged.
|
||||
|
||||
| Metadata key | Aliases (supported but deprecated) |
|
||||
|----------------------------|------------------------------------|
|
||||
| `azureTenantId` | `spnTenantId`, `tenantId` |
|
||||
| `azureClientId` | `spnClientId`, `clientId` |
|
||||
| `azureClientSecret` | `spnClientSecret`, `clientSecret` |
|
||||
| `azureCertificate` | `spnCertificate` |
|
||||
| `azureCertificateFile` | `spnCertificateFile` |
|
||||
| `azureCertificatePassword` | `spnCertificatePassword` |
|
||||
|
||||
|
||||
### Example usage in a Dapr component
|
||||
|
||||
In this example, you will set up an Azure Key Vault secret store component that uses Azure AD to authenticate.
|
||||
|
||||
{{< tabs "Self-Hosted" "Kubernetes">}}
|
||||
|
||||
{{% codetab %}}
|
||||
|
||||
To use a **client secret**, create a file called `azurekeyvault.yaml` in the components directory, filling in with the details from the above setup process:
|
||||
|
||||
```yaml
|
||||
apiVersion: dapr.io/v1alpha1
|
||||
kind: Component
|
||||
metadata:
|
||||
name: azurekeyvault
|
||||
namespace: default
|
||||
spec:
|
||||
type: secretstores.azure.keyvault
|
||||
version: v1
|
||||
metadata:
|
||||
- name: vaultName
|
||||
value: "[your_keyvault_name]"
|
||||
- name: azureTenantId
|
||||
value: "[your_tenant_id]"
|
||||
- name: azureClientId
|
||||
value: "[your_client_id]"
|
||||
- name: azureClientSecret
|
||||
value: "[your_client_secret]"
|
||||
```
|
||||
|
||||
If you want to use a **certificate** saved on the local disk, instead, use:
|
||||
|
||||
```yaml
|
||||
apiVersion: dapr.io/v1alpha1
|
||||
kind: Component
|
||||
metadata:
|
||||
name: azurekeyvault
|
||||
namespace: default
|
||||
spec:
|
||||
type: secretstores.azure.keyvault
|
||||
version: v1
|
||||
metadata:
|
||||
- name: vaultName
|
||||
value: "[your_keyvault_name]"
|
||||
- name: azureTenantId
|
||||
value: "[your_tenant_id]"
|
||||
- name: azureClientId
|
||||
value: "[your_client_id]"
|
||||
- name: azureCertificateFile
|
||||
value: "[pfx_certificate_file_fully_qualified_local_path]"
|
||||
```
|
||||
{{% /codetab %}}
|
||||
|
||||
{{% codetab %}}
|
||||
In Kubernetes, you store the client secret or the certificate into the Kubernetes Secret Store and then refer to those in the YAML file.
|
||||
|
||||
To use a **client secret**:
|
||||
|
||||
1. Create a Kubernetes secret using the following command:
|
||||
|
||||
```bash
|
||||
kubectl create secret generic [your_k8s_secret_name] --from-literal=[your_k8s_secret_key]=[your_client_secret]
|
||||
```
|
||||
|
||||
- `[your_client_secret]` is the application's client secret as generated above
|
||||
- `[your_k8s_secret_name]` is the secret name in the Kubernetes secret store
|
||||
- `[your_k8s_secret_key]` is the secret key in the Kubernetes secret store
|
||||
|
||||
1. Create an `azurekeyvault.yaml` component file.
|
||||
|
||||
The component yaml refers to the Kubernetes secret store using the `auth` property, and `secretKeyRef` refers to the client secret stored in the Kubernetes secret store.
|
||||
|
||||
```yaml
|
||||
apiVersion: dapr.io/v1alpha1
|
||||
kind: Component
|
||||
metadata:
|
||||
name: azurekeyvault
|
||||
namespace: default
|
||||
spec:
|
||||
type: secretstores.azure.keyvault
|
||||
version: v1
|
||||
metadata:
|
||||
- name: vaultName
|
||||
value: "[your_keyvault_name]"
|
||||
- name: azureTenantId
|
||||
value: "[your_tenant_id]"
|
||||
- name: azureClientId
|
||||
value: "[your_client_id]"
|
||||
- name: azureClientSecret
|
||||
secretKeyRef:
|
||||
name: "[your_k8s_secret_name]"
|
||||
key: "[your_k8s_secret_key]"
|
||||
auth:
|
||||
secretStore: kubernetes
|
||||
```
|
||||
|
||||
1. Apply the `azurekeyvault.yaml` component:
|
||||
|
||||
```bash
|
||||
kubectl apply -f azurekeyvault.yaml
|
||||
```
|
||||
|
||||
To use a **certificate**:
|
||||
|
||||
1. Create a Kubernetes secret using the following command:
|
||||
|
||||
```bash
|
||||
kubectl create secret generic [your_k8s_secret_name] --from-file=[your_k8s_secret_key]=[pfx_certificate_file_fully_qualified_local_path]
|
||||
```
|
||||
|
||||
- `[pfx_certificate_file_fully_qualified_local_path]` is the path to the PFX file you obtained earlier
|
||||
- `[your_k8s_secret_name]` is the secret name in the Kubernetes secret store
|
||||
- `[your_k8s_secret_key]` is the secret key in the Kubernetes secret store
|
||||
|
||||
1. Create an `azurekeyvault.yaml` component file.
|
||||
|
||||
The component yaml refers to the Kubernetes secret store using the `auth` property, and `secretKeyRef` refers to the certificate stored in the Kubernetes secret store.
|
||||
|
||||
```yaml
|
||||
apiVersion: dapr.io/v1alpha1
|
||||
kind: Component
|
||||
metadata:
|
||||
name: azurekeyvault
|
||||
namespace: default
|
||||
spec:
|
||||
type: secretstores.azure.keyvault
|
||||
version: v1
|
||||
metadata:
|
||||
- name: vaultName
|
||||
value: "[your_keyvault_name]"
|
||||
- name: azureTenantId
|
||||
value: "[your_tenant_id]"
|
||||
- name: azureClientId
|
||||
value: "[your_client_id]"
|
||||
- name: azureCertificate
|
||||
secretKeyRef:
|
||||
name: "[your_k8s_secret_name]"
|
||||
key: "[your_k8s_secret_key]"
|
||||
auth:
|
||||
secretStore: kubernetes
|
||||
```
|
||||
|
||||
1. Apply the `azurekeyvault.yaml` component:
|
||||
|
||||
```bash
|
||||
kubectl apply -f azurekeyvault.yaml
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{< /tabs >}}
|
||||
|
||||
## Next steps
|
||||
|
||||
{{< button text="Generate a new Azure AD application and Service Principal >>" page="howto-aad.md" >}}
|
||||
|
||||
## References
|
||||
|
||||
- [Azure AD app credential: Azure CLI reference](https://docs.microsoft.com/cli/azure/ad/app/credential)
|
||||
- [Azure Managed Service Identity (MSI) overview](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview)
|
||||
- [Secrets building block]({{< ref secrets >}})
|
||||
- [How-To: Retrieve a secret]({{< ref "howto-secrets.md" >}})
|
||||
- [How-To: Reference secrets in Dapr components]({{< ref component-secrets.md >}})
|
||||
- [Secrets API reference]({{< ref secrets_api.md >}})
|
|
@ -0,0 +1,147 @@
|
|||
---
|
||||
type: docs
|
||||
title: "How to: Generate a new Azure AD application and Service Principal"
|
||||
linkTitle: "How to: Generate Azure AD and Service Principal"
|
||||
weight: 30000
|
||||
description: "Learn how to generate an Azure Active Directory and use it as a Service Principal"
|
||||
---
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- [An Azure subscription](https://azure.microsoft.com/free/)
|
||||
- [Azure CLI](https://docs.microsoft.com/cli/azure/install-azure-cli)
|
||||
- [jq](https://stedolan.github.io/jq/download/)
|
||||
- OpenSSL (included by default on all Linux and macOS systems, as well as on WSL)
|
||||
- Make sure you're using a bash or zsh shell
|
||||
|
||||
## Log into Azure using the Azure CLI
|
||||
|
||||
In a new terminal, run the following command:
|
||||
|
||||
```sh
|
||||
az login
|
||||
az account set -s [your subscription id]
|
||||
```
|
||||
|
||||
### Create an Azure AD application
|
||||
|
||||
Create the Azure AD application with:
|
||||
|
||||
```sh
|
||||
# Friendly name for the application / Service Principal
|
||||
APP_NAME="dapr-application"
|
||||
|
||||
# Create the app
|
||||
APP_ID=$(az ad app create --display-name "${APP_NAME}" | jq -r .appId)
|
||||
```
|
||||
|
||||
Select how you'd prefer to pass credentials.
|
||||
|
||||
{{< tabs "Client secret" "PFX certificate">}}
|
||||
|
||||
{{% codetab %}}
|
||||
|
||||
To create a **client secret**, run the following command.
|
||||
|
||||
```sh
|
||||
az ad app credential reset \
|
||||
--id "${APP_ID}" \
|
||||
--years 2
|
||||
```
|
||||
|
||||
This generates a random, 40-character-long password based on the `base64` charset. This password will be valid for 2 years, before you need to rotate it.
|
||||
|
||||
Save the output values returned; you'll need them for Dapr to authenticate with Azure. The expected output:
|
||||
|
||||
```json
|
||||
{
|
||||
"appId": "<your-app-id>",
|
||||
"password": "<your-password>",
|
||||
"tenant": "<your-azure-tenant>"
|
||||
}
|
||||
```
|
||||
|
||||
When adding the returned values to your Dapr component's metadata:
|
||||
- `appId` is the value for `azureClientId`
|
||||
- `password` is the value for `azureClientSecret` (this was randomly-generated)
|
||||
- `tenant` is the value for `azureTenantId`
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{% codetab %}}
|
||||
For a **PFX (PKCS#12) certificate**, run the following command to create a self-signed certificate:
|
||||
|
||||
```sh
|
||||
az ad app credential reset \
|
||||
--id "${APP_ID}" \
|
||||
--create-cert
|
||||
```
|
||||
|
||||
> **Note:** Self-signed certificates are recommended for development only. For production, you should use certificates signed by a CA and imported with the `--cert` flag.
|
||||
|
||||
The output of the command above should look like:
|
||||
|
||||
Save the output values returned; you'll need them for Dapr to authenticate with Azure. The expected output:
|
||||
|
||||
```json
|
||||
{
|
||||
"appId": "<your-app-id>",
|
||||
"fileWithCertAndPrivateKey": "<file-path>",
|
||||
"password": null,
|
||||
"tenant": "<your-azure-tenant>"
|
||||
}
|
||||
```
|
||||
|
||||
When adding the returned values to your Dapr component's metadata:
|
||||
- `appId` is the value for `azureClientId`
|
||||
- `tenant` is the value for `azureTenantId`
|
||||
- `fileWithCertAndPrivateKey` indicates the location of the self-signed PFX certificate and private key. Use the contents of that file as `azureCertificate` (or write it to a file on the server and use `azureCertificateFile`)
|
||||
|
||||
> **Note:** While the generated file has the `.pem` extension, it contains a certificate and private key encoded as _PFX (PKCS#12)_.
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{< /tabs >}}
|
||||
|
||||
### Create a Service Principal
|
||||
|
||||
Once you have created an Azure AD application, create a Service Principal for that application. With this Service Principal, you can grant it access to Azure resources.
|
||||
|
||||
To create the Service Principal, run the following command:
|
||||
|
||||
```sh
|
||||
SERVICE_PRINCIPAL_ID=$(az ad sp create \
|
||||
--id "${APP_ID}" \
|
||||
| jq -r .id)
|
||||
echo "Service Principal ID: ${SERVICE_PRINCIPAL_ID}"
|
||||
```
|
||||
|
||||
Expected output:
|
||||
|
||||
```text
|
||||
Service Principal ID: 1d0ccf05-5427-4b5e-8eb4-005ac5f9f163
|
||||
```
|
||||
|
||||
The returned value above is the **Service Principal ID**, which is different from the Azure AD application ID (client ID).
|
||||
|
||||
**The Service Principal ID** is:
|
||||
- Defined within an Azure tenant
|
||||
- Used to grant access to Azure resources to an application
|
||||
|
||||
You'll use the Service Principal ID to grant permissions to an application to access Azure resources.
|
||||
|
||||
Meanwhile, **the client ID** is used by your application to authenticate. You'll use the client ID in Dapr manifests to configure authentication with Azure services.
|
||||
|
||||
Keep in mind that the Service Principal that was just created does not have access to any Azure resource by default. Access will need to be granted to each resource as needed, as documented in the docs for the components.
|
||||
|
||||
{{% alert title="Note" color="primary" %}}
|
||||
This step is different from the [official Azure documentation](https://docs.microsoft.com/cli/azure/create-an-azure-service-principal-azure-cli). The short-hand commands included in the official documentation creates a Service Principal that has broad `read-write` access to all Azure resources in your subscription, which:
|
||||
|
||||
- Grants your Service Principal more access than you likely desire.
|
||||
- Applies _only_ to the Azure management plane (Azure Resource Manager, or ARM), which is irrelevant for Dapr components, which are designed to interact with the data plane of various services.
|
||||
|
||||
{{% /alert %}}
|
||||
|
||||
## Next steps
|
||||
|
||||
{{< button text="Use MSI >>" page="howto-msi.md" >}}
|
|
@ -0,0 +1,38 @@
|
|||
---
|
||||
type: docs
|
||||
title: "How to: Use Managed Service Identities"
|
||||
linkTitle: "How to: Use MSI"
|
||||
weight: 40000
|
||||
description: "Learn how to use Managed Service Identities"
|
||||
---
|
||||
|
||||
Using MSI, authentication happens automatically by virtue of your application running on top of an Azure service that has an assigned identity.
|
||||
|
||||
For example, let's say you enable a managed service identity for an Azure VM, Azure Container App, or an Azure Kubernetes Service cluster. When you do, an Azure AD application is created for you and automatically assigned to the service. Your Dapr services can then leverage that identity to authenticate with Azure AD, transparently and without you having to specify any credential.
|
||||
|
||||
To get started with managed identities, you need to assign an identity to a new or existing Azure resource. The instructions depend on the service you use. Check the following official documentation for the most appropriate instructions:
|
||||
|
||||
- [Azure Kubernetes Service (AKS)](https://docs.microsoft.com/azure/aks/use-managed-identity)
|
||||
- [Azure Container Apps (ACA)](https://learn.microsoft.com/azure/container-apps/dapr-overview?tabs=bicep1%2Cyaml#using-managed-identity)
|
||||
- [Azure App Service](https://docs.microsoft.com/azure/app-service/overview-managed-identity) (including Azure Web Apps and Azure Functions)
|
||||
- [Azure Virtual Machines (VM)](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-cli-windows-vm)
|
||||
- [Azure Virtual Machines Scale Sets (VMSS)](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-cli-windows-vmss)
|
||||
- [Azure Container Instance (ACI)](https://docs.microsoft.com/azure/container-instances/container-instances-managed-identity)
|
||||
|
||||
|
||||
After assigning a managed identity to your Azure resource, you will have credentials such as:
|
||||
|
||||
```json
|
||||
{
|
||||
"principalId": "<object-id>",
|
||||
"tenantId": "<tenant-id>",
|
||||
"type": "SystemAssigned",
|
||||
"userAssignedIdentities": null
|
||||
}
|
||||
```
|
||||
|
||||
From the returned values, take note of **`principalId`**, which is the Service Principal ID that was created. You'll use that to grant access to Azure resources to your Service Principal.
|
||||
|
||||
## Next steps
|
||||
|
||||
{{< button text="Refer to Azure component specs >>" page="components-reference" >}}
|
|
@ -3,8 +3,16 @@ type: docs
|
|||
title: "Dapr extension for Azure Functions runtime"
|
||||
linkTitle: "Azure Functions extension"
|
||||
description: "Access Dapr capabilities from your Azure Functions runtime application"
|
||||
weight: 4000
|
||||
weight: 3000
|
||||
---
|
||||
|
||||
Dapr integrates with the Azure Functions runtime via an extension that lets a function seamlessly interact with Dapr. Azure Functions provides an event-driven programming model and Dapr provides cloud-native building blocks. With this extension, you can bring both together for serverless and event-driven apps. For more information read
|
||||
[Azure Functions extension for Dapr](https://cloudblogs.microsoft.com/opensource/2020/07/01/announcing-azure-functions-extension-for-dapr/) and visit the [Azure Functions extension](https://github.com/dapr/azure-functions-extension) repo to try out the samples.
|
||||
{{% alert title="Note" color="primary" %}}
|
||||
The Dapr Functions extension is currently in preview.
|
||||
{{% /alert %}}
|
||||
|
||||
|
||||
Dapr integrates with the [Azure Functions runtime](https://learn.microsoft.com/azure/azure-functions/functions-overview) via an extension that lets a function seamlessly interact with Dapr. Azure Functions provides an event-driven programming model and Dapr provides cloud-native building blocks. The extension combines the two for serverless and event-driven apps.
|
||||
|
||||
Try out the [Dapr Functions extension](https://github.com/dapr/azure-functions-extension) samples.
|
||||
|
||||
{{< button text="Learn more about the Dapr Function extension in preview" link="https://cloudblogs.microsoft.com/opensource/2020/07/01/announcing-azure-functions-extension-for-dapr/" >}}
|
||||
|
|
|
@ -6,104 +6,12 @@ description: "Provision Dapr on your Azure Kubernetes Service (AKS) cluster with
|
|||
weight: 4000
|
||||
---
|
||||
|
||||
# Prerequisites
|
||||
- Azure subscription
|
||||
- [Azure CLI](https://docs.microsoft.com/cli/azure/install-azure-cli-windows?tabs=azure-cli) and the ***aks-preview*** extension.
|
||||
- [Azure Kubernetes Service (AKS) cluster](https://docs.microsoft.com/azure/aks/tutorial-kubernetes-deploy-cluster?tabs=azure-cli)
|
||||
|
||||
## Install Dapr using the AKS Dapr extension
|
||||
The recommended approach for installing Dapr on AKS is to use the AKS Dapr extension. The extension offers support for all native Dapr configuration capabilities through command-line arguments via the Azure CLI and offers the option of opting into automatic minor version upgrades of the Dapr runtime.
|
||||
The recommended approach for installing Dapr on AKS is to use the AKS Dapr extension. The extension offers:
|
||||
- Support for all native Dapr configuration capabilities through command-line arguments via the Azure CLI
|
||||
- The option of opting into automatic minor version upgrades of the Dapr runtime
|
||||
|
||||
{{% alert title="Note" color="warning" %}}
|
||||
If you install Dapr through the AKS extension, our recommendation is to continue using the extension for future management of Dapr instead of the Dapr CLI. Combining the two tools can cause conflicts and result in undesired behavior.
|
||||
If you install Dapr through the AKS extension, best practice is to continue using the extension for future management of Dapr _instead of the Dapr CLI_. Combining the two tools can cause conflicts and result in undesired behavior.
|
||||
{{% /alert %}}
|
||||
|
||||
### How the extension works
|
||||
The Dapr extension works by provisioning the Dapr control plane on your AKS cluster through the Azure CLI. The Dapr control plane consists of:
|
||||
|
||||
- **dapr-operator**: Manages component updates and Kubernetes services endpoints for Dapr (state stores, pub/subs, etc.)
|
||||
- **dapr-sidecar-injector**: Injects Dapr into annotated deployment pods and adds the environment variables `DAPR_HTTP_PORT` and `DAPR_GRPC_PORT`. This enables user-defined applications to communicate with Dapr without the need to hard-code Dapr port values.
|
||||
- **dapr-placement**: Used for actors only. Creates mapping tables that map actor instances to pods
|
||||
- **dapr-sentry**: Manages mTLS between services and acts as a certificate authority. For more information read the security overview.
|
||||
|
||||
### Extension Prerequisites
|
||||
In order to use the AKS Dapr extension, you must first enable the `AKS-ExtensionManager` and `AKS-Dapr` feature flags on your Azure subscription.
|
||||
|
||||
The below command will register the `AKS-ExtensionManager` and `AKS-Dapr` feature flags on your Azure subscription:
|
||||
|
||||
```bash
|
||||
az feature register --namespace "Microsoft.ContainerService" --name "AKS-ExtensionManager"
|
||||
az feature register --namespace "Microsoft.ContainerService" --name "AKS-Dapr"
|
||||
```
|
||||
|
||||
After a few minutes, check that the status shows `Registered`. Confirm the registration status by using the `az feature list` command:
|
||||
|
||||
```bash
|
||||
az feature list -o table --query "[?contains(name, 'Microsoft.ContainerService/AKS-ExtensionManager')].{Name:name,State:properties.state}"
|
||||
az feature list -o table --query "[?contains(name, 'Microsoft.ContainerService/AKS-Dapr')].{Name:name,State:properties.state}"
|
||||
```
|
||||
|
||||
Next, refresh the registration of the `Microsoft.KubernetesConfiguration` and `Microsoft.ContainerService` resource providers by using the `az provider register` command:
|
||||
|
||||
```bash
|
||||
az provider register --namespace Microsoft.KubernetesConfiguration
|
||||
az provider register --namespace Microsoft.ContainerService
|
||||
```
|
||||
|
||||
#### Enable the Azure CLI extension for cluster extensions
|
||||
You will also need the `k8s-extension` Azure CLI extension. Install this by running the following commands:
|
||||
|
||||
```bash
|
||||
az extension add --name k8s-extension
|
||||
```
|
||||
|
||||
If the `k8s-extension` extension is already present, you can update it to the latest version using the below command:
|
||||
|
||||
```bash
|
||||
az extension update --name k8s-extension
|
||||
```
|
||||
|
||||
#### Create the extension and install Dapr on your AKS cluster
|
||||
After your subscription is registered to use Kubernetes extensions, install Dapr on your cluster by creating the Dapr extension. For example:
|
||||
|
||||
```bash
|
||||
az k8s-extension create --cluster-type managedClusters \
|
||||
--cluster-name myAKSCluster \
|
||||
--resource-group myResourceGroup \
|
||||
--name myDaprExtension \
|
||||
--extension-type Microsoft.Dapr
|
||||
```
|
||||
|
||||
Additionally, Dapr can automatically update its minor version. To enable this, set the `--auto-upgrade-minor-version` parameter to true:
|
||||
|
||||
```bash
|
||||
--auto-upgrade-minor-version true
|
||||
```
|
||||
|
||||
Once the `k8s-extension` finishes provisioning, you can confirm that the Dapr control plane is installed on your AKS cluster by running:
|
||||
|
||||
```bash
|
||||
kubectl get pods -n dapr-system
|
||||
```
|
||||
|
||||
In the example output below, note how the Dapr control plane is installed with high availability mode, enabled by default.
|
||||
|
||||
```
|
||||
~ kubectl get pods -n dapr-system
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
dapr-dashboard-5f49d48796-rnt5t 1/1 Running 0 1h
|
||||
dapr-operator-98579b8b4-fpz7k 1/1 Running 0 1h
|
||||
dapr-operator-98579b8b4-nn5vm 1/1 Running 0 1h
|
||||
dapr-operator-98579b8b4-pplqr 1/1 Running 0 1h
|
||||
dapr-placement-server-0 1/1 Running 0 1h
|
||||
dapr-placement-server-1 1/1 Running 0 1h
|
||||
dapr-placement-server-2 1/1 Running 0 1h
|
||||
dapr-sentry-775bccdddb-htcl7 1/1 Running 0 1h
|
||||
dapr-sentry-775bccdddb-vtfxj 1/1 Running 0 1h
|
||||
dapr-sentry-775bccdddb-w4l8x 1/1 Running 0 1h
|
||||
dapr-sidecar-injector-9555889bc-klb9g 1/1 Running 0 1h
|
||||
dapr-sidecar-injector-9555889bc-rpjwl 1/1 Running 0 1h
|
||||
dapr-sidecar-injector-9555889bc-rqjgt 1/1 Running 0 1h
|
||||
```
|
||||
|
||||
For more information about configuration options and targeting specific Dapr versions, see the official [AKS Dapr Extension Docs](https://docs.microsoft.com/azure/aks/dapr).
|
||||
{{< button text="Learn more about the Dapr extension for AKS" link="https://learn.microsoft.com/azure/aks/dapr" >}}
|
||||
|
|
|
@ -1,231 +0,0 @@
|
|||
---
|
||||
type: docs
|
||||
title: "Build workflow applications with Logic Apps"
|
||||
linkTitle: "Logic Apps workflows"
|
||||
description: "Learn how to build workflows applications using Dapr Workflows and Logic Apps runtime"
|
||||
weight: 3000
|
||||
---
|
||||
|
||||
Dapr Workflows is a lightweight host that allows developers to run cloud-native workflows locally, on-premises or any cloud environment using the [Azure Logic Apps](https://docs.microsoft.com/azure/logic-apps/logic-apps-overview) workflow engine and Dapr.
|
||||
|
||||
## Benefits
|
||||
|
||||
By using a workflow engine, business logic can be defined in a declarative, no-code fashion so application code doesn't need to change when a workflow changes. Dapr Workflows allows you to use workflows in a distributed application along with these added benefits:
|
||||
|
||||
- **Run workflows anywhere**: on your local machine, on-premises, on Kubernetes or in the cloud
|
||||
- **Built-in observability**: tracing, metrics and mTLS through Dapr
|
||||
- **gRPC and HTTP endpoints** for your workflows
|
||||
- Kick off workflows based on **Dapr bindings** events
|
||||
- Orchestrate complex workflows by **calling back to Dapr** to save state, publish a message and more
|
||||
|
||||
<img src="/images/workflows-diagram.png" width=500 alt="Diagram of Dapr Workflows">
|
||||
|
||||
## How it works
|
||||
|
||||
Dapr Workflows hosts a gRPC server that implements the Dapr Client API.
|
||||
|
||||
This allows users to start workflows using gRPC and HTTP endpoints through Dapr, or start a workflow asynchronously using Dapr bindings.
|
||||
Once a workflow request comes in, Dapr Workflows uses the Logic Apps SDK to execute the workflow.
|
||||
|
||||
## Supported workflow features
|
||||
|
||||
### Supported actions and triggers
|
||||
|
||||
- [HTTP](https://docs.microsoft.com/azure/connectors/connectors-native-http)
|
||||
- [Schedule](https://docs.microsoft.com/azure/logic-apps/concepts-schedule-automated-recurring-tasks-workflows)
|
||||
- [Request / Response](https://docs.microsoft.com/azure/connectors/connectors-native-reqres)
|
||||
|
||||
### Supported control workflows
|
||||
|
||||
- [All control workflows](https://docs.microsoft.com/azure/connectors/apis-list#control-workflow)
|
||||
|
||||
### Supported data manipulation
|
||||
|
||||
- [All data operations](https://docs.microsoft.com/azure/connectors/apis-list#manage-or-manipulate-data)
|
||||
|
||||
### Not supported
|
||||
|
||||
- [Managed connectors](https://docs.microsoft.com/azure/connectors/apis-list#managed-connectors)
|
||||
|
||||
## Example
|
||||
|
||||
Dapr Workflows can be used as the orchestrator for many otherwise complex activities. For example, invoking an external endpoint, saving the data to a state store, publishing the result to a different app or invoking a binding can all be done by calling back into Dapr from the workflow itself.
|
||||
|
||||
This is due to the fact Dapr runs as a sidecar next to the workflow host just as if it was any other app.
|
||||
|
||||
Examine [workflow.json](/code/workflow.json) as an example of a workflow that does the following:
|
||||
|
||||
1. Calls into Azure Functions to get a JSON response
|
||||
2. Saves the result to a Dapr state store
|
||||
3. Sends the result to a Dapr binding
|
||||
4. Returns the result to the caller
|
||||
|
||||
Since Dapr supports many pluggable state stores and bindings, the workflow becomes portable between different environments (cloud, edge or on-premises) without the user changing the code - *because there is no code involved*.
|
||||
|
||||
## Get started
|
||||
|
||||
Prerequisites:
|
||||
|
||||
1. Install the [Dapr CLI]({{< ref install-dapr-cli.md >}})
|
||||
2. [Azure blob storage account](https://docs.microsoft.com/azure/storage/blobs/storage-blob-create-account-block-blob?tabs=azure-portal)
|
||||
|
||||
### Self-hosted
|
||||
|
||||
1. Make sure you have the Dapr runtime initialized:
|
||||
|
||||
```bash
|
||||
dapr init
|
||||
```
|
||||
|
||||
1. Set up the environment variables containing the Azure Storage Account credentials:
|
||||
|
||||
{{< tabs Windows "macOS/Linux" >}}
|
||||
|
||||
{{% codetab %}}
|
||||
```bash
|
||||
export STORAGE_ACCOUNT_KEY=<YOUR-STORAGE-ACCOUNT-KEY>
|
||||
export STORAGE_ACCOUNT_NAME=<YOUR-STORAGE-ACCOUNT-NAME>
|
||||
```
|
||||
{{% /codetab %}}
|
||||
|
||||
{{% codetab %}}
|
||||
```bash
|
||||
set STORAGE_ACCOUNT_KEY=<YOUR-STORAGE-ACCOUNT-KEY>
|
||||
set STORAGE_ACCOUNT_NAME=<YOUR-STORAGE-ACCOUNT-NAME>
|
||||
```
|
||||
{{% /codetab %}}
|
||||
|
||||
{{< /tabs >}}
|
||||
|
||||
1. Move to the workflows directory and run the sample runtime:
|
||||
|
||||
```bash
|
||||
cd src/Dapr.Workflows
|
||||
|
||||
dapr run --app-id workflows --protocol grpc --port 3500 --app-port 50003 -- dotnet run --workflows-path ../../samples
|
||||
```
|
||||
|
||||
1. Invoke a workflow:
|
||||
|
||||
```bash
|
||||
curl http://localhost:3500/v1.0/invoke/workflows/method/workflow1
|
||||
|
||||
{"value":"Hello from Logic App workflow running with Dapr!"}
|
||||
```
|
||||
|
||||
### Kubernetes
|
||||
|
||||
1. Make sure you have a running Kubernetes cluster and `kubectl` in your path.
|
||||
|
||||
1. Once you have the Dapr CLI installed, run:
|
||||
|
||||
```bash
|
||||
dapr init --kubernetes
|
||||
```
|
||||
|
||||
1. Wait until the Dapr pods have the status `Running`.
|
||||
|
||||
1. Create a Config Map for the workflow:
|
||||
|
||||
```bash
|
||||
kubectl create configmap workflows --from-file ./samples/workflow1.json
|
||||
```
|
||||
|
||||
1. Create a secret containing the Azure Storage Account credentials. Replace the account name and key values below with the actual credentials:
|
||||
|
||||
```bash
|
||||
kubectl create secret generic dapr-workflows --from-literal=accountName=<YOUR-STORAGE-ACCOUNT-NAME> --from-literal=accountKey=<YOUR-STORAGE-ACCOUNT-KEY>
|
||||
```
|
||||
|
||||
1. Deploy Dapr Workflows:
|
||||
|
||||
```bash
|
||||
kubectl apply -f deploy/deploy.yaml
|
||||
```
|
||||
|
||||
1. Create a port-forward to the dapr workflows container:
|
||||
|
||||
```bash
|
||||
kubectl port-forward deploy/dapr-workflows-host 3500:3500
|
||||
```
|
||||
|
||||
1. Invoke logic apps through Dapr:
|
||||
|
||||
```bash
|
||||
curl http://localhost:3500/v1.0/invoke/workflows/method/workflow1
|
||||
|
||||
{"value":"Hello from Logic App workflow running with Dapr!"}
|
||||
```
|
||||
|
||||
## Invoking workflows using Dapr bindings
|
||||
|
||||
1. First, create any [Dapr binding]({{< ref components-reference >}}) of your choice. See [this]({{< ref howto-triggers >}}) How-To tutorial.
|
||||
|
||||
In order for Dapr Workflows to be able to start a workflow from a Dapr binding event, simply name the binding with the name of the workflow you want it to trigger.
|
||||
|
||||
Here's an example of a Kafka binding that will trigger a workflow named `workflow1`:
|
||||
|
||||
```yaml
|
||||
apiVersion: dapr.io/v1alpha1
|
||||
kind: Component
|
||||
metadata:
|
||||
name: workflow1
|
||||
spec:
|
||||
type: bindings.kafka
|
||||
metadata:
|
||||
- name: topics
|
||||
value: topic1
|
||||
- name: brokers
|
||||
value: localhost:9092
|
||||
- name: consumerGroup
|
||||
value: group1
|
||||
- name: authRequired
|
||||
value: "false"
|
||||
```
|
||||
|
||||
1. Next, apply the Dapr component:
|
||||
|
||||
{{< tabs Self-hosted Kubernetes >}}
|
||||
|
||||
{{% codetab %}}
|
||||
Place the binding yaml file above in a `components` directory at the root of your application.
|
||||
{{% /codetab %}}
|
||||
|
||||
{{% codetab %}}
|
||||
```bash
|
||||
kubectl apply -f my_binding.yaml
|
||||
```
|
||||
{{% /codetab %}}
|
||||
|
||||
{{< /tabs >}}
|
||||
|
||||
1. Once an event is sent to the bindings component, check the logs of Dapr Workflows to see the output.
|
||||
|
||||
{{< tabs Self-hosted Kubernetes >}}
|
||||
|
||||
{{% codetab %}}
|
||||
In standalone mode, the output will be printed to the local terminal.
|
||||
{{% /codetab %}}
|
||||
|
||||
{{% codetab %}}
|
||||
On Kubernetes, run the following command:
|
||||
|
||||
```bash
|
||||
kubectl logs -l app=dapr-workflows-host -c host
|
||||
```
|
||||
{{% /codetab %}}
|
||||
|
||||
{{< /tabs >}}
|
||||
|
||||
## Example
|
||||
|
||||
Watch an example from the Dapr community call:
|
||||
|
||||
<div class="embed-responsive embed-responsive-16by9">
|
||||
<iframe width="560" height="315" src="https://www.youtube-nocookie.com/embed/7fP-0Ixmi-w?start=116" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
|
||||
</div>
|
||||
|
||||
## Additional resources
|
||||
|
||||
- [Blog announcement](https://cloudblogs.microsoft.com/opensource/2020/05/26/announcing-cloud-native-workflows-dapr-logic-apps/)
|
||||
- [Repo](https://github.com/dapr/workflows)
|
|
@ -1,22 +1,27 @@
|
|||
---
|
||||
type: docs
|
||||
title: "Autoscaling a Dapr app with KEDA"
|
||||
linkTitle: "Autoscale with KEDA"
|
||||
title: "How to: Autoscale a Dapr app with KEDA"
|
||||
linkTitle: "How to: Autoscale with KEDA"
|
||||
description: "How to configure your Dapr application to autoscale using KEDA"
|
||||
weight: 2000
|
||||
weight: 3000
|
||||
---
|
||||
|
||||
Dapr, with its modular building-block approach, along with the 10+ different [pub/sub components]({{< ref pubsub >}}), make it easy to write message processing applications. Since Dapr can run in many environments (e.g. VM, bare-metal, Cloud, or Edge) the autoscaling of Dapr applications is managed by the hosting layer.
|
||||
Dapr, with its building-block API approach, along with the many [pub/sub components]({{< ref pubsub >}}), makes it easy to write message processing applications. Since Dapr can run in many environments (for example VMs, bare-metal, Cloud or Edge Kubernetes) the autoscaling of Dapr applications is managed by the hosting layer.
|
||||
|
||||
For Kubernetes, Dapr integrates with [KEDA](https://github.com/kedacore/keda), an event driven autoscaler for Kubernetes. Many of Dapr's pub/sub components overlap with the scalers provided by [KEDA](https://github.com/kedacore/keda) so it's easy to configure your Dapr deployment on Kubernetes to autoscale based on the back pressure using KEDA.
|
||||
For Kubernetes, Dapr integrates with [KEDA](https://github.com/kedacore/keda), an event driven autoscaler for Kubernetes. Many of Dapr's pub/sub components overlap with the scalers provided by [KEDA](https://github.com/kedacore/keda), so it's easy to configure your Dapr deployment on Kubernetes to autoscale based on the back pressure using KEDA.
|
||||
|
||||
This how-to walks through the configuration of a scalable Dapr application along with the back pressure on Kafka topic, however you can apply this approach to any [pub/sub components]({{< ref pubsub >}}) offered by Dapr.
|
||||
In this guide, you configure a scalable Dapr application, along with the back pressure on Kafka topic. However, you can apply this approach to _any_ [pub/sub components]({{< ref pubsub >}}) offered by Dapr.
|
||||
|
||||
{{% alert title="Note" color="primary" %}}
|
||||
If you're working with Azure Container Apps, refer to the official Azure documentation for [scaling Dapr applications using KEDA scalers](https://learn.microsoft.com/azure/container-apps/dapr-keda-scaling).
|
||||
|
||||
{{% /alert %}}
|
||||
|
||||
## Install KEDA
|
||||
|
||||
To install KEDA, follow the [Deploying KEDA](https://keda.sh/docs/latest/deploy/) instructions on the KEDA website.
|
||||
|
||||
## Install Kafka (optional)
|
||||
## Install and deploy Kafka
|
||||
|
||||
If you don't have access to a Kafka service, you can install it into your Kubernetes cluster for this example by using Helm:
|
||||
|
||||
|
@ -39,16 +44,16 @@ kubectl rollout status statefulset.apps/kafka-cp-kafka -n kafka
|
|||
kubectl rollout status statefulset.apps/kafka-cp-zookeeper -n kafka
|
||||
```
|
||||
|
||||
When done, also deploy the Kafka client and wait until it's ready:
|
||||
Once installed, deploy the Kafka client and wait until it's ready:
|
||||
|
||||
```shell
|
||||
kubectl apply -n kafka -f deployment/kafka-client.yaml
|
||||
kubectl wait -n kafka --for=condition=ready pod kafka-client --timeout=120s
|
||||
```
|
||||
|
||||
Next, create the topic which is used in this example (for example `demo-topic`):
|
||||
## Create the Kafka topic
|
||||
|
||||
> The number of topic partitions is related to the maximum number of replicas KEDA creates for your deployments
|
||||
Create the topic used in this example (`demo-topic`):
|
||||
|
||||
```shell
|
||||
kubectl -n kafka exec -it kafka-client -- kafka-topics \
|
||||
|
@ -60,9 +65,11 @@ kubectl -n kafka exec -it kafka-client -- kafka-topics \
|
|||
--if-not-exists
|
||||
```
|
||||
|
||||
## Deploy a Dapr Pub/Sub component
|
||||
> The number of topic `partitions` is related to the maximum number of replicas KEDA creates for your deployments.
|
||||
|
||||
Next, we'll deploy the Dapr Kafka pub/sub component for Kubernetes. Paste the following YAML into a file named `kafka-pubsub.yaml`:
|
||||
## Deploy a Dapr pub/sub component
|
||||
|
||||
Deploy the Dapr Kafka pub/sub component for Kubernetes. Paste the following YAML into a file named `kafka-pubsub.yaml`:
|
||||
|
||||
```yaml
|
||||
apiVersion: dapr.io/v1alpha1
|
||||
|
@ -81,9 +88,11 @@ spec:
|
|||
value: autoscaling-subscriber
|
||||
```
|
||||
|
||||
The above YAML defines the pub/sub component that your application subscribes to, the `demo-topic` we created above. If you used the Kafka Helm install instructions above you can leave the `brokers` value as is. Otherwise, change this to the connection string to your Kafka brokers.
|
||||
The above YAML defines the pub/sub component that your application subscribes to and that [you created earlier (`demo-topic`)]({{< ref "#create-the-kafka-topic" >}}).
|
||||
|
||||
Also notice the `autoscaling-subscriber` value set for `consumerID` which is used later to make sure that KEDA and your deployment use the same [Kafka partition offset](http://cloudurable.com/blog/kafka-architecture-topics/index.html#:~:text=Kafka%20continually%20appended%20to%20partitions,fit%20on%20a%20single%20server.).
|
||||
If you used the [Kafka Helm install instructions]({{< ref "#install-and-deploy-kafka" >}}), you can leave the `brokers` value as-is. Otherwise, change this value to the connection string to your Kafka brokers.
|
||||
|
||||
Notice the `autoscaling-subscriber` value set for `consumerID`. This value is used later to ensure that KEDA and your deployment use the same [Kafka partition offset](http://cloudurable.com/blog/kafka-architecture-topics/index.html#:~:text=Kafka%20continually%20appended%20to%20partitions,fit%20on%20a%20single%20server.).
|
||||
|
||||
Now, deploy the component to the cluster:
|
||||
|
||||
|
@ -93,7 +102,9 @@ kubectl apply -f kafka-pubsub.yaml
|
|||
|
||||
## Deploy KEDA autoscaler for Kafka
|
||||
|
||||
Next, we will deploy the KEDA scaling object that monitors the lag on the specified Kafka topic and configures the Kubernetes Horizontal Pod Autoscaler (HPA) to scale your Dapr deployment in and out.
|
||||
Deploy the KEDA scaling object that:
|
||||
- Monitors the lag on the specified Kafka topic
|
||||
- Configures the Kubernetes Horizontal Pod Autoscaler (HPA) to scale your Dapr deployment in and out
|
||||
|
||||
Paste the following into a file named `kafka_scaler.yaml`, and configure your Dapr deployment in the required place:
|
||||
|
||||
|
@ -117,19 +128,25 @@ spec:
|
|||
lagThreshold: "5"
|
||||
```
|
||||
|
||||
A few things to review here in the above file:
|
||||
Let's review a few metadata values in the file above:
|
||||
|
||||
* `name` in the `scaleTargetRef` section in the `spec:` is the Dapr ID of your app defined in the Deployment (The value of the `dapr.io/id` annotation)
|
||||
* `pollingInterval` is the frequency in seconds with which KEDA checks Kafka for current topic partition offset
|
||||
* `minReplicaCount` is the minimum number of replicas KEDA creates for your deployment. (Note, if your application takes a long time to start it may be better to set that to `1` to ensure at least one replica of your deployment is always running. Otherwise, set that to `0` and KEDA creates the first replica for you)
|
||||
* `maxReplicaCount` is the maximum number of replicas for your deployment. Given how [Kafka partition offset](http://cloudurable.com/blog/kafka-architecture-topics/index.html#:~:text=Kafka%20continually%20appended%20to%20partitions,fit%20on%20a%20single%20server.) works, you shouldn't set that value higher than the total number of topic partitions
|
||||
* `topic` in the Kafka `metadata` section which should be set to the same topic to which your Dapr deployment subscribe (In this example `demo-topic`)
|
||||
* Similarly the `bootstrapServers` should be set to the same broker connection string used in the `kafka-pubsub.yaml` file
|
||||
* The `consumerGroup` should be set to the same value as the `consumerID` in the `kafka-pubsub.yaml` file
|
||||
| Values | Description |
|
||||
| ------ | ----------- |
|
||||
| `scaleTargetRef`/`name` | The Dapr ID of your app defined in the Deployment (The value of the `dapr.io/id` annotation). |
|
||||
| `pollingInterval` | The frequency in seconds with which KEDA checks Kafka for current topic partition offset. |
|
||||
| `minReplicaCount` | The minimum number of replicas KEDA creates for your deployment. If your application takes a long time to start, it may be better to set this to `1` to ensure at least one replica of your deployment is always running. Otherwise, set to `0` and KEDA creates the first replica for you. |
|
||||
| `maxReplicaCount` | The maximum number of replicas for your deployment. Given how [Kafka partition offset](http://cloudurable.com/blog/kafka-architecture-topics/index.html#:~:text=Kafka%20continually%20appended%20to%20partitions,fit%20on%20a%20single%20server.) works, you shouldn't set that value higher than the total number of topic partitions. |
|
||||
| `triggers`/`metadata`/`topic` | Should be set to the same topic to which your Dapr deployment subscribed (in this example, `demo-topic`). |
|
||||
| `triggers`/`metadata`/`bootstrapServers` | Should be set to the same broker connection string used in the `kafka-pubsub.yaml` file. |
|
||||
| `triggers`/`metadata`/`consumerGroup` | Should be set to the same value as the `consumerID` in the `kafka-pubsub.yaml` file. |
|
||||
|
||||
> Note: setting the connection string, topic, and consumer group to the *same* values for both the Dapr service subscription and the KEDA scaler configuration is critical to ensure the autoscaling works correctly.
|
||||
{{% alert title="Important" color="warning" %}}
|
||||
Setting the connection string, topic, and consumer group to the *same* values for both the Dapr service subscription and the KEDA scaler configuration is critical to ensure the autoscaling works correctly.
|
||||
|
||||
Next, deploy the KEDA scaler to Kubernetes:
|
||||
{{% /alert %}}
|
||||
|
||||
|
||||
Deploy the KEDA scaler to Kubernetes:
|
||||
|
||||
```bash
|
||||
kubectl apply -f kafka_scaler.yaml
|
||||
|
@ -137,6 +154,12 @@ kubectl apply -f kafka_scaler.yaml
|
|||
|
||||
All done!
|
||||
|
||||
Now, that the `ScaledObject` KEDA object is configured, your deployment will scale based on the lag of the Kafka topic. More information on configuring KEDA for Kafka topics is available [here](https://keda.sh/docs/2.0/scalers/apache-kafka/).
|
||||
## See the KEDA scaler work
|
||||
|
||||
You can now start publishing messages to your Kafka topic `demo-topic` and watch the pods autoscale when the lag threshold is higher than `5` topics, as we have defined in the KEDA scaler manifest. You can publish messages to the Kafka Dapr component by using the Dapr [Publish]({{< ref dapr-publish >}}) CLI command
|
||||
Now that the `ScaledObject` KEDA object is configured, your deployment will scale based on the lag of the Kafka topic. [Learn more about configuring KEDA for Kafka topics](https://keda.sh/docs/2.0/scalers/apache-kafka/).
|
||||
|
||||
As defined in the KEDA scaler manifest, you can now start publishing messages to your Kafka topic `demo-topic` and watch the pods autoscale when the lag threshold is higher than `5` topics. Publish messages to the Kafka Dapr component by using the Dapr [Publish]({{< ref dapr-publish >}}) CLI command.
|
||||
|
||||
## Next steps
|
||||
|
||||
[Learn about scaling your Dapr pub/sub or binding application with KEDA in Azure Container Apps](https://learn.microsoft.com/azure/container-apps/dapr-keda-scaling)
|
|
@ -1,35 +1,40 @@
|
|||
---
|
||||
type: docs
|
||||
title: "Dapr's gRPC Interface"
|
||||
linkTitle: "gRPC interface"
|
||||
title: "How to: Use the gRPC interface in your Dapr application"
|
||||
linkTitle: "How to: gRPC interface"
|
||||
weight: 6000
|
||||
description: "Use the Dapr gRPC API in your application"
|
||||
type: docs
|
||||
---
|
||||
|
||||
# Dapr and gRPC
|
||||
Dapr implements both an HTTP and a gRPC API for local calls. [gRPC](https://grpc.io/) is useful for low-latency, high performance scenarios and has language integration using the proto clients.
|
||||
|
||||
Dapr implements both an HTTP and a gRPC API for local calls. gRPC is useful for low-latency, high performance scenarios and has language integration using the proto clients.
|
||||
|
||||
You can find a list of auto-generated clients [here](https://github.com/dapr/docs#sdks).
|
||||
[Find a list of auto-generated clients in the Dapr SDK documentation]({{< ref sdks >}}).
|
||||
|
||||
The Dapr runtime implements a [proto service](https://github.com/dapr/dapr/blob/master/dapr/proto/runtime/v1/dapr.proto) that apps can communicate with via gRPC.
|
||||
|
||||
In addition to calling Dapr via gRPC, Dapr supports service to service calls with gRPC by acting as a proxy. See more information [here]({{< ref howto-invoke-services-grpc.md >}}).
|
||||
In addition to calling Dapr via gRPC, Dapr supports service-to-service calls with gRPC by acting as a proxy. [Learn more in the gRPC service invocation how-to guide]({{< ref howto-invoke-services-grpc.md >}}).
|
||||
|
||||
## Configuring Dapr to communicate with an app via gRPC
|
||||
This guide demonstrates configuring and invoking Dapr with gRPC using a Go SDK application.
|
||||
|
||||
### Self hosted
|
||||
## Configure Dapr to communicate with an app via gRPC
|
||||
|
||||
When running in self hosted mode, use the `--app-protocol` flag to tell Dapr to use gRPC to talk to the app:
|
||||
{{< tabs "Self-hosted" "Kubernetes">}}
|
||||
<!--selfhosted-->
|
||||
{{% codetab %}}
|
||||
|
||||
When running in self-hosted mode, use the `--app-protocol` flag to tell Dapr to use gRPC to talk to the app.
|
||||
|
||||
```bash
|
||||
dapr run --app-protocol grpc --app-port 5005 node app.js
|
||||
```
|
||||
|
||||
This tells Dapr to communicate with your app via gRPC over port `5005`.
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
### Kubernetes
|
||||
<!--k8s-->
|
||||
{{% codetab %}}
|
||||
|
||||
On Kubernetes, set the following annotations in your deployment YAML:
|
||||
|
||||
|
@ -58,178 +63,195 @@ spec:
|
|||
...
|
||||
```
|
||||
|
||||
## Invoking Dapr with gRPC - Go example
|
||||
{{% /codetab %}}
|
||||
|
||||
The following steps show you how to create a Dapr client and call the `SaveStateData` operation on it:
|
||||
{{< /tabs >}}
|
||||
|
||||
1. Import the package
|
||||
## Invoke Dapr with gRPC
|
||||
|
||||
```go
|
||||
package main
|
||||
The following steps show how to create a Dapr client and call the `SaveStateData` operation on it.
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"os"
|
||||
1. Import the package:
|
||||
|
||||
dapr "github.com/dapr/go-sdk/client"
|
||||
)
|
||||
```
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"os"
|
||||
|
||||
dapr "github.com/dapr/go-sdk/client"
|
||||
)
|
||||
```
|
||||
|
||||
2. Create the client
|
||||
1. Create the client:
|
||||
|
||||
```go
|
||||
// just for this demo
|
||||
ctx := context.Background()
|
||||
data := []byte("ping")
|
||||
|
||||
// create the client
|
||||
client, err := dapr.NewClient()
|
||||
if err != nil {
|
||||
log.Panic(err)
|
||||
}
|
||||
defer client.Close()
|
||||
```
|
||||
|
||||
3. Invoke the Save State method
|
||||
|
||||
```go
|
||||
// save state with the key key1
|
||||
err = client.SaveState(ctx, "statestore", "key1", data)
|
||||
if err != nil {
|
||||
log.Panic(err)
|
||||
}
|
||||
log.Println("data saved")
|
||||
```
|
||||
|
||||
Hooray!
|
||||
```go
|
||||
// just for this demo
|
||||
ctx := context.Background()
|
||||
data := []byte("ping")
|
||||
|
||||
// create the client
|
||||
client, err := dapr.NewClient()
|
||||
if err != nil {
|
||||
log.Panic(err)
|
||||
}
|
||||
defer client.Close()
|
||||
```
|
||||
|
||||
3. Invoke the `SaveState` method:
|
||||
|
||||
```go
|
||||
// save state with the key key1
|
||||
err = client.SaveState(ctx, "statestore", "key1", data)
|
||||
if err != nil {
|
||||
log.Panic(err)
|
||||
}
|
||||
log.Println("data saved")
|
||||
```
|
||||
|
||||
Now you can explore all the different methods on the Dapr client.
|
||||
|
||||
## Creating a gRPC app with Dapr
|
||||
## Create a gRPC app with Dapr
|
||||
|
||||
The following steps will show you how to create an app that exposes a server for Dapr to communicate with.
|
||||
The following steps will show how to create an app that exposes a server with which Dapr can communicate.
|
||||
|
||||
1. Import the package
|
||||
1. Import the package:
|
||||
|
||||
```go
|
||||
package main
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
|
||||
"github.com/golang/protobuf/ptypes/any"
|
||||
"github.com/golang/protobuf/ptypes/empty"
|
||||
|
||||
commonv1pb "github.com/dapr/dapr/pkg/proto/common/v1"
|
||||
pb "github.com/dapr/go-sdk/dapr/proto/runtime/v1"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
```
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
1. Implement the interface:
|
||||
|
||||
"github.com/golang/protobuf/ptypes/any"
|
||||
"github.com/golang/protobuf/ptypes/empty"
|
||||
```go
|
||||
// server is our user app
|
||||
type server struct {
|
||||
pb.UnimplementedAppCallbackServer
|
||||
}
|
||||
|
||||
// EchoMethod is a simple demo method to invoke
|
||||
func (s *server) EchoMethod() string {
|
||||
return "pong"
|
||||
}
|
||||
|
||||
// This method gets invoked when a remote service has called the app through Dapr
|
||||
// The payload carries a Method to identify the method, a set of metadata properties and an optional payload
|
||||
func (s *server) OnInvoke(ctx context.Context, in *commonv1pb.InvokeRequest) (*commonv1pb.InvokeResponse, error) {
|
||||
var response string
|
||||
|
||||
switch in.Method {
|
||||
case "EchoMethod":
|
||||
response = s.EchoMethod()
|
||||
}
|
||||
|
||||
return &commonv1pb.InvokeResponse{
|
||||
ContentType: "text/plain; charset=UTF-8",
|
||||
Data: &any.Any{Value: []byte(response)},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Dapr will call this method to get the list of topics the app wants to subscribe to. In this example, we are telling Dapr
|
||||
// To subscribe to a topic named TopicA
|
||||
func (s *server) ListTopicSubscriptions(ctx context.Context, in *empty.Empty) (*pb.ListTopicSubscriptionsResponse, error) {
|
||||
return &pb.ListTopicSubscriptionsResponse{
|
||||
Subscriptions: []*pb.TopicSubscription{
|
||||
{Topic: "TopicA"},
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Dapr will call this method to get the list of bindings the app will get invoked by. In this example, we are telling Dapr
|
||||
// To invoke our app with a binding named storage
|
||||
func (s *server) ListInputBindings(ctx context.Context, in *empty.Empty) (*pb.ListInputBindingsResponse, error) {
|
||||
return &pb.ListInputBindingsResponse{
|
||||
Bindings: []string{"storage"},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// This method gets invoked every time a new event is fired from a registered binding. The message carries the binding name, a payload and optional metadata
|
||||
func (s *server) OnBindingEvent(ctx context.Context, in *pb.BindingEventRequest) (*pb.BindingEventResponse, error) {
|
||||
fmt.Println("Invoked from binding")
|
||||
return &pb.BindingEventResponse{}, nil
|
||||
}
|
||||
|
||||
// This method is fired whenever a message has been published to a topic that has been subscribed. Dapr sends published messages in a CloudEvents 0.3 envelope.
|
||||
func (s *server) OnTopicEvent(ctx context.Context, in *pb.TopicEventRequest) (*pb.TopicEventResponse, error) {
|
||||
fmt.Println("Topic message arrived")
|
||||
return &pb.TopicEventResponse{}, nil
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
commonv1pb "github.com/dapr/dapr/pkg/proto/common/v1"
|
||||
pb "github.com/dapr/go-sdk/dapr/proto/runtime/v1"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
```
|
||||
1. Create the server:
|
||||
|
||||
2. Implement the interface
|
||||
```go
|
||||
func main() {
|
||||
// create listener
|
||||
lis, err := net.Listen("tcp", ":50001")
|
||||
if err != nil {
|
||||
log.Fatalf("failed to listen: %v", err)
|
||||
}
|
||||
|
||||
// create grpc server
|
||||
s := grpc.NewServer()
|
||||
pb.RegisterAppCallbackServer(s, &server{})
|
||||
|
||||
fmt.Println("Client starting...")
|
||||
|
||||
// and start...
|
||||
if err := s.Serve(lis); err != nil {
|
||||
log.Fatalf("failed to serve: %v", err)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
```go
|
||||
// server is our user app
|
||||
type server struct {
|
||||
pb.UnimplementedAppCallbackServer
|
||||
}
|
||||
This creates a gRPC server for your app on port 50001.
|
||||
|
||||
// EchoMethod is a simple demo method to invoke
|
||||
func (s *server) EchoMethod() string {
|
||||
return "pong"
|
||||
}
|
||||
## Run the application
|
||||
|
||||
// This method gets invoked when a remote service has called the app through Dapr
|
||||
// The payload carries a Method to identify the method, a set of metadata properties and an optional payload
|
||||
func (s *server) OnInvoke(ctx context.Context, in *commonv1pb.InvokeRequest) (*commonv1pb.InvokeResponse, error) {
|
||||
var response string
|
||||
|
||||
switch in.Method {
|
||||
case "EchoMethod":
|
||||
response = s.EchoMethod()
|
||||
}
|
||||
|
||||
return &commonv1pb.InvokeResponse{
|
||||
ContentType: "text/plain; charset=UTF-8",
|
||||
Data: &any.Any{Value: []byte(response)},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Dapr will call this method to get the list of topics the app wants to subscribe to. In this example, we are telling Dapr
|
||||
// To subscribe to a topic named TopicA
|
||||
func (s *server) ListTopicSubscriptions(ctx context.Context, in *empty.Empty) (*pb.ListTopicSubscriptionsResponse, error) {
|
||||
return &pb.ListTopicSubscriptionsResponse{
|
||||
Subscriptions: []*pb.TopicSubscription{
|
||||
{Topic: "TopicA"},
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Dapr will call this method to get the list of bindings the app will get invoked by. In this example, we are telling Dapr
|
||||
// To invoke our app with a binding named storage
|
||||
func (s *server) ListInputBindings(ctx context.Context, in *empty.Empty) (*pb.ListInputBindingsResponse, error) {
|
||||
return &pb.ListInputBindingsResponse{
|
||||
Bindings: []string{"storage"},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// This method gets invoked every time a new event is fired from a registered binding. The message carries the binding name, a payload and optional metadata
|
||||
func (s *server) OnBindingEvent(ctx context.Context, in *pb.BindingEventRequest) (*pb.BindingEventResponse, error) {
|
||||
fmt.Println("Invoked from binding")
|
||||
return &pb.BindingEventResponse{}, nil
|
||||
}
|
||||
|
||||
// This method is fired whenever a message has been published to a topic that has been subscribed. Dapr sends published messages in a CloudEvents 0.3 envelope.
|
||||
func (s *server) OnTopicEvent(ctx context.Context, in *pb.TopicEventRequest) (*pb.TopicEventResponse, error) {
|
||||
fmt.Println("Topic message arrived")
|
||||
return &pb.TopicEventResponse{}, nil
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
3. Create the server
|
||||
|
||||
```go
|
||||
func main() {
|
||||
// create listener
|
||||
lis, err := net.Listen("tcp", ":50001")
|
||||
if err != nil {
|
||||
log.Fatalf("failed to listen: %v", err)
|
||||
}
|
||||
|
||||
// create grpc server
|
||||
s := grpc.NewServer()
|
||||
pb.RegisterAppCallbackServer(s, &server{})
|
||||
|
||||
fmt.Println("Client starting...")
|
||||
|
||||
// and start...
|
||||
if err := s.Serve(lis); err != nil {
|
||||
log.Fatalf("failed to serve: %v", err)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
This creates a gRPC server for your app on port 50001.
|
||||
|
||||
4. Run your app
|
||||
{{< tabs "Self-hosted" "Kubernetes">}}
|
||||
<!--selfhosted-->
|
||||
{{% codetab %}}
|
||||
|
||||
To run locally, use the Dapr CLI:
|
||||
|
||||
```
|
||||
```bash
|
||||
dapr run --app-id goapp --app-port 50001 --app-protocol grpc -- go run main.go
|
||||
```
|
||||
|
||||
On Kubernetes, set the required `dapr.io/app-protocol: "grpc"` and `dapr.io/app-port: "50001"` annotations in your pod spec template as mentioned above.
|
||||
{{% /codetab %}}
|
||||
|
||||
<!--k8s-->
|
||||
{{% codetab %}}
|
||||
|
||||
On Kubernetes, set the required `dapr.io/app-protocol: "grpc"` and `dapr.io/app-port: "50001"` annotations in your pod spec template, as mentioned above.
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{< /tabs >}}
|
||||
|
||||
|
||||
## Other languages
|
||||
|
||||
You can use Dapr with any language supported by Protobuf, and not just with the currently available generated SDKs.
|
||||
Using the [protoc](https://developers.google.com/protocol-buffers/docs/downloads) tool you can generate the Dapr clients for other languages like Ruby, C++, Rust and others.
|
||||
|
||||
Using the [protoc](https://developers.google.com/protocol-buffers/docs/downloads) tool, you can generate the Dapr clients for other languages like Ruby, C++, Rust, and others.
|
||||
|
||||
## Related Topics
|
||||
- [Service invocation building block]({{< ref service-invocation >}})
|
||||
|
|
|
@ -1,14 +1,16 @@
|
|||
---
|
||||
type: docs
|
||||
weight: 5000
|
||||
title: "Use the Dapr CLI in a GitHub Actions workflow"
|
||||
linkTitle: "GitHub Actions"
|
||||
title: "How to: Use the Dapr CLI in a GitHub Actions workflow"
|
||||
linkTitle: "How to: GitHub Actions"
|
||||
description: "Add the Dapr CLI to your GitHub Actions to deploy and manage Dapr in your environments."
|
||||
---
|
||||
|
||||
Dapr can be integrated with GitHub Actions via the [Dapr tool installer](https://github.com/marketplace/actions/dapr-tool-installer) available in the GitHub Marketplace. This installer adds the Dapr CLI to your workflow, allowing you to deploy, manage, and upgrade Dapr across your environments.
|
||||
|
||||
Copy and paste the following installer snippet into your application's YAML file to get started:
|
||||
## Install the Dapr CLI via the Dapr tool installer
|
||||
|
||||
Copy and paste the following installer snippet into your application's YAML file:
|
||||
|
||||
```yaml
|
||||
- name: Dapr tool installer
|
||||
|
@ -21,6 +23,8 @@ Refer to the [`action.yml` metadata file](https://github.com/dapr/setup-dapr/blo
|
|||
|
||||
## Example
|
||||
|
||||
For example, for an application using the [Dapr extension for Azure Kubernetes Service (AKS)]({{< ref azure-kubernetes-service-extension.md >}}), your application YAML will look like the following:
|
||||
|
||||
```yaml
|
||||
- name: Install Dapr
|
||||
uses: dapr/setup-dapr@v1
|
||||
|
@ -28,21 +32,22 @@ Refer to the [`action.yml` metadata file](https://github.com/dapr/setup-dapr/blo
|
|||
version: '{{% dapr-latest-version long="true" %}}'
|
||||
|
||||
- name: Initialize Dapr
|
||||
shell: pwsh
|
||||
shell: bash
|
||||
run: |
|
||||
# Get the credentials to K8s to use with dapr init
|
||||
az aks get-credentials --resource-group ${{ env.RG_NAME }} --name "${{ steps.azure-deployment.outputs.aksName }}"
|
||||
|
||||
|
||||
# Initialize Dapr
|
||||
# Group the Dapr init logs so these lines can be collapsed.
|
||||
Write-Output "::group::Initialize Dapr"
|
||||
echo "::group::Initialize Dapr"
|
||||
dapr init --kubernetes --wait --runtime-version ${{ env.DAPR_VERSION }}
|
||||
Write-Output "::endgroup::"
|
||||
echo "::endgroup::"
|
||||
|
||||
dapr status --kubernetes
|
||||
working-directory: ./twitter-sentiment-processor/demos/demo3
|
||||
working-directory: ./demos/demo3
|
||||
```
|
||||
|
||||
## Next steps
|
||||
|
||||
Learn more about [GitHub Actions](https://docs.github.com/en/actions).
|
||||
- Learn more about [GitHub Actions](https://docs.github.com/en/actions).
|
||||
- Follow the tutorial to learn how [GitHub Actions works with your Dapr container app (Azure Container Apps)](https://learn.microsoft.com/azure/container-apps/dapr-github-actions?tabs=azure-cli)
|
|
@ -1,31 +0,0 @@
|
|||
---
|
||||
type: docs
|
||||
title: "Running Dapr and Open Service Mesh together"
|
||||
linkTitle: "Open Service Mesh"
|
||||
weight: 4000
|
||||
description: "Learn how to run both Open Service Mesh and Dapr on the same Kubernetes cluster"
|
||||
---
|
||||
|
||||
## Overview
|
||||
|
||||
[Open Service Mesh (OSM)](https://openservicemesh.io/) is a lightweight, extensible, cloud native service mesh that allows users to uniformly manage, secure, and get out-of-the-box observability features for highly dynamic microservice environments.
|
||||
|
||||
{{< button text="Learn more" link="https://openservicemesh.io/" >}}
|
||||
|
||||
## Dapr integration
|
||||
|
||||
Users are able to leverage both OSM SMI traffic policies and Dapr capabilities on the same Kubernetes cluster. Visit [this guide](https://docs.openservicemesh.io/docs/integrations/demo_dapr/) to get started.
|
||||
|
||||
{{< button text="Deploy OSM and Dapr" link="https://docs.openservicemesh.io/docs/integrations/demo_dapr/" >}}
|
||||
|
||||
## Example
|
||||
|
||||
Watch the OSM team present the OSM and Dapr integration in the 05/18/2021 community call:
|
||||
|
||||
<div class="embed-responsive embed-responsive-16by9">
|
||||
<iframe width="560" height="315" src="https://www.youtube-nocookie.com/embed/LSYyTL0nS8Y?start=1916" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
|
||||
</div>
|
||||
|
||||
## Additional resources
|
||||
|
||||
- [Dapr and service meshes]({{< ref service-mesh.md >}})
|
|
@ -173,8 +173,7 @@ Below are the supported parameters for VS Code tasks. These parameters are equiv
|
|||
| `appId`| The unique ID of the application. Used for service discovery, state encapsulation and the pub/sub consumer ID | Yes | `"appId": "divideapp"`
|
||||
| `appMaxConcurrency` | Limit the concurrency of your application. A valid value is any number larger than 0 | No | `"appMaxConcurrency": -1`
|
||||
| `appPort` | This parameter tells Dapr which port your application is listening on | Yes | `"appPort": 4000`
|
||||
| `appProtocol` | Tells Dapr which protocol your application is using. Valid options are http and grpc. Default is http | No | `"appProtocol": "http"`
|
||||
| `appSsl` | Sets the URI scheme of the app to https and attempts an SSL connection | No | `"appSsl": true`
|
||||
| `appProtocol` | Tells Dapr which protocol your application is using. Valid options are `http`, `grpc`, `https`, `grpcs`, `h2c`. Default is `http`. | No | `"appProtocol": "http"`
|
||||
| `args` | Sets a list of arguments to pass on to the Dapr app | No | "args": []
|
||||
| `componentsPath` | Path for components directory. If empty, components will not be loaded. | No | `"componentsPath": "./components"`
|
||||
| `config` | Tells Dapr which Configuration CRD to use | No | `"config": "./config"`
|
||||
|
|
|
@ -67,6 +67,14 @@ You can also name each app directory's `.dapr` directory something other than `.
|
|||
|
||||
## Logs
|
||||
|
||||
The run template provides two log destination fields for each application and its associated daprd process:
|
||||
|
||||
1. `appLogDestination` : This field configures the log destination for the application. The possible values are `console`, `file` and `fileAndConsole`. The default value is `fileAndConsole` where application logs are written to both console and to a file by default.
|
||||
|
||||
2. `daprdLogDestination` : This field configures the log destination for the `daprd` process. The possible values are `console`, `file` and `fileAndConsole`. The default value is `file` where the `daprd` logs are written to a file by default.
|
||||
|
||||
#### Log file format
|
||||
|
||||
Logs for application and `daprd` are captured in separate files. These log files are created automatically under `.dapr/logs` directory under each application directory (`appDirPath` in the template). These log file names follow the pattern seen below:
|
||||
|
||||
- `<appID>_app_<timestamp>.log` (file name format for `app` log)
|
||||
|
@ -74,6 +82,7 @@ Logs for application and `daprd` are captured in separate files. These log files
|
|||
|
||||
Even if you've decided to rename your resources folder to something other than `.dapr`, the log files are written only to the `.dapr/logs` folder (created in the application directory).
|
||||
|
||||
|
||||
## Watch the demo
|
||||
|
||||
Watch [this video for an overview on Multi-App Run](https://youtu.be/s1p9MNl4VGo?t=2456):
|
||||
|
|
|
@ -76,12 +76,15 @@ common: # optional section for variables shared across apps
|
|||
apps:
|
||||
- appID: webapp # optional
|
||||
appDirPath: .dapr/webapp/ # REQUIRED
|
||||
resourcesPath: .dapr/resources # (optional) can be default by convention
|
||||
resourcesPath: .dapr/resources # deprecated
|
||||
resourcesPaths: .dapr/resources # comma-separated resource paths. (optional) can be default by convention
|
||||
configFilePath: .dapr/config.yaml # (optional) can be default by convention too, ignore if file is not found.
|
||||
appProtocol: http
|
||||
appPort: 8080
|
||||
appHealthCheckPath: "/healthz"
|
||||
command: ["python3", "app.py"]
|
||||
appLogDestination: file # (optional), can be file, console or fileAndConsole. default is fileAndConsole.
|
||||
daprdLogDestination: file # (optional), can be file, console or fileAndConsole. default is file.
|
||||
- appID: backend # optional
|
||||
appDirPath: .dapr/backend/ # REQUIRED
|
||||
appProtocol: grpc
|
||||
|
@ -110,7 +113,8 @@ The properties for the Multi-App Run template align with the `dapr run` CLI flag
|
|||
|--------------------------|:--------:|--------|---------|
|
||||
| `appDirPath` | Y | Path to your application code | `./webapp/`, `./backend/` |
|
||||
| `appID` | N | Application's app ID. If not provided, will be derived from `appDirPath` | `webapp`, `backend` |
|
||||
| `resourcesPath` | N | Path to your Dapr resources. Can be default by convention; ignore if directory isn't found | `./app/components`, `./webapp/components` |
|
||||
| `resourcesPath` | N | **Deprecated**. Path to your Dapr resources. Can be default by convention| `./app/components`, `./webapp/components` |
|
||||
| `resourcesPaths` | N | Comma separated paths to your Dapr resources. Can be default by convention | `./app/components`, `./webapp/components` |
|
||||
| `configFilePath` | N | Path to your application's configuration file | `./webapp/config.yaml` |
|
||||
| `appProtocol` | N | The protocol Dapr uses to talk to the application. | `http`, `grpc` |
|
||||
| `appPort` | N | The port your application is listening on | `8080`, `3000` |
|
||||
|
@ -137,6 +141,8 @@ The properties for the Multi-App Run template align with the `dapr run` CLI flag
|
|||
| `enableApiLogging` | N | Enable the logging of all API calls from application to Dapr | |
|
||||
| `runtimePath` | N | Dapr runtime install path | |
|
||||
| `env` | N | Map to environment variable; environment variables applied per application will overwrite environment variables shared across applications | `DEBUG`, `DAPR_HOST_ADD` |
|
||||
| `appLogDestination` | N | Log destination for outputting app logs; Its value can be file, console or fileAndConsole. Default is fileAndConsole | `file`, `console`, `fileAndConsole` |
|
||||
| `daprdLogDestination` | N | Log destination for outputting daprd logs; Its value can be file, console or fileAndConsole. Default is file | `file`, `console`, `fileAndConsole` |
|
||||
|
||||
## Next steps
|
||||
|
||||
|
|
|
@ -26,6 +26,7 @@ Hit the ground running with our Dapr quickstarts, complete with code samples aim
|
|||
| [Service Invocation]({{< ref serviceinvocation-quickstart.md >}}) | Synchronous communication between two services using HTTP or gRPC. |
|
||||
| [State Management]({{< ref statemanagement-quickstart.md >}}) | Store a service's data as key/value pairs in supported state stores. |
|
||||
| [Bindings]({{< ref bindings-quickstart.md >}}) | Work with external systems using input bindings to respond to events and output bindings to call operations. |
|
||||
| [Actors]({{< ref actors-quickstart.md >}}) | Run a microservice and a simple console client to demonstrate stateful object patterns in Dapr Actors. |
|
||||
| [Secrets Management]({{< ref secrets-quickstart.md >}}) | Securely fetch secrets. |
|
||||
| [Configuration]({{< ref configuration-quickstart.md >}}) | Get configuration items and subscribe for configuration updates. |
|
||||
| [Resiliency]({{< ref resiliency >}}) | Define and apply fault-tolerance policies to your Dapr API requests. |
|
||||
|
|
|
@ -0,0 +1,257 @@
|
|||
---
|
||||
type: docs
|
||||
title: "Quickstart: Actors"
|
||||
linkTitle: "Actors"
|
||||
weight: 75
|
||||
description: "Get started with Dapr's Actors building block"
|
||||
---
|
||||
|
||||
Let's take a look at Dapr's [Actors building block]({{< ref actors >}}). In this Quickstart, you will run a smart device microservice and a simple console client to demonstrate the stateful object patterns in Dapr Actors.
|
||||
|
||||
Currently, you can experience this actors quickstart using the .NET SDK.
|
||||
|
||||
{{< tabs ".NET" >}}
|
||||
|
||||
<!-- .NET -->
|
||||
{{% codetab %}}
|
||||
|
||||
As a quick overview of the .NET actors quickstart:
|
||||
|
||||
1. Using a `SmartDevice.Service` microservice, you host:
|
||||
- Two `SmartDetectorActor` smoke alarm objects
|
||||
- A `ControllerActor` object that commands and controls the smart devices
|
||||
1. Using a `SmartDevice.Client` console app, the client app interacts with each actor, or the controller, to perform actions in aggregate.
|
||||
1. The `SmartDevice.Interfaces` contains the shared interfaces and data types used by both the service and client apps.
|
||||
|
||||
<img src="/images/actors-quickstart/actors-quickstart.png" width=800 style="padding-bottom:15px;">
|
||||
|
||||
### Pre-requisites
|
||||
|
||||
For this example, you will need:
|
||||
|
||||
- [Dapr CLI and initialized environment](https://docs.dapr.io/getting-started).
|
||||
- [.NET SDK or .NET 6 SDK installed](https://dotnet.microsoft.com/download).
|
||||
<!-- IGNORE_LINKS -->
|
||||
- [Docker Desktop](https://www.docker.com/products/docker-desktop)
|
||||
<!-- END_IGNORE -->
|
||||
|
||||
### Step 1: Set up the environment
|
||||
|
||||
Clone the [sample provided in the Quickstarts repo](https://github.com/dapr/quickstarts/tree/master/actors).
|
||||
|
||||
```bash
|
||||
git clone https://github.com/dapr/quickstarts.git
|
||||
```
|
||||
|
||||
### Step 2: Run the service app
|
||||
|
||||
In a new terminal window, navigate to the `actors/csharp/sdk/service` directory and restore dependencies:
|
||||
|
||||
```bash
|
||||
cd actors/csharp/sdk/service
|
||||
dotnet build
|
||||
```
|
||||
|
||||
Run the `SmartDevice.Service`, which will start the service itself and the Dapr sidecar:
|
||||
|
||||
```bash
|
||||
dapr run --app-id actorservice --app-port 5001 --dapr-http-port 3500 --resources-path ../../../resources -- dotnet run --urls=http://localhost:5001/
|
||||
```
|
||||
|
||||
Expected output:
|
||||
|
||||
```bash
|
||||
== APP == info: Microsoft.AspNetCore.Hosting.Diagnostics[1]
|
||||
== APP == Request starting HTTP/1.1 GET http://127.0.0.1:5001/healthz - -
|
||||
== APP == info: Microsoft.AspNetCore.Routing.EndpointMiddleware[0]
|
||||
== APP == Executing endpoint 'Dapr Actors Health Check'
|
||||
== APP == info: Microsoft.AspNetCore.Routing.EndpointMiddleware[1]
|
||||
== APP == Executed endpoint 'Dapr Actors Health Check'
|
||||
== APP == info: Microsoft.AspNetCore.Hosting.Diagnostics[2]
|
||||
== APP == Request finished HTTP/1.1 GET http://127.0.0.1:5001/healthz - - - 200 - text/plain 5.2599ms
|
||||
```
|
||||
|
||||
### Step 3: Run the client app
|
||||
|
||||
In a new terminal instance, navigate to the `actors/csharp/sdk/client` directory and install the dependencies:
|
||||
|
||||
```bash
|
||||
cd ./actors/csharp/sdk/client
|
||||
dotnet build
|
||||
```
|
||||
|
||||
Run the `SmartDevice.Client` app:
|
||||
|
||||
```bash
|
||||
dapr run --app-id actorclient -- dotnet run
|
||||
```
|
||||
|
||||
Expected output:
|
||||
|
||||
```bash
|
||||
== APP == Startup up...
|
||||
== APP == Calling SetDataAsync on SmokeDetectorActor:1...
|
||||
== APP == Got response: Success
|
||||
== APP == Calling GetDataAsync on SmokeDetectorActor:1...
|
||||
== APP == Device 1 state: Location: First Floor, Status: Ready
|
||||
== APP == Calling SetDataAsync on SmokeDetectorActor:2...
|
||||
== APP == Got response: Success
|
||||
== APP == Calling GetDataAsync on SmokeDetectorActor:2...
|
||||
== APP == Device 2 state: Location: Second Floor, Status: Ready
|
||||
== APP == Registering the IDs of both Devices...
|
||||
== APP == Registered devices: 1, 2
|
||||
== APP == Detecting smoke on Device 1...
|
||||
== APP == Device 1 state: Location: First Floor, Status: Alarm
|
||||
== APP == Device 2 state: Location: Second Floor, Status: Alarm
|
||||
== APP == Sleeping for 16 seconds before checking status again to see reminders fire and clear alarms
|
||||
== APP == Device 1 state: Location: First Floor, Status: Ready
|
||||
== APP == Device 2 state: Location: Second Floor, Status: Ready
|
||||
```
|
||||
|
||||
### (Optional) Step 4: View in Zipkin
|
||||
|
||||
If you have Zipkin configured for Dapr locally on your machine, you can view the actor's interaction with the client in the Zipkin web UI (typically at `http://localhost:9411/zipkin/`).
|
||||
|
||||
<img src="/images/actors-quickstart/actor-client-interaction-zipkin.png" width=800 style="padding-bottom:15px;">
|
||||
|
||||
|
||||
### What happened?
|
||||
|
||||
When you ran the client app, a few things happened:
|
||||
|
||||
1. Two `SmartDetectorActor` actors were [created in the client application](https://github.com/dapr/quickstarts/blob/master/actors/csharp/sdk/client/Program.cs) and initialized with object state with:
|
||||
- `ActorProxy.Create<ISmartDevice>(actorId, actorType)`
|
||||
- `proxySmartDevice.SetDataAsync(data)`
|
||||
|
||||
These objects are re-entrant and hold the state, as shown by `proxySmartDevice.GetDataAsync()`.
|
||||
|
||||
```csharp
|
||||
// Actor Ids and types
|
||||
var deviceId1 = "1";
|
||||
var deviceId2 = "2";
|
||||
var smokeDetectorActorType = "SmokeDetectorActor";
|
||||
var controllerActorType = "ControllerActor";
|
||||
|
||||
Console.WriteLine("Startup up...");
|
||||
|
||||
// An ActorId uniquely identifies the first actor instance for the first device
|
||||
var deviceActorId1 = new ActorId(deviceId1);
|
||||
|
||||
// Create a new instance of the data class that will be stored in the first actor
|
||||
var deviceData1 = new SmartDeviceData(){
|
||||
Location = "First Floor",
|
||||
Status = "Ready",
|
||||
};
|
||||
|
||||
// Create the local proxy by using the same interface that the service implements.
|
||||
var proxySmartDevice1 = ActorProxy.Create<ISmartDevice>(deviceActorId1, smokeDetectorActorType);
|
||||
|
||||
// Now you can use the actor interface to call the actor's methods.
|
||||
Console.WriteLine($"Calling SetDataAsync on {smokeDetectorActorType}:{deviceActorId1}...");
|
||||
var setDataResponse1 = await proxySmartDevice1.SetDataAsync(deviceData1);
|
||||
Console.WriteLine($"Got response: {setDataResponse1}");
|
||||
|
||||
Console.WriteLine($"Calling GetDataAsync on {smokeDetectorActorType}:{deviceActorId1}...");
|
||||
var storedDeviceData1 = await proxySmartDevice1.GetDataAsync();
|
||||
Console.WriteLine($"Device 1 state: {storedDeviceData1}");
|
||||
|
||||
// Create a second actor for second device
|
||||
var deviceActorId2 = new ActorId(deviceId2);
|
||||
|
||||
// Create a new instance of the data class that will be stored in the first actor
|
||||
var deviceData2 = new SmartDeviceData(){
|
||||
Location = "Second Floor",
|
||||
Status = "Ready",
|
||||
};
|
||||
|
||||
// Create the local proxy by using the same interface that the service implements.
|
||||
var proxySmartDevice2 = ActorProxy.Create<ISmartDevice>(deviceActorId2, smokeDetectorActorType);
|
||||
|
||||
// Now you can use the actor interface to call the second actor's methods.
|
||||
Console.WriteLine($"Calling SetDataAsync on {smokeDetectorActorType}:{deviceActorId2}...");
|
||||
var setDataResponse2 = await proxySmartDevice2.SetDataAsync(deviceData2);
|
||||
Console.WriteLine($"Got response: {setDataResponse2}");
|
||||
|
||||
Console.WriteLine($"Calling GetDataAsync on {smokeDetectorActorType}:{deviceActorId2}...");
|
||||
var storedDeviceData2 = await proxySmartDevice2.GetDataAsync();
|
||||
Console.WriteLine($"Device 2 state: {storedDeviceData2}");
|
||||
```
|
||||
|
||||
1. The [`DetectSmokeAsync` method of `SmartDetectorActor 1` is called](https://github.com/dapr/quickstarts/blob/master/actors/csharp/sdk/service/SmokeDetectorActor.cs#L70).
|
||||
|
||||
```csharp
|
||||
public async Task DetectSmokeAsync()
|
||||
{
|
||||
var controllerActorId = new ActorId("controller");
|
||||
var controllerActorType = "ControllerActor";
|
||||
var controllerProxy = ProxyFactory.CreateActorProxy<IController>(controllerActorId, controllerActorType);
|
||||
await controllerProxy.TriggerAlarmForAllDetectors();
|
||||
}
|
||||
```
|
||||
|
||||
1. The [`TriggerAlarmForAllDetectors` method of `ControllerActor` is called](https://github.com/dapr/quickstarts/blob/master/actors/csharp/sdk/service/ControllerActor.cs#L54). The `ControllerActor` internally triggers all alarms when smoke is detected
|
||||
|
||||
```csharp
|
||||
public async Task TriggerAlarmForAllDetectors()
|
||||
{
|
||||
var deviceIds = await ListRegisteredDeviceIdsAsync();
|
||||
foreach (var deviceId in deviceIds)
|
||||
{
|
||||
var actorId = new ActorId(deviceId);
|
||||
var proxySmartDevice = ProxyFactory.CreateActorProxy<ISmartDevice>(actorId, "SmokeDetectorActor");
|
||||
await proxySmartDevice.SoundAlarm();
|
||||
}
|
||||
|
||||
// Register a reminder to refresh and clear alarm state every 15 seconds
|
||||
await this.RegisterReminderAsync("AlarmRefreshReminder", null, TimeSpan.FromSeconds(15), TimeSpan.FromSeconds(15));
|
||||
}
|
||||
```
|
||||
|
||||
The console [prints a message indicating that smoke has been detected](https://github.com/dapr/quickstarts/blob/master/actors/csharp/sdk/client/Program.cs#L65).
|
||||
|
||||
```csharp
|
||||
// Smoke is detected on device 1 that triggers an alarm on all devices.
|
||||
Console.WriteLine($"Detecting smoke on Device 1...");
|
||||
proxySmartDevice1 = ActorProxy.Create<ISmartDevice>(deviceActorId1, smokeDetectorActorType);
|
||||
await proxySmartDevice1.DetectSmokeAsync();
|
||||
```
|
||||
|
||||
1. The [`SoundAlarm` methods](https://github.com/dapr/quickstarts/blob/master/actors/csharp/sdk/service/SmokeDetectorActor.cs#L78) of `SmartDetectorActor 1` and `2` are called.
|
||||
|
||||
```csharp
|
||||
storedDeviceData1 = await proxySmartDevice1.GetDataAsync();
|
||||
Console.WriteLine($"Device 1 state: {storedDeviceData1}");
|
||||
storedDeviceData2 = await proxySmartDevice2.GetDataAsync();
|
||||
Console.WriteLine($"Device 2 state: {storedDeviceData2}");
|
||||
```
|
||||
|
||||
1. The `ControllerActor` also creates a durable reminder to call `ClearAlarm` after 15 seconds using `RegisterReminderAsync`.
|
||||
|
||||
```csharp
|
||||
// Register a reminder to refresh and clear alarm state every 15 seconds
|
||||
await this.RegisterReminderAsync("AlarmRefreshReminder", null, TimeSpan.FromSeconds(15), TimeSpan.FromSeconds(15));
|
||||
```
|
||||
|
||||
For full context of the sample, take a look at the following code:
|
||||
|
||||
- [`SmartDetectorActor.cs`](https://github.com/dapr/quickstarts/blob/master/actors/csharp/sdk/service/SmokeDetectorActor.cs): Implements the smart device actors
|
||||
- [`ControllerActor.cs`](https://github.com/dapr/quickstarts/blob/master/actors/csharp/sdk/service/ControllerActor.cs): Implements the controller actor that manages all devices
|
||||
- [`ISmartDevice`](https://github.com/dapr/quickstarts/blob/master/actors/csharp/sdk/interfaces/ISmartDevice.cs): The method definitions and shared data types for each `SmartDetectorActor`
|
||||
- [`IController`](https://github.com/dapr/quickstarts/blob/master/actors/csharp/sdk/interfaces/IController.cs): The method definitions and shared data types for the `ControllerActor`
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
|
||||
{{< /tabs >}}
|
||||
|
||||
## Tell us what you think!
|
||||
|
||||
We're continuously working to improve our Quickstart examples and value your feedback. Did you find this Quickstart helpful? Do you have suggestions for improvement?
|
||||
|
||||
Join the discussion in our [discord channel](https://discord.com/channels/778680217417809931/953427615916638238).
|
||||
|
||||
## Next steps
|
||||
|
||||
Learn more about [the Actor building block]({{< ref actors >}})
|
||||
|
||||
{{< button text="Explore Dapr tutorials >>" page="getting-started/tutorials/_index.md" >}}
|
|
@ -11,7 +11,7 @@ Let's take a look at Dapr's [Bindings building block]({{< ref bindings >}}). Usi
|
|||
- Trigger your app with events coming in from external systems.
|
||||
- Interface with external systems.
|
||||
|
||||
In this Quickstart, you will schedule a batch script to run every 10 seconds using an input [Cron](https://docs.dapr.io/reference/components-reference/supported-bindings/cron/) binding. The script processes a JSON file and outputs data to a SQL database using the [PostgreSQL](https://docs.dapr.io/reference/components-reference/supported-bindings/postgres) Dapr binding.
|
||||
In this Quickstart, you will schedule a batch script to run every 10 seconds using an input [Cron]({{< ref cron.md >}}) binding. The script processes a JSON file and outputs data to a SQL database using the [PostgreSQL]({{< ref postgresql.md >}}) Dapr binding.
|
||||
|
||||
<img src="/images/bindings-quickstart/bindings-quickstart.png" width=800 style="padding-bottom:15px;">
|
||||
|
||||
|
@ -98,7 +98,7 @@ The code inside the `process_batch` function is executed every 10 seconds (defin
|
|||
def process_batch():
|
||||
```
|
||||
|
||||
The `batch-sdk` service uses the PostgreSQL output binding defined in the [`binding-postgres.yaml`]({{< ref "#componentbinding-postgresyaml-component-file" >}}) component to insert the `OrderId`, `Customer`, and `Price` records into the `orders` table.
|
||||
The `batch-sdk` service uses the PostgreSQL output binding defined in the [`binding-postgresql.yaml`]({{< ref "#componentbinding-postgresyaml-component-file" >}}) component to insert the `OrderId`, `Customer`, and `Price` records into the `orders` table.
|
||||
|
||||
```python
|
||||
with DaprClient() as d:
|
||||
|
@ -140,7 +140,7 @@ In a new terminal, verify the same data has been inserted into the database. Nav
|
|||
cd bindings/db
|
||||
```
|
||||
|
||||
Run the following to start the interactive Postgres CLI:
|
||||
Run the following to start the interactive *psql* CLI:
|
||||
|
||||
```bash
|
||||
docker exec -i -t postgres psql --username postgres -p 5432 -h localhost --no-password
|
||||
|
@ -193,16 +193,16 @@ spec:
|
|||
|
||||
**Note:** The `metadata` section of `binding-cron.yaml` contains a [Cron expression]({{< ref cron.md >}}) that specifies how often the binding is invoked.
|
||||
|
||||
#### `component\binding-postgres.yaml` component file
|
||||
#### `component\binding-postgresql.yaml` component file
|
||||
|
||||
When you execute the `dapr run` command and specify the component path, the Dapr sidecar:
|
||||
|
||||
- Initiates the PostgreSQL [binding building block]({{< ref postgres.md >}})
|
||||
- Connects to PostgreSQL using the settings specified in the `binding-postgres.yaml` file
|
||||
- Initiates the PostgreSQL [binding building block]({{< ref postgresql.md >}})
|
||||
- Connects to PostgreSQL using the settings specified in the `binding-postgresql.yaml` file
|
||||
|
||||
With the `binding-postgres.yaml` component, you can easily swap out the backend database [binding]({{< ref supported-bindings.md >}}) without making code changes.
|
||||
With the `binding-postgresql.yaml` component, you can easily swap out the backend database [binding]({{< ref supported-bindings.md >}}) without making code changes.
|
||||
|
||||
The PostgreSQL `binding-postgres.yaml` file included for this Quickstart contains the following:
|
||||
The PostgreSQL `binding-postgresql.yaml` file included for this Quickstart contains the following:
|
||||
|
||||
```yaml
|
||||
apiVersion: dapr.io/v1alpha1
|
||||
|
@ -211,7 +211,7 @@ metadata:
|
|||
name: sqldb
|
||||
namespace: quickstarts
|
||||
spec:
|
||||
type: bindings.postgres
|
||||
type: bindings.postgresql
|
||||
version: v1
|
||||
metadata:
|
||||
- name: url # Required
|
||||
|
@ -304,7 +304,7 @@ async function start() {
|
|||
}
|
||||
```
|
||||
|
||||
The `batch-sdk` service uses the PostgreSQL output binding defined in the [`binding-postgres.yaml`]({{< ref "##componentsbinding-postgresyaml-component-file" >}}) component to insert the `OrderId`, `Customer`, and `Price` records into the `orders` table.
|
||||
The `batch-sdk` service uses the PostgreSQL output binding defined in the [`binding-postgresql.yaml`]({{< ref "##componentsbinding-postgresyaml-component-file" >}}) component to insert the `OrderId`, `Customer`, and `Price` records into the `orders` table.
|
||||
|
||||
```javascript
|
||||
async function processBatch(){
|
||||
|
@ -395,16 +395,16 @@ spec:
|
|||
|
||||
**Note:** The `metadata` section of `binding-cron.yaml` contains a [Cron expression]({{< ref cron.md >}}) that specifies how often the binding is invoked.
|
||||
|
||||
#### `component\binding-postgres.yaml` component file
|
||||
#### `component\binding-postgresql.yaml` component file
|
||||
|
||||
When you execute the `dapr run` command and specify the component path, the Dapr sidecar:
|
||||
|
||||
- Initiates the PostgreSQL [binding building block]({{< ref postgres.md >}})
|
||||
- Connects to PostgreSQL using the settings specified in the `binding-postgres.yaml` file
|
||||
- Initiates the PostgreSQL [binding building block]({{< ref postgresql.md >}})
|
||||
- Connects to PostgreSQL using the settings specified in the `binding-postgresql.yaml` file
|
||||
|
||||
With the `binding-postgres.yaml` component, you can easily swap out the backend database [binding]({{< ref supported-bindings.md >}}) without making code changes.
|
||||
With the `binding-postgresql.yaml` component, you can easily swap out the backend database [binding]({{< ref supported-bindings.md >}}) without making code changes.
|
||||
|
||||
The PostgreSQL `binding-postgres.yaml` file included for this Quickstart contains the following:
|
||||
The PostgreSQL `binding-postgresql.yaml` file included for this Quickstart contains the following:
|
||||
|
||||
```yaml
|
||||
apiVersion: dapr.io/v1alpha1
|
||||
|
@ -413,7 +413,7 @@ metadata:
|
|||
name: sqldb
|
||||
namespace: quickstarts
|
||||
spec:
|
||||
type: bindings.postgres
|
||||
type: bindings.postgresql
|
||||
version: v1
|
||||
metadata:
|
||||
- name: url # Required
|
||||
|
@ -506,7 +506,7 @@ app.MapPost("/" + cronBindingName, async () => {
|
|||
});
|
||||
```
|
||||
|
||||
The `batch-sdk` service uses the PostgreSQL output binding defined in the [`binding-postgres.yaml`]({{< ref "#componentbinding-postgresyaml-component-file" >}}) component to insert the `OrderId`, `Customer`, and `Price` records into the `orders` table.
|
||||
The `batch-sdk` service uses the PostgreSQL output binding defined in the [`binding-postgresql.yaml`]({{< ref "#componentbinding-postgresyaml-component-file" >}}) component to insert the `OrderId`, `Customer`, and `Price` records into the `orders` table.
|
||||
|
||||
```csharp
|
||||
// ...
|
||||
|
@ -599,16 +599,16 @@ spec:
|
|||
|
||||
**Note:** The `metadata` section of `binding-cron.yaml` contains a [Cron expression]({{< ref cron.md >}}) that specifies how often the binding is invoked.
|
||||
|
||||
#### `component\binding-postgres.yaml` component file
|
||||
#### `component\binding-postgresql.yaml` component file
|
||||
|
||||
When you execute the `dapr run` command and specify the component path, the Dapr sidecar:
|
||||
|
||||
- Initiates the PostgreSQL [binding building block]({{< ref postgres.md >}})
|
||||
- Connects to PostgreSQL using the settings specified in the `binding-postgres.yaml` file
|
||||
- Initiates the PostgreSQL [binding building block]({{< ref postgresql.md >}})
|
||||
- Connects to PostgreSQL using the settings specified in the `binding-postgresql.yaml` file
|
||||
|
||||
With the `binding-postgres.yaml` component, you can easily swap out the backend database [binding]({{< ref supported-bindings.md >}}) without making code changes.
|
||||
With the `binding-postgresql.yaml` component, you can easily swap out the backend database [binding]({{< ref supported-bindings.md >}}) without making code changes.
|
||||
|
||||
The PostgreSQL `binding-postgres.yaml` file included for this Quickstart contains the following:
|
||||
The PostgreSQL `binding-postgresql.yaml` file included for this Quickstart contains the following:
|
||||
|
||||
```yaml
|
||||
apiVersion: dapr.io/v1alpha1
|
||||
|
@ -617,7 +617,7 @@ metadata:
|
|||
name: sqldb
|
||||
namespace: quickstarts
|
||||
spec:
|
||||
type: bindings.postgres
|
||||
type: bindings.postgresql
|
||||
version: v1
|
||||
metadata:
|
||||
- name: url # Required
|
||||
|
@ -711,7 +711,7 @@ The code inside the `process_batch` function is executed every 10 seconds (defin
|
|||
public ResponseEntity<String> processBatch() throws IOException, Exception
|
||||
```
|
||||
|
||||
The `batch-sdk` service uses the PostgreSQL output binding defined in the [`binding-postgres.yaml`]({{< ref "#componentbinding-postgresyaml-component-file" >}}) component to insert the `OrderId`, `Customer`, and `Price` records into the `orders` table.
|
||||
The `batch-sdk` service uses the PostgreSQL output binding defined in the [`binding-postgresql.yaml`]({{< ref "#componentbinding-postgresyaml-component-file" >}}) component to insert the `OrderId`, `Customer`, and `Price` records into the `orders` table.
|
||||
|
||||
```java
|
||||
try (DaprClient client = new DaprClientBuilder().build()) {
|
||||
|
@ -809,16 +809,16 @@ spec:
|
|||
|
||||
**Note:** The `metadata` section of `binding-cron.yaml` contains a [Cron expression]({{< ref cron.md >}}) that specifies how often the binding is invoked.
|
||||
|
||||
#### `component\binding-postgres.yaml` component file
|
||||
#### `component\binding-postgresql.yaml` component file
|
||||
|
||||
When you execute the `dapr run` command and specify the component path, the Dapr sidecar:
|
||||
|
||||
- Initiates the PostgreSQL [binding building block]({{< ref postgres.md >}})
|
||||
- Connects to PostgreSQL using the settings specified in the `binding-postgres.yaml` file
|
||||
- Initiates the PostgreSQL [binding building block]({{< ref postgresql.md >}})
|
||||
- Connects to PostgreSQL using the settings specified in the `binding-postgresql.yaml` file
|
||||
|
||||
With the `binding-postgres.yaml` component, you can easily swap out the backend database [binding]({{< ref supported-bindings.md >}}) without making code changes.
|
||||
With the `binding-postgresql.yaml` component, you can easily swap out the backend database [binding]({{< ref supported-bindings.md >}}) without making code changes.
|
||||
|
||||
The PostgreSQL `binding-postgres.yaml` file included for this Quickstart contains the following:
|
||||
The PostgreSQL `binding-postgresql.yaml` file included for this Quickstart contains the following:
|
||||
|
||||
```yaml
|
||||
apiVersion: dapr.io/v1alpha1
|
||||
|
@ -827,7 +827,7 @@ metadata:
|
|||
name: sqldb
|
||||
namespace: quickstarts
|
||||
spec:
|
||||
type: bindings.postgres
|
||||
type: bindings.postgresql
|
||||
version: v1
|
||||
metadata:
|
||||
- name: url # Required
|
||||
|
@ -918,7 +918,7 @@ The code inside the `process_batch` function is executed every 10 seconds (defin
|
|||
r.HandleFunc("/"+cronBindingName, processBatch).Methods("POST")
|
||||
```
|
||||
|
||||
The `batch-sdk` service uses the PostgreSQL output binding defined in the [`binding-postgres.yaml`]({{< ref "#componentbinding-postgresyaml-component-file" >}}) component to insert the `OrderId`, `Customer`, and `Price` records into the `orders` table.
|
||||
The `batch-sdk` service uses the PostgreSQL output binding defined in the [`binding-postgresql.yaml`]({{< ref "#componentbinding-postgresyaml-component-file" >}}) component to insert the `OrderId`, `Customer`, and `Price` records into the `orders` table.
|
||||
|
||||
```go
|
||||
func sqlOutput(order Order) (err error) {
|
||||
|
@ -1021,16 +1021,16 @@ spec:
|
|||
|
||||
**Note:** The `metadata` section of `binding-cron.yaml` contains a [Cron expression]({{< ref cron.md >}}) that specifies how often the binding is invoked.
|
||||
|
||||
#### `component\binding-postgres.yaml` component file
|
||||
#### `component\binding-postgresql.yaml` component file
|
||||
|
||||
When you execute the `dapr run` command and specify the component path, the Dapr sidecar:
|
||||
|
||||
- Initiates the PostgreSQL [binding building block]({{< ref postgres.md >}})
|
||||
- Connects to PostgreSQL using the settings specified in the `binding-postgres.yaml` file
|
||||
- Initiates the PostgreSQL [binding building block]({{< ref postgresql.md >}})
|
||||
- Connects to PostgreSQL using the settings specified in the `binding-postgresql.yaml` file
|
||||
|
||||
With the `binding-postgres.yaml` component, you can easily swap out the backend database [binding]({{< ref supported-bindings.md >}}) without making code changes.
|
||||
With the `binding-postgresql.yaml` component, you can easily swap out the backend database [binding]({{< ref supported-bindings.md >}}) without making code changes.
|
||||
|
||||
The PostgreSQL `binding-postgres.yaml` file included for this Quickstart contains the following:
|
||||
The PostgreSQL `binding-postgresql.yaml` file included for this Quickstart contains the following:
|
||||
|
||||
```yaml
|
||||
apiVersion: dapr.io/v1alpha1
|
||||
|
@ -1039,7 +1039,7 @@ metadata:
|
|||
name: sqldb
|
||||
namespace: quickstarts
|
||||
spec:
|
||||
type: bindings.postgres
|
||||
type: bindings.postgresql
|
||||
version: v1
|
||||
metadata:
|
||||
- name: url # Required
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
type: docs
|
||||
title: "Quickstart: Configuration"
|
||||
linkTitle: Configuration
|
||||
weight: 76
|
||||
weight: 77
|
||||
description: Get started with Dapr's Configuration building block
|
||||
---
|
||||
|
||||
|
@ -620,6 +620,12 @@ case <-ctx.Done():
|
|||
|
||||
{{< /tabs >}}
|
||||
|
||||
## Demo
|
||||
|
||||
Watch this video [demoing the Configuration API quickstart](https://youtu.be/EcE6IGuX9L8?t=94):
|
||||
|
||||
<iframe width="560" height="315" src="https://www.youtube-nocookie.com/embed/EcE6IGuX9L8?start=94" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" allowfullscreen></iframe>
|
||||
|
||||
## Tell us what you think!
|
||||
|
||||
We're continuously working to improve our Quickstart examples and value your feedback. Did you find this quickstart helpful? Do you have suggestions for improvement?
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
type: docs
|
||||
title: "Quickstart: Secrets Management"
|
||||
linkTitle: "Secrets Management"
|
||||
weight: 75
|
||||
weight: 76
|
||||
description: "Get started with Dapr's Secrets Management building block"
|
||||
---
|
||||
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
type: docs
|
||||
title: "Quickstart: Workflow"
|
||||
linkTitle: Workflow
|
||||
weight: 77
|
||||
weight: 78
|
||||
description: Get started with the Dapr Workflow building block
|
||||
---
|
||||
|
||||
|
@ -151,25 +151,29 @@ string orderId = Guid.NewGuid().ToString()[..8];
|
|||
string itemToPurchase = "Cars";
|
||||
int ammountToPurchase = 10;
|
||||
|
||||
//...
|
||||
// Construct the order
|
||||
OrderPayload orderInfo = new OrderPayload(itemToPurchase, 15000, ammountToPurchase);
|
||||
|
||||
// Start the workflow
|
||||
Console.WriteLine("Starting workflow {0} purchasing {1} {2}", orderId, ammountToPurchase, itemToPurchase);
|
||||
|
||||
await workflowClient.ScheduleNewWorkflowAsync(
|
||||
name: nameof(OrderProcessingWorkflow),
|
||||
await daprClient.StartWorkflowAsync(
|
||||
workflowComponent: DaprWorkflowComponent,
|
||||
workflowName: nameof(OrderProcessingWorkflow),
|
||||
input: orderInfo,
|
||||
instanceId: orderId);
|
||||
|
||||
// Wait for the workflow to start and confirm the input
|
||||
GetWorkflowResponse state = await daprClient.WaitForWorkflowStartAsync(
|
||||
instanceId: orderId,
|
||||
input: orderInfo);
|
||||
workflowComponent: DaprWorkflowComponent);
|
||||
|
||||
//...
|
||||
Console.WriteLine("Your workflow has started. Here is the status of the workflow: {0}", state.RuntimeStatus);
|
||||
|
||||
WorkflowState state = await workflowClient.GetWorkflowStateAsync(
|
||||
// Wait for the workflow to complete
|
||||
state = await daprClient.WaitForWorkflowCompletionAsync(
|
||||
instanceId: orderId,
|
||||
getInputsAndOutputs: true);
|
||||
|
||||
Console.WriteLine("Your workflow has started. Here is the status of the workflow: {0}", state);
|
||||
|
||||
//...
|
||||
workflowComponent: DaprWorkflowComponent);
|
||||
|
||||
Console.WriteLine("Workflow Status: {0}", state.RuntimeStatus);
|
||||
```
|
||||
|
|
|
@ -16,17 +16,7 @@ In this tutorial, you will create a component definition file to interact with t
|
|||
|
||||
## Step 1: Create a JSON secret store
|
||||
|
||||
Dapr supports [many types of secret stores]({{< ref supported-secret-stores >}}), but for this tutorial, create a local JSON file named `mysecrets.json` with the following secret:
|
||||
|
||||
```json
|
||||
{
|
||||
"my-secret" : "I'm Batman"
|
||||
}
|
||||
```
|
||||
|
||||
## Step 2: Create a secret store Dapr component
|
||||
|
||||
1. Create a new directory named `my-components` to hold the new component file:
|
||||
1. Create a new directory named `my-components` to hold the new secret and component file:
|
||||
|
||||
```bash
|
||||
mkdir my-components
|
||||
|
@ -38,6 +28,16 @@ Dapr supports [many types of secret stores]({{< ref supported-secret-stores >}})
|
|||
cd my-components
|
||||
```
|
||||
|
||||
1. Dapr supports [many types of secret stores]({{< ref supported-secret-stores >}}), but for this tutorial, create a local JSON file named `mysecrets.json` with the following secret:
|
||||
|
||||
```json
|
||||
{
|
||||
"my-secret" : "I'm Batman"
|
||||
}
|
||||
```
|
||||
|
||||
## Step 2: Create a secret store Dapr component
|
||||
|
||||
1. Create a new file `localSecretStore.yaml` with the following contents:
|
||||
|
||||
```yaml
|
||||
|
@ -51,13 +51,13 @@ Dapr supports [many types of secret stores]({{< ref supported-secret-stores >}})
|
|||
version: v1
|
||||
metadata:
|
||||
- name: secretsFile
|
||||
value: <PATH TO SECRETS FILE>/mysecrets.json
|
||||
value: ./mysecrets.json
|
||||
- name: nestedSeparator
|
||||
value: ":"
|
||||
```
|
||||
|
||||
In the above file definition:
|
||||
- `type: secretstores.local.file` tells Dapr to use the local file component as a secret store.
|
||||
- `type: secretstores.local.file` tells Dapr to use the local file component as a secret store.
|
||||
- The metadata fields provide component-specific information needed to work with this component. In this case, the secret store JSON path is relative to where you call `dapr run`.
|
||||
|
||||
## Step 3: Run the Dapr sidecar
|
||||
|
@ -65,7 +65,7 @@ In the above file definition:
|
|||
Launch a Dapr sidecar that will listen on port 3500 for a blank application named `myapp`:
|
||||
|
||||
```bash
|
||||
dapr run --app-id myapp --dapr-http-port 3500 --resources-path ./my-components
|
||||
dapr run --app-id myapp --dapr-http-port 3500 --resources-path .
|
||||
```
|
||||
|
||||
{{% alert title="Tip" color="primary" %}}
|
||||
|
@ -104,4 +104,4 @@ Invoke-RestMethod -Uri 'http://localhost:3500/v1.0/secrets/my-secret-store/my-se
|
|||
{"my-secret":"I'm Batman"}
|
||||
```
|
||||
|
||||
{{< button text="Next step: Set up a Pub/sub broker >>" page="pubsub-quickstart" >}}
|
||||
{{< button text="Next step: Set up a Pub/sub broker >>" page="pubsub-quickstart" >}}
|
||||
|
|
|
@ -79,13 +79,22 @@ The following table lists the properties for tracing:
|
|||
| `samplingRate` | string | Set sampling rate for tracing to be enabled or disabled.
|
||||
| `stdout` | bool | True write more verbose information to the traces
|
||||
| `otel.endpointAddress` | string | Set the Open Telemetry (OTEL) server address to send traces to
|
||||
| `otel.isSecure` | bool | Is the connection to the endpoint address encryped
|
||||
| `otel.isSecure` | bool | Is the connection to the endpoint address encrypted
|
||||
| `otel.protocol` | string | Set to `http` or `grpc` protocol
|
||||
| `zipkin.endpointAddress` | string | Set the Zipkin server address to send traces to
|
||||
|
||||
`samplingRate` is used to enable or disable the tracing. To disable the sampling rate ,
|
||||
set `samplingRate : "0"` in the configuration. The valid range of samplingRate is between 0 and 1 inclusive. The sampling rate determines whether a trace span should be sampled or not based on value. `samplingRate : "1"` samples all traces. By default, the sampling rate is (0.0001) or 1 in 10,000 traces.
|
||||
|
||||
The OpenTelemetry (otel) endpoint can also be configured via an environment variables. The presence of the OTEL_EXPORTER_OTLP_ENDPOINT environment variable
|
||||
turns on tracing for the sidecar.
|
||||
|
||||
| Environment Variable | Description |
|
||||
|----------------------|-------------|
|
||||
| `OTEL_EXPORTER_OTLP_ENDPOINT` | Sets the Open Telemetry (OTEL) server address, turns on tracing |
|
||||
| `OTEL_EXPORTER_OTLP_INSECURE` | Sets the connection to the endpoint as unencrypted (true/false) |
|
||||
| `OTEL_EXPORTER_OTLP_PROTOCOL` | Transport protocol (`grpc`, `http/protobuf`, `http/json`) |
|
||||
|
||||
See [Observability distributed tracing]({{< ref "tracing-overview.md" >}}) for more information.
|
||||
|
||||
#### Metrics
|
||||
|
|
|
@ -236,6 +236,12 @@ The injector watchdog is disabled by default when running Dapr in Kubernetes mod
|
|||
|
||||
Refer to the documentation for the [Dapr operator]({{< ref operator >}}) service for more details on the injector watchdog and how to enable it.
|
||||
|
||||
## Configuring seccompProfile for sidecar containers
|
||||
|
||||
By default, the Dapr sidecar Injector injects a sidecar without any `seccompProfile`. However, to have Dapr sidecar container run successfully in a namespace with [Restricted](https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted) profile, the sidecar container needs to have `securityContext.seccompProfile.Type` to not be `nil`.
|
||||
|
||||
Refer to [this]({{< ref "arguments-annotations-overview.md" >}}) documentation to set appropriate `seccompProfile` on sidecar container according to which profile it is running with.
|
||||
|
||||
## Best Practices
|
||||
|
||||
Watch this video for a deep dive into the best practices for running Dapr in production with Kubernetes
|
||||
|
|
|
@ -33,7 +33,7 @@ The following table lists the properties for tracing:
|
|||
| `samplingRate` | string | Set sampling rate for tracing to be enabled or disabled.
|
||||
| `stdout` | bool | True write more verbose information to the traces
|
||||
| `otel.endpointAddress` | string | Set the Open Telemetry (OTEL) server address.
|
||||
| `otel.isSecure` | bool | Is the connection to the endpoint address encryped.
|
||||
| `otel.isSecure` | bool | Is the connection to the endpoint address encrypted.
|
||||
| `otel.protocol` | string | Set to `http` or `grpc` protocol.
|
||||
| `zipkin.endpointAddress` | string | Set the Zipkin server address. If this is used, you do not need to specify the `otel` section.
|
||||
|
||||
|
@ -58,3 +58,14 @@ spec:
|
|||
Dapr uses probabilistic sampling. The sample rate defines the probability a tracing span will be sampled and can have a value between 0 and 1 (inclusive). The default sample rate is 0.0001 (i.e. 1 in 10,000 spans is sampled).
|
||||
|
||||
Changing `samplingRate` to 0 disables tracing altogether.
|
||||
|
||||
## Environment variables
|
||||
|
||||
The OpenTelemetry (otel) endpoint can also be configured via an environment variables. The presence of the OTEL_EXPORTER_OTLP_ENDPOINT environment variable
|
||||
turns on tracing for the sidecar.
|
||||
|
||||
| Environment Variable | Description |
|
||||
|----------------------|-------------|
|
||||
| `OTEL_EXPORTER_OTLP_ENDPOINT` | Sets the Open Telemetry (OTEL) server address, turns on tracing |
|
||||
| `OTEL_EXPORTER_OTLP_INSECURE` | Sets the connection to the endpoint as unencrypted (true/false) |
|
||||
| `OTEL_EXPORTER_OTLP_PROTOCOL` | Transport protocol (`grpc`, `http/protobuf`, `http/json`) |
|
|
@ -17,10 +17,11 @@ For CLI there is no explicit opt-in, just the version that this was first made a
|
|||
| --- | --- | --- | --- | --- |
|
||||
| **App Middleware** | Allow middleware components to be executed when making service-to-service calls | N/A | [App Middleware]({{<ref "middleware.md#app-middleware" >}}) | v1.9 |
|
||||
| **Streaming for HTTP service invocation** | Enables (partial) support for using streams in HTTP service invocation; see below for more details. | `ServiceInvocationStreaming` | [Details]({{< ref "support-preview-features.md#streaming-for-http-service-invocation" >}}) | v1.10 |
|
||||
| **App health checks** | Allows configuring app health checks | `AppHealthCheck` | [App health checks]({{<ref "app-health.md" >}}) | v1.9 |
|
||||
| **Pluggable components** | Allows creating self-hosted gRPC-based components written in any language that supports gRPC. The following component APIs are supported: State stores, Pub/sub, Bindings | N/A | [Pluggable components concept]({{<ref "components-concept#pluggable-components" >}})| v1.9 |
|
||||
| **Multi-App Run** | Configure multiple Dapr applications from a single configuration file and run from a single command | `dapr run -f` | [Multi-App Run]({{< ref multi-app-dapr-run.md >}}) | v1.10 |
|
||||
| **Workflows** | Author workflows as code to automate and orchestrate tasks within your application, like messaging, state management, and failure handling | N/A | [Workflows concept]({{< ref "components-concept#workflows" >}})| v1.10 |
|
||||
| **Service invocation for non-Dapr endpoints** | Allow the invocation of non-Dapr endpoints by Dapr using the [Service invocation API]({{< ref service_invocation_api.md >}}). Read ["How-To: Invoke Non-Dapr Endpoints using HTTP"]({{< ref howto-invoke-non-dapr-endpoints.md >}}) for more information. | N/A | [Service invocation API]({{< ref service_invocation_api.md >}}) | v1.11 |
|
||||
| **Actor State TTL** | Allow actors to save records to state stores with Time To Live (TTL) set to automatically clean up old data. In its current implementation, actor state with TTL may not be reflected correctly by clients, read [Actor State Transactions]({{< ref actors_api.md >}}) for more information. | `ActorStateTTL` | [Actor State Transactions]({{< ref actors_api.md >}}) | v1.11 |
|
||||
|
||||
### Streaming for HTTP service invocation
|
||||
|
||||
|
@ -30,20 +31,20 @@ The table below summarizes the current state of support for streaming in HTTP se
|
|||
|
||||
<img src="/images/service-invocation-simple.webp" width=600 alt="Diagram showing the steps of service invocation described in the table below" />
|
||||
|
||||
| Step | Handles data as a stream | Dapr 1.10 | Dapr 1.10 with<br/>`ServiceInvocationStreaming` |
|
||||
| Step | Handles data as a stream | Dapr 1.11 | Dapr 1.11 with<br/>`ServiceInvocationStreaming` |
|
||||
|:---:|---|:---:|:---:|
|
||||
| 1 | Request: "App A" to "Dapr sidecar A | <span role="img" aria-label="No">❌</span> | <span role="img" aria-label="No">❌</span> |
|
||||
| 2 | Request: "Dapr sidecar A" to "Dapr sidecar B | <span role="img" aria-label="No">❌</span> | <span role="img" aria-label="Yes">✅</span> |
|
||||
| 3 | Request: "Dapr sidecar B" to "App B" | <span role="img" aria-label="Yes">✅</span> | <span role="img" aria-label="Yes">✅</span> |
|
||||
| 4 | Response: "App B" to "Dapr sidecar B" | <span role="img" aria-label="Yes">✅</span> | <span role="img" aria-label="Yes">✅</span> |
|
||||
| 5 | Response: "Dapr sidecar B" to "Dapr sidecar A | <span role="img" aria-label="No">❌</span> | <span role="img" aria-label="Yes">✅</span> |
|
||||
| 6 | Response: "Dapr sidecar A" to "App A | <span role="img" aria-label="No">❌</span> | <span role="img" aria-label="Yes">✅</span> |
|
||||
| 6 | Response: "Dapr sidecar A" to "App A | <span role="img" aria-label="No">❌</span> | <span role="img" aria-label="No">❌</span> |
|
||||
|
||||
Important notes:
|
||||
|
||||
- `ServiceInvocationStreaming` needs to be applied on caller sidecars only.
|
||||
In the example above, streams are used for HTTP service invocation if `ServiceInvocationStreaming` is applied to the configuration of "app A" and its Dapr sidecar, regardless of whether the feature flag is enabled for "app B" and its sidecar.
|
||||
- When `ServiceInvocationStreaming` is enabled, you should make sure that all services your app invokes using Dapr ("app B") are updated to Dapr 1.10, even if `ServiceInvocationStreaming` is not enabled for those sidecars.
|
||||
Invoking an app using Dapr 1.9 or older is still possible, but those calls may fail if you have applied a Dapr Resiliency policy with retries enabled.
|
||||
- When `ServiceInvocationStreaming` is enabled, you should make sure that all services your app invokes using Dapr ("app B") are updated to Dapr 1.10 or higher, even if `ServiceInvocationStreaming` is not enabled for those sidecars.
|
||||
Invoking an app using Dapr 1.9 or older is still possible, but those calls may fail unless you have applied a Dapr Resiliency policy with retries enabled.
|
||||
|
||||
> Full support for streaming for HTTP service invocation will be completed in a future Dapr version.
|
||||
|
|
|
@ -75,6 +75,19 @@ Persists the change to the state for an actor as a multi-item transaction.
|
|||
|
||||
***Note that this operation is dependant on a using state store component that supports multi-item transactions.***
|
||||
|
||||
#### TTL
|
||||
|
||||
With the [`ActorStateTTL` feature enabled]]({{< ref
|
||||
"support-preview-features.md" >}}), actor clients can set the `ttlInSeconds`
|
||||
field in the transaction metadata to have the state expire after that many
|
||||
seconds. If the `ttlInSeconds` field is not set, the state will not expire.
|
||||
|
||||
Keep in mind when building actor applications with this feature enabled;
|
||||
Currently, all actor SDKs will preserve the actor state in their local cache even after the state has expired. This means that the actor state will not be removed from the local cache if the TTL has expired until the actor is restarted or deactivated. This behaviour will be changed in a future release.
|
||||
|
||||
See the Dapr Community Call 80 recording for more details on actor state TTL.
|
||||
<iframe width="560" height="315" src="https://www.youtube-nocookie.com/embed/kVpQYkGemRc?start=28" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" allowfullscreen></iframe>
|
||||
|
||||
#### HTTP Request
|
||||
|
||||
```
|
||||
|
@ -101,6 +114,8 @@ Parameter | Description
|
|||
|
||||
#### Examples
|
||||
|
||||
> Note, the following example uses the `ttlInSeconds` field, which requires the [`ActorStateTTL` feature enabled]]({{< ref "support-preview-features.md" >}}).
|
||||
|
||||
```shell
|
||||
curl -X POST http://localhost:3500/v1.0/actors/stormtrooper/50/state \
|
||||
-H "Content-Type: application/json" \
|
||||
|
@ -109,7 +124,10 @@ curl -X POST http://localhost:3500/v1.0/actors/stormtrooper/50/state \
|
|||
"operation": "upsert",
|
||||
"request": {
|
||||
"key": "key1",
|
||||
"value": "myData"
|
||||
"value": "myData",
|
||||
"metadata": {
|
||||
"ttlInSeconds": "3600"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
|
@ -176,7 +194,7 @@ Creates a persistent reminder for an actor.
|
|||
POST/PUT http://localhost:<daprPort>/v1.0/actors/<actorType>/<actorId>/reminders/<name>
|
||||
```
|
||||
|
||||
#### Request Body
|
||||
#### Reminder request body
|
||||
|
||||
A JSON object with the following fields:
|
||||
|
||||
|
@ -340,7 +358,8 @@ Creates a timer for an actor.
|
|||
POST/PUT http://localhost:<daprPort>/v1.0/actors/<actorType>/<actorId>/timers/<name>
|
||||
```
|
||||
|
||||
Body:
|
||||
#### Timer request body:
|
||||
The format for the timer request body is the same as for [actor reminders]({{< ref "#reminder-request-body" >}}). For example:
|
||||
|
||||
The following specifies a `dueTime` of 3 seconds and a period of 7 seconds.
|
||||
|
||||
|
@ -473,6 +492,16 @@ Parameter | Description
|
|||
`maxStackDepth` | A value in the reentrancy configuration that controls how many reentrant calls be made to the same actor.
|
||||
`entitiesConfig` | Array of entity configurations that allow per actor type settings. Any configuration defined here must have an entity that maps back into the root level entities.
|
||||
|
||||
|
||||
{{% alert title="Note" color="primary" %}}
|
||||
Actor settings in configuration for timeouts and intervals use [time.ParseDuration](https://pkg.go.dev/time#ParseDuration) format. You can use string formats to represent durations. For example:
|
||||
- `1h30m` or `1.5h`: A duration of 1 hour and 30 minutes
|
||||
- `1d12h`: A duration of 1 day and 12 hours
|
||||
- `500ms`: A duration of 500 milliseconds
|
||||
- `-30m`: A negative duration of 30 minutes
|
||||
|
||||
{{% /alert %}}
|
||||
|
||||
```json
|
||||
{
|
||||
"entities":["actorType1", "actorType2"],
|
||||
|
|
|
@ -13,7 +13,7 @@ This endpoint lets you get configuration from a store.
|
|||
### HTTP Request
|
||||
|
||||
```
|
||||
GET http://localhost:<daprPort>/v1.0-alpha1/configuration/<storename>
|
||||
GET http://localhost:<daprPort>/v1.0/configuration/<storename>
|
||||
```
|
||||
|
||||
#### URL Parameters
|
||||
|
@ -29,13 +29,13 @@ If no query parameters are provided, all configuration items are returned.
|
|||
To specify the keys of the configuration items to get, use one or more `key` query parameters. For example:
|
||||
|
||||
```
|
||||
GET http://localhost:<daprPort>/v1.0-alpha1/configuration/mystore?key=config1&key=config2
|
||||
GET http://localhost:<daprPort>/v1.0/configuration/mystore?key=config1&key=config2
|
||||
```
|
||||
|
||||
To retrieve all configuration items:
|
||||
|
||||
```
|
||||
GET http://localhost:<daprPort>/v1.0-alpha1/configuration/mystore
|
||||
GET http://localhost:<daprPort>/v1.0/configuration/mystore
|
||||
```
|
||||
|
||||
#### Request Body
|
||||
|
@ -59,7 +59,7 @@ JSON-encoded value of key/value pairs for each configuration item.
|
|||
### Example
|
||||
|
||||
```shell
|
||||
curl -X GET 'http://localhost:3500/v1.0-alpha1/configuration/mystore?key=myConfigKey'
|
||||
curl -X GET 'http://localhost:3500/v1.0/configuration/mystore?key=myConfigKey'
|
||||
```
|
||||
|
||||
> The above command returns the following JSON:
|
||||
|
@ -75,7 +75,7 @@ This endpoint lets you subscribe to configuration changes. Notifications happen
|
|||
### HTTP Request
|
||||
|
||||
```
|
||||
GET http://localhost:<daprPort>/v1.0-alpha1/configuration/<storename>/subscribe
|
||||
GET http://localhost:<daprPort>/v1.0/configuration/<storename>/subscribe
|
||||
```
|
||||
|
||||
#### URL Parameters
|
||||
|
@ -91,13 +91,13 @@ If no query parameters are provided, all configuration items are subscribed to.
|
|||
To specify the keys of the configuration items to subscribe to, use one or more `key` query parameters. For example:
|
||||
|
||||
```
|
||||
GET http://localhost:<daprPort>/v1.0-alpha1/configuration/mystore/subscribe?key=config1&key=config2
|
||||
GET http://localhost:<daprPort>/v1.0/configuration/mystore/subscribe?key=config1&key=config2
|
||||
```
|
||||
|
||||
To subscribe to all changes:
|
||||
|
||||
```
|
||||
GET http://localhost:<daprPort>/v1.0-alpha1/configuration/mystore/subscribe
|
||||
GET http://localhost:<daprPort>/v1.0/configuration/mystore/subscribe
|
||||
```
|
||||
|
||||
#### Request Body
|
||||
|
@ -121,7 +121,7 @@ JSON-encoded value
|
|||
### Example
|
||||
|
||||
```shell
|
||||
curl -X GET 'http://localhost:3500/v1.0-alpha1/configuration/mystore/subscribe?key=myConfigKey'
|
||||
curl -X GET 'http://localhost:3500/v1.0/configuration/mystore/subscribe?key=myConfigKey'
|
||||
```
|
||||
|
||||
> The above command returns the following JSON:
|
||||
|
@ -141,7 +141,7 @@ This endpoint lets you unsubscribe to configuration changes.
|
|||
### HTTP Request
|
||||
|
||||
```
|
||||
GET http://localhost:<daprPort>/v1.0-alpha1/configuration/<storename>/<subscription-id>/unsubscribe
|
||||
GET http://localhost:<daprPort>/v1.0/configuration/<storename>/<subscription-id>/unsubscribe
|
||||
```
|
||||
|
||||
#### URL Parameters
|
||||
|
@ -181,7 +181,7 @@ Code | Description
|
|||
### Example
|
||||
|
||||
```shell
|
||||
curl -X GET 'http://localhost:3500/v1.0-alpha1/configuration/mystore/bf3aa454-312d-403c-af95-6dec65058fa2/unsubscribe'
|
||||
curl -X GET 'http://localhost:3500/v1.0/configuration/mystore/bf3aa454-312d-403c-af95-6dec65058fa2/unsubscribe'
|
||||
```
|
||||
|
||||
## Optional application (user code) routes
|
||||
|
|
|
@ -6,7 +6,7 @@ description: "Detailed documentation on the Metadata API"
|
|||
weight: 1100
|
||||
---
|
||||
|
||||
Dapr has a metadata API that returns information about the sidecar allowing runtime discoverability. The metadata endpoint returns a list of the components loaded, the activated actors (if present) and attributes with information attached.
|
||||
Dapr has a metadata API that returns information about the sidecar allowing runtime discoverability. The metadata endpoint returns a list of the resources (components and HttpEndpoints loaded), the activated actors (if present), and attributes with information attached.
|
||||
|
||||
## Components
|
||||
Each loaded component provides its name, type and version and also information about supported features in the form of component capabilities.
|
||||
|
@ -17,6 +17,9 @@ Component type | Capabilities
|
|||
State Store | ETAG, TRANSACTION, ACTOR, QUERY_API
|
||||
Binding | INPUT_BINDING, OUTPUT_BINDING
|
||||
|
||||
## HTTPEndpoints
|
||||
Each loaded `HttpEndpoint` provides a name to easily identify the Dapr resource associated with the runtime.
|
||||
|
||||
## Attributes
|
||||
|
||||
The metadata API allows you to store additional attribute information in the format of key-value pairs. These are ephemeral in-memory and are not persisted if a sidecar is reloaded. This information should be added at the time of a sidecar creation, for example, after the application has started.
|
||||
|
|
|
@ -6,17 +6,29 @@ description: "Detailed documentation on the service invocation API"
|
|||
weight: 100
|
||||
---
|
||||
|
||||
Dapr provides users with the ability to call other applications that have unique ids.
|
||||
This functionality allows apps to interact with one another via named identifiers and puts the burden of service discovery on the Dapr runtime.
|
||||
Dapr provides users with the ability to call other applications that are using Dapr with a unique named identifier (appId), or HTTP endpoints that are not using Dapr.
|
||||
This allows applications to interact with one another via named identifiers and puts the burden of service discovery on the Dapr runtime.
|
||||
|
||||
## Invoke a method on a remote dapr app
|
||||
## Invoke a method on a remote Dapr app
|
||||
|
||||
This endpoint lets you invoke a method in another Dapr enabled app.
|
||||
|
||||
### HTTP Request
|
||||
|
||||
```
|
||||
PATCH/POST/GET/PUT/DELETE http://localhost:<daprPort>/v1.0/invoke/<appId>/method/<method-name>
|
||||
PATCH/POST/GET/PUT/DELETE http://localhost:<daprPort>/v1.0/invoke/<appID>/method/<method-name>
|
||||
```
|
||||
|
||||
## Invoke a method on a non-Dapr endpoint
|
||||
|
||||
This endpoint lets you invoke a method on a non-Dapr endpoint using an `HTTPEndpoint` resource name, or a Fully Qualified Domain Name (FQDN) URL.
|
||||
|
||||
### HTTP Request
|
||||
|
||||
```
|
||||
PATCH/POST/GET/PUT/DELETE http://localhost:<daprPort>/v1.0/invoke/<HTTPEndpoint name>/method/<method-name>
|
||||
|
||||
PATCH/POST/GET/PUT/DELETE http://localhost:<daprPort>/v1.0/invoke/<FQDN URL>/method/<method-name>
|
||||
```
|
||||
|
||||
### HTTP Response codes
|
||||
|
@ -38,7 +50,9 @@ XXX | Upstream status returned
|
|||
Parameter | Description
|
||||
--------- | -----------
|
||||
daprPort | the Dapr port
|
||||
appId | the App ID associated with the remote app
|
||||
appID | the App ID associated with the remote app
|
||||
HTTPEndpoint name | the HTTPEndpoint resource associated with the external endpoint
|
||||
FQDN URL | Fully Qualified Domain Name URL to invoke on the external endpoint
|
||||
method-name | the name of the method or url to invoke on the remote app
|
||||
|
||||
> Note, all URL parameters are case-sensitive.
|
||||
|
@ -65,9 +79,9 @@ Within the body of the request place the data you want to send to the service:
|
|||
|
||||
### Request received by invoked service
|
||||
|
||||
Once your service code invokes a method in another Dapr enabled app, Dapr will send the request, along with the headers and body, to the app on the `<method-name>` endpoint.
|
||||
Once your service code invokes a method in another Dapr enabled app or non-Dapr endpoint, Dapr sends the request, along with the headers and body, on the `<method-name>` endpoint.
|
||||
|
||||
The Dapr app being invoked will need to be listening for and responding to requests on that endpoint.
|
||||
The Dapr app or non-Dapr endpoint being invoked will need to be listening for and responding to requests on that endpoint.
|
||||
|
||||
### Cross namespace invocation
|
||||
|
||||
|
@ -120,5 +134,19 @@ In case you are invoking `mathService` on a different namespace, you can use the
|
|||
|
||||
In this URL, `testing` is the namespace that `mathService` is running in.
|
||||
|
||||
#### Non-Dapr Endpoint Example
|
||||
|
||||
If the `mathService` service was a non-Dapr application, then it could be invoked using service invocation via an `HTTPEndpoint`, as well as a Fully Qualified Domain Name (FQDN) URL.
|
||||
|
||||
```shell
|
||||
curl http://localhost:3500/v1.0/invoke/mathHTTPEndpoint/method/add \
|
||||
-H "Content-Type: application/json"
|
||||
-d '{ "arg1": 10, "arg2": 23}'
|
||||
|
||||
curl http://localhost:3500/v1.0/invoke/http://mathServiceURL.com/method/add \
|
||||
-H "Content-Type: application/json"
|
||||
-d '{ "arg1": 10, "arg2": 23}'
|
||||
```
|
||||
|
||||
## Next Steps
|
||||
- [How-To: Invoke and discover services]({{< ref howto-invoke-discover-services.md >}})
|
||||
|
|
|
@ -10,29 +10,25 @@ Dapr provides users with the ability to interact with workflows and comes with a
|
|||
|
||||
## Start workflow request
|
||||
|
||||
Start a workflow instance with the given name and instance ID.
|
||||
Start a workflow instance with the given name and optionally, an instance ID.
|
||||
|
||||
```bash
|
||||
POST http://localhost:3500/v1.0-alpha1/workflows/<workflowComponentName>/<workflowName>/<instanceId>/start
|
||||
```http
|
||||
POST http://localhost:3500/v1.0-alpha1/workflows/<workflowComponentName>/<workflowName>/start[?instanceId=<instanceId>]
|
||||
```
|
||||
|
||||
Note that workflow instance IDs can only contain alphanumeric characters, underscores, and dashes.
|
||||
|
||||
### URL parameters
|
||||
|
||||
Parameter | Description
|
||||
--------- | -----------
|
||||
`workflowComponentName` | Current default is `dapr` for Dapr Workflows
|
||||
`workflowComponentName` | Use `dapr` for Dapr Workflows
|
||||
`workflowName` | Identify the workflow type
|
||||
`instanceId` | Unique value created for each run of a specific workflow
|
||||
`instanceId` | (Optional) Unique value created for each run of a specific workflow
|
||||
|
||||
### Request content
|
||||
|
||||
In the request you can pass along relevant input information that will be passed to the workflow:
|
||||
|
||||
```json
|
||||
{
|
||||
"input": // argument(s) to pass to the workflow which can be any valid JSON data type (such as objects, strings, numbers, arrays, etc.)
|
||||
}
|
||||
```
|
||||
Any request content will be passed to the workflow as input. The Dapr API passes the content as-is without attempting to interpret it.
|
||||
|
||||
### HTTP response codes
|
||||
|
||||
|
@ -48,9 +44,7 @@ The API call will provide a response similar to this:
|
|||
|
||||
```json
|
||||
{
|
||||
"WFInfo": {
|
||||
"instance_id": "SampleWorkflow"
|
||||
}
|
||||
"instanceID": "12345678"
|
||||
}
|
||||
```
|
||||
|
||||
|
@ -58,16 +52,15 @@ The API call will provide a response similar to this:
|
|||
|
||||
Terminate a running workflow instance with the given name and instance ID.
|
||||
|
||||
```bash
|
||||
POST http://localhost:3500/v1.0-alpha1/workflows/<workflowComponentName>/<instanceId>/terminate
|
||||
```http
|
||||
POST http://localhost:3500/v1.0-alpha1/workflows/<instanceId>/terminate
|
||||
```
|
||||
|
||||
### URL parameters
|
||||
|
||||
Parameter | Description
|
||||
--------- | -----------
|
||||
`workflowComponentName` | Current default is `dapr` for Dapr Workflows
|
||||
`workflowName` | Identify the workflow type
|
||||
`workflowComponentName` | Use `dapr` for Dapr Workflows
|
||||
`instanceId` | Unique value created for each run of a specific workflow
|
||||
|
||||
### HTTP response codes
|
||||
|
@ -80,65 +73,166 @@ Code | Description
|
|||
|
||||
### Response content
|
||||
|
||||
The API call will provide a response similar to this:
|
||||
This API does not return any content.
|
||||
|
||||
```bash
|
||||
HTTP/1.1 202 Accepted
|
||||
Server: fasthttp
|
||||
Date: Thu, 12 Jan 2023 21:31:16 GMT
|
||||
Traceparent: 00-e3dedffedbeb9efbde9fbed3f8e2d8-5f38960d43d24e98-01
|
||||
Connection: close
|
||||
## Raise Event request
|
||||
|
||||
For workflow components that support subscribing to external events, such as the Dapr Workflow engine, you can use the following "raise event" API to deliver a named event to a specific workflow instance.
|
||||
|
||||
```http
|
||||
POST http://localhost:3500/v1.0-alpha1/workflows/<workflowComponentName>/<instanceID>/raiseEvent/<eventName>
|
||||
```
|
||||
|
||||
### Get workflow request
|
||||
{{% alert title="Note" color="primary" %}}
|
||||
The exact mechanism for subscribing to an event depends on the workflow component that you're using. Dapr Workflow has one way of subscribing to external events but other workflow components might have different ways.
|
||||
|
||||
{{% /alert %}}
|
||||
|
||||
### URL parameters
|
||||
|
||||
Parameter | Description
|
||||
--------- | -----------
|
||||
`workflowComponentName` | Use `dapr` for Dapr Workflows
|
||||
`instanceId` | Unique value created for each run of a specific workflow
|
||||
`eventName` | The name of the event to raise
|
||||
|
||||
### HTTP response codes
|
||||
|
||||
Code | Description
|
||||
---- | -----------
|
||||
`202` | Accepted
|
||||
`400` | Request was malformed
|
||||
`500` | Request formatted correctly, error in dapr code or underlying component
|
||||
|
||||
### Response content
|
||||
|
||||
None.
|
||||
|
||||
## Pause workflow request
|
||||
|
||||
Pause a running workflow instance.
|
||||
|
||||
```http
|
||||
POST http://localhost:3500/v1.0-alpha1/workflows/<workflowComponentName>/<instanceId>/pause
|
||||
```
|
||||
|
||||
### URL parameters
|
||||
|
||||
Parameter | Description
|
||||
--------- | -----------
|
||||
`workflowComponentName` | Use `dapr` for Dapr Workflows
|
||||
`instanceId` | Unique value created for each run of a specific workflow
|
||||
|
||||
### HTTP response codes
|
||||
|
||||
Code | Description
|
||||
---- | -----------
|
||||
`202` | Accepted
|
||||
`400` | Request was malformed
|
||||
`500` | Error in Dapr code or underlying component
|
||||
|
||||
### Response content
|
||||
|
||||
None.
|
||||
|
||||
## Resume workflow request
|
||||
|
||||
Resume a paused workflow instance.
|
||||
|
||||
```http
|
||||
POST http://localhost:3500/v1.0-alpha1/workflows/<workflowComponentName>/<instanceId>/resume
|
||||
```
|
||||
|
||||
### URL parameters
|
||||
|
||||
Parameter | Description
|
||||
--------- | -----------
|
||||
`workflowComponentName` | Use `dapr` for Dapr Workflows
|
||||
`instanceId` | Unique value created for each run of a specific workflow
|
||||
|
||||
### HTTP response codes
|
||||
|
||||
Code | Description
|
||||
---- | -----------
|
||||
`202` | Accepted
|
||||
`400` | Request was malformed
|
||||
`500` | Error in Dapr code or underlying component
|
||||
|
||||
### Response content
|
||||
|
||||
None.
|
||||
|
||||
## Purge workflow request
|
||||
|
||||
Purge the workflow state from your state store with the workflow's instance ID.
|
||||
|
||||
```http
|
||||
POST http://localhost:3500/v1.0-alpha1/workflows/<workflowComponentName>/<instanceId>/purge
|
||||
```
|
||||
|
||||
### URL parameters
|
||||
|
||||
Parameter | Description
|
||||
--------- | -----------
|
||||
`workflowComponentName` | Use `dapr` for Dapr Workflows
|
||||
`instanceId` | Unique value created for each run of a specific workflow
|
||||
|
||||
### HTTP response codes
|
||||
|
||||
Code | Description
|
||||
---- | -----------
|
||||
`202` | Accepted
|
||||
`400` | Request was malformed
|
||||
`500` | Error in Dapr code or underlying component
|
||||
|
||||
### Response content
|
||||
|
||||
None.
|
||||
|
||||
## Get workflow request
|
||||
|
||||
Get information about a given workflow instance.
|
||||
|
||||
```bash
|
||||
GET http://localhost:3500/v1.0-alpha1/workflows/<workflowComponentName>/<workflowName>/<instanceId>
|
||||
```http
|
||||
GET http://localhost:3500/v1.0-alpha1/workflows/<workflowComponentName>/<instanceId>
|
||||
```
|
||||
|
||||
### URL parameters
|
||||
|
||||
Parameter | Description
|
||||
--------- | -----------
|
||||
`workflowComponentName` | Current default is `dapr` for Dapr Workflows
|
||||
`workflowName` | Identify the workflow type
|
||||
`workflowComponentName` | Use `dapr` for Dapr Workflows
|
||||
`instanceId` | Unique value created for each run of a specific workflow
|
||||
|
||||
### HTTP response codes
|
||||
|
||||
Code | Description
|
||||
---- | -----------
|
||||
`202` | Accepted
|
||||
`200` | OK
|
||||
`400` | Request was malformed
|
||||
`500` | Request formatted correctly, error in dapr code or underlying component
|
||||
|
||||
### Response content
|
||||
|
||||
The API call will provide a response similar to this:
|
||||
|
||||
```bash
|
||||
HTTP/1.1 202 Accepted
|
||||
Server: fasthttp
|
||||
Date: Thu, 12 Jan 2023 21:31:16 GMT
|
||||
Content-Type: application/json
|
||||
Content-Length: 139
|
||||
Traceparent: 00-e3dedffedbeb9efbde9fbed3f8e2d8-5f38960d43d24e98-01
|
||||
Connection: close
|
||||
The API call will provide a JSON response similar to this:
|
||||
|
||||
```json
|
||||
{
|
||||
"WFInfo": {
|
||||
"instance_id": "SampleWorkflow"
|
||||
},
|
||||
"start_time": "2023-01-12T21:31:13Z",
|
||||
"metadata": {
|
||||
"status": "Running",
|
||||
"task_queue": "WorkflowSampleQueue"
|
||||
}
|
||||
"createdAt": "2023-01-12T21:31:13Z",
|
||||
"instanceID": "12345678",
|
||||
"lastUpdatedAt": "2023-01-12T21:31:13Z",
|
||||
"properties": {
|
||||
"property1": "value1",
|
||||
"property2": "value2",
|
||||
},
|
||||
"runtimeStatus": "RUNNING",
|
||||
}
|
||||
```
|
||||
|
||||
Parameter | Description
|
||||
--------- | -----------
|
||||
`runtimeStatus` | The status of the workflow instance. Values include: `RUNNING`, `TERMINATED`, `PAUSED`
|
||||
|
||||
## Component format
|
||||
|
||||
A Dapr `workflow.yaml` component file has the following structure:
|
||||
|
|
|
@ -8,14 +8,13 @@ aliases:
|
|||
- "/operations/hosting/kubernetes/kubernetes-annotations/"
|
||||
---
|
||||
|
||||
This table is meant to help users understand the equivalent options for running Dapr sidecars in different contexts–via the [CLI]({{< ref cli-overview.md >}}) directly, via daprd, or on [Kubernetes]({{< ref kubernetes-overview.md >}}) via annotations.
|
||||
This table is meant to help users understand the equivalent options for running Dapr sidecars in different contexts: via the [CLI]({{< ref cli-overview.md >}}) directly, via daprd, or on [Kubernetes]({{< ref kubernetes-overview.md >}}) via annotations.
|
||||
|
||||
| daprd | Dapr CLI | CLI shorthand | Kubernetes annotations | Description|
|
||||
|----- | ------- | -----------| ----------| ------------ |
|
||||
| `--allowed-origins` | not supported | | not supported | Allowed HTTP origins (default "*") |
|
||||
| `--app-id` | `--app-id` | `-i` | `dapr.io/app-id` | The unique ID of the application. Used for service discovery, state encapsulation and the pub/sub consumer ID |
|
||||
| `--app-port` | `--app-port` | `-p` | `dapr.io/app-port` | This parameter tells Dapr which port your application is listening on |
|
||||
| `--app-ssl` | `--app-ssl` | | `dapr.io/app-ssl` | Sets the URI scheme of the app to https and attempts an SSL connection |
|
||||
| `--components-path` | `--components-path` | `-d` | not supported | **Deprecated** in favor of `--resources-path` |
|
||||
| `--resources-path` | `--resources-path` | `-d` | not supported | Path for components directory. If empty, components will not be loaded. |
|
||||
| `--config` | `--config` | `-c` | `dapr.io/config` | Tells Dapr which Configuration CRD to use |
|
||||
|
@ -37,8 +36,8 @@ This table is meant to help users understand the equivalent options for running
|
|||
| `--metrics-port` | `--metrics-port` | | `dapr.io/metrics-port` | Sets the port for the sidecar metrics server. Default is `9090` |
|
||||
| `--mode` | not supported | | not supported | Runtime mode for Dapr (default "standalone") |
|
||||
| `--placement-host-address` | `--placement-host-address` | | `dapr.io/placement-host-address` | Comma separated list of addresses for Dapr Actor Placement servers. When no annotation is set, the default value is set by the Sidecar Injector. When the annotation is set and the value is empty, the sidecar does not connect to Placement server. This can be used when there are no actors running in the sidecar. When the annotation is set and the value is not empty, the sidecar connects to the configured address. For example: `127.0.0.1:50057,127.0.0.1:50058` |
|
||||
| `--profiling-port` | `--profiling-port` | | not supported | The port for the profile server (default "7777") |
|
||||
| `--app-protocol` | `--app-protocol` | `-P` | `dapr.io/app-protocol` | Tells Dapr which protocol your application is using. Valid options are `http` and `grpc`. Default is `http` |
|
||||
| `--profiling-port` | `--profiling-port` | | not supported | The port for the profile server (default `7777`) |
|
||||
| `--app-protocol` | `--app-protocol` | `-P` | `dapr.io/app-protocol` | Configures the protocol Dapr uses to communicate with your app. Valid options are `http`, `grpc`, `https` (HTTP with TLS), `grpcs` (gRPC with TLS), `h2c` (HTTP/2 Cleartext). Note that Dapr does not validate TLS certificates presented by the app. Default is `http` |
|
||||
| `--enable-app-health-check` | `--enable-app-health-check` | | `dapr.io/enable-app-health-check` | Boolean that enables the health checks. Default is `false`. |
|
||||
| `--app-health-check-path` | `--app-health-check-path` | | `dapr.io/app-health-check-path` | Path that Dapr invokes for health probes when the app channel is HTTP (this value is ignored if the app channel is using gRPC). Requires app health checks to be enabled. Default is `/health` |
|
||||
| `--app-health-probe-interval` | `--app-health-probe-interval` | | `dapr.io/app-health-probe-interval` | Number of *seconds* between each health probe. Requires app health checks to be enabled. Default is `5` |
|
||||
|
@ -66,3 +65,4 @@ This table is meant to help users understand the equivalent options for running
|
|||
| not supported | not supported | | `dapr.io/volume-mounts` | List of pod volumes to be mounted to the sidecar container in read-only mode. Strings consisting of `volume:path` pairs separated by a comma. Example, `"volume-1:/tmp/mount1,volume-2:/home/root/mount2"`. |
|
||||
| not supported | not supported | | `dapr.io/volume-mounts-rw` | List of pod volumes to be mounted to the sidecar container in read-write mode. Strings consisting of `volume:path` pairs separated by a comma. Example, `"volume-1:/tmp/mount1,volume-2:/home/root/mount2"`. |
|
||||
| `--disable-builtin-k8s-secret-store` | not supported | | `dapr.io/disable-builtin-k8s-secret-store` | Disables BuiltIn Kubernetes secret store. Default value is false. See [Kubernetes secret store component]({{<ref "kubernetes-secret-store.md">}}) for details. |
|
||||
| not supported | not supported | | `dapr.io/sidecar-seccomp-profile-type` | Set the sidecar container's `securityContext.seccompProfile.type` to `Unconfined`, `RuntimeDefault`, or `Localhost`. By default, this annotation is not set on the Dapr sidecar, hence the field is omitted from sidecar container. |
|
||||
|
|
|
@ -28,8 +28,7 @@ dapr annotate [flags] CONFIG-FILE
|
|||
| `--app-id, -a` | | | The app id to annotate |
|
||||
| `--app-max-concurrency` | | `-1` | The maximum number of concurrent requests to allow |
|
||||
| `--app-port, -p` | | `-1` | The port to expose the app on |
|
||||
| `--app-protocol` | | | The protocol to use for the app |
|
||||
| `--app-ssl` | | `false` | Enable SSL for the app |
|
||||
| `--app-protocol` | | | The protocol to use for the app: `http` (default), `grpc`, `https`, `grpcs`, `h2c` |
|
||||
| `--app-token-secret` | | | The secret to use for the app token |
|
||||
| `--config, -c` | | | The config file to annotate |
|
||||
| `--cpu-limit` | | | The CPU limit to set for the sidecar. See valid values [here](https://kubernetes.io/docs/tasks/administer-cluster/manage-resources/quota-memory-cpu-namespace/). |
|
||||
|
|
|
@ -26,8 +26,7 @@ dapr run [flags] [command]
|
|||
| `--app-id`, `-a` | `APP_ID` | | The id for your application, used for service discovery. Cannot contain dots. |
|
||||
| `--app-max-concurrency` | | `unlimited` | The concurrency level of the application; default is unlimited |
|
||||
| `--app-port`, `-p` | `APP_PORT` | | The port your application is listening on |
|
||||
| `--app-protocol`, `-P` | | `http` | The protocol Dapr uses to talk to the application. Valid values are: `http` or `grpc` |
|
||||
| `--app-ssl` | | `false` | Enable https when Dapr invokes the application |
|
||||
| `--app-protocol`, `-P` | | `http` | The protocol Dapr uses to talk to the application. Valid values are: `http`, `grpc`, `https` (HTTP with TLS), `grpcs` (gRPC with TLS), `h2c` (HTTP/2 Cleartext) |
|
||||
| `--resources-path`, `-d` | | Linux/Mac: `$HOME/.dapr/components` <br/>Windows: `%USERPROFILE%\.dapr\components` | The path for components directory |
|
||||
| `--runtime-path` | | | Dapr runtime install path |
|
||||
| `--config`, `-c` | | Linux/Mac: `$HOME/.dapr/config.yaml` <br/>Windows: `%USERPROFILE%\.dapr\config.yaml` | Dapr configuration file |
|
||||
|
|
|
@ -39,6 +39,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr
|
|||
|--------------------|:--------:|------------|-----|---------|
|
||||
| endpoint | Y | Output | GraphQL endpoint string See [here](#url-format) for more details | `"http://localhost:4000/graphql/graphql"` |
|
||||
| header:[HEADERKEY] | N | Output | GraphQL header. Specify the header key in the `name`, and the header value in the `value`. | `"no-cache"` (see above) |
|
||||
| variable:[VARIABLEKEY] | N | Output | GraphQL query variable. Specify the variable name in the `name`, and the variable value in the `value`. | `"123"` (see below) |
|
||||
|
||||
### Endpoint and Header format
|
||||
|
||||
|
@ -65,6 +66,18 @@ Metadata: map[string]string{ "query": `query { users { name } }`},
|
|||
}
|
||||
```
|
||||
|
||||
To use a `query` that requires [query variables](https://graphql.org/learn/queries/#variables), add a key-value pair to the `metadata` map, wherein every key corresponding to a query variable is the variable name prefixed with `variable:`
|
||||
|
||||
```golang
|
||||
in := &dapr.InvokeBindingRequest{
|
||||
Name: "example.bindings.graphql",
|
||||
Operation: "query",
|
||||
Metadata: map[string]string{
|
||||
"query": `query HeroNameAndFriends($episode: string!) { hero(episode: $episode) { name } }`,
|
||||
"variable:episode": "JEDI",
|
||||
}
|
||||
```
|
||||
|
||||
## Related links
|
||||
|
||||
- [Basic schema for a Dapr component]({{< ref component-schema >}})
|
||||
|
|
|
@ -7,6 +7,10 @@ aliases:
|
|||
- "/operations/components/setup-bindings/supported-bindings/http/"
|
||||
---
|
||||
|
||||
## Alternative
|
||||
|
||||
The [service invocation API]({{< ref service_invocation_api.md >}}) allows for the invocation of non-Dapr HTTP endpoints and is the recommended approach. Read ["How-To: Invoke Non-Dapr Endpoints using HTTP"]({{< ref howto-invoke-non-dapr-endpoints.md >}}) for more information.
|
||||
|
||||
## Setup Dapr component
|
||||
|
||||
```yaml
|
||||
|
@ -21,11 +25,13 @@ spec:
|
|||
- name: url
|
||||
value: http://something.com
|
||||
- name: MTLSRootCA
|
||||
value: /Users/somepath/root.pem # OPTIONAL <path to root CA> or <pem encoded string>
|
||||
value: /Users/somepath/root.pem # OPTIONAL Secret store ref, <path to root CA>, or <pem encoded string>
|
||||
- name: MTLSClientCert
|
||||
value: /Users/somepath/client.pem # OPTIONAL <path to client cert> or <pem encoded string>
|
||||
value: /Users/somepath/client.pem # OPTIONAL Secret store ref, <path to client cert>, or <pem encoded string>
|
||||
- name: MTLSClientKey
|
||||
value: /Users/somepath/client.key # OPTIONAL <path to client key> or <pem encoded string>
|
||||
value: /Users/somepath/client.key # OPTIONAL Secret store ref, <path to client key>, or <pem encoded string>
|
||||
- name: MTLSRenegotiation
|
||||
value: RenegotiateOnceAsClient # OPTIONAL one of: RenegotiateNever, RenegotiateOnceAsClient, RenegotiateFreelyAsClient
|
||||
- name: securityToken # OPTIONAL <token to include as a header on HTTP requests>
|
||||
secretKeyRef:
|
||||
name: mysecret
|
||||
|
@ -39,12 +45,43 @@ spec:
|
|||
| Field | Required | Binding support | Details | Example |
|
||||
|--------------------|:--------:|--------|--------|---------|
|
||||
| url | Y | Output |The base URL of the HTTP endpoint to invoke | `http://host:port/path`, `http://myservice:8000/customers`
|
||||
| MTLSRootCA | N | Output |Path to root ca certificate or pem encoded string |
|
||||
| MTLSClientCert | N | Output |Path to client certificate or pem encoded string |
|
||||
| MTLSClientKey | N | Output |Path client private key or pem encoded string |
|
||||
| MTLSRootCA | N | Output |Secret store reference, path to root ca certificate, or pem encoded string |
|
||||
| MTLSClientCert | N | Output |Secret store reference, path to client certificate, or pem encoded string |
|
||||
| MTLSClientKey | N | Output |Secret store reference, path client private key, or pem encoded string |
|
||||
| MTLSRenegotiation | N | Output |Type of TLS renegotiation to be used | `RenegotiateOnceAsClient`
|
||||
| securityToken | N | Output |The value of a token to be added to an HTTP request as a header. Used together with `securityTokenHeader` |
|
||||
| securityTokenHeader| N | Output |The name of the header for `securityToken` on an HTTP request that |
|
||||
|
||||
### How to configure MTLS related fields in Metadata
|
||||
The values for **MTLSRootCA**, **MTLSClientCert** and **MTLSClientKey** can be provided in three ways:
|
||||
1. Secret store reference
|
||||
```yaml
|
||||
apiVersion: dapr.io/v1alpha1
|
||||
kind: Component
|
||||
metadata:
|
||||
name: <NAME>
|
||||
spec:
|
||||
type: bindings.http
|
||||
version: v1
|
||||
metadata:
|
||||
- name: url
|
||||
value: http://something.com
|
||||
- name: MTLSRootCA
|
||||
secretKeyRef:
|
||||
name: mysecret
|
||||
key: myrootca
|
||||
auth:
|
||||
secretStore: <NAME_OF_SECRET_STORE_COMPONENT>
|
||||
```
|
||||
2. Path to the file: The absolute path to the file can be provided as a value for the field.
|
||||
3. PEM encoded string: The PEM encoded string can also be provided as a value for the field.
|
||||
|
||||
{{% alert title="Note" color="primary" %}}
|
||||
Metadata fields **MTLSRootCA**, **MTLSClientCert** and **MTLSClientKey** are used to configure TLS(m) authentication.
|
||||
To use mTLS authentication, you must provide all three fields. See [mTLS]({{< ref "#using-mtls-or-enabling-client-tls-authentication-along-with-https" >}}) for more details. You can also provide only **MTLSRootCA**, to enable **HTTPS** connection. See [HTTPS]({{< ref "#install-the-ssl-certificate-in-the-sidecar" >}}) section for more details.
|
||||
{{% /alert %}}
|
||||
|
||||
|
||||
## Binding support
|
||||
|
||||
This component supports **output binding** with the following [HTTP methods/verbs](https://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html):
|
||||
|
@ -309,6 +346,10 @@ curl -d '{ "operation": "get" }' \
|
|||
|
||||
{{< /tabs >}}
|
||||
|
||||
{{% alert title="Note" color="primary" %}}
|
||||
HTTPS binding support can also be configured using the **MTLSRootCA** metadata option. This will add the specified certificate to the list of trusted certificates for the binding. There's no specific preference for either method. While the **MTLSRootCA** option is easy to use and doesn't require any changes to the sidecar, it accepts only one certificate. If you need to trust multiple certificates, you need to [install them in the sidecar by following the steps above]({{< ref "#install-the-ssl-certificate-in-the-sidecar" >}}).
|
||||
{{% /alert %}}
|
||||
|
||||
## Using mTLS or enabling client TLS authentication along with HTTPS
|
||||
You can configure the HTTP binding to use mTLS or client TLS authentication along with HTTPS by providing the `MTLSRootCA`, `MTLSClientCert`, and `MTLSClientKey` metadata fields in the binding component.
|
||||
|
||||
|
@ -317,6 +358,13 @@ These fields can be passed as a file path or as a pem encoded string.
|
|||
- If the pem encoded string is provided, the string is used as is.
|
||||
When these fields are configured, the Dapr sidecar uses the provided certificate to authenticate itself with the server during the TLS handshake process.
|
||||
|
||||
If the remote server is enforcing TLS renegotiation, you also need to set the metadata field `MTLSRenegotiation`. This field accepts one of following options:
|
||||
- `RenegotiateNever`
|
||||
- `RenegotiateOnceAsClient`
|
||||
- `RenegotiateFreelyAsClient`.
|
||||
|
||||
For more details see [the Go `RenegotiationSupport` documentation](https://pkg.go.dev/crypto/tls#RenegotiationSupport).
|
||||
|
||||
### When to use:
|
||||
You can use this when the server with which the HTTP binding is configured to communicate requires mTLS or client TLS authentication.
|
||||
|
||||
|
|
|
@ -0,0 +1,83 @@
|
|||
---
|
||||
type: docs
|
||||
title: "Kitex"
|
||||
linkTitle: "Kitex"
|
||||
description: "Detailed documentation on the Kitex binding component"
|
||||
aliases:
|
||||
- "/operations/components/setup-bindings/supported-bindings/kitex/"
|
||||
---
|
||||
|
||||
## Overview
|
||||
|
||||
The binding for Kitex mainly utilizes the generic-call feature in Kitex. Learn more from the official documentation around [Kitex generic-call](https://www.cloudwego.io/docs/kitex/tutorials/advanced-feature/generic-call/).
|
||||
Currently, Kitex only supports Thrift generic calls. The implementation integrated into [components-contrib](https://github.com/dapr/components-contrib/tree/master/bindings/kitex) adopts binary generic calls.
|
||||
|
||||
|
||||
## Component format
|
||||
|
||||
To setup an Kitex binding, create a component of type `bindings.kitex`. See the [How-to: Use output bindings to interface with external resources]({{< ref "howto-bindings.md#1-create-a-binding" >}}) guide on creating and applying a binding configuration.
|
||||
|
||||
```yaml
|
||||
apiVersion: dapr.io/v1alpha1
|
||||
kind: Component
|
||||
metadata:
|
||||
name: bindings.kitex
|
||||
spec:
|
||||
type: bindings.kitex
|
||||
version: v1
|
||||
metadata:
|
||||
```
|
||||
|
||||
## Spec metadata fields
|
||||
|
||||
The `InvokeRequest.Metadata` for `bindings.kitex` requires the client to fill in four required items when making a call:
|
||||
- `hostPorts`
|
||||
- `destService`
|
||||
- `methodName`
|
||||
- `version`
|
||||
|
||||
| Field | Required | Binding support | Details | Example |
|
||||
|-------------|:--------:|--------|---------------------------------------------------------------------------------------------------------|--------------------|
|
||||
| hostPorts | Y | Output | IP address and port information of the Kitex server (Thrift) | `"127.0.0.1:8888"` |
|
||||
| destService | Y | Output | Service name of the Kitex server (Thrift) | `"echo"` |
|
||||
| methodName | Y | Output | Method name under a specific service name of the Kitex server (Thrift) | `"echo"` |
|
||||
| version | Y | Output | Kitex version | `"0.5.0"` |
|
||||
|
||||
|
||||
## Binding support
|
||||
|
||||
This component supports **output binding** with the following operations:
|
||||
|
||||
- `get`
|
||||
|
||||
## Example
|
||||
|
||||
When using Kitex binding:
|
||||
- The client needs to pass in the correct Thrift-encoded binary
|
||||
- The server needs to be a Thrift Server.
|
||||
|
||||
The [kitex_output_test](https://github.com/dapr/components-contrib/blob/master/bindings/kitex/kitex_output_test.go) can be used as a reference.
|
||||
For example, the variable `reqData` needs to be _encoded_ by the Thrift protocol before sending, and the returned data needs to be decoded by the Thrift protocol.
|
||||
|
||||
**Request**
|
||||
|
||||
```json
|
||||
{
|
||||
"operation": "get",
|
||||
"metadata": {
|
||||
"hostPorts": "127.0.0.1:8888",
|
||||
"destService": "echo",
|
||||
"methodName": "echo",
|
||||
"version":"0.5.0"
|
||||
},
|
||||
"data": reqdata
|
||||
}
|
||||
```
|
||||
|
||||
## Related links
|
||||
|
||||
- [Basic schema for a Dapr component]({{< ref component-schema >}})
|
||||
- [Bindings building block]({{< ref bindings >}})
|
||||
- [How-To: Trigger application with input binding]({{< ref howto-triggers.md >}})
|
||||
- [How-To: Use bindings to interface with external resources]({{< ref howto-bindings.md >}})
|
||||
- [Bindings API reference]({{< ref bindings_api.md >}})
|
|
@ -1,7 +1,7 @@
|
|||
---
|
||||
type: docs
|
||||
title: "MySQL binding spec"
|
||||
linkTitle: "MySQL"
|
||||
title: "MySQL & MariaDB binding spec"
|
||||
linkTitle: "MySQL & MariaDB"
|
||||
description: "Detailed documentation on the MySQL binding component"
|
||||
aliases:
|
||||
- "/operations/components/setup-bindings/supported-bindings/mysql/"
|
||||
|
@ -9,7 +9,9 @@ aliases:
|
|||
|
||||
## Component format
|
||||
|
||||
To setup MySQL binding create a component of type `bindings.mysql`. See [this guide]({{< ref "howto-bindings.md#1-create-a-binding" >}}) on how to create and apply a binding configuration.
|
||||
The MySQL binding allows connecting to both MySQL and MariaDB databases. In this document, we refer to "MySQL" to indicate both databases.
|
||||
|
||||
To setup a MySQL binding create a component of type `bindings.mysql`. See [this guide]({{< ref "howto-bindings.md#1-create-a-binding" >}}) on how to create and apply a binding configuration.
|
||||
|
||||
The MySQL binding uses [Go-MySQL-Driver](https://github.com/go-sql-driver/mysql) internally.
|
||||
|
||||
|
|
|
@ -4,12 +4,13 @@ title: "PostgreSQL binding spec"
|
|||
linkTitle: "PostgreSQL"
|
||||
description: "Detailed documentation on the PostgreSQL binding component"
|
||||
aliases:
|
||||
- "/operations/components/setup-bindings/supported-bindings/postgresql/"
|
||||
- "/operations/components/setup-bindings/supported-bindings/postgres/"
|
||||
---
|
||||
|
||||
## Component format
|
||||
|
||||
To setup PostgreSQL binding create a component of type `bindings.postgres`. See [this guide]({{< ref "howto-bindings.md#1-create-a-binding" >}}) on how to create and apply a binding configuration.
|
||||
To setup PostgreSQL binding create a component of type `bindings.postgresql`. See [this guide]({{< ref "howto-bindings.md#1-create-a-binding" >}}) on how to create and apply a binding configuration.
|
||||
|
||||
|
||||
```yaml
|
||||
|
@ -18,7 +19,7 @@ kind: Component
|
|||
metadata:
|
||||
name: <NAME>
|
||||
spec:
|
||||
type: bindings.postgres
|
||||
type: bindings.postgresql
|
||||
version: v1
|
||||
metadata:
|
||||
- name: url # Required
|
||||
|
@ -33,7 +34,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr
|
|||
|
||||
| Field | Required | Binding support | Details | Example |
|
||||
|--------------------|:--------:|------------|-----|---------|
|
||||
| url | Y | Output | Postgres connection string See [here](#url-format) for more details | `"user=dapr password=secret host=dapr.example.com port=5432 dbname=dapr sslmode=verify-ca"` |
|
||||
| url | Y | Output | PostgreSQL connection string See [here](#url-format) for more details | `"user=dapr password=secret host=dapr.example.com port=5432 dbname=dapr sslmode=verify-ca"` |
|
||||
|
||||
### URL format
|
||||
|
||||
|
@ -144,8 +145,7 @@ Finally, the `close` operation can be used to explicitly close the DB connection
|
|||
}
|
||||
```
|
||||
|
||||
|
||||
> Note, the PostgreSql binding itself doesn't prevent SQL injection, like with any database application, validate the input before executing query.
|
||||
> Note, the PostgreSQL binding itself doesn't prevent SQL injection, like with any database application, validate the input before executing query.
|
||||
|
||||
## Related links
|
||||
|
|
@ -39,6 +39,16 @@ spec:
|
|||
value: 5
|
||||
- name: contentType
|
||||
value: "text/plain"
|
||||
- name: reconnectWaitInSeconds
|
||||
value: 5
|
||||
- name: externalSasl
|
||||
value: false
|
||||
- name: caCert
|
||||
value: null
|
||||
- name: clientCert
|
||||
value: null
|
||||
- name: clientKey
|
||||
value: null
|
||||
```
|
||||
|
||||
{{% alert title="Warning" color="warning" %}}
|
||||
|
@ -50,7 +60,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr
|
|||
| Field | Required | Binding support | Details | Example |
|
||||
|--------------------|:--------:|------------|-----|---------|
|
||||
| queueName | Y | Input/Output | The RabbitMQ queue name | `"myqueue"` |
|
||||
| host | Y | Input/Output | The RabbitMQ host address | `"amqp://[username][:password]@host.domain[:port]"` |
|
||||
| host | Y | Input/Output | The RabbitMQ host address | `"amqp://[username][:password]@host.domain[:port]"` or with TLS: `"amqps://[username][:password]@host.domain[:port]"` |
|
||||
| durable | N | Output | Tells RabbitMQ to persist message in storage. Defaults to `"false"` | `"true"`, `"false"` |
|
||||
| deleteWhenUnused | N | Input/Output | Enables or disables auto-delete. Defaults to `"false"` | `"true"`, `"false"` |
|
||||
| ttlInSeconds | N | Output | Set the [default message time to live at RabbitMQ queue level](https://www.rabbitmq.com/ttl.html). If this parameter is omitted, messages won't expire, continuing to exist on the queue until processed. See [also](#specifying-a-ttl-per-message) | `60` |
|
||||
|
@ -58,6 +68,11 @@ The above example uses secrets as plain strings. It is recommended to use a secr
|
|||
| exclusive | N | Input/Output | Determines whether the topic will be an exclusive topic or not. Defaults to `"false"` | `"true"`, `"false"` |
|
||||
| maxPriority| N | Input/Output | Parameter to set the [priority queue](https://www.rabbitmq.com/priority.html). If this parameter is omitted, queue will be created as a general queue instead of a priority queue. Value between 1 and 255. See [also](#specifying-a-priority-per-message) | `"1"`, `"10"` |
|
||||
| contentType | N | Input/Output | The content type of the message. Defaults to "text/plain". | `"text/plain"`, `"application/cloudevent+json"` and so on |
|
||||
| reconnectWaitInSeconds | N | Input/Output | Represents the duration in seconds that the client should wait before attempting to reconnect to the server after a disconnection occurs. Defaults to `"5"`. | `"5"`, `"10"` |
|
||||
| externalSasl | N | Input/Output | With TLS, should the username be taken from an additional field (e.g. CN.) See [RabbitMQ Authentication Mechanisms](https://www.rabbitmq.com/access-control.html#mechanisms). Defaults to `"false"`. | `"true"`, `"false"` |
|
||||
| caCert | N | Input/Output | The CA certificate to use for TLS connection. Defaults to `null`. | `"-----BEGIN CERTIFICATE-----\nMI..."` |
|
||||
| clientCert | N | Input/Output | The client certificate to use for TLS connection. Defaults to `null`. | `"-----BEGIN CERTIFICATE-----\nMI..."` |
|
||||
| clientKey | N | Input/Output | The client key to use for TLS connection. Defaults to `null`. | `"-----BEGIN PRIVATE KEY-----\nMI..."` |
|
||||
## Binding support
|
||||
|
||||
This component supports both **input and output** binding interfaces.
|
||||
|
|
|
@ -70,6 +70,11 @@ The above example uses secrets as plain strings. It is recommended to use a secr
|
|||
When running the Dapr sidecar (daprd) with your application on EKS (AWS Kubernetes), if you're using a node/pod that has already been attached to an IAM policy defining access to AWS resources, you **must not** provide AWS access-key, secret-key, and tokens in the definition of the component spec you're using.
|
||||
{{% /alert %}}
|
||||
|
||||
|
||||
### S3 Bucket Creation
|
||||
{{< tabs "Minio" "LocalStack" "AWS" >}}
|
||||
|
||||
{{% codetab %}}
|
||||
### Using with Minio
|
||||
|
||||
[Minio](https://min.io/) is a service that exposes local storage as S3-compatible block storage, and it's a popular alternative to S3 especially in development environments. You can use the S3 binding with Minio too, with some configuration tweaks:
|
||||
|
@ -78,6 +83,70 @@ When running the Dapr sidecar (daprd) with your application on EKS (AWS Kubernet
|
|||
3. The value for `region` is not important; you can set it to `us-east-1`.
|
||||
4. Depending on your environment, you may need to set `disableSSL` to `true` if you're connecting to Minio using a non-secure connection (using the `http://` protocol). If you are using a secure connection (`https://` protocol) but with a self-signed certificate, you may need to set `insecureSSL` to `true`.
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{% codetab %}}
|
||||
For local development, the [LocalStack project](https://github.com/localstack/localstack) is used to integrate AWS S3. Follow [these instructions](https://github.com/localstack/localstack#running) to run LocalStack.
|
||||
|
||||
To run LocalStack locally from the command line using Docker, use a `docker-compose.yaml` similar to the following:
|
||||
|
||||
```yaml
|
||||
version: "3.8"
|
||||
|
||||
services:
|
||||
localstack:
|
||||
container_name: "cont-aws-s3"
|
||||
image: localstack/localstack:1.4.0
|
||||
ports:
|
||||
- "127.0.0.1:4566:4566"
|
||||
environment:
|
||||
- DEBUG=1
|
||||
- DOCKER_HOST=unix:///var/run/docker.sock
|
||||
volumes:
|
||||
- "<PATH>/init-aws.sh:/etc/localstack/init/ready.d/init-aws.sh" # init hook
|
||||
- "${LOCALSTACK_VOLUME_DIR:-./volume}:/var/lib/localstack"
|
||||
- "/var/run/docker.sock:/var/run/docker.sock"
|
||||
```
|
||||
|
||||
To use the S3 component, you need to use an existing bucket. The example above uses a [LocalStack Initialization Hook](https://docs.localstack.cloud/references/init-hooks/) to setup the bucket.
|
||||
|
||||
To use LocalStack with your S3 binding, you need to provide the `endpoint` configuration in the component metadata. The `endpoint` is unnecessary when running against production AWS.
|
||||
|
||||
|
||||
```yaml
|
||||
apiVersion: dapr.io/v1alpha1
|
||||
kind: Component
|
||||
metadata:
|
||||
name: aws-s3
|
||||
namespace: default
|
||||
spec:
|
||||
type: bindings.aws.s3
|
||||
version: v1
|
||||
metadata:
|
||||
- name: bucket
|
||||
value: conformance-test-docker
|
||||
- name: endpoint
|
||||
value: "http://localhost:4566"
|
||||
- name: accessKey
|
||||
value: "my-access"
|
||||
- name: secretKey
|
||||
value: "my-secret"
|
||||
- name: region
|
||||
value: "us-east-1"
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{% codetab %}}
|
||||
|
||||
To use the S3 component, you need to use an existing bucket. Follow the [AWS documentation for creating a bucket](https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-bucket-overview.html).
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
|
||||
|
||||
{{< /tabs >}}
|
||||
|
||||
## Binding support
|
||||
|
||||
This component supports **output binding** with the following operations:
|
||||
|
|
|
@ -50,9 +50,9 @@ The above example uses secrets as plain strings. It is recommended to use a secr
|
|||
|
||||
| Field | Required | Binding support | Details | Example |
|
||||
|--------------------|:--------:|------------|-----|---------|
|
||||
| region | Y | Output | The specific AWS region | `"eu-west-1"` |
|
||||
| accessKey | Y | Output | The AWS Access Key to access this resource | `"key"` |
|
||||
| secretKey | Y | Output | The AWS Secret Access Key to access this resource | `"secretAccessKey"` |
|
||||
| region | N | Output | The specific AWS region | `"eu-west-1"` |
|
||||
| accessKey | N | Output | The AWS Access Key to access this resource | `"key"` |
|
||||
| secretKey | N | Output | The AWS Secret Access Key to access this resource | `"secretAccessKey"` |
|
||||
| sessionToken | N | Output | The AWS session token to use | `"sessionToken"` |
|
||||
| emailFrom | N | Output | If set, this specifies the email address of the sender. See [also](#example-request) | `"me@example.com"` |
|
||||
| emailTo | N | Output | If set, this specifies the email address of the receiver. See [also](#example-request) | `"me@example.com"` |
|
||||
|
|
|
@ -1,107 +0,0 @@
|
|||
---
|
||||
type: docs
|
||||
title: "Twitter binding spec"
|
||||
linkTitle: "Twitter"
|
||||
description: "Detailed documentation on the Twitter binding component"
|
||||
aliases:
|
||||
- "/operations/components/setup-bindings/supported-bindings/twitter/"
|
||||
---
|
||||
|
||||
{{% alert title="Deprecation notice" color="warning" %}}
|
||||
The Twitter binding component has been deprecated and will be removed in a future release. See [this GitHub issue](https://github.com/dapr/components-contrib/issues/2503) for details.
|
||||
{{% /alert %}}
|
||||
|
||||
## Component format
|
||||
|
||||
To setup Twitter binding create a component of type `bindings.twitter`. See [this guide]({{< ref "howto-bindings.md#1-create-a-binding" >}}) on how to create and apply a binding configuration.
|
||||
|
||||
```yaml
|
||||
apiVersion: dapr.io/v1alpha1
|
||||
kind: Component
|
||||
metadata:
|
||||
name: <NAME>
|
||||
spec:
|
||||
type: bindings.twitter
|
||||
version: v1
|
||||
metadata:
|
||||
- name: consumerKey
|
||||
value: "****" # twitter api consumer key, required
|
||||
- name: consumerSecret
|
||||
value: "****" # twitter api consumer secret, required
|
||||
- name: accessToken
|
||||
value: "****" # twitter api access token, required
|
||||
- name: accessSecret
|
||||
value: "****" # twitter api access secret, required
|
||||
```
|
||||
|
||||
{{% alert title="Warning" color="warning" %}}
|
||||
The above example uses secrets as plain strings. It is recommended to use a secret store for the secrets as described [here]({{< ref component-secrets.md >}}).
|
||||
{{% /alert %}}
|
||||
|
||||
## Spec metadata fields
|
||||
|
||||
| Field | Required | Binding support | Details | Example |
|
||||
|--------------------|:--------:|------------|-----|---------|
|
||||
| consumerKey | Y | Input/Output | Twitter API consumer key | `"consumerkey"` |
|
||||
| consumerSecret | Y | Input/Output | Twitter API consumer secret | `"consumersecret"` |
|
||||
| accessToken | Y | Input/Output | Twitter API access token | `"accesstoken"` |
|
||||
| accessSecret | Y | Input/Output | Twitter API access secret | `"accesssecret"` |
|
||||
|
||||
## Binding support
|
||||
|
||||
This component supports both **input and output** binding interfaces.
|
||||
|
||||
This component supports **output binding** with the following operations:
|
||||
|
||||
- `get`
|
||||
|
||||
### Input binding
|
||||
|
||||
For input binding, where Tweets matching the query are streamed to the user service, the above component also has to include a query:
|
||||
|
||||
```yaml
|
||||
- name: query
|
||||
value: "dapr" # your search query, required
|
||||
```
|
||||
|
||||
### Output binding
|
||||
#### get
|
||||
|
||||
For output binding invocation the user code has to invoke the binding:
|
||||
|
||||
```shell
|
||||
POST http://localhost:3500/v1.0/bindings/twitter
|
||||
```
|
||||
|
||||
Where the payload is:
|
||||
|
||||
```json
|
||||
{
|
||||
"data": "",
|
||||
"metadata": {
|
||||
"query": "twitter-query",
|
||||
"lang": "optional-language-code",
|
||||
"result": "valid-result-type"
|
||||
},
|
||||
"operation": "get"
|
||||
}
|
||||
```
|
||||
|
||||
The metadata parameters are:
|
||||
|
||||
- `query` - any valid Twitter query (e.g. `dapr` or `dapr AND serverless`). See [Twitter docs](https://developer.twitter.com/en/docs/tweets/rules-and-filtering/overview/standard-operators) for more details on advanced query formats
|
||||
- `lang` - (optional, default: `en`) restricts result tweets to the given language using [ISO 639-1 language code](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes)
|
||||
- `result` - (optional, default: `recent`) specifies tweet query result type. Valid values include:
|
||||
- `mixed` - both popular and real time results
|
||||
- `recent` - most recent results
|
||||
- `popular` - most popular results
|
||||
|
||||
You can see the example of the JSON data that Twitter binding returns [here](https://developer.twitter.com/en/docs/tweets/search/api-reference/get-search-tweets)
|
||||
|
||||
## Related links
|
||||
|
||||
- [Basic schema for a Dapr component]({{< ref component-schema >}})
|
||||
- [Bindings building block]({{< ref bindings >}})
|
||||
- [How-To: Trigger application with input binding]({{< ref howto-triggers.md >}})
|
||||
- [How-To: Use bindings to interface with external resources]({{< ref howto-bindings.md >}})
|
||||
- [Bindings API reference]({{< ref bindings_api.md >}})
|
|
@ -0,0 +1,103 @@
|
|||
---
|
||||
type: docs
|
||||
title: "Wasm"
|
||||
linkTitle: "Wasm"
|
||||
description: "Detailed documentation on the WebAssembly binding component"
|
||||
aliases:
|
||||
- "/operations/components/setup-bindings/supported-bindings/wasm/"
|
||||
---
|
||||
|
||||
## Overview
|
||||
|
||||
With WebAssembly, you can safely run code compiled in other languages. Runtimes
|
||||
execute WebAssembly Modules (Wasm), which are most often binaries with a `.wasm`
|
||||
extension.
|
||||
|
||||
The Wasm Binding allows you to invoke a program compiled to Wasm by passing
|
||||
commandline args or environment variables to it, similar to how you would with
|
||||
a normal subprocess. For example, you can satisfy an invocation using Python,
|
||||
even though Dapr is written in Go and is running on a platform that doesn't have
|
||||
Python installed!
|
||||
|
||||
The Wasm binary must be a program compiled with the WebAssembly System
|
||||
Interface (WASI). The binary can be a program you've written such as in Go, or
|
||||
an interpreter you use to run inlined scripts, such as Python.
|
||||
|
||||
Minimally, you must specify a Wasm binary compiled with the canonical WASI
|
||||
version `wasi_snapshot_preview1` (a.k.a. `wasip1`), often abbreviated to `wasi`.
|
||||
|
||||
> **Note:** If compiling in Go 1.21+, this is `GOOS=wasip1 GOARCH=wasm`. In TinyGo, Rust, and Zig, this is the target `wasm32-wasi`.
|
||||
|
||||
You can also re-use an existing binary. For example, [Wasm Language Runtimes](https://github.com/vmware-labs/webassembly-language-runtimes)
|
||||
distributes interpreters (including PHP, Python, and Ruby) already compiled to
|
||||
WASI.
|
||||
|
||||
Wasm binaries are loaded from a URL. For example, the URL `file://rewrite.wasm`
|
||||
loads `rewrite.wasm` from the current directory of the process. On Kubernetes,
|
||||
see [How to: Mount Pod volumes to the Dapr sidecar]({{< ref kubernetes-volume-mounts.md >}})
|
||||
to configure a filesystem mount that can contain Wasm binaries.
|
||||
|
||||
Dapr uses [wazero](https://wazero.io) to run these binaries, because it has no
|
||||
dependencies. This allows use of WebAssembly with no installation process
|
||||
except Dapr itself.
|
||||
|
||||
## Component format
|
||||
|
||||
To configure a Wasm binding, create a component of type
|
||||
`bindings.wasm`. See [this guide]({{< ref "howto-bindings.md#1-create-a-binding" >}})
|
||||
on how to create and apply a binding configuration.
|
||||
|
||||
```yaml
|
||||
apiVersion: dapr.io/v1alpha1
|
||||
kind: Component
|
||||
metadata:
|
||||
name: wasm
|
||||
spec:
|
||||
type: bindings.wasm
|
||||
version: v1
|
||||
metadata:
|
||||
- name: url
|
||||
value: "file://uppercase.wasm"
|
||||
```
|
||||
|
||||
## Spec metadata fields
|
||||
|
||||
| Field | Details | Required | Example |
|
||||
|-------|----------------------------------------------------------------|----------|----------------|
|
||||
| url | The URL of the resource including the Wasm binary to instantiate. The supported schemes include `file://`. The path of a `file://` URL is relative to the Dapr process unless it begins with `/`. | true | `file://hello.wasm` |
|
||||
|
||||
|
||||
## Binding support
|
||||
|
||||
This component supports **output binding** with the following operations:
|
||||
|
||||
- `execute`
|
||||
|
||||
## Example request
|
||||
|
||||
The `data` field, if present, will be the program's STDIN. You can optionally
|
||||
pass metadata properties with each request:
|
||||
|
||||
- `args` any CLI arguments, comma-separated. This excludes the program name.
|
||||
|
||||
For example, if the binding `url` was a Ruby interpreter, such as from
|
||||
[webassembly-language-runtimes](https://github.com/vmware-labs/webassembly-language-runtimes/releases/tag/ruby%2F3.2.0%2B20230215-1349da9),
|
||||
the following request would respond back with "Hello, salaboy":
|
||||
|
||||
```json
|
||||
{
|
||||
"operation": "execute",
|
||||
"metadata": {
|
||||
"args": "-ne,'print \"Hello, \"; print'"
|
||||
},
|
||||
"data": "salaboy"
|
||||
}
|
||||
```
|
||||
|
||||
## Related links
|
||||
|
||||
- [Basic schema for a Dapr component]({{< ref component-schema >}})
|
||||
- [Bindings building block]({{< ref bindings >}})
|
||||
- [How-To: Trigger application with input binding]({{< ref howto-triggers.md >}})
|
||||
- [How-To: Use bindings to interface with external resources]({{< ref howto-bindings.md >}})
|
||||
- [Bindings API reference]({{< ref bindings_api.md >}})
|
|
@ -100,7 +100,7 @@ The Azure App Configuration store component supports the following optional `lab
|
|||
The label can be populated using query parameters in the request URL:
|
||||
|
||||
```bash
|
||||
GET curl http://localhost:<daprPort>/v1.0-alpha1/configuration/<store-name>?key=<key name>&metadata.label=<label value>
|
||||
GET curl http://localhost:<daprPort>/v1.0/configuration/<store-name>?key=<key name>&metadata.label=<label value>
|
||||
```
|
||||
|
||||
## Related links
|
||||
|
|
|
@ -1,15 +1,16 @@
|
|||
---
|
||||
type: docs
|
||||
title: "Postgres"
|
||||
linkTitle: "Postgres"
|
||||
description: Detailed information on the Postgres configuration store component
|
||||
title: "PostgreSQL"
|
||||
linkTitle: "PostgreSQL"
|
||||
description: Detailed information on the PostgreSQL configuration store component
|
||||
aliases:
|
||||
- "/operations/components/setup-configuration-store/supported-configuration-stores/setup-postgresql/"
|
||||
- "/operations/components/setup-configuration-store/supported-configuration-stores/setup-postgres/"
|
||||
---
|
||||
|
||||
## Component format
|
||||
|
||||
To set up an Postgres configuration store, create a component of type `configuration.postgres`
|
||||
To set up a PostgreSQL configuration store, create a component of type `configuration.postgresql`
|
||||
|
||||
```yaml
|
||||
apiVersion: dapr.io/v1alpha1
|
||||
|
@ -17,7 +18,7 @@ kind: Component
|
|||
metadata:
|
||||
name: <NAME>
|
||||
spec:
|
||||
type: configuration.postgres
|
||||
type: configuration.postgresql
|
||||
version: v1
|
||||
metadata:
|
||||
- name: connectionString
|
||||
|
@ -38,12 +39,12 @@ The above example uses secrets as plain strings. It is recommended to use a secr
|
|||
| Field | Required | Details | Example |
|
||||
|--------------------|:--------:|---------|---------|
|
||||
| connectionString | Y | The connection string for PostgreSQL. Default pool_max_conns = 5 | `"host=localhost user=postgres password=example port=5432 connect_timeout=10 database=dapr_test pool_max_conns=10"`
|
||||
| table | Y | table name for configuration information. | `configTable`
|
||||
| table | Y | Table name for configuration information, must be lowercased. | `configtable`
|
||||
|
||||
## Set up Postgres as Configuration Store
|
||||
## Set up PostgreSQL as Configuration Store
|
||||
|
||||
1. Start Postgres Database
|
||||
1. Connect to the Postgres database and setup a configuration table with following schema -
|
||||
1. Start PostgreSQL Database
|
||||
1. Connect to the PostgreSQL database and setup a configuration table with following schema -
|
||||
|
||||
| Field | Datatype | Nullable |Details |
|
||||
|--------------------|:--------:|---------|---------|
|
||||
|
@ -95,19 +96,19 @@ notification = json_build_object(
|
|||
6. Since this is a generically created trigger, map it to the `configuration table`
|
||||
```console
|
||||
CREATE TRIGGER config
|
||||
AFTER INSERT OR UPDATE OR DELETE ON configTable
|
||||
AFTER INSERT OR UPDATE OR DELETE ON configtable
|
||||
FOR EACH ROW EXECUTE PROCEDURE notify_event();
|
||||
```
|
||||
7. In the subscribe request add an additional metadata field with key as `pgNotifyChannel` and value should be set to same `channel name` mentioned in `pg_notify`. From the above example, it should be set to `config`
|
||||
|
||||
{{% alert title="Note" color="primary" %}}
|
||||
When calling `subscribe` API, `metadata.pgNotifyChannel` should be used to specify the name of the channel to listen for notifications from Postgres configuration store.
|
||||
When calling `subscribe` API, `metadata.pgNotifyChannel` should be used to specify the name of the channel to listen for notifications from PostgreSQL configuration store.
|
||||
|
||||
Any number of keys can be added to a subscription request. Each subscription uses an exclusive database connection. It is strongly recommended to subscribe to multiple keys within a single subscription. This helps optimize the number of connections to the database.
|
||||
|
||||
Example of subscribe HTTP API -
|
||||
```ps
|
||||
curl --location --request GET 'http://<host>:<dapr-http-port>/configuration/postgres/subscribe?key=<keyname1>&key=<keyname2>&metadata.pgNotifyChannel=<channel name>'
|
||||
curl --location --request GET 'http://<host>:<dapr-http-port>/configuration/mypostgresql/subscribe?key=<keyname1>&key=<keyname2>&metadata.pgNotifyChannel=<channel name>'
|
||||
```
|
||||
{{% /alert %}}
|
||||
|
||||
|
|
|
@ -20,19 +20,11 @@ spec:
|
|||
version: v1
|
||||
metadata:
|
||||
- name: redisHost
|
||||
value: <HOST>
|
||||
value: <address>:6379
|
||||
- name: redisPassword
|
||||
value: <PASSWORD>
|
||||
value: **************
|
||||
- name: enableTLS
|
||||
value: <bool> # Optional. Allowed: true, false.
|
||||
- name: failover
|
||||
value: <bool> # Optional. Allowed: true, false.
|
||||
- name: sentinelMasterName
|
||||
value: <string> # Optional
|
||||
- name: maxRetries
|
||||
value: # Optional
|
||||
- name: maxRetryBackoff
|
||||
value: # Optional
|
||||
value: <bool>
|
||||
|
||||
```
|
||||
|
||||
|
@ -45,14 +37,26 @@ The above example uses secrets as plain strings. It is recommended to use a secr
|
|||
|
||||
| Field | Required | Details | Example |
|
||||
|--------------------|:--------:|---------|---------|
|
||||
| redisHost | Y | Connection-string for the redis host | `localhost:6379`, `redis-master.default.svc.cluster.local:6379`
|
||||
| redisPassword | Y | Password for Redis host. No Default. Can be `secretKeyRef` to use a secret reference | `""`, `"KeFg23!"`
|
||||
| enableTLS | N | If the Redis instance supports TLS with public certificates, can be configured to be enabled or disabled. Defaults to `"false"` | `"true"`, `"false"`
|
||||
| maxRetries | N | Maximum number of retries before giving up. Defaults to `3` | `5`, `10`
|
||||
| maxRetryBackoff | N | Maximum backoff between each retry. Defaults to `2` seconds; `"-1"` disables backoff. | `3000000000`
|
||||
| failover | N | Property to enable failover configuration. Needs sentinelMasterName to be set. The redisHost should be the sentinel host address. See [Redis Sentinel Documentation](https://redis.io/docs/manual/sentinel/). Defaults to `"false"` | `"true"`, `"false"`
|
||||
| sentinelMasterName | N | The sentinel master name. See [Redis Sentinel Documentation](https://redis.io/docs/manual/sentinel/) | `""`, `"127.0.0.1:6379"`
|
||||
|
||||
| redisHost | Y | Output | The Redis host address | `"localhost:6379"` |
|
||||
| redisPassword | Y | Output | The Redis password | `"password"` |
|
||||
| redisUsername | N | Output | Username for Redis host. Defaults to empty. Make sure your Redis server version is 6 or above, and have created acl rule correctly. | `"username"` |
|
||||
| enableTLS | N | Output | If the Redis instance supports TLS with public certificates it can be configured to enable or disable TLS. Defaults to `"false"` | `"true"`, `"false"` |
|
||||
| failover | N | Output | Property to enable failover configuration. Needs sentinelMasterName to be set. Defaults to `"false"` | `"true"`, `"false"`
|
||||
| sentinelMasterName | N | Output | The Sentinel master name. See [Redis Sentinel Documentation](https://redis.io/docs/reference/sentinel-clients/) | `""`, `"127.0.0.1:6379"`
|
||||
| redisType | N | Output | The type of Redis. There are two valid values, one is `"node"` for single node mode, the other is `"cluster"` for Redis cluster mode. Defaults to `"node"`. | `"cluster"`
|
||||
| redisDB | N | Output | Database selected after connecting to Redis. If `"redisType"` is `"cluster"`, this option is ignored. Defaults to `"0"`. | `"0"`
|
||||
| redisMaxRetries | N | Output | Maximum number of times to retry commands before giving up. Default is to not retry failed commands. | `"5"`
|
||||
| redisMinRetryInterval | N | Output | Minimum backoff for Redis commands between each retry. Default is `"8ms"`; `"-1"` disables backoff. | `"8ms"`
|
||||
| redisMaxRetryInterval | N | Output | Maximum backoff for Redis commands between each retry. Default is `"512ms"`;`"-1"` disables backoff. | `"5s"`
|
||||
| dialTimeout | N | Output | Dial timeout for establishing new connections. Defaults to `"5s"`. | `"5s"`
|
||||
| readTimeout | N | Output | Timeout for socket reads. If reached, Redis commands fail with a timeout instead of blocking. Defaults to `"3s"`, `"-1"` for no timeout. | `"3s"`
|
||||
| writeTimeout | N | Output | Timeout for socket writes. If reached, Redis commands fail with a timeout instead of blocking. Default is readTimeout. | `"3s"`
|
||||
| poolSize | N | Output | Maximum number of socket connections. Default is 10 connections per every CPU as reported by runtime.NumCPU. | `"20"`
|
||||
| poolTimeout | N | Output | Amount of time client waits for a connection if all connections are busy before returning an error. Default is readTimeout + 1 second. | `"5s"`
|
||||
| maxConnAge | N | Output | Connection age at which the client retires (closes) the connection. Default is to not close aged connections. | `"30m"`
|
||||
| minIdleConns | N | Output | Minimum number of idle connections to keep open in order to avoid the performance degradation associated with creating new connections. Defaults to `"0"`. | `"2"`
|
||||
| idleCheckFrequency | N | Output | Frequency of idle checks made by idle connections reaper. Default is `"1m"`. `"-1"` disables idle connections reaper. | `"-1"`
|
||||
| idleTimeout | N | Output | Amount of time after which the client closes idle connections. Should be less than server's timeout. Default is `"5m"`. `"-1"` disables idle timeout check. | `"10m"`
|
||||
|
||||
## Setup Redis
|
||||
|
||||
|
|
|
@ -8,7 +8,7 @@ aliases:
|
|||
- /developing-applications/middleware/supported-middleware/middleware-bearer/
|
||||
---
|
||||
|
||||
The bearer [HTTP middleware]({{< ref middleware.md >}}) verifies a [Bearer Token](https://tools.ietf.org/html/rfc6750) using [OpenID Connect](https://openid.net/connect/) on a Web API without modifying the application. This design separates authentication/authorization concerns from the application, so that application operators can adopt and configure authentication/authorization providers without impacting the application code.
|
||||
The bearer [HTTP middleware]({{< ref middleware.md >}}) verifies a [Bearer Token](https://tools.ietf.org/html/rfc6750) using [OpenID Connect](https://openid.net/connect/) on a Web API, without modifying the application. This design separates authentication/authorization concerns from the application, so that application operators can adopt and configure authentication/authorization providers without impacting the application code.
|
||||
|
||||
## Component format
|
||||
|
||||
|
@ -21,17 +21,22 @@ spec:
|
|||
type: middleware.http.bearer
|
||||
version: v1
|
||||
metadata:
|
||||
- name: clientId
|
||||
value: "<your client ID>"
|
||||
- name: issuerURL
|
||||
value: "https://accounts.google.com"
|
||||
- name: audience
|
||||
value: "<your token audience; e.g. the application's client ID>"
|
||||
- name: issuer
|
||||
value: "<your token issuer, e.g. 'https://accounts.google.com'>"
|
||||
|
||||
# Optional values
|
||||
- name: jwksURL
|
||||
value: "https://accounts.google.com/.well-known/openid-configuration"
|
||||
```
|
||||
## Spec metadata fields
|
||||
|
||||
| Field | Details | Example |
|
||||
|-------|---------|---------|
|
||||
| clientId | The client ID of your application that is created as part of a credential hosted by a OpenID Connect platform
|
||||
| issuerURL | URL identifier for the service. | `"https://accounts.google.com"`, `"https://login.salesforce.com"`
|
||||
| Field | Required | Details | Example |
|
||||
|-------|:--------:|---------|---------|
|
||||
| `audience` | Y | The audience expected in the tokens. Usually, this corresponds to the client ID of your application that is created as part of a credential hosted by an OpenID Connect platform. |
|
||||
| `issuer` | Y | The issuer authority, which is the value expected in the issuer claim in the tokens. | `"https://accounts.google.com"`, `"https://login.salesforce.com"`
|
||||
| `jwksURL` | N | Address of the JWKS (JWK Set containing the public keys for verifying tokens). If empty, will try to fetch the URL set in the OpenID Configuration document `<issuer>/.well-known/openid-configuration`. | `"https://accounts.google.com/.well-known/openid-configuration"`
|
||||
|
||||
## Dapr configuration
|
||||
|
||||
|
|
|
@ -7,11 +7,12 @@ aliases:
|
|||
- /developing-applications/middleware/supported-middleware/middleware-rate-limit/
|
||||
---
|
||||
|
||||
The rate limit [HTTP middleware]({{< ref middleware.md >}}) allows restricting the maximum number of allowed HTTP requests per second. Rate limiting can protect your application from denial of service (DOS) attacks. DOS attacks can be initiated by malicious 3rd parties but also by bugs in your software (a.k.a. a "friendly fire" DOS attack).
|
||||
The rate limit [HTTP middleware]({{< ref middleware.md >}}) allows restricting the maximum number of allowed HTTP requests per second. Rate limiting can protect your application from Denial of Service (DoS) attacks. DoS attacks can be initiated by malicious 3rd parties but also by bugs in your software (a.k.a. a "friendly fire" DoS attack).
|
||||
|
||||
## Component format
|
||||
|
||||
In the following definition, the maximum requests per second are set to 10:
|
||||
|
||||
```yaml
|
||||
apiVersion: dapr.io/v1alpha1
|
||||
kind: Component
|
||||
|
@ -29,11 +30,15 @@ spec:
|
|||
|
||||
| Field | Details | Example |
|
||||
|-------|---------|---------|
|
||||
| maxRequestsPerSecond | The maximum requests per second by remote IP and path. Something to consider is that **the limit is enforced independently in each Dapr sidecar and not cluster wide** | `10`
|
||||
| `maxRequestsPerSecond` | The maximum requests per second by remote IP.<br>The component looks at the `X-Forwarded-For` and `X-Real-IP` headers to determine the caller's IP. | `10`
|
||||
|
||||
Once the limit is reached, the request will return *HTTP Status code 429: Too Many Requests*.
|
||||
Once the limit is reached, the requests will fail with HTTP Status code *429: Too Many Requests*.
|
||||
|
||||
Alternatively, the [max concurrency setting]({{< ref control-concurrency.md >}}) can be used to rate limit applications and applies to all traffic regardless of remote IP or path.
|
||||
{{% alert title="Important" color="warning" %}}
|
||||
The rate limit is enforced independently in each Dapr sidecar, and not cluster-wide.
|
||||
{{% /alert %}}
|
||||
|
||||
Alternatively, the [max concurrency setting]({{< ref control-concurrency.md >}}) can be used to rate-limit applications and applies to all traffic, regardless of remote IP, protocol, or path.
|
||||
|
||||
## Dapr configuration
|
||||
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
---
|
||||
type: docs
|
||||
title: "WASM"
|
||||
linkTitle: "WASM"
|
||||
description: "Use WASM middleware in your HTTP pipeline"
|
||||
title: "Wasm"
|
||||
linkTitle: "Wasm"
|
||||
description: "Use Wasm middleware in your HTTP pipeline"
|
||||
aliases:
|
||||
- /developing-applications/middleware/supported-middleware/middleware-wasm/
|
||||
---
|
||||
|
@ -17,9 +17,10 @@ binary. In other words, you can extend Dapr using external files that are not
|
|||
pre-compiled into the `daprd` binary. Dapr embeds [wazero](https://wazero.io)
|
||||
to accomplish this without CGO.
|
||||
|
||||
Wasm modules are loaded from a filesystem path. On Kubernetes, see [mounting
|
||||
volumes to the Dapr sidecar]({{< ref kubernetes-volume-mounts.md >}}) to configure
|
||||
a filesystem mount that can contain Wasm modules.
|
||||
Wasm binaries are loaded from a URL. For example, the URL `file://rewrite.wasm`
|
||||
loads `rewrite.wasm` from the current directory of the process. On Kubernetes,
|
||||
see [How to: Mount Pod volumes to the Dapr sidecar]({{< ref kubernetes-volume-mounts.md >}})
|
||||
to configure a filesystem mount that can contain Wasm modules.
|
||||
|
||||
## Component format
|
||||
|
||||
|
@ -32,8 +33,8 @@ spec:
|
|||
type: middleware.http.wasm
|
||||
version: v1
|
||||
metadata:
|
||||
- name: path
|
||||
value: "./router.wasm"
|
||||
- name: url
|
||||
value: "file://router.wasm"
|
||||
```
|
||||
|
||||
## Spec metadata fields
|
||||
|
@ -41,9 +42,9 @@ spec:
|
|||
Minimally, a user must specify a Wasm binary that implements the [http-handler](https://http-wasm.io/http-handler/).
|
||||
How to compile this is described later.
|
||||
|
||||
| Field | Details | Required | Example |
|
||||
|----------|----------------------------------------------------------------|----------|----------------|
|
||||
| path | A relative or absolute path to the Wasm binary to instantiate. | true | "./hello.wasm" |
|
||||
| Field | Details | Required | Example |
|
||||
|-------|----------------------------------------------------------------|----------|----------------|
|
||||
| url | The URL of the resource including the Wasm binary to instantiate. The supported schemes include `file://`. The path of a `file://` URL is relative to the Dapr process unless it begins with `/`. | true | `file://hello.wasm` |
|
||||
|
||||
## Dapr configuration
|
||||
|
||||
|
@ -109,7 +110,7 @@ func handleRequest(req api.Request, resp api.Response) (next bool, reqCtx uint32
|
|||
```
|
||||
|
||||
If using TinyGo, compile as shown below and set the spec metadata field named
|
||||
"path" to the location of the output (ex "router.wasm"):
|
||||
"url" to the location of the output (for example, `file://router.wasm`):
|
||||
|
||||
```bash
|
||||
tinygo build -o router.wasm -scheduler=none --no-debug -target=wasi router.go
|
||||
|
@ -145,7 +146,7 @@ func rewrite(requestURI []byte) ([]byte, error) {
|
|||
```
|
||||
|
||||
If using TinyGo, compile as shown below and set the spec metadata field named
|
||||
"path" to the location of the output (ex "example.wasm"):
|
||||
"url" to the location of the output (for example, `file://example.wasm`):
|
||||
|
||||
```bash
|
||||
tinygo build -o example.wasm -scheduler=none --no-debug -target=wasi example.go
|
||||
|
|
|
@ -25,6 +25,10 @@ spec:
|
|||
value: service_account
|
||||
- name: projectId
|
||||
value: <PROJECT_ID> # replace
|
||||
- name: endpoint # Optional.
|
||||
value: "http://localhost:8085"
|
||||
- name: consumerID # Optional - defaults to the app's own ID
|
||||
value: <CONSUMER_ID>
|
||||
- name: identityProjectId
|
||||
value: <IDENTITY_PROJECT_ID> # replace
|
||||
- name: privateKeyId
|
||||
|
@ -46,11 +50,17 @@ spec:
|
|||
- name: disableEntityManagement
|
||||
value: "false"
|
||||
- name: enableMessageOrdering
|
||||
value: "false"
|
||||
value: "false"
|
||||
- name: orderingKey # Optional
|
||||
value: <ORDERING_KEY>
|
||||
- name: maxReconnectionAttempts # Optional
|
||||
value: 30
|
||||
- name: connectionRecoveryInSec # Optional
|
||||
value: 2
|
||||
- name: deadLetterTopic # Optional
|
||||
value: <EXISTING_PUBSUB_TOPIC>
|
||||
- name: maxDeliveryAttempts # Optional
|
||||
value: 5
|
||||
```
|
||||
{{% alert title="Warning" color="warning" %}}
|
||||
The above example uses secrets as plain strings. It is recommended to use a secret store for the secrets as described [here]({{< ref component-secrets.md >}}).
|
||||
|
@ -60,8 +70,9 @@ The above example uses secrets as plain strings. It is recommended to use a secr
|
|||
|
||||
| Field | Required | Details | Example |
|
||||
|--------------------|:--------:|---------|---------|
|
||||
| type | N | GCP credentials type. Only `service_account` is supported. Defaults to `service_account` | `service_account`
|
||||
| projectId | Y | GCP project id| `myproject-123`
|
||||
| endpoint | N | GCP endpoint for the component to use. Only used for local development (for example) with [GCP Pub/Sub Emulator](https://cloud.google.com/pubsub/docs/emulator). The `endpoint` is unnecessary when running against the GCP production API. | `"http://localhost:8085"`
|
||||
| `consumerID` | N | The Consumer ID organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; for example, a message is processed only once by one of the consumers in the group. If the consumer ID is not set, the Dapr runtime will set it to the Dapr application ID. The `consumerID`, along with the `topic` provided as part of the request, are used to build the Pub/Sub subscription ID |
|
||||
| identityProjectId | N | If the GCP pubsub project is different from the identity project, specify the identity project using this attribute | `"myproject-123"`
|
||||
| privateKeyId | N | If using explicit credentials, this field should contain the `private_key_id` field from the service account json document | `"my-private-key"`
|
||||
| privateKey | N | If using explicit credentials, this field should contain the `private_key` field from the service account json | `-----BEGIN PRIVATE KEY-----MIIBVgIBADANBgkqhkiG9w0B`
|
||||
|
@ -73,18 +84,78 @@ The above example uses secrets as plain strings. It is recommended to use a secr
|
|||
| clientX509CertUrl | N | If using explicit credentials, this field should contain the `client_x509_cert_url` field from the service account json | `https://www.googleapis.com/robot/v1/metadata/x509/myserviceaccount%40myproject.iam.gserviceaccount.com`
|
||||
| disableEntityManagement | N | When set to `"true"`, topics and subscriptions do not get created automatically. Default: `"false"` | `"true"`, `"false"`
|
||||
| enableMessageOrdering | N | When set to `"true"`, subscribed messages will be received in order, depending on publishing and permissions configuration. | `"true"`, `"false"`
|
||||
| orderingKey |N | The key provided in the request. It's used when `enableMessageOrdering` is set to `true` to order messages based on such key. | "my-orderingkey"
|
||||
| maxReconnectionAttempts | N |Defines the maximum number of reconnect attempts. Default: `30` | `30`
|
||||
| connectionRecoveryInSec | N |Time in seconds to wait between connection recovery attempts. Default: `2` | `2`
|
||||
| deadLetterTopic | N | Name of the GCP Pub/Sub Topic. This topic **must** exist before using this component. | `"myapp-dlq"`
|
||||
| maxDeliveryAttempts | N | Maximum number of attempts to deliver the message. If `deadLetterTopic` is specified, `maxDeliveryAttempts` is the maximum number of attempts for failed processing of messages. Once that number is reached, the message will be moved to the dead-letter topic. Default: `5` | `5`
|
||||
| type | N | **DEPRECATED** GCP credentials type. Only `service_account` is supported. Defaults to `service_account` | `service_account`
|
||||
|
||||
|
||||
|
||||
{{% alert title="Warning" color="warning" %}}
|
||||
If `enableMessageOrdering` is set to "true", the roles/viewer or roles/pubsub.viewer role will be required on the service account in order to guarantee ordering in cases where order tokens are not embedded in the messages. If this role is not given, or the call to Subscription.Config() fails for any other reason, ordering by embedded order tokens will still function correctly.
|
||||
{{% /alert %}}
|
||||
|
||||
## GCP Credentials
|
||||
|
||||
Since the GCP Pub/Sub component uses the GCP Go Client Libraries, by default it authenticates using **Application Default Credentials**. This is explained further in the [Authenticate to GCP Cloud services using client libraries](https://cloud.google.com/docs/authentication/client-libraries) guide.
|
||||
|
||||
## Create a GCP Pub/Sub
|
||||
|
||||
{{< tabs "Self-Hosted" "GCP" >}}
|
||||
|
||||
{{% codetab %}}
|
||||
For local development, the [GCP Pub/Sub Emulator](https://cloud.google.com/pubsub/docs/emulator) is used to test the GCP Pub/Sub Component. Follow [these instructions](https://cloud.google.com/pubsub/docs/emulator#start) to run the GCP Pub/Sub Emulator.
|
||||
|
||||
To run the GCP Pub/Sub Emulator locally using Docker, use the following `docker-compose.yaml`:
|
||||
|
||||
```yaml
|
||||
version: '3'
|
||||
services:
|
||||
pubsub:
|
||||
image: gcr.io/google.com/cloudsdktool/cloud-sdk:422.0.0-emulators
|
||||
ports:
|
||||
- "8085:8085"
|
||||
container_name: gcp-pubsub
|
||||
entrypoint: gcloud beta emulators pubsub start --project local-test-prj --host-port 0.0.0.0:8085
|
||||
|
||||
```
|
||||
|
||||
In order to use the GCP Pub/Sub Emulator with your pub/sub binding, you need to provide the `endpoint` configuration in the component metadata. The `endpoint` is unnecessary when running against the GCP Production API.
|
||||
|
||||
The **projectId** attribute must match the `--project` used in either the `docker-compose.yaml` or Docker command.
|
||||
|
||||
```yaml
|
||||
apiVersion: dapr.io/v1alpha1
|
||||
kind: Component
|
||||
metadata:
|
||||
name: gcp-pubsub
|
||||
spec:
|
||||
type: pubsub.gcp.pubsub
|
||||
version: v1
|
||||
metadata:
|
||||
- name: projectId
|
||||
value: "local-test-prj"
|
||||
- name: consumerID
|
||||
value: "testConsumer"
|
||||
- name: endpoint
|
||||
value: "localhost:8085"
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
|
||||
{{% codetab %}}
|
||||
|
||||
You can use either "explicit" or "implicit" credentials to configure access to your GCP pubsub instance. If using explicit, most fields are required. Implicit relies on Dapr running under a Kubernetes service account (KSA) mapped to a Google service account (GSA) which has the necessary permissions to access pubsub. In implicit mode, only the `projectId` attribute is needed; all others are optional.
|
||||
|
||||
Follow the instructions [here](https://cloud.google.com/pubsub/docs/quickstart-console) on setting up Google Cloud Pub/Sub system.
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{< /tabs >}}
|
||||
|
||||
## Related links
|
||||
- [Basic schema for a Dapr component]({{< ref component-schema >}})
|
||||
- Read [this guide]({{< ref "howto-publish-subscribe.md#step-2-publish-a-topic" >}}) for instructions on configuring pub/sub components
|
||||
|
|
|
@ -1,65 +0,0 @@
|
|||
---
|
||||
type: docs
|
||||
title: "Hazelcast"
|
||||
linkTitle: "Hazelcast"
|
||||
description: "Detailed documentation on the Hazelcast pubsub component"
|
||||
aliases:
|
||||
- "/operations/components/setup-pubsub/supported-pubsub/setup-hazelcast/"
|
||||
---
|
||||
|
||||
{{% alert title="Deprecation notice" color="warning" %}}
|
||||
The Hazelcast PubSub component has been deprecated due to inherent lack of support for "at least once" delivery guarantee, and will be removed in a future Dapr release.
|
||||
{{% /alert %}}
|
||||
|
||||
## Component format
|
||||
To setup hazelcast pubsub create a component of type `pubsub.hazelcast`. See [this guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pubsub configuration.
|
||||
|
||||
```yaml
|
||||
apiVersion: dapr.io/v1alpha1
|
||||
kind: Component
|
||||
metadata:
|
||||
name: hazelcast-pubsub
|
||||
spec:
|
||||
type: pubsub.hazelcast
|
||||
version: v1
|
||||
metadata:
|
||||
- name: hazelcastServers
|
||||
value: "hazelcast:3000,hazelcast2:3000"
|
||||
```
|
||||
|
||||
{{% alert title="Warning" color="warning" %}}
|
||||
The above example uses secrets as plain strings. It is recommended to use a secret store for the secrets as described [here]({{< ref component-secrets.md >}}).
|
||||
{{% /alert %}}
|
||||
|
||||
## Spec metadata fields
|
||||
|
||||
| Field | Required | Details | Example |
|
||||
|--------------------|:--------:|---------|---------|
|
||||
| connectionString | Y | A comma delimited string of servers. Example: "hazelcast:3000,hazelcast2:3000" | `"hazelcast:3000,hazelcast2:3000"`
|
||||
| backOffMaxRetries | N | The maximum number of retries to process the message before returning an error. Defaults to `"0"` which means the component will not retry processing the message. `"-1"` will retry indefinitely until the message is processed or the application is shutdown. And positive number is treated as the maximum retry count. The component will wait 5 seconds between retries. | `"3"` |
|
||||
|
||||
|
||||
## Create a Hazelcast instance
|
||||
|
||||
{{< tabs "Self-Hosted" "Kubernetes">}}
|
||||
|
||||
{{% codetab %}}
|
||||
You can run Hazelcast locally using Docker:
|
||||
|
||||
```
|
||||
docker run -e JAVA_OPTS="-Dhazelcast.local.publicAddress=127.0.0.1:5701" -p 5701:5701 hazelcast/hazelcast
|
||||
```
|
||||
|
||||
You can then interact with the server using the `127.0.0.1:5701`.
|
||||
{{% /codetab %}}
|
||||
|
||||
{{% codetab %}}
|
||||
The easiest way to install Hazelcast on Kubernetes is by using the [Helm chart](https://github.com/helm/charts/tree/master/stable/hazelcast).
|
||||
{{% /codetab %}}
|
||||
|
||||
{{< /tabs >}}
|
||||
|
||||
## Related links
|
||||
- [Basic schema for a Dapr component]({{< ref component-schema >}})
|
||||
- Read [this guide]({{< ref "howto-publish-subscribe.md#step-2-publish-a-topic" >}}) for instructions on configuring pub/sub components
|
||||
- [Pub/Sub building block]({{< ref pubsub >}})
|
|
@ -200,14 +200,15 @@ Setting `exchangeKind` to `"topic"` uses the topic exchanges, which are commonly
|
|||
Messages with a `routing key` will be routed to one or many queues based on the `routing key` defined in the metadata when subscribing.
|
||||
The routing key is defined by the `routingKey` metadata. For example, if an app is configured with a routing key `keyA`:
|
||||
|
||||
```
|
||||
apiVersion: dapr.io/v1alpha1
|
||||
```yaml
|
||||
apiVersion: dapr.io/v2alpha1
|
||||
kind: Subscription
|
||||
metadata:
|
||||
name: order_pub_sub
|
||||
name: orderspubsub
|
||||
spec:
|
||||
topic: B
|
||||
route: /B
|
||||
routes:
|
||||
default: /B
|
||||
pubsubname: pubsub
|
||||
metadata:
|
||||
routingKey: keyA
|
||||
|
@ -227,14 +228,15 @@ client.PublishEvent(context.Background(), "pubsub", "B", []byte("this is another
|
|||
Multiple routing keys can be separated by commas.
|
||||
The example below binds three `routingKey`: `keyA`, `keyB`, and `""`. Note the binding method of empty keys.
|
||||
|
||||
```
|
||||
apiVersion: dapr.io/v1alpha1
|
||||
```yaml
|
||||
apiVersion: dapr.io/v2alpha1
|
||||
kind: Subscription
|
||||
metadata:
|
||||
name: order_pub_sub
|
||||
name: orderspubsub
|
||||
spec:
|
||||
topic: B
|
||||
route: /B
|
||||
routes:
|
||||
default: /B
|
||||
pubsubname: pubsub
|
||||
metadata:
|
||||
routingKey: keyA,keyB,
|
||||
|
@ -243,6 +245,168 @@ spec:
|
|||
|
||||
For more information see [rabbitmq exchanges](https://www.rabbitmq.com/tutorials/amqp-concepts.html#exchanges).
|
||||
|
||||
## Use priority queues
|
||||
|
||||
Dapr supports RabbitMQ [priority queues](https://www.rabbitmq.com/priority.html). To set a priority for a queue, use the `maxPriority` topic subscription metadata.
|
||||
|
||||
### Declarative priority queue example
|
||||
|
||||
```yaml
|
||||
apiVersion: dapr.io/v2alpha1
|
||||
kind: Subscription
|
||||
metadata:
|
||||
name: pubsub
|
||||
spec:
|
||||
topic: checkout
|
||||
routes:
|
||||
default: /orders
|
||||
pubsubname: order-pub-sub
|
||||
metadata:
|
||||
maxPriority: 3
|
||||
```
|
||||
|
||||
### Programmatic priority queue example
|
||||
|
||||
{{< tabs Python JavaScript Go>}}
|
||||
|
||||
{{% codetab %}}
|
||||
|
||||
```python
|
||||
@app.route('/dapr/subscribe', methods=['GET'])
|
||||
def subscribe():
|
||||
subscriptions = [
|
||||
{
|
||||
'pubsubname': 'pubsub',
|
||||
'topic': 'checkout',
|
||||
'routes': {
|
||||
'default': '/orders'
|
||||
},
|
||||
'metadata': {'maxPriority': '3'}
|
||||
}
|
||||
]
|
||||
return jsonify(subscriptions)
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{% codetab %}}
|
||||
|
||||
```javascript
|
||||
const express = require('express')
|
||||
const bodyParser = require('body-parser')
|
||||
const app = express()
|
||||
app.use(bodyParser.json({ type: 'application/*+json' }));
|
||||
|
||||
const port = 3000
|
||||
|
||||
app.get('/dapr/subscribe', (req, res) => {
|
||||
res.json([
|
||||
{
|
||||
pubsubname: "pubsub",
|
||||
topic: "checkout",
|
||||
routes: {
|
||||
default: '/orders'
|
||||
},
|
||||
metadata: {
|
||||
maxPriority: '3'
|
||||
}
|
||||
}
|
||||
]);
|
||||
})
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{% codetab %}}
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
|
||||
const appPort = 3000
|
||||
|
||||
type subscription struct {
|
||||
PubsubName string `json:"pubsubname"`
|
||||
Topic string `json:"topic"`
|
||||
Metadata map[string]string `json:"metadata,omitempty"`
|
||||
Routes routes `json:"routes"`
|
||||
}
|
||||
|
||||
type routes struct {
|
||||
Rules []rule `json:"rules,omitempty"`
|
||||
Default string `json:"default,omitempty"`
|
||||
}
|
||||
|
||||
// This handles /dapr/subscribe
|
||||
func configureSubscribeHandler(w http.ResponseWriter, _ *http.Request) {
|
||||
t := []subscription{
|
||||
{
|
||||
PubsubName: "pubsub",
|
||||
Topic: "checkout",
|
||||
Routes: routes{
|
||||
Default: "/orders",
|
||||
},
|
||||
Metadata: map[string]string{
|
||||
				"maxPriority": "3",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
json.NewEncoder(w).Encode(t)
|
||||
}
|
||||
```
|
||||
{{% /codetab %}}
|
||||
|
||||
{{< /tabs >}}
|
||||
|
||||
### Setting a priority when publishing a message
|
||||
|
||||
To set a priority on a message, add the publish metadata key `maxPriority` to the publish endpoint or SDK method.
|
||||
|
||||
{{< tabs "HTTP API (Bash)" Python JavaScript Go>}}
|
||||
|
||||
{{% codetab %}}
|
||||
|
||||
```bash
|
||||
curl -X POST http://localhost:3601/v1.0/publish/order-pub-sub/orders?metadata.maxPriority=3 -H "Content-Type: application/json" -d '{"orderId": "100"}'
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{% codetab %}}
|
||||
|
||||
```python
|
||||
with DaprClient() as client:
|
||||
result = client.publish_event(
|
||||
pubsub_name=PUBSUB_NAME,
|
||||
topic_name=TOPIC_NAME,
|
||||
data=json.dumps(orderId),
|
||||
data_content_type='application/json',
|
||||
metadata= { 'maxPriority': '3' })
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{% codetab %}}
|
||||
|
||||
```javascript
|
||||
await client.pubsub.publish(PUBSUB_NAME, TOPIC_NAME, orderId, { 'maxPriority': '3' });
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{% codetab %}}
|
||||
|
||||
```go
|
||||
client.PublishEvent(ctx, PUBSUB_NAME, TOPIC_NAME, []byte(strconv.Itoa(orderId)), map[string]string{"maxPriority": "3"})
|
||||
```
|
||||
{{% /codetab %}}
|
||||
|
||||
{{< /tabs >}}
|
||||
|
||||
## Related links
|
||||
|
||||
- [Basic schema for a Dapr component]({{< ref component-schema >}}) in the Related links section
|
||||
|
|
|
@ -58,12 +58,20 @@ The above example uses secrets as plain strings. It is recommended to use a secr
|
|||
| producerQueueSelector (queueSelector) | N | Producer Queue selector. There are five implementations of queue selector: `hash`, `random`, `manual`, `roundRobin`, `dapr`. | `dapr` | `hash` |
|
||||
| consumerModel | N | Message model that defines how messages are delivered to each consumer client. RocketMQ supports two message models: `clustering` and `broadcasting`. | `clustering` | `broadcasting` , `clustering` |
|
||||
| fromWhere (consumeFromWhere) | N | Consuming point on consumer booting. There are three consuming points: `CONSUME_FROM_LAST_OFFSET`, `CONSUME_FROM_FIRST_OFFSET`, `CONSUME_FROM_TIMESTAMP` | `CONSUME_FROM_LAST_OFFSET` | `CONSUME_FROM_LAST_OFFSET` |
|
||||
| consumeTimestamp | N | Backtracks consumption time with second precision. Time format is `yyyymmddhhmmss`. For example, `20131223171201` implies the time of 17:12:01 and date of December 23, 2013 | ` time.Now().Add(time.Minute * (-30)).Format("20060102150405")` | `20131223171201` |
|
||||
| consumeOrderly | N | Determines if it's an ordered message using FIFO order. | `false` | `false` |
|
||||
| consumeMessageBatchMaxSize | N | Batch consumption size out of range `[1, 1024]` | `512` | `10` |
|
||||
| consumeConcurrentlyMaxSpan | N | Concurrently max span offset. This has no effect on sequential consumption. Range: `[1, 65535]` | `1000` | `1000` |
|
||||
| maxReconsumeTimes | N | Max re-consume times. `-1` means 16 times. If messages are re-consumed more than {@link maxReconsumeTimes} before success, they'll be directed to a deletion queue. | Orderly message is `MaxInt32`; Concurrently message is `16` | `16` |
|
||||
| autoCommit | N | Enable auto commit | `true` | `false` |
|
||||
| consumeTimeout | N | Maximum amount of time a message may block the consuming thread. Time unit: Minute | `15` | `15` |
|
||||
| consumerPullTimeout | N | The socket timeout in milliseconds | | |
|
||||
| pullInterval | N | Message pull interval | `100` | `100` |
|
||||
| pullBatchSize | N | The number of messages pulled from the broker at a time. If `pullBatchSize` is `null`, use `ConsumerBatchSize`. `pullBatchSize` out of range `[1, 1024]` | `32` | `10` |
|
||||
| pullThresholdForQueue | N | Flow control threshold on queue level. Each message queue will cache a maximum of 1000 messages by default. Consider the `PullBatchSize` - the instantaneous value may exceed the limit. Range: `[1, 65535]` | `1024` | `1000` |
|
||||
| pullThresholdForTopic | N | Flow control threshold on topic level. The value of `pullThresholdForQueue` will be overwritten and calculated based on `pullThresholdForTopic` if it isn't unlimited. For example, if the value of `pullThresholdForTopic` is 1000 and 10 message queues are assigned to this consumer, then `pullThresholdForQueue` will be set to 100. Range: `[1, 6553500]` | `-1(Unlimited)` | `10` |
|
||||
| pullThresholdSizeForQueue | N | Limit the cached message size on queue level. Consider the `pullBatchSize` - the instantaneous value may exceed the limit. The size of a message is only measured by message body, so it's not accurate. Range: `[1, 1024]` | `100` | `100` |
|
||||
| pullThresholdSizeForTopic | N | Limit the cached message size on topic level. The value of `pullThresholdSizeForQueue` will be overwritten and calculated based on `pullThresholdSizeForTopic` if it isn't unlimited. For example, if the value of `pullThresholdSizeForTopic` is 1000 MiB and 10 message queues are assigned to this consumer, then `pullThresholdSizeForQueue` will be set to 100 MiB. Range: `[1, 102400]` | `-1` | `100` |
|
||||
| content-type | N | Message content type. | `"text/plain"` | `"application/cloudevents+json; charset=utf-8"`, `"application/octet-stream"` |
|
||||
| logLevel | N | Log level | `warn` | `info` |
|
||||
| sendTimeOut | N | Send message timeout to connect RocketMQ's broker, measured in nanoseconds. **Deprecated**. | 3 seconds | `10000000000` |
|
||||
|
|
|
@ -26,7 +26,23 @@ spec:
|
|||
type: secretstores.local.env
|
||||
version: v1
|
||||
metadata:
|
||||
# - name: prefix
|
||||
# value: "MYAPP_"
|
||||
```
|
||||
|
||||
## Spec metadata fields
|
||||
|
||||
| Field | Required | Details | Example |
|
||||
|-------|:--------:|---------|---------|
|
||||
| `prefix` | N | If set, limits operations to environment variables with the given prefix. The prefix is removed from the returned secrets' names.<br>The matching is case-insensitive on Windows and case-sensitive on all other operating systems. | `"MYAPP_"`
|
||||
|
||||
## Notes
|
||||
|
||||
For security reasons, this component cannot be used to access these environment variables:
|
||||
|
||||
- `APP_API_TOKEN`
|
||||
- Any variable whose name begins with the `DAPR_` prefix
|
||||
|
||||
## Related Links
|
||||
- [Secrets building block]({{< ref secrets >}})
|
||||
- [How-To: Retrieve a secret]({{< ref "howto-secrets.md" >}})
|
||||
|
|
|
@ -0,0 +1,98 @@
|
|||
---
|
||||
type: docs
|
||||
title: "Etcd"
|
||||
linkTitle: "Etcd"
|
||||
description: Detailed information on the Etcd state store component
|
||||
aliases:
|
||||
- "/operations/components/setup-state-store/supported-state-stores/setup-etcd/"
|
||||
---
|
||||
|
||||
## Component format
|
||||
|
||||
To setup an Etcd state store create a component of type `state.etcd`. See [this guide]({{< ref "howto-get-save-state.md#step-1-setup-a-state-store" >}}) on how to create and apply a state store configuration.
|
||||
|
||||
```yaml
|
||||
apiVersion: dapr.io/v1alpha1
|
||||
kind: Component
|
||||
metadata:
|
||||
name: <NAME>
|
||||
spec:
|
||||
type: state.etcd
|
||||
version: v1
|
||||
metadata:
|
||||
- name: endpoints
|
||||
value: <CONNECTION STRING> # Required. Example: 192.168.0.1:2379,192.168.0.2:2379,192.168.0.3:2379
|
||||
- name: keyPrefixPath
|
||||
value: <KEY PREFIX STRING> # Optional. default: "". Example: "dapr"
|
||||
- name: tlsEnable
|
||||
value: <ENABLE TLS> # Optional. Example: "false"
|
||||
- name: ca
|
||||
value: <CA> # Optional. Required if tlsEnable is `true`.
|
||||
- name: cert
|
||||
value: <CERT> # Optional. Required if tlsEnable is `true`.
|
||||
- name: key
|
||||
value: <KEY> # Optional. Required if tlsEnable is `true`.
|
||||
```
|
||||
|
||||
{{% alert title="Warning" color="warning" %}}
|
||||
The above example uses secrets as plain strings. It is recommended to use a secret store for the secrets as described [here]({{< ref component-secrets.md >}}).
|
||||
{{% /alert %}}
|
||||
|
||||
## Spec metadata fields
|
||||
|
||||
| Field | Required | Details | Example |
|
||||
|--------------------|:--------:|---------|---------|
|
||||
| `endpoints` | Y | Connection string to the Etcd cluster | `"192.168.0.1:2379,192.168.0.2:2379,192.168.0.3:2379"`
|
||||
| `keyPrefixPath` | N | Key prefix path in Etcd. Default is no prefix. | `"dapr"`
|
||||
| `tlsEnable` | N | Whether to enable TLS for connecting to Etcd. | `"false"`
|
||||
| `ca` | N | CA certificate for connecting to Etcd, PEM-encoded. Can be `secretKeyRef` to use a [secret reference]({{< ref component-secrets.md >}}).| `"-----BEGIN CERTIFICATE-----\nMIIC9TCCA..."`
|
||||
| `cert` | N | TLS certificate for connecting to Etcd, PEM-encoded. Can be `secretKeyRef` to use a [secret reference]({{< ref component-secrets.md >}}).| `"-----BEGIN CERTIFICATE-----\nMIIDUTCC..."`
|
||||
| `key` | N | TLS key for connecting to Etcd, PEM-encoded. Can be `secretKeyRef` to use a [secret reference]({{< ref component-secrets.md >}}).| `"-----BEGIN PRIVATE KEY-----\nMIIEpAIB..."`
|
||||
|
||||
## Setup Etcd
|
||||
|
||||
{{< tabs "Self-Hosted" "Kubernetes" >}}
|
||||
|
||||
{{% codetab %}}
|
||||
|
||||
You can run Etcd database locally using Docker Compose. Create a new file called `docker-compose.yml` and add the following contents as an example:
|
||||
|
||||
```yaml
|
||||
version: '2'
|
||||
services:
|
||||
etcd:
|
||||
image: gcr.io/etcd-development/etcd:v3.4.20
|
||||
ports:
|
||||
- "2379:2379"
|
||||
    command: etcd --listen-client-urls http://0.0.0.0:2379 --advertise-client-urls http://0.0.0.0:2379
|
||||
```
|
||||
|
||||
Save the `docker-compose.yml` file and run the following command to start the Etcd server:
|
||||
|
||||
```sh
|
||||
docker-compose up -d
|
||||
```
|
||||
|
||||
This starts the Etcd server in the background and exposes the default Etcd port of `2379`. You can then interact with the server using the `etcdctl` command-line client on `localhost:2379`. For example:
|
||||
|
||||
```sh
|
||||
etcdctl --endpoints=localhost:2379 put mykey myvalue
|
||||
```
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{% codetab %}}
|
||||
|
||||
Use [Helm](https://helm.sh/) to quickly create an Etcd instance in your Kubernetes cluster. This approach requires [Installing Helm](https://github.com/helm/helm#install).
|
||||
|
||||
Follow the [Bitnami instructions](https://github.com/bitnami/charts/tree/main/bitnami/etcd) to get started with setting up Etcd in Kubernetes.
|
||||
|
||||
{{% /codetab %}}
|
||||
|
||||
{{< /tabs >}}
|
||||
|
||||
## Related links
|
||||
|
||||
- [Basic schema for a Dapr component]({{< ref component-schema >}})
|
||||
- Read [this guide]({{< ref "howto-get-save-state.md#step-2-save-and-retrieve-a-single-state" >}}) for instructions on configuring state store components
|
||||
- [State management building block]({{< ref state-management >}})
|
|
@ -21,30 +21,32 @@ spec:
|
|||
type: state.gcp.firestore
|
||||
version: v1
|
||||
metadata:
|
||||
- name: type
|
||||
value: <REPLACE-WITH-CREDENTIALS-TYPE> # Required. Example: "serviceaccount"
|
||||
- name: project_id
|
||||
value: <REPLACE-WITH-PROJECT-ID> # Required.
|
||||
- name: endpoint # Optional.
|
||||
value: "http://localhost:8432"
|
||||
- name: private_key_id
|
||||
value: <REPLACE-WITH-PRIVATE-KEY-ID> # Required.
|
||||
value: <REPLACE-WITH-PRIVATE-KEY-ID> # Optional.
|
||||
- name: private_key
|
||||
value: <REPLACE-WITH-PRIVATE-KEY> # Required.
|
||||
value: <REPLACE-WITH-PRIVATE-KEY> # Optional, but Required if `private_key_id` is specified.
|
||||
- name: client_email
|
||||
value: <REPLACE-WITH-CLIENT-EMAIL> # Required.
|
||||
value: <REPLACE-WITH-CLIENT-EMAIL> # Optional, but Required if `private_key_id` is specified.
|
||||
- name: client_id
|
||||
value: <REPLACE-WITH-CLIENT-ID> # Required.
|
||||
value: <REPLACE-WITH-CLIENT-ID> # Optional, but Required if `private_key_id` is specified.
|
||||
- name: auth_uri
|
||||
value: <REPLACE-WITH-AUTH-URI> # Required.
|
||||
value: <REPLACE-WITH-AUTH-URI> # Optional.
|
||||
- name: token_uri
|
||||
value: <REPLACE-WITH-TOKEN-URI> # Required.
|
||||
value: <REPLACE-WITH-TOKEN-URI> # Optional.
|
||||
- name: auth_provider_x509_cert_url
|
||||
value: <REPLACE-WITH-AUTH-X509-CERT-URL> # Required.
|
||||
value: <REPLACE-WITH-AUTH-X509-CERT-URL> # Optional.
|
||||
- name: client_x509_cert_url
|
||||
value: <REPLACE-WITH-CLIENT-x509-CERT-URL> # Required.
|
||||
value: <REPLACE-WITH-CLIENT-x509-CERT-URL> # Optional.
|
||||
- name: entity_kind
|
||||
value: <REPLACE-WITH-ENTITY-KIND> # Optional. default: "DaprState"
|
||||
- name: noindex
|
||||
value: <REPLACE-WITH-BOOLEAN> # Optional. default: "false"
|
||||
- name: type
|
||||
value: <REPLACE-WITH-CREDENTIALS-TYPE> # Deprecated.
|
||||
```
|
||||
|
||||
{{% alert title="Warning" color="warning" %}}
|
||||
|
@ -55,17 +57,23 @@ The above example uses secrets as plain strings. It is recommended to use a secr
|
|||
|
||||
| Field | Required | Details | Example |
|
||||
|--------------------|:--------:|---------|---------|
|
||||
| type | Y | The credentials type | `"serviceaccount"`
|
||||
| project_id | Y | The ID of the GCP project to use | `"project-id"`
|
||||
| private_key_id | Y | The ID of the private key to use | `"private-key-id"`
|
||||
| client_email | Y | The email address for the client | `"example@example.com"`
|
||||
| client_id | Y | The client id value to use for authentication | `"client-id"`
|
||||
| auth_uri | Y | The authentication URI to use | `"https://accounts.google.com/o/oauth2/auth"`
|
||||
| token_uri | Y | The token URI to query for Auth token | `"https://oauth2.googleapis.com/token"`
|
||||
| auth_provider_x509_cert_url | Y | The auth provider certificate URL | `"https://www.googleapis.com/oauth2/v1/certs"`
|
||||
| client_x509_cert_url | Y | The client certificate URL | `"https://www.googleapis.com/robot/v1/metadata/x509/x"`
|
||||
| endpoint | N | GCP endpoint for the component to use. Only used for local development with (for example) [GCP Datastore Emulator](https://cloud.google.com/datastore/docs/tools/datastore-emulator). The `endpoint` is unnecessary when running against the GCP production API. | `"localhost:8432"`
|
||||
| private_key_id | N | The ID of the private key to use | `"private-key-id"`
|
||||
| privateKey | N | If using explicit credentials, this field should contain the `private_key` field from the service account json | `-----BEGIN PRIVATE KEY-----MIIBVgIBADANBgkqhkiG9w0B`
|
||||
| client_email | N | The email address for the client | `"example@example.com"`
|
||||
| client_id | N | The client id value to use for authentication | `"client-id"`
|
||||
| auth_uri | N | The authentication URI to use | `"https://accounts.google.com/o/oauth2/auth"`
|
||||
| token_uri | N | The token URI to query for Auth token | `"https://oauth2.googleapis.com/token"`
|
||||
| auth_provider_x509_cert_url | N | The auth provider certificate URL | `"https://www.googleapis.com/oauth2/v1/certs"`
|
||||
| client_x509_cert_url | N | The client certificate URL | `"https://www.googleapis.com/robot/v1/metadata/x509/x"`
|
||||
| entity_kind | N | The entity name in Firestore. Defaults to `"DaprState"` | `"DaprState"`
|
||||
| noindex | N | Whether to disable indexing of state entities. Use this setting if you encounter Firestore index size limitations. Defaults to `"false"` | `"true"`
|
||||
| type | N | **DEPRECATED** The credentials type | `"serviceaccount"`
|
||||
|
||||
|
||||
## GCP Credentials
|
||||
Since the GCP Firestore component uses the GCP Go Client Libraries, by default it authenticates using **Application Default Credentials**. This is explained in the [Authenticate to GCP Cloud services using client libraries](https://cloud.google.com/docs/authentication/client-libraries) guide.
|
||||
|
||||
## Setup GCP Firestore
|
||||
|
||||
|
@ -74,7 +82,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr
|
|||
{{% codetab %}}
|
||||
You can use the GCP Datastore emulator to run locally using the instructions [here](https://cloud.google.com/datastore/docs/tools/datastore-emulator).
|
||||
|
||||
You can then interact with the server using `localhost:8081`.
|
||||
You can then interact with the server using `http://localhost:8432`.
|
||||
{{% /codetab %}}
|
||||
|
||||
{{% codetab %}}
|
||||
|
|
|
@ -112,6 +112,10 @@ The username is `admin` by default.
|
|||
|
||||
{{< /tabs >}}
|
||||
|
||||
### TTLs and cleanups
|
||||
|
||||
This state store supports [Time-To-Live (TTL)]({{< ref state-store-ttl.md >}}) for records stored with Dapr. When storing data using Dapr, you can set the `ttlInSeconds` metadata property to indicate when the data should be considered "expired".
|
||||
|
||||
## Related links
|
||||
- [Basic schema for a Dapr component]({{< ref component-schema >}})
|
||||
- Read [this guide]({{< ref "howto-get-save-state.md#step-2-save-and-retrieve-a-single-state" >}}) for instructions on configuring state store components
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
---
|
||||
type: docs
|
||||
title: "MySQL"
|
||||
linkTitle: "MySQL"
|
||||
title: "MySQL & MariaDB"
|
||||
linkTitle: "MySQL & MariaDB"
|
||||
description: Detailed information on the MySQL state store component
|
||||
aliases:
|
||||
- "/operations/components/setup-state-store/supported-state-stores/setup-mysql/"
|
||||
|
@ -9,6 +9,8 @@ aliases:
|
|||
|
||||
## Component format
|
||||
|
||||
The MySQL state store components allows connecting to both MySQL and MariaDB databases. In this document, we refer to "MySQL" to indicate both databases.
|
||||
|
||||
To setup MySQL state store create a component of type `state.mysql`. See [this guide]({{< ref "howto-get-save-state.md#step-1-setup-a-state-store" >}}) on how to create and apply a state store configuration.
|
||||
|
||||
|
||||
|
|
|
@ -11,7 +11,7 @@ This component allows using PostgreSQL (Postgres) as state store for Dapr.
|
|||
|
||||
## Create a Dapr component
|
||||
|
||||
Create a file called `postgres.yaml`, paste the following and replace the `<CONNECTION STRING>` value with your connection string. The connection string is a standard PostgreSQL connection string. For example, `"host=localhost user=postgres password=example port=5432 connect_timeout=10 database=dapr_test"`. See the PostgreSQL [documentation on database connections](https://www.postgresql.org/docs/current/libpq-connect.html) for information on how to define a connection string.
|
||||
Create a file called `postgresql.yaml`, paste the following and replace the `<CONNECTION STRING>` value with your connection string. The connection string is a standard PostgreSQL connection string. For example, `"host=localhost user=postgres password=example port=5432 connect_timeout=10 database=dapr_test"`. See the PostgreSQL [documentation on database connections](https://www.postgresql.org/docs/current/libpq-connect.html) for information on how to define a connection string.
|
||||
|
||||
If you want to also configure PostgreSQL to store actors, add the `actorStateStore` option as in the example below.
|
||||
|
||||
|
|
|
@ -9,7 +9,7 @@ aliases:
|
|||
|
||||
This component allows using SQLite 3 as state store for Dapr.
|
||||
|
||||
> The component is currently compiled with SQLite version 3.40.1.
|
||||
> The component is currently compiled with SQLite version 3.41.2.
|
||||
|
||||
## Create a Dapr component
|
||||
|
||||
|
@ -36,8 +36,11 @@ spec:
|
|||
#- name: tableName
|
||||
# value: "state"
|
||||
# Cleanup interval in seconds, to remove expired rows (optional)
|
||||
#- name: cleanupIntervalInSeconds
|
||||
# value: 3600
|
||||
#- name: cleanupInterval
|
||||
# value: "1h"
|
||||
# Set busy timeout for database operations
|
||||
#- name: busyTimeout
|
||||
# value: "2s"
|
||||
# Uncomment this if you wish to use SQLite as a state store for actors (optional)
|
||||
#- name: actorStateStore
|
||||
# value: "true"
|
||||
|
@ -50,14 +53,17 @@ spec:
|
|||
| `connectionString` | Y | The connection string for the SQLite database. See below for more details. | `"path/to/data.db"`, `"file::memory:?cache=shared"`
|
||||
| `timeoutInSeconds` | N | Timeout, in seconds, for all database operations. Defaults to `20` | `30`
|
||||
| `tableName` | N | Name of the table where the data is stored. Defaults to `state`. | `"state"`
|
||||
| `cleanupIntervalInSeconds` | N | Interval, in seconds, to clean up rows with an expired TTL. Default: `3600` (i.e. 1 hour). Setting this to values <=0 disables the periodic cleanup. | `1800`, `-1`
|
||||
| `metadataTableName` | N | Name of the table used by Dapr to store metadata for the component. Defaults to `metadata`. | `"metadata"`
|
||||
| `cleanupInterval` | N | Interval, as a [Go duration](https://pkg.go.dev/time#ParseDuration), to clean up rows with an expired TTL. Setting this to values <=0 disables the periodic cleanup. Default: `0` (i.e. disabled) | `"2h"`, `"30m"`, `-1`
|
||||
| `busyTimeout` | N | Interval, as a [Go duration](https://pkg.go.dev/time#ParseDuration), to wait in case the SQLite database is currently busy serving another request, before returning a "database busy" error. Default: `2s` | `"100ms"`, `"5s"`
|
||||
| `disableWAL` | N | If set to true, disables Write-Ahead Logging for journaling of the SQLite database. You should set this to `false` if the database is stored on a network file system (e.g. a folder mounted as a SMB or NFS share). This option is ignored for read-only or in-memory databases. | `"true"`, `"false"`
|
||||
| `actorStateStore` | N | Consider this state store for actors. Defaults to `"false"` | `"true"`, `"false"`
|
||||
|
||||
The **`connectionString`** parameter configures how to open the SQLite database.
|
||||
|
||||
- Normally, this is the path to a file on disk, relative to the current working directory, or absolute. For example: `"data.db"` (relative to the working directory) or `"/mnt/data/mydata.db"`.
|
||||
- The path is interpreted by the SQLite library, so it's possible to pass additional options to the SQLite driver using "URI options" if the path begins with `file:`. For example: `"file:path/to/data.db?mode=ro"` opens the database at path `path/to/data.db` in read-only mode. [Refer to the SQLite documentation for all supported URI options](https://www.sqlite.org/uri.html).
|
||||
- The special case `":memory:"` launches the component backed by an in-memory SQLite database. This database is not persisted on disk, not shared across multiple Dapr instances, and all data is lost when the Dapr sidecar is stopped. When using an in-memory database, you should always set the `?cache=shared` URI option: `"file::memory:?cache=shared"`
|
||||
- The special case `":memory:"` launches the component backed by an in-memory SQLite database. This database is not persisted on disk, not shared across multiple Dapr instances, and all data is lost when the Dapr sidecar is stopped. When using an in-memory database, Dapr automatically sets the `cache=shared` URI option.
|
||||
|
||||
## Advanced
|
||||
|
||||
|
@ -67,10 +73,10 @@ This state store supports [Time-To-Live (TTL)]({{< ref state-store-ttl.md >}}) f
|
|||
|
||||
Because SQLite doesn't have built-in support for TTLs, this is implemented in Dapr by adding a column in the state table indicating when the data is to be considered "expired". Records that are "expired" are not returned to the caller, even if they're still physically stored in the database. A background "garbage collector" periodically scans the state table for expired rows and deletes them.
|
||||
|
||||
The `cleanupIntervalInSeconds` metadata property sets the expired records deletion interval, which defaults to 3600 seconds (that is, 1 hour).
|
||||
The `cleanupInterval` metadata property sets the expired records deletion interval, which is disabled by default.
|
||||
|
||||
- Longer intervals require less frequent scans for expired rows, but can require storing expired records for longer, potentially requiring more storage space. If you plan to store many records in your state table, with short TTLs, consider setting `cleanupIntervalInSeconds` to a smaller value, for example `300` (300 seconds, or 5 minutes).
|
||||
- If you do not plan to use TTLs with Dapr and the SQLite state store, you should consider setting `cleanupIntervalInSeconds` to a value <= 0 (e.g. `0` or `-1`) to disable the periodic cleanup and reduce the load on the database.
|
||||
- Longer intervals require less frequent scans for expired rows, but can cause the database to store expired records for longer, potentially requiring more storage space. If you plan to store many records in your state table, with short TTLs, consider setting `cleanupInterval` to a smaller value, for example `5m`.
|
||||
- If you do not plan to use TTLs with Dapr and the SQLite state store, you should consider setting `cleanupInterval` to a value <= 0 (e.g. `0` or `-1`) to disable the periodic cleanup and reduce the load on the database. This is the default behavior.
|
||||
|
||||
The `expiration_time` column in the state table, where the expiration date for records is stored, **does not have an index by default**, so each periodic cleanup must perform a full-table scan. If you have a table with a very large number of records, and only some of them use a TTL, you may find it useful to create an index on that column. Assuming that your state table name is `state` (the default), you can use this query:
|
||||
|
||||
|
@ -79,6 +85,18 @@ CREATE INDEX idx_expiration_time
|
|||
ON state (expiration_time);
|
||||
```
|
||||
|
||||
> Dapr does not automatically [vacuum](https://www.sqlite.org/lang_vacuum.html) SQLite databases.
|
||||
|
||||
### Sharing a SQLite database and using networked filesystems
|
||||
|
||||
Although you can have multiple Dapr instances accessing the same SQLite database (for example, because your application is scaled horizontally or because you have multiple apps accessing the same state store), there are some caveats you should keep in mind.
|
||||
|
||||
SQLite works best when all clients access a database file on the same, locally-mounted disk. Using virtual disks that are mounted from a SAN (Storage Area Network), as is common practice in virtualized or cloud environments, is fine.
|
||||
|
||||
However, storing your SQLite database in a networked filesystem (for example via NFS or SMB, but these examples are not an exhaustive list) should be done with care. The official SQLite documentation has a page dedicated to [recommendations and caveats for running SQLite over a network](https://www.sqlite.org/useovernet.html).
|
||||
|
||||
Given the risk of data corruption that running SQLite over a networked filesystem (such as via NFS or SMB) comes with, we do not recommend doing that with Dapr in production environment. However, if you do want to do that, you should configure your SQLite Dapr component with `disableWAL` set to `true`.
|
||||
|
||||
## Related links
|
||||
|
||||
- [Basic schema for a Dapr component]({{< ref component-schema >}})
|
||||
|
|
|
@ -10,13 +10,11 @@ The following table lists the environment variables used by the Dapr runtime, CL
|
|||
|
||||
| Environment Variable | Used By | Description |
|
||||
| -------------------- | ---------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| APP_ID | Your application | The id for your application, used for service discovery |
|
||||
| APP_PORT | Your application | The port your application is listening on |
|
||||
| APP_ID | Your application | The id for your application, used for service discovery |
|
||||
| APP_PORT | Dapr sidecar | The port your application is listening on |
|
||||
| APP_API_TOKEN | Your application | The token used by the application to authenticate requests from Dapr API. Read [authenticate requests from Dapr using token authentication]({{< ref app-api-token >}}) for more information. |
|
||||
| DAPR_HTTP_PORT | Your application | The HTTP port that the Dapr sidecar is listening on. Your application should use this variable to connect to Dapr sidecar instead of hardcoding the port value. Set by the Dapr CLI run command for self-hosted or injected by the `dapr-sidecar-injector` into all the containers in the pod. |
|
||||
| DAPR_GRPC_PORT | Your application | The gRPC port that the Dapr sidecar is listening on. Your application should use this variable to connect to Dapr sidecar instead of hardcoding the port value. Set by the Dapr CLI run command for self-hosted or injected by the `dapr-sidecar-injector` into all the containers in the pod. |
|
||||
| DAPR_METRICS_PORT | Your application | The HTTP [Prometheus]({{< ref prometheus >}}) port to which Dapr sends its metrics information. With this variable, your application sends its application-specific metrics to have both Dapr metrics and application metrics together. See [metrics-port]({{< ref arguments-annotations-overview>}}) for more information |
|
||||
| DAPR_PROFILE_PORT | Your application | The [profiling port]({{< ref profiling-debugging >}}) through which Dapr lets you enable profiling and track possible CPU/memory/resource spikes in your application's behavior. Enabled by `--enable-profiling` command in Dapr CLI for self-hosted or `dapr.io/enable-profiling` annotation in Dapr annotated pod. |
|
||||
| DAPR_API_TOKEN | Dapr sidecar | The token used for Dapr API authentication for requests from the application. [Enable API token authentication in Dapr]({{< ref api-token >}}). |
|
||||
| NAMESPACE | Dapr sidecar | Used to specify a component's [namespace in self-hosted mode]({{< ref component-scopes >}}). |
|
||||
| DAPR_DEFAULT_IMAGE_REGISTRY | Dapr CLI | In self-hosted mode, it is used to specify the default container registry to pull images from. When its value is set to `GHCR` or `ghcr`, it pulls the required images from Github container registry. To default to Docker hub, unset this environment variable. |
|
||||
|
@ -24,3 +22,6 @@ The following table lists the environment variables used by the Dapr runtime, CL
|
|||
| DAPR_HELM_REPO_URL | Your private Dapr Helm chart url | Specifies a private Dapr Helm chart url, which defaults to the official Helm chart URL: `https://dapr.github.io/helm-charts`|
|
||||
| DAPR_HELM_REPO_USERNAME | A username for a private Helm chart | The username required to access the private Dapr Helm chart. If it can be accessed publicly, this env variable does not need to be set|
|
||||
| DAPR_HELM_REPO_PASSWORD | A password for a private Helm chart |The password required to access the private Dapr helm chart. If it can be accessed publicly, this env variable does not need to be set|
|
||||
| OTEL_EXPORTER_OTLP_ENDPOINT | OpenTelemetry Tracing | Sets the Open Telemetry (OTEL) server address, turns on tracing. (Example: `http://localhost:4318`) |
|
||||
| OTEL_EXPORTER_OTLP_INSECURE | OpenTelemetry Tracing | Sets the connection to the endpoint as unencrypted. (`true`, `false`) |
|
||||
| OTEL_EXPORTER_OTLP_PROTOCOL | OpenTelemetry Tracing | The OTLP transport protocol to use. (`grpc`, `http/protobuf`, `http/json`) |
|
|
@ -0,0 +1,7 @@
|
|||
---
|
||||
type: docs
|
||||
title: "Dapr resource specs"
|
||||
linkTitle: "Resource specs"
|
||||
description: "Detailed information and specifications on Dapr resources"
|
||||
weight: 500
|
||||
---
|
|
@ -0,0 +1,41 @@
|
|||
---
|
||||
type: docs
|
||||
title: "HTTPEndpoint spec"
|
||||
linkTitle: "HTTPEndpoint spec"
|
||||
description: "The HTTPEndpoint resource spec"
|
||||
weight: 300
|
||||
aliases:
|
||||
- "/operations/httpEndpoints/"
|
||||
---
|
||||
|
||||
The `HTTPEndpoint` is a Dapr resource that is used to enable the invocation of non-Dapr endpoints from a Dapr application.
|
||||
|
||||
## HTTPEndpoint format
|
||||
|
||||
```yaml
|
||||
apiVersion: dapr.io/v1alpha1
|
||||
kind: HTTPEndpoint
|
||||
metadata:
|
||||
name: <NAME>
|
||||
spec:
|
||||
version: v1alpha1
|
||||
baseUrl: <REPLACE-WITH-BASEURL> # Required. Use "http://" or "https://" prefix.
|
||||
headers: # Optional
|
||||
- name: <REPLACE-WITH-A-HEADER-NAME>
|
||||
value: <REPLACE-WITH-A-HEADER-VALUE>
|
||||
- name: <REPLACE-WITH-A-HEADER-NAME>
|
||||
secretKeyRef:
|
||||
name: <REPLACE-WITH-SECRET-NAME>
|
||||
key: <REPLACE-WITH-SECRET-KEY>
|
||||
scopes: # Optional
|
||||
- <REPLACE-WITH-SCOPED-APPIDS>
|
||||
auth: # Optional
|
||||
secretStore: <REPLACE-WITH-SECRETSTORE>
|
||||
```
|
||||
|
||||
## Spec fields
|
||||
|
||||
| Field | Required | Details | Example |
|
||||
|--------------------|:--------:|---------|---------|
|
||||
| baseUrl | Y | Base URL of the non-Dapr endpoint | `"https://api.github.com"`, `"http://api.github.com"`
|
||||
| headers | N | HTTP request headers for service invocation | `name: "Accept-Language" value: "en-US"` <br/> `name: "Authorization" secretKeyRef.name: "my-secret" secretKeyRef.key: "myGithubToken" `
|
|
@ -8,9 +8,9 @@
|
|||
output: true
|
||||
- component: AWS S3
|
||||
link: s3
|
||||
state: Alpha
|
||||
state: Stable
|
||||
version: v1
|
||||
since: "1.0"
|
||||
since: "1.11"
|
||||
features:
|
||||
input: false
|
||||
output: true
|
||||
|
|
|
@ -62,7 +62,7 @@
|
|||
features:
|
||||
input: true
|
||||
output: true
|
||||
- component: MySQL
|
||||
- component: MySQL & MariaDB
|
||||
link: mysql
|
||||
state: Alpha
|
||||
version: v1
|
||||
|
@ -70,8 +70,8 @@
|
|||
features:
|
||||
input: false
|
||||
output: true
|
||||
- component: PostgreSql
|
||||
link: postgres
|
||||
- component: PostgreSQL
|
||||
link: postgresql
|
||||
state: Stable
|
||||
version: v1
|
||||
since: "1.9"
|
||||
|
@ -118,14 +118,6 @@
|
|||
features:
|
||||
input: false
|
||||
output: true
|
||||
- component: Twitter
|
||||
link: twitter
|
||||
state: Alpha
|
||||
version: v1
|
||||
since: "1.0"
|
||||
features:
|
||||
input: true
|
||||
output: true
|
||||
- component: SendGrid
|
||||
link: sendgrid
|
||||
state: Alpha
|
||||
|
@ -150,3 +142,11 @@
|
|||
features:
|
||||
input: true
|
||||
output: true
|
||||
- component: Wasm
|
||||
link: wasm
|
||||
state: Alpha
|
||||
version: v1
|
||||
since: "1.11"
|
||||
features:
|
||||
input: false
|
||||
output: true
|
||||
|
|
|
@ -1,13 +1,13 @@
|
|||
- component: Redis
|
||||
link: redis-configuration-store
|
||||
state: Alpha
|
||||
state: Stable
|
||||
version: v1
|
||||
since: "1.5"
|
||||
- component: Postgres
|
||||
link: postgres-configuration-store
|
||||
state: Alpha
|
||||
since: "1.11"
|
||||
- component: PostgreSQL
|
||||
link: postgresql-configuration-store
|
||||
state: Stable
|
||||
version: v1
|
||||
since: "1.9"
|
||||
since: "1.11"
|
||||
- component: Azure App Configuration
|
||||
link: azure-appconfig-configuration-store
|
||||
state: Alpha
|
||||
|
|
|
@ -1,7 +1,8 @@
|
|||
- component: Rate limit
|
||||
link: middleware-rate-limit
|
||||
state: Alpha
|
||||
state: Stable
|
||||
version: v1
|
||||
since: "1.11"
|
||||
description: Restricts the maximum number of allowed HTTP requests per second
|
||||
- component: OAuth2 Authorization Grant flow
|
||||
link: /reference/components-reference/supported-middleware/middleware-oauth2
|
||||
|
@ -15,7 +16,8 @@
|
|||
description: Enables the [OAuth2 Client Credentials Grant flow](https://tools.ietf.org/html/rfc6749#section-4.4) on a Web API
|
||||
- component: OpenID Connect
|
||||
link: /reference/components-reference/supported-middleware/middleware-bearer
|
||||
state: Alpha
|
||||
state: Stable
|
||||
since: "1.11"
|
||||
version: v1
|
||||
description: Verifies a [Bearer Token](https://tools.ietf.org/html/rfc6750) using [OpenID Connect](https://openid.net/connect/) on a Web API
|
||||
- component: Rego/OPA Policies
|
||||
|
@ -43,8 +45,8 @@
|
|||
state: Stable
|
||||
version: v1
|
||||
description: Converts the body of the request to uppercase letters (demo)
|
||||
- component: WASM
|
||||
- component: Wasm
|
||||
link: /reference/components-reference/supported-middleware/middleware-wasm
|
||||
state: Alpha
|
||||
version: v1
|
||||
description: Use WASM middleware in your HTTP pipeline
|
||||
description: Use Wasm middleware in your HTTP pipeline
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
- component: GCP Pub/Sub
|
||||
link: setup-gcp-pubsub
|
||||
state: Alpha
|
||||
state: Stable
|
||||
version: v1
|
||||
since: "1.0"
|
||||
since: "1.11"
|
||||
features:
|
||||
bulkPublish: false
|
||||
bulkSubscribe: false
|
||||
|
|
|
@ -1,11 +1,3 @@
|
|||
- component: Hazelcast
|
||||
link: setup-hazelcast
|
||||
state: Deprecated
|
||||
version: v1
|
||||
since: "1.9"
|
||||
features:
|
||||
bulkPublish: false
|
||||
bulkSubscribe: false
|
||||
- component: In-memory
|
||||
link: setup-inmemory
|
||||
state: Beta
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
since: "1.10"
|
||||
features:
|
||||
crud: true
|
||||
transactions: false
|
||||
transactions: true
|
||||
etag: true
|
||||
ttl: true
|
||||
query: false
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue