merge conflict

Signed-off-by: Hannah Hunter <hannahhunter@microsoft.com>
This commit is contained in:
Hannah Hunter 2024-02-14 14:03:38 -05:00
commit c3a2b69a99
30 changed files with 1319 additions and 257 deletions

View File

@ -209,18 +209,6 @@ url_latest_version = "https://docs.dapr.io"
[[params.versions]]
version = "v1.7"
url = "https://v1-7.docs.dapr.io"
[[params.versions]]
version = "v1.6"
url = "https://v1-6.docs.dapr.io"
[[params.versions]]
version = "v1.5"
url = "https://v1-5.docs.dapr.io"
[[params.versions]]
version = "v1.4"
url = "https://v1-4.docs.dapr.io"
[[params.versions]]
version = "v1.3"
url = "https://v1-3.docs.dapr.io"
# UI Customization
[params.ui]

View File

@ -90,9 +90,13 @@ The diagram below shows an example of how this works. If you have 1 instance of
**Note**: App ID is unique per _application_, not application instance. Regardless of how many instances of that application exist (due to scaling), all of them will share the same app ID.
### Pluggable service discovery
### Swappable service discovery
Dapr can run on a variety of [hosting platforms]({{< ref hosting >}}). To enable service discovery and service invocation, Dapr uses pluggable [name resolution components]({{< ref supported-name-resolution >}}). For example, the Kubernetes name resolution component uses the Kubernetes DNS service to resolve the location of other applications running in the cluster. Self-hosted machines can use the mDNS name resolution component. The Consul name resolution component can be used in any hosting environment, including Kubernetes or self-hosted.
Dapr can run on a variety of [hosting platforms]({{< ref hosting >}}). To enable swappable service discovery with service invocation, Dapr uses [name resolution components]({{< ref supported-name-resolution >}}). For example, the Kubernetes name resolution component uses the Kubernetes DNS service to resolve the location of other applications running in the cluster.
Self-hosted machines can use the mDNS name resolution component. As an alternative, you can use the SQLite name resolution component to run Dapr on single-node environments and for local development scenarios. Dapr sidecars that are part of the cluster store their information in a SQLite database on the local machine.
The Consul name resolution component is particularly suited to multi-machine deployments and can be used in any hosting environment, including Kubernetes, multiple VMs, or self-hosted.
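Swapping resolvers doesn't require code changes; it's a Dapr Configuration setting. As a sketch (mirroring the configuration docs), selecting the SQLite resolver looks like:

```yaml
apiVersion: dapr.io/v1alpha1
kind: Configuration
metadata:
  name: appconfig
spec:
  nameResolution:
    component: "sqlite"
    version: "v1"
    configuration:
      connectionString: "/home/user/.dapr/nr.db"
```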
### Streaming for HTTP service invocation

View File

@ -34,7 +34,7 @@ The Dapr sidecar doesn't load any workflow definitions. Rather, the sidecar si
[Workflow activities]({{< ref "workflow-features-concepts.md#workflow-activites" >}}) are the basic unit of work in a workflow and are the tasks that get orchestrated in the business process.
{{< tabs Python ".NET" Java Go >}}
{{< tabs Python JavaScript ".NET" Java Go >}}
{{% codetab %}}
@ -52,6 +52,37 @@ def hello_act(ctx: WorkflowActivityContext, input):
[See the `hello_act` workflow activity in context.](https://github.com/dapr/python-sdk/blob/master/examples/demo_workflow/app.py#LL40C1-L43C59)
{{% /codetab %}}
{{% codetab %}}
<!--javascript-->
Define the workflow activities you'd like your workflow to perform. Activities are wrapped in the `WorkflowActivityContext` class, which exposes the workflow instance ID and the activity task ID.
```javascript
export default class WorkflowActivityContext {
  private readonly _innerContext: ActivityContext;
  constructor(innerContext: ActivityContext) {
    if (!innerContext) {
      throw new Error("ActivityContext cannot be undefined");
    }
    this._innerContext = innerContext;
  }

  public getWorkflowInstanceId(): string {
    return this._innerContext.orchestrationId;
  }

  public getWorkflowActivityId(): number {
    return this._innerContext.taskId;
  }
}
```
[See the workflow activity in context.](https://github.com/dapr/js-sdk/blob/main/src/workflow/runtime/WorkflowActivityContext.ts)
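For comparison, an activity function itself is simply a function that receives the activity context and an input; a minimal sketch (the names are illustrative):

```javascript
import { WorkflowActivityContext } from "@dapr/dapr";

// A minimal activity: receives the activity context and an input,
// and returns the activity's result (names and logic are illustrative)
const hello = async (_: WorkflowActivityContext, name: string) => {
  return `Hello ${name}!`;
};
```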
{{% /codetab %}}
{{% codetab %}}
@ -193,7 +224,7 @@ func TestActivity(ctx workflow.ActivityContext) (any, error) {
Next, register and call the activities in a workflow.
{{< tabs Python ".NET" Java Go >}}
{{< tabs Python JavaScript ".NET" Java Go >}}
{{% codetab %}}
@ -214,6 +245,51 @@ def hello_world_wf(ctx: DaprWorkflowContext, input):
[See the `hello_world_wf` workflow in context.](https://github.com/dapr/python-sdk/blob/master/examples/demo_workflow/app.py#LL32C1-L38C51)
{{% /codetab %}}
{{% codetab %}}
<!--javascript-->
Next, register the workflow with the `WorkflowRuntime` class and start the workflow runtime.
```javascript
export default class WorkflowRuntime {

  //..
  // Register workflow implementation for handling orchestrations
  public registerWorkflow(workflow: TWorkflow): WorkflowRuntime {
    const name = getFunctionName(workflow);
    const workflowWrapper = (ctx: OrchestrationContext, input: any): any => {
      const workflowContext = new WorkflowContext(ctx);
      return workflow(workflowContext, input);
    };
    this.worker.addNamedOrchestrator(name, workflowWrapper);
    return this;
  }

  // Register workflow activities
  public registerActivity(fn: TWorkflowActivity<TInput, TOutput>): WorkflowRuntime {
    const name = getFunctionName(fn);
    const activityWrapper = (ctx: ActivityContext, input: TInput): TOutput => {
      const wfActivityContext = new WorkflowActivityContext(ctx);
      return fn(wfActivityContext, input);
    };
    this.worker.addNamedActivity(name, activityWrapper);
    return this;
  }

  // Start the workflow runtime processing items and block.
  public async start() {
    await this.worker.start();
  }
}
```
[See the `WorkflowRuntime` in context.](https://github.com/dapr/js-sdk/blob/main/src/workflow/runtime/WorkflowRuntime.ts)
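In application code, registering and starting the runtime looks like the following sketch (`sequence` and `hello` are illustrative names for a workflow and an activity):

```javascript
import { WorkflowRuntime } from "@dapr/dapr";

// Defaults target the local Dapr sidecar; pass { daprHost, daprPort } to override
const workflowRuntime = new WorkflowRuntime();

// Register the workflow and its activity, then start processing work items
workflowRuntime.registerWorkflow(sequence).registerActivity(hello);
await workflowRuntime.start();
```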
{{% /codetab %}}
{{% codetab %}}
@ -327,7 +403,7 @@ func TestWorkflow(ctx *workflow.WorkflowContext) (any, error) {
Finally, compose the application using the workflow.
{{< tabs Python ".NET" Java Go >}}
{{< tabs Python JavaScript ".NET" Java Go >}}
{{% codetab %}}
@ -416,6 +492,153 @@ if __name__ == '__main__':
```
{{% /codetab %}}
{{% codetab %}}
<!--javascript-->
[The following example](https://github.com/dapr/js-sdk/blob/main/src/workflow/client/DaprWorkflowClient.ts) is a basic JavaScript application using the JavaScript SDK. As in this example, your project code would include:
- A builder with the following extensions:
- `WorkflowRuntime`: Allows you to register workflows and workflow activities
- `DaprWorkflowContext`: Allows you to [create workflows]({{< ref "#write-the-workflow" >}})
- `WorkflowActivityContext`: Allows you to [create workflow activities]({{< ref "#write-the-workflow-activities" >}})
- API calls. In the example below, these calls start, terminate, get status, pause, resume, raise event, and purge the workflow.
```javascript
import { TaskHubGrpcClient } from "@microsoft/durabletask-js";
import { WorkflowState } from "./WorkflowState";
import { generateApiTokenClientInterceptors, generateEndpoint, getDaprApiToken } from "../internal/index";
import { TWorkflow } from "../../types/workflow/Workflow.type";
import { getFunctionName } from "../internal";
import { WorkflowClientOptions } from "../../types/workflow/WorkflowClientOption";
/** DaprWorkflowClient class defines client operations for managing workflow instances. */
export default class DaprWorkflowClient {
  private readonly _innerClient: TaskHubGrpcClient;

  /**
   * Initialize a new instance of the DaprWorkflowClient.
   */
  constructor(options: Partial<WorkflowClientOptions> = {}) {
    const grpcEndpoint = generateEndpoint(options);
    options.daprApiToken = getDaprApiToken(options);
    this._innerClient = this.buildInnerClient(grpcEndpoint.endpoint, options);
  }

  private buildInnerClient(hostAddress: string, options: Partial<WorkflowClientOptions>): TaskHubGrpcClient {
    let innerOptions = options?.grpcOptions;
    if (options.daprApiToken !== undefined && options.daprApiToken !== "") {
      innerOptions = {
        ...innerOptions,
        interceptors: [generateApiTokenClientInterceptors(options), ...(innerOptions?.interceptors ?? [])],
      };
    }
    return new TaskHubGrpcClient(hostAddress, innerOptions);
  }

  /**
   * Schedule a new workflow using the DurableTask client.
   */
  public async scheduleNewWorkflow(
    workflow: TWorkflow | string,
    input?: any,
    instanceId?: string,
    startAt?: Date,
  ): Promise<string> {
    if (typeof workflow === "string") {
      return await this._innerClient.scheduleNewOrchestration(workflow, input, instanceId, startAt);
    }
    return await this._innerClient.scheduleNewOrchestration(getFunctionName(workflow), input, instanceId, startAt);
  }

  /**
   * Terminate the workflow associated with the provided instance id.
   *
   * @param {string} workflowInstanceId - Workflow instance id to terminate.
   * @param {any} output - The optional output to set for the terminated workflow instance.
   */
  public async terminateWorkflow(workflowInstanceId: string, output: any) {
    await this._innerClient.terminateOrchestration(workflowInstanceId, output);
  }

  /**
   * Fetch workflow instance metadata from the configured durable store.
   */
  public async getWorkflowState(
    workflowInstanceId: string,
    getInputsAndOutputs: boolean,
  ): Promise<WorkflowState | undefined> {
    const state = await this._innerClient.getOrchestrationState(workflowInstanceId, getInputsAndOutputs);
    if (state !== undefined) {
      return new WorkflowState(state);
    }
  }

  /**
   * Waits for a workflow to start running
   */
  public async waitForWorkflowStart(
    workflowInstanceId: string,
    fetchPayloads = true,
    timeoutInSeconds = 60,
  ): Promise<WorkflowState | undefined> {
    const state = await this._innerClient.waitForOrchestrationStart(
      workflowInstanceId,
      fetchPayloads,
      timeoutInSeconds,
    );
    if (state !== undefined) {
      return new WorkflowState(state);
    }
  }

  /**
   * Waits for a workflow to complete running
   */
  public async waitForWorkflowCompletion(
    workflowInstanceId: string,
    fetchPayloads = true,
    timeoutInSeconds = 60,
  ): Promise<WorkflowState | undefined> {
    const state = await this._innerClient.waitForOrchestrationCompletion(
      workflowInstanceId,
      fetchPayloads,
      timeoutInSeconds,
    );
    if (state !== undefined) {
      return new WorkflowState(state);
    }
  }

  /**
   * Sends an event notification message to an awaiting workflow instance
   */
  public async raiseEvent(workflowInstanceId: string, eventName: string, eventPayload?: any) {
    await this._innerClient.raiseOrchestrationEvent(workflowInstanceId, eventName, eventPayload);
  }

  /**
   * Purges the workflow instance state from the workflow state store.
   */
  public async purgeWorkflow(workflowInstanceId: string): Promise<boolean> {
    const purgeResult = await this._innerClient.purgeOrchestration(workflowInstanceId);
    if (purgeResult !== undefined) {
      return purgeResult.deletedInstanceCount > 0;
    }
    return false;
  }

  /**
   * Closes the inner DurableTask client and shuts down the gRPC channel.
   */
  public async stop() {
    await this._innerClient.stop();
  }
}
```
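Once constructed, the client can drive a workflow end to end; a brief sketch (the workflow name and input are illustrative):

```javascript
import { DaprWorkflowClient } from "@dapr/dapr";

const client = new DaprWorkflowClient();

// Schedule a workflow by name (illustrative), then wait up to 60 seconds for it to finish
const instanceId = await client.scheduleNewWorkflow("OrderProcessingWorkflow", { Quantity: 1 });
const state = await client.waitForWorkflowCompletion(instanceId, true, 60);
console.log(`Result: ${state?.serializedOutput}`);

await client.stop();
```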
{{% /codetab %}}
{{% codetab %}}
@ -893,6 +1116,7 @@ Now that you've authored a workflow, learn how to manage it.
- [Workflow API reference]({{< ref workflow_api.md >}})
- Try out the full SDK examples:
- [Python example](https://github.com/dapr/python-sdk/tree/master/examples/demo_workflow)
- [JavaScript example](https://github.com/dapr/js-sdk/tree/main/examples/workflow)
- [.NET example](https://github.com/dapr/dotnet-sdk/tree/master/examples/Workflow)
- [Java example](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/workflows)
- [Go example](https://github.com/dapr/go-sdk/tree/main/examples/workflow/README.md)

View File

@ -12,7 +12,7 @@ Dapr Workflow is currently in beta. [See known limitations for {{% dapr-latest-v
Now that you've [authored the workflow and its activities in your application]({{< ref howto-author-workflow.md >}}), you can start, terminate, and get information about the workflow using HTTP API calls. For more information, read the [workflow API reference]({{< ref workflow_api.md >}}).
{{< tabs Python ".NET" Java Go HTTP >}}
{{< tabs Python JavaScript ".NET" Java Go HTTP >}}
<!--Python-->
{{% codetab %}}
@ -63,6 +63,77 @@ d.terminate_workflow(instance_id=instanceId, workflow_component=workflowComponen
{{% /codetab %}}
<!--JavaScript-->
{{% codetab %}}
Manage your workflow within your code. In the workflow example from the [Author a workflow]({{< ref "howto-author-workflow.md#write-the-application" >}}) guide, the workflow is managed in the code using the following APIs:
- **client.workflow.start**: Start an instance of a workflow
- **client.workflow.get**: Get information on the status of the workflow
- **client.workflow.pause**: Pauses or suspends a workflow instance that can later be resumed
- **client.workflow.resume**: Resumes a paused workflow instance
- **client.workflow.purge**: Removes all metadata related to a specific workflow instance
- **client.workflow.terminate**: Terminate or stop a particular instance of a workflow
```javascript
import { DaprClient } from "@dapr/dapr";
async function printWorkflowStatus(client: DaprClient, instanceId: string) {
  const workflow = await client.workflow.get(instanceId);
  console.log(
    `Workflow ${workflow.workflowName}, created at ${workflow.createdAt.toUTCString()}, has status ${
      workflow.runtimeStatus
    }`,
  );
  console.log(`Additional properties: ${JSON.stringify(workflow.properties)}`);
  console.log("--------------------------------------------------\n\n");
}

async function start() {
  const client = new DaprClient();

  // Start a new workflow instance
  const instanceId = await client.workflow.start("OrderProcessingWorkflow", {
    Name: "Paperclips",
    TotalCost: 99.95,
    Quantity: 4,
  });
  console.log(`Started workflow instance ${instanceId}`);
  await printWorkflowStatus(client, instanceId);

  // Pause a workflow instance
  await client.workflow.pause(instanceId);
  console.log(`Paused workflow instance ${instanceId}`);
  await printWorkflowStatus(client, instanceId);

  // Resume a workflow instance
  await client.workflow.resume(instanceId);
  console.log(`Resumed workflow instance ${instanceId}`);
  await printWorkflowStatus(client, instanceId);

  // Terminate a workflow instance
  await client.workflow.terminate(instanceId);
  console.log(`Terminated workflow instance ${instanceId}`);
  await printWorkflowStatus(client, instanceId);

  // Wait 30 seconds for the workflow to complete
  await new Promise((resolve) => setTimeout(resolve, 30000));
  await printWorkflowStatus(client, instanceId);

  // Purge a workflow instance
  await client.workflow.purge(instanceId);
  console.log(`Purged workflow instance ${instanceId}`);
  // This will throw an error because the workflow instance no longer exists.
  await printWorkflowStatus(client, instanceId);
}

start().catch((e) => {
  console.error(e);
  process.exit(1);
});
```
{{% /codetab %}}
<!--NET-->
{{% codetab %}}
@ -320,6 +391,7 @@ Learn more about these HTTP calls in the [workflow API reference guide]({{< ref
- [Try out the Workflow quickstart]({{< ref workflow-quickstart.md >}})
- Try out the full SDK examples:
- [Python example](https://github.com/dapr/python-sdk/blob/master/examples/demo_workflow/app.py)
- [JavaScript example](https://github.com/dapr/js-sdk/tree/main/examples/workflow)
- [.NET example](https://github.com/dapr/dotnet-sdk/tree/master/examples/Workflow)
- [Java example](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/workflows)
- [Go example](https://github.com/dapr/go-sdk/tree/main/examples/workflow/README.md)

View File

@ -195,6 +195,7 @@ See the [Reminder usage and execution guarantees section]({{< ref "workflow-arch
- [Try out the Workflow quickstart]({{< ref workflow-quickstart.md >}})
- Try out the following examples:
- [Python](https://github.com/dapr/python-sdk/tree/master/examples/demo_workflow)
- [JavaScript](https://github.com/dapr/js-sdk/tree/main/examples/workflow)
- [.NET](https://github.com/dapr/dotnet-sdk/tree/master/examples/Workflow)
- [Java](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/workflows)
- [Go example](https://github.com/dapr/go-sdk/tree/main/examples/workflow/README.md)

View File

@ -97,9 +97,7 @@ Child workflows have many benefits:
The return value of a child workflow is its output. If a child workflow fails with an exception, then that exception is surfaced to the parent workflow, just like it is when an activity task fails with an exception. Child workflows also support automatic retry policies.
{{% alert title="Note" color="primary" %}}
Because child workflows are independent of their parents, terminating a parent workflow does not affect any child workflows. You must terminate each child workflow independently using its instance ID.
{{% /alert %}}
Terminating a parent workflow terminates all of the child workflows created by the workflow instance. See [the terminate workflow api]({{< ref "workflow_api.md#terminate-workflow-request" >}}) for more information.
## Durable timers
@ -369,7 +367,7 @@ Failure to follow this rule could result in undefined behavior. Any background p
For example, instead of this:
{{< tabs ".NET" Java Go >}}
{{< tabs ".NET" Java JavaScript Go >}}
{{% codetab %}}
@ -394,17 +392,22 @@ ctx.createTimer(Duration.ofSeconds(5)).await();
{{% codetab %}}
Don't declare JavaScript workflows as `async`. The Node.js runtime doesn't guarantee that asynchronous functions are deterministic.
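For example, this is a sketch of the anti-pattern, using an ambient Node.js timer instead of Dapr's durable timer:

```javascript
// DON'T DO THIS! An async workflow function is not deterministic.
const myWorkflow = async function (ctx: WorkflowContext): Promise<any> {
  // Ambient timers and I/O make replay non-deterministic
  await new Promise((resolve) => setTimeout(resolve, 5000));
};
```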
{{% /codetab %}}
{{% codetab %}}
```go
// DON'T DO THIS!
```
{{% /codetab %}}
{{< /tabs >}}
Do this:
{{< tabs ".NET" Java Go >}}
{{< tabs ".NET" Java JavaScript Go >}}
{{% codetab %}}
@ -428,6 +431,12 @@ ctx.createTimer(Duration.ofSeconds(5)).await();
{{% codetab %}}
Since the Node.js runtime doesn't guarantee that asynchronous functions are deterministic, always declare JavaScript workflows as synchronous generator functions.
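For example, a generator-based workflow sketch that yields Dapr's durable timer instead of awaiting an ambient one:

```javascript
// Do this: yield the durable timer so the runtime can replay it deterministically
const myWorkflow: TWorkflow = async function* (ctx: WorkflowContext): any {
  yield ctx.createTimer(5);
};
```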
{{% /codetab %}}
{{% codetab %}}
```go
// Do this!!
```
@ -465,6 +474,7 @@ To work around these constraints:
- [Workflow API reference]({{< ref workflow_api.md >}})
- Try out the following examples:
- [Python](https://github.com/dapr/python-sdk/tree/master/examples/demo_workflow)
- [JavaScript](https://github.com/dapr/js-sdk/tree/main/examples/workflow)
- [.NET](https://github.com/dapr/dotnet-sdk/tree/master/examples/Workflow)
- [Java](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/workflows)
- [Go example](https://github.com/dapr/go-sdk/tree/main/examples/workflow/README.md)
- [Go](https://github.com/dapr/go-sdk/tree/main/examples/workflow/README.md)

View File

@ -7,7 +7,7 @@ description: "Overview of Dapr Workflow"
---
{{% alert title="Note" color="primary" %}}
Dapr Workflow is currently in beta. [See known limitations for {{% dapr-latest-version cli="true" %}}]({{< ref "#limitations" >}}).
Dapr Workflow is currently in beta. [See known limitations]({{< ref "#limitations" >}}).
{{% /alert %}}
Dapr workflow makes it easy for developers to write business logic and integrations in a reliable way. Since Dapr workflows are stateful, they support long-running and fault-tolerant applications, ideal for orchestrating microservices. Dapr workflow works seamlessly with other Dapr building blocks, such as service invocation, pub/sub, state management, and bindings.
@ -41,7 +41,7 @@ With Dapr Workflow, you can write activities and then orchestrate those activiti
### Child workflows
In addition to activities, you can write workflows to schedule other workflows as child workflows. A child workflow is independent of the parent workflow that started it and support automatic retry policies.
In addition to activities, you can write workflows to schedule other workflows as child workflows. A child workflow has its own instance ID, history, and status that are independent of the parent workflow that started it, except that terminating the parent workflow terminates all of the child workflows created by it. Child workflows also support automatic retry policies.
[Learn more about child workflows.]({{< ref "workflow-features-concepts.md#child-workflows" >}})
@ -80,6 +80,7 @@ You can use the following SDKs to author a workflow.
| Language stack | Package |
| - | - |
| Python | [dapr-ext-workflow](https://github.com/dapr/python-sdk/tree/master/ext/dapr-ext-workflow) |
| JavaScript | [DaprWorkflowClient](https://github.com/dapr/js-sdk/blob/main/src/workflow/client/DaprWorkflowClient.ts) |
| .NET | [Dapr.Workflow](https://www.nuget.org/profiles/dapr.io) |
| Java | [io.dapr.workflows](https://dapr.github.io/java-sdk/io/dapr/workflows/package-summary.html) |
| Go | [workflow](https://github.com/dapr/go-sdk/tree/main/client/workflow.go) |
@ -94,20 +95,19 @@ Want to put workflows to the test? Walk through the following quickstart and tut
| ------------------- | ----------- |
| [Workflow quickstart]({{< ref workflow-quickstart.md >}}) | Run a workflow application with four workflow activities to see Dapr Workflow in action |
| [Workflow Python SDK example](https://github.com/dapr/python-sdk/tree/master/examples/demo_workflow) | Learn how to create a Dapr Workflow and invoke it using the Python `DaprClient` package. |
| [Workflow JavaScript SDK example](https://github.com/dapr/js-sdk/tree/main/examples/workflow) | Learn how to create a Dapr Workflow and invoke it using the JavaScript SDK. |
| [Workflow .NET SDK example](https://github.com/dapr/dotnet-sdk/tree/master/examples/Workflow) | Learn how to create a Dapr Workflow and invoke it using ASP.NET Core web APIs. |
| [Workflow Java SDK example](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/workflows) | Learn how to create a Dapr Workflow and invoke it using the Java `io.dapr.workflows` package. |
| [Workflow Go SDK example](https://github.com/dapr/go-sdk/tree/main/examples/workflow/README.md) | Learn how to create a Dapr Workflow and invoke it using the Go `workflow` package. |
### Start using workflows directly in your app
Want to skip the quickstarts? Not a problem. You can try out the workflow building block directly in your application. After [Dapr is installed]({{< ref install-dapr-cli.md >}}), you can begin using workflows, starting with [how to author a workflow]({{< ref howto-author-workflow.md >}}).
## Limitations
- **State stores:** For the {{% dapr-latest-version cli="true" %}} beta release of Dapr Workflow, using the NoSQL databases as a state store results in limitations around storing internal states. For example, CosmosDB has a maximum single operation item limit of only 100 states in a single request.
- **Horizontal scaling:** For the {{% dapr-latest-version cli="true" %}} beta release of Dapr Workflow, if you scale out Dapr sidecars or your application pods to more than 2, then the concurrency of the workflow execution drops. It is recommended to test with 1 or 2 instances, and no more than 2.
- **State stores:** As of the 1.12.0 beta release of Dapr Workflow, using the NoSQL databases as a state store results in limitations around storing internal states. For example, CosmosDB has a maximum single operation item limit of only 100 states in a single request.
- **Horizontal scaling:** As of the 1.12.0 beta release of Dapr Workflow, if you scale out Dapr sidecars or your application pods to more than 2, then the concurrency of the workflow execution drops. It is recommended to test with 1 or 2 instances, and no more than 2.
## Watch the demo
@ -123,7 +123,8 @@ Watch [this video for an overview on Dapr Workflow](https://youtu.be/s1p9MNl4VGo
- [Workflow API reference]({{< ref workflow_api.md >}})
- Try out the full SDK examples:
- [.NET example](https://github.com/dapr/dotnet-sdk/tree/master/examples/Workflow)
- [Python example](https://github.com/dapr/python-sdk/tree/master/examples/demo_workflow)
- [JavaScript example](https://github.com/dapr/js-sdk/tree/main/examples/workflow)
- [.NET example](https://github.com/dapr/dotnet-sdk/tree/master/examples/Workflow)
- [Java example](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/workflows)
- [Go example](https://github.com/dapr/go-sdk/tree/main/examples/workflow/README.md)

View File

@ -25,7 +25,7 @@ While the pattern is simple, there are many complexities hidden in the implement
Dapr Workflow solves these complexities by allowing you to implement the task chaining pattern concisely as a simple function in the programming language of your choice, as shown in the following example.
{{< tabs Python ".NET" Java Go >}}
{{< tabs Python JavaScript ".NET" Java Go >}}
{{% codetab %}}
<!--python-->
@ -72,6 +72,80 @@ def error_handler(ctx, error):
{{% /codetab %}}
{{% codetab %}}
<!--javascript-->
```javascript
import { DaprWorkflowClient, WorkflowActivityContext, WorkflowContext, WorkflowRuntime, TWorkflow } from "@dapr/dapr";
async function start() {
  // Update the gRPC client and worker to use a local address and port
  const daprHost = "localhost";
  const daprPort = "50001";

  const workflowClient = new DaprWorkflowClient({
    daprHost,
    daprPort,
  });
  const workflowRuntime = new WorkflowRuntime({
    daprHost,
    daprPort,
  });

  const hello = async (_: WorkflowActivityContext, name: string) => {
    return `Hello ${name}!`;
  };

  const sequence: TWorkflow = async function* (ctx: WorkflowContext): any {
    const cities: string[] = [];

    const result1 = yield ctx.callActivity(hello, "Tokyo");
    cities.push(result1);
    const result2 = yield ctx.callActivity(hello, "Seattle");
    cities.push(result2);
    const result3 = yield ctx.callActivity(hello, "London");
    cities.push(result3);

    return cities;
  };

  workflowRuntime.registerWorkflow(sequence).registerActivity(hello);

  // Wrap the worker startup in a try-catch block to handle any errors during startup
  try {
    await workflowRuntime.start();
    console.log("Workflow runtime started successfully");
  } catch (error) {
    console.error("Error starting workflow runtime:", error);
  }

  // Schedule a new orchestration
  try {
    const id = await workflowClient.scheduleNewWorkflow(sequence);
    console.log(`Orchestration scheduled with ID: ${id}`);

    // Wait for orchestration completion
    const state = await workflowClient.waitForWorkflowCompletion(id, undefined, 30);
    console.log(`Orchestration completed! Result: ${state?.serializedOutput}`);
  } catch (error) {
    console.error("Error scheduling or waiting for orchestration:", error);
  }

  await workflowRuntime.stop();
  await workflowClient.stop();

  // Stop the Dapr sidecar
  process.exit(0);
}

start().catch((e) => {
  console.error(e);
  process.exit(1);
});
```
{{% /codetab %}}
{{% codetab %}}
<!--dotnet-->
@ -237,7 +311,7 @@ In addition to the challenges mentioned in [the previous pattern]({{< ref "workf
Dapr Workflows provides a way to express the fan-out/fan-in pattern as a simple function, as shown in the following example:
{{< tabs Python ".NET" Java Go >}}
{{< tabs Python JavaScript ".NET" Java Go >}}
{{% codetab %}}
<!--python-->
@ -279,6 +353,114 @@ def process_results(ctx, final_result: int):
{{% /codetab %}}
{{% codetab %}}
<!--javascript-->
```javascript
import {
  Task,
  DaprWorkflowClient,
  WorkflowActivityContext,
  WorkflowContext,
  WorkflowRuntime,
  TWorkflow,
} from "@dapr/dapr";

// Wrap the entire code in an immediately-invoked async function
async function start() {
  // Update the gRPC client and worker to use a local address and port
  const daprHost = "localhost";
  const daprPort = "50001";

  const workflowClient = new DaprWorkflowClient({
    daprHost,
    daprPort,
  });
  const workflowRuntime = new WorkflowRuntime({
    daprHost,
    daprPort,
  });

  function getRandomInt(min: number, max: number): number {
    return Math.floor(Math.random() * (max - min + 1)) + min;
  }

  async function getWorkItemsActivity(_: WorkflowActivityContext): Promise<string[]> {
    const count: number = getRandomInt(2, 10);
    console.log(`generating ${count} work items...`);

    const workItems: string[] = Array.from({ length: count }, (_, i) => `work item ${i}`);
    return workItems;
  }

  function sleep(ms: number): Promise<void> {
    return new Promise((resolve) => setTimeout(resolve, ms));
  }

  async function processWorkItemActivity(context: WorkflowActivityContext, item: string): Promise<number> {
    console.log(`processing work item: ${item}`);

    // Simulate some work that takes a variable amount of time
    const sleepTime = Math.random() * 5000;
    await sleep(sleepTime);

    // Return a result for the given work item, which is also a random number in this case
    // For more information about random numbers in workflow please check
    // https://learn.microsoft.com/azure/azure-functions/durable/durable-functions-code-constraints?tabs=csharp#random-numbers
    return Math.floor(Math.random() * 11);
  }

  const workflow: TWorkflow = async function* (ctx: WorkflowContext): any {
    const tasks: Task<any>[] = [];
    const workItems = yield ctx.callActivity(getWorkItemsActivity);
    for (const workItem of workItems) {
      tasks.push(ctx.callActivity(processWorkItemActivity, workItem));
    }
    const results: number[] = yield ctx.whenAll(tasks);
    const sum: number = results.reduce((accumulator, currentValue) => accumulator + currentValue, 0);
    return sum;
  };

  workflowRuntime.registerWorkflow(workflow);
  workflowRuntime.registerActivity(getWorkItemsActivity);
  workflowRuntime.registerActivity(processWorkItemActivity);

  // Wrap the worker startup in a try-catch block to handle any errors during startup
  try {
    await workflowRuntime.start();
    console.log("Worker started successfully");
  } catch (error) {
    console.error("Error starting worker:", error);
  }

  // Schedule a new orchestration
  try {
    const id = await workflowClient.scheduleNewWorkflow(workflow);
    console.log(`Orchestration scheduled with ID: ${id}`);

    // Wait for orchestration completion
    const state = await workflowClient.waitForWorkflowCompletion(id, undefined, 30);
    console.log(`Orchestration completed! Result: ${state?.serializedOutput}`);
  } catch (error) {
    console.error("Error scheduling or waiting for orchestration:", error);
  }

  // Stop the worker and client
  await workflowRuntime.stop();
  await workflowClient.stop();

  // Stop the Dapr sidecar
  process.exit(0);
}

start().catch((e) => {
  console.error(e);
  process.exit(1);
});
```
{{% /codetab %}}
{{% codetab %}}
<!--dotnet-->
@ -439,7 +621,7 @@ Depending on the business needs, there may be a single monitor or there may be m
Dapr Workflow supports this pattern natively by allowing you to implement _eternal workflows_. Rather than writing infinite while-loops ([which is an anti-pattern]({{< ref "workflow-features-concepts.md#infinite-loops-and-eternal-workflows" >}})), Dapr Workflow exposes a _continue-as-new_ API that workflow authors can use to restart a workflow function from the beginning with a new input.
{{< tabs Python ".NET" Java Go >}}
{{< tabs Python JavaScript ".NET" Java Go >}}
{{% codetab %}}
<!--python-->
@ -488,6 +670,34 @@ def send_alert(ctx, message: str):
{{% /codetab %}}
{{% codetab %}}
<!--javascript-->
```javascript
const statusMonitorWorkflow: TWorkflow = async function* (ctx: WorkflowContext): any {
  let duration;
  const status = yield ctx.callActivity(checkStatusActivity);
  if (status === "healthy") {
    // Check less frequently when in a healthy state
    // set duration to 1 hour
    duration = 60 * 60;
  } else {
    yield ctx.callActivity(alertActivity, "job unhealthy");
    // Check more frequently when in an unhealthy state
    // set duration to 5 minutes
    duration = 5 * 60;
  }

  // Put the workflow to sleep until the determined time
  yield ctx.createTimer(duration);

  // Restart from the beginning with the updated state
  ctx.continueAsNew();
};
```
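The `checkStatusActivity` and `alertActivity` referenced above aren't shown in this snippet; hypothetical sketches could look like:

```javascript
// Hypothetical sketches of the activities used by the monitor workflow
const checkStatusActivity = async (_: WorkflowActivityContext): Promise<string> => {
  // Replace with a real health probe of the job being monitored
  return Math.random() < 0.9 ? "healthy" : "unhealthy";
};

const alertActivity = async (_: WorkflowActivityContext, message: string) => {
  console.log(`ALERT: ${message}`);
};
```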
{{% /codetab %}}
{{% codetab %}}
<!--dotnet-->
@ -609,7 +819,7 @@ The following diagram illustrates this flow.
The following example code shows how this pattern can be implemented using Dapr Workflow.
{{< tabs Python ".NET" Java Go >}}
{{< tabs Python JavaScript ".NET" Java Go >}}
{{% codetab %}}
<!--python-->
@ -670,6 +880,146 @@ def place_order(_, order: Order) -> None:
{{% /codetab %}}
{{% codetab %}}
<!--javascript-->
```javascript
import {
  Task,
  DaprWorkflowClient,
  WorkflowActivityContext,
  WorkflowContext,
  WorkflowRuntime,
  TWorkflow,
} from "@dapr/dapr";
import * as readlineSync from "readline-sync";

// Wrap the entire code in an immediately-invoked async function
async function start() {
  class Order {
    cost: number;
    product: string;
    quantity: number;
    constructor(cost: number, product: string, quantity: number) {
      this.cost = cost;
      this.product = product;
      this.quantity = quantity;
    }
  }

  function sleep(ms: number): Promise<void> {
    return new Promise((resolve) => setTimeout(resolve, ms));
  }

  // Update the gRPC client and worker to use a local address and port
  const daprHost = "localhost";
  const daprPort = "50001";

  const workflowClient = new DaprWorkflowClient({
    daprHost,
    daprPort,
  });
  const workflowRuntime = new WorkflowRuntime({
    daprHost,
    daprPort,
  });

  // Activity function that sends an approval request to the manager
  const sendApprovalRequest = async (_: WorkflowActivityContext, order: Order) => {
    // Simulate some work that takes an amount of time
    await sleep(3000);
    console.log(`Sending approval request for order: ${order.product}`);
  };

  // Activity function that places an order
  const placeOrder = async (_: WorkflowActivityContext, order: Order) => {
    console.log(`Placing order: ${order.product}`);
  };

  // Orchestrator function that represents a purchase order workflow
  const purchaseOrderWorkflow: TWorkflow = async function* (ctx: WorkflowContext, order: Order): any {
    // Orders under $1000 are auto-approved
    if (order.cost < 1000) {
      return "Auto-approved";
    }

    // Orders of $1000 or more require manager approval
    yield ctx.callActivity(sendApprovalRequest, order);

    // Approvals must be received within 24 hours or they will be cancelled.
    const tasks: Task<any>[] = [];
    const approvalEvent = ctx.waitForExternalEvent("approval_received");
    const timeoutEvent = ctx.createTimer(24 * 60 * 60);
    tasks.push(approvalEvent);
    tasks.push(timeoutEvent);
    const winner = yield ctx.whenAny(tasks);

    if (winner === timeoutEvent) {
      return "Cancelled";
    }

    yield ctx.callActivity(placeOrder, order);
    const approvalDetails = approvalEvent.getResult();
    return `Approved by ${approvalDetails.approver}`;
  };

  workflowRuntime
    .registerWorkflow(purchaseOrderWorkflow)
    .registerActivity(sendApprovalRequest)
    .registerActivity(placeOrder);

  // Wrap the worker startup in a try-catch block to handle any errors during startup
  try {
    await workflowRuntime.start();
    console.log("Worker started successfully");
  } catch (error) {
    console.error("Error starting worker:", error);
  }

  // Schedule a new orchestration
  try {
    const cost = readlineSync.questionInt("Cost of your order:");
    const approver = readlineSync.question("Approver of your order:");
    const timeout = readlineSync.questionInt("Timeout for your order in seconds:");
    const order = new Order(cost, "MyProduct", 1);
    const id = await workflowClient.scheduleNewWorkflow(purchaseOrderWorkflow, order);
    console.log(`Orchestration scheduled with ID: ${id}`);

    // Prompt for approval asynchronously
    promptForApproval(approver, workflowClient, id);

    // Wait for orchestration completion
    const state = await workflowClient.waitForWorkflowCompletion(id, undefined, timeout + 2);
    console.log(`Orchestration completed! Result: ${state?.serializedOutput}`);
  } catch (error) {
    console.error("Error scheduling or waiting for orchestration:", error);
  }

  // Stop the worker and client
  await workflowRuntime.stop();
  await workflowClient.stop();

  // Stop the Dapr sidecar
  process.exit(0);
}

async function promptForApproval(approver: string, workflowClient: DaprWorkflowClient, id: string) {
  if (readlineSync.keyInYN("Press [Y] to approve the order... Y/yes, N/no")) {
    const approvalEvent = { approver: approver };
    await workflowClient.raiseEvent(id, "approval_received", approvalEvent);
  } else {
    return "Order rejected";
  }
}

start().catch((e) => {
  console.error(e);
  process.exit(1);
});
```
{{% /codetab %}}
{{% codetab %}}
<!--dotnet-->
@ -764,7 +1114,7 @@ public class ExternalSystemInteractionWorkflow extends Workflow {
The code that delivers the event to resume the workflow execution is external to the workflow. Workflow events can be delivered to a waiting workflow instance using the [raise event]({{< ref "howto-manage-workflow.md#raise-an-event" >}}) workflow management API, as shown in the following example:
{{< tabs Python ".NET" Java Go >}}
{{< tabs Python JavaScript ".NET" Java Go >}}
{{% codetab %}}
<!--python-->
@ -783,6 +1133,15 @@ with DaprClient() as d:
{{% /codetab %}}
{{% codetab %}}
<!--javascript-->
```javascript
import { DaprWorkflowClient } from "@dapr/dapr";

// Raise the workflow event to the waiting workflow, using the instance ID
// returned when the workflow was scheduled. The instance ID variable, event
// name, and payload below are illustrative.
const workflowClient = new DaprWorkflowClient();
await workflowClient.raiseEvent(instanceId, "approval_received", { approver: "Jane" });
```
{{% /codetab %}}
{{% codetab %}}
<!--dotnet-->
@ -830,6 +1189,7 @@ External events don't have to be directly triggered by humans. They can also be
- [Workflow API reference]({{< ref workflow_api.md >}})
- Try out the following examples:
- [Python](https://github.com/dapr/python-sdk/tree/master/examples/demo_workflow)
- [JavaScript](https://github.com/dapr/js-sdk/tree/main/examples/workflow)
- [.NET](https://github.com/dapr/dotnet-sdk/tree/master/examples/Workflow)
- [Java](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/workflows)
- [Go example](https://github.com/dapr/go-sdk/tree/main/examples/workflow/README.md)
- [Go](https://github.com/dapr/go-sdk/tree/main/examples/workflow/README.md)

View File

@ -21,8 +21,7 @@ In this guide, you'll:
<img src="/images/workflow-quickstart-overview.png" width=800 style="padding-bottom:15px;">
Select your preferred language-specific Dapr SDK before proceeding with the Quickstart.
{{< tabs "Python" ".NET" "Java" "Go" >}}
{{< tabs "Python" "JavaScript" ".NET" "Java" Go >}}
<!-- Python -->
{{% codetab %}}
@ -264,6 +263,107 @@ In `workflow.py`, the workflow is defined as a class with all of its associated
message=f'Order {order_id} has completed!'))
return OrderResult(processed=True)
```
{{% /codetab %}}
<!-- JavaScript -->
{{% codetab %}}
The `order-processor` console app starts and manages the lifecycle of an order processing workflow that stores and retrieves data in a state store. The workflow consists of four workflow activities, or tasks:
- `NotifyActivity`: Utilizes a logger to print out messages throughout the workflow
- `ReserveInventoryActivity`: Checks the state store to ensure that there is enough inventory for the purchase
- `ProcessPaymentActivity`: Processes and authorizes the payment
- `UpdateInventoryActivity`: Removes the requested items from the state store and updates the store with the new remaining inventory value
### Step 1: Pre-requisites
For this example, you will need:
- [Dapr CLI and initialized environment](https://docs.dapr.io/getting-started).
- [Latest Node.js installed](https://nodejs.org/download/).
<!-- IGNORE_LINKS -->
- [Docker Desktop](https://www.docker.com/products/docker-desktop)
<!-- END_IGNORE -->
### Step 2: Set up the environment
Clone the [sample provided in the Quickstarts repo](https://github.com/dapr/quickstarts/tree/master/workflows).
```bash
git clone https://github.com/dapr/quickstarts.git
```
In a new terminal window, navigate to the `order-processor` directory:
```bash
cd workflows/javascript/sdk/order-processor
```
### Step 3: Run the order processor app
In the terminal, start the order processor app alongside a Dapr sidecar:
```bash
dapr run
```
This starts the `order-processor` app with a unique workflow ID and runs the workflow activities.
Expected output:
```
```
### (Optional) Step 4: View in Zipkin
Running `dapr init` launches the [openzipkin/zipkin](https://hub.docker.com/r/openzipkin/zipkin/) Docker container. If the container has stopped running, launch the Zipkin Docker container with the following command:
```
docker run -d -p 9411:9411 openzipkin/zipkin
```
View the workflow trace spans in the Zipkin web UI (typically at `http://localhost:9411/zipkin/`).
<img src="/images/workflow-trace-spans-zipkin.png" width=800 style="padding-bottom:15px;">
### What happened?
When you ran `dapr run`:
1. A unique order ID for the workflow is generated (in the above example, `6d2abcc9`) and the workflow is scheduled.
1. The `NotifyActivity` workflow activity sends a notification saying an order for 10 cars has been received.
1. The `ReserveInventoryActivity` workflow activity checks the inventory data, determines if you can supply the ordered item, and responds with the number of cars in stock.
1. Your workflow starts and notifies you of its status.
1. The `ProcessPaymentActivity` workflow activity begins processing payment for order `6d2abcc9` and confirms if successful.
1. The `UpdateInventoryActivity` workflow activity updates the inventory with the current available cars after the order has been processed.
1. The `NotifyActivity` workflow activity sends a notification saying that order `6d2abcc9` has completed.
1. The workflow terminates as completed.
#### `order-processor/index.js`
In the application's program file:
- The unique workflow order ID is generated
- The workflow is scheduled
- The workflow status is retrieved
- The workflow and the workflow activities it invokes are registered
```javascript
// Sketch of the program file — identifiers are illustrative; see the
// quickstart repo for the actual file contents. (The workflow and activity
// definitions are imported from the files described below.)
import { DaprWorkflowClient, WorkflowRuntime } from "@dapr/dapr";

const workflowClient = new DaprWorkflowClient();
const workflowRuntime = new WorkflowRuntime();

// Register the workflow and the workflow activities it invokes
workflowRuntime
  .registerWorkflow(orderProcessingWorkflow)
  .registerActivity(notifyActivity)
  .registerActivity(reserveInventoryActivity)
  .registerActivity(processPaymentActivity)
  .registerActivity(updateInventoryActivity);
await workflowRuntime.start();

// Generate a unique workflow order ID and schedule the workflow
const orderId = Math.random().toString(36).substring(2, 10);
await workflowClient.scheduleNewWorkflow(orderProcessingWorkflow, { name: "cars", quantity: 10 }, orderId);

// Retrieve the workflow status
const state = await workflowClient.getWorkflowState(orderId, true);
console.log(`Workflow state: ${state?.runtimeStatus}`);
```
#### `order-processor/Workflows/OrderProcessingWorkflow.js`
In `OrderProcessingWorkflow.js`, the workflow is defined with all of its associated tasks (determined by workflow activities).
```javascript
// Sketch of the workflow definition — identifiers are illustrative;
// see the quickstart repo for the actual file contents.
const orderProcessingWorkflow = async function* (ctx, order) {
  yield ctx.callActivity(notifyActivity, `Received order for ${order.quantity} ${order.name}(s)`);

  // Check the state store for sufficient inventory
  const available = yield ctx.callActivity(reserveInventoryActivity, order);
  if (!available) {
    yield ctx.callActivity(notifyActivity, `Insufficient inventory for ${order.name}`);
    return { processed: false };
  }

  // Process payment, then update the remaining inventory
  yield ctx.callActivity(processPaymentActivity, order);
  yield ctx.callActivity(updateInventoryActivity, order);

  yield ctx.callActivity(notifyActivity, "Order has completed!");
  return { processed: true };
};
```
#### `order-processor/Activities` directory
The `Activities` directory holds the four workflow activities used by the workflow, each defined in its own file.
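For example, the notification activity might look like the following sketch (hypothetical contents; see the quickstart repo for the actual files):

```javascript
// order-processor/Activities/NotifyActivity.js — hypothetical sketch
export default async function notifyActivity(_ctx, message) {
  // Print messages as the workflow progresses
  console.log(message);
}
```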
{{% /codetab %}}
<!-- .NET -->

View File

@ -50,6 +50,7 @@ The following configuration settings can be applied to Dapr application sidecars
- [Metrics](#metrics)
- [Logging](#logging)
- [Middleware](#middleware)
- [Name resolution](#name-resolution)
- [Scope secret store access](#scope-secret-store-access)
- [Access Control allow lists for building block APIs](#access-control-allow-lists-for-building-block-apis)
- [Access Control allow lists for service invocation API](#access-control-allow-lists-for-service-invocation-api)
@ -106,21 +107,27 @@ The `metrics` section under the `Configuration` spec contains the following prop
```yml
metrics:
  enabled: true
  rules: []
  http:
    increasedCardinality: true
The following table lists the properties for metrics:
| Property | Type | Description |
|--------------|--------|-------------|
| `enabled` | boolean | Whether metrics should to be enabled. |
| `rules` | boolean | Named rule to filter metrics. Each rule contains a set of `labels` to filter on and a`regex`expression to apply to the metrics path. |
| `enabled` | boolean | When set to true, the default, enables metrics collection and the metrics endpoint. |
| `rules` | array | Named rule to filter metrics. Each rule contains a set of `labels` to filter on and a `regex` expression to apply to the metrics path. |
| `http.increasedCardinality` | boolean | When set to true, in the Dapr HTTP server each request path causes the creation of a new "bucket" of metrics. This can cause issues, including excessive memory consumption, when there are many different requested endpoints (such as when interacting with RESTful APIs).<br>In Dapr 1.13 the default value is `true` (to preserve the behavior of Dapr <= 1.12), but will change to `false` in Dapr 1.14. |
To mitigate high memory usage and egress costs associated with [high cardinality metrics]({{< ref "metrics-overview.md#high-cardinality-metrics" >}}), you can set regular expressions for every metric exposed by the Dapr sidecar. For example:
To mitigate high memory usage and egress costs associated with [high cardinality metrics]({{< ref "metrics-overview.md#high-cardinality-metrics" >}}) with the HTTP server, you should set the `metrics.http.increasedCardinality` property to `false`.
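For example, a minimal sketch of the `metrics` section with low-cardinality HTTP metrics:

```yml
metrics:
  enabled: true
  http:
    increasedCardinality: false
```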
Using rules, you can set regular expressions for every metric exposed by the Dapr sidecar. For example:
```yml
metric:
  enabled: true
  rules:
metrics:
  enabled: true
  rules:
    - name: dapr_runtime_service_invocation_req_sent_total
      labels:
        - name: method
@ -183,6 +190,29 @@ The following table lists the properties for HTTP handlers:
See [Middleware pipelines]({{< ref "middleware.md" >}}) for more information
#### Name resolution component
You can set the name resolution component to use within the configuration YAML. For example, to set the `spec.nameResolution.component` property to `"sqlite"`, pass configuration options in the `spec.nameResolution.configuration` dictionary, as shown below.
This is a basic example of a Configuration resource:
```yaml
apiVersion: dapr.io/v1alpha1
kind: Configuration
metadata:
  name: appconfig
spec:
  nameResolution:
    component: "sqlite"
    version: "v1"
    configuration:
      connectionString: "/home/user/.dapr/nr.db"
```
For more information, see:
- [The name resolution component documentation]({{< ref supported-name-resolution >}}) for more examples.
- [The Configuration YAML documentation]({{< ref configuration-schema.md >}}) to learn more about how to configure name resolution per component.
#### Scope secret store access
See the [Scoping secrets]({{< ref "secret-scope.md" >}}) guide for information and examples on how to scope secrets to an application.
@ -288,6 +318,9 @@ The `mtls` section contains properties for mTLS.
| `enabled` | bool | If true, enables mTLS for communication between services and apps in the cluster.
| `allowedClockSkew` | string | Allowed tolerance when checking the expiration of TLS certificates, to allow for clock skew. Follows the format used by [Go's time.ParseDuration](https://pkg.go.dev/time#ParseDuration). Default is `15m` (15 minutes).
| `workloadCertTTL` | string | How long a certificate TLS issued by Dapr is valid for. Follows the format used by [Go's time.ParseDuration](https://pkg.go.dev/time#ParseDuration). Default is `24h` (24 hours).
| `sentryAddress` | string | Hostname port address for connecting to the Sentry server. |
| `controlPlaneTrustDomain` | string | Trust domain for the control plane. This is used to verify connection to control plane services. |
| `tokenValidators` | array | Additional Sentry token validators to use for authenticating certificate requests. |
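For example, a Configuration sketch that sets the new fields (the values are illustrative):

```yaml
apiVersion: dapr.io/v1alpha1
kind: Configuration
metadata:
  name: daprsystem
spec:
  mtls:
    enabled: true
    sentryAddress: "dapr-sentry.dapr-system.svc:443" # illustrative address
    controlPlaneTrustDomain: "cluster.local"
```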
See the [mTLS how-to]({{< ref "mtls.md" >}}) and [security concepts]({{< ref "security-concept.md" >}}) for more information.

View File

@ -3,22 +3,24 @@ type: docs
title: "Configure metrics"
linkTitle: "Overview"
weight: 4000
description: "Enable or disable Dapr metrics "
description: "Enable or disable Dapr metrics"
---
By default, each Dapr system process emits Go runtime/process metrics and has its own [Dapr metrics](https://github.com/dapr/dapr/blob/master/docs/development/dapr-metrics.md).
## Prometheus endpoint
The Dapr sidecars exposes a [Prometheus](https://prometheus.io/) metrics endpoint that you can scrape to gain a greater understanding of how Dapr is behaving.
The Dapr sidecar exposes a [Prometheus](https://prometheus.io/)-compatible metrics endpoint that you can scrape to gain a greater understanding of how Dapr is behaving.
## Configuring metrics using the CLI
The metrics application endpoint is enabled by default. You can disable it by passing the command line argument `--enable-metrics=false`.
The default metrics port is `9090`. You can override this by passing the command line argument `--metrics-port` to Daprd.
The default metrics port is `9090`. You can override this by passing the command line argument `--metrics-port` to daprd.
## Configuring metrics in Kubernetes
You can also enable/disable the metrics for a specific application by setting the `dapr.io/enable-metrics: "false"` annotation on your application deployment. With the metrics exporter disabled, `daprd` does not open the metrics listening port.
You can also enable/disable the metrics for a specific application by setting the `dapr.io/enable-metrics: "false"` annotation on your application deployment. With the metrics exporter disabled, daprd does not open the metrics listening port.
The following Kubernetes deployment example shows how metrics are explicitly enabled with the port specified as "9090".
@ -54,10 +56,8 @@ spec:
```
## Configuring metrics using application configuration
You can also enable metrics via application configuration. To disable the metrics collection in the Dapr sidecars running in a specific namespace:
- Use the `metrics` spec configuration.
- Set `enabled: false` to disable the metrics in the Dapr runtime.
You can also enable metrics via application configuration. To disable the metrics collection in the Dapr sidecars by default, set `spec.metrics.enabled` to `false`.
```yaml
apiVersion: dapr.io/v1alpha1
@ -66,17 +66,25 @@ metadata:
  name: tracing
  namespace: default
spec:
  tracing:
    samplingRate: "1"
  metrics:
    enabled: false
```
## High cardinality metrics
Depending on your use case, some metrics emitted by Dapr might contain values that have a high cardinality. This might cause increased memory usage for the Dapr process/container and incur expensive egress costs in certain cloud environments. To mitigate this issue, you can set regular expressions for every metric exposed by the Dapr sidecar. [See a list of all Dapr metrics](https://github.com/dapr/dapr/blob/master/docs/development/dapr-metrics.md).
When invoking Dapr using HTTP, the legacy behavior (and current default as of Dapr 1.13) is to create a separate "bucket" for each requested method. When working with RESTful APIs, this can cause very high cardinality, with potential negative impact on memory usage and CPU.
The following example shows how to apply a regular expression for the label `method` in the metric `dapr_runtime_service_invocation_req_sent_total`:
Dapr 1.13 introduces a new option for the Dapr Configuration resource `spec.metrics.http.increasedCardinality`: when set to `false`, it reports metrics for the HTTP server for each "abstract" method (for example, requesting from a state store) instead of creating a "bucket" for each concrete request path.
The default value of `spec.metrics.http.increasedCardinality` is `true` in Dapr 1.13, to maintain the same behavior as Dapr 1.12 and older. However, the value will change to `false` (low-cardinality metrics by default) in Dapr 1.14.
Setting `spec.metrics.http.increasedCardinality` to `false` is **recommended** to all Dapr users, to reduce resource consumption. The pre-1.13 behavior, which is used when the option is `true`, is considered legacy and is only maintained for users who have special requirements around backwards-compatibility.
## Transform metrics with regular expressions
You can set regular expressions for every metric exposed by the Dapr sidecar to "transform" their values. [See a list of all Dapr metrics](https://github.com/dapr/dapr/blob/master/docs/development/dapr-metrics.md).
The name of the rule must match the name of the metric that is transformed. The following example shows how to apply a regular expression for the label `method` in the metric `dapr_runtime_service_invocation_req_sent_total`:
```yaml
apiVersion: dapr.io/v1alpha1
@ -84,9 +92,10 @@ kind: Configuration
metadata:
  name: daprConfig
spec:
  metric:
    enabled: true
    rules:
  metrics:
    enabled: true
    increasedCardinality: true
    rules:
      - name: dapr_runtime_service_invocation_req_sent_total
        labels:
          - name: method
@ -94,14 +103,9 @@ spec:
"orders/": "orders/.+"
```
When this configuration is applied, a recorded metric with the `method` label of `orders/a746dhsk293972nz` will be replaced with `orders/`.
### Watch the demo
Watch [this video to walk through handling high cardinality metrics](https://youtu.be/pOT8teL6j_k?t=1524):
<iframe width="560" height="315" src="https://www.youtube-nocookie.com/embed/pOT8teL6j_k?start=1524" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" allowfullscreen></iframe>
When this configuration is applied, a recorded metric with the `method` label of `orders/a746dhsk293972nz` is replaced with `orders/`.
Using regular expressions to reduce metrics cardinality is considered legacy. We encourage all users to set `spec.metrics.http.increasedCardinality` to `false` instead, which is simpler to configure and offers better performance.
## References

View File

@ -14,7 +14,7 @@ For detailed information on mTLS, read the [security concepts section]({{< ref "
If custom certificates have not been provided, Dapr automatically creates and persists self-signed certs valid for one year.
In Kubernetes, the certs are persisted to a secret that resides in the namespace of the Dapr system pods, accessible only to them.
In self hosted mode, the certs are persisted to disk.
In self-hosted mode, the certs are persisted to disk.
## Control plane Sentry service configuration
The mTLS settings reside in a Dapr control plane configuration file. For example, when you deploy the Dapr control plane to Kubernetes, this configuration file is automatically created and you can then edit it. The following file shows the available settings for mTLS in a configuration resource, deployed in the `daprsystem` namespace:
@ -32,7 +32,7 @@ spec:
    allowedClockSkew: "15m"
```
The file here shows the default `daprsystem` configuration settings. The examples below show you how to change and apply this configuration to the control plane Sentry service either in Kubernetes and self hosted modes.
The file here shows the default `daprsystem` configuration settings. The examples below show you how to change and apply this configuration to the control plane Sentry service either in Kubernetes and self-hosted modes.
## Kubernetes
@ -491,3 +491,67 @@ Watch this [video](https://www.youtube.com/watch?v=Hkcx9kBDrAc&feature=youtu.be&
<div class="embed-responsive embed-responsive-16by9">
<iframe width="560" height="315" src="https://www.youtube-nocookie.com/embed/Hkcx9kBDrAc?start=1400"></iframe>
</div>
## Sentry token validators
Tokens are often used for authentication and authorization purposes.
Token validators are components responsible for verifying the validity and authenticity of these tokens.
For example, in Kubernetes environments, a common approach to token validation is through the Kubernetes bound service account mechanism.
This validator checks bound service account tokens against Kubernetes to ensure their legitimacy.
The Sentry service can be configured to:
- Enable extra token validators beyond the Kubernetes bound service account validator
- Replace the `insecure` validator enabled by default in self-hosted mode
Sentry token validators are used for joining extra non-Kubernetes clients to a Dapr cluster running in Kubernetes mode, or for replacing the insecure "allow all" validator in self-hosted mode to enable proper identity validation.
You are unlikely to need to configure a token validator unless you are using a non-standard deployment scenario.
> The only token validator currently supported is the `jwks` validator.
### JWKS
The `jwks` validator enables Sentry service to validate JWT tokens using a JWKS endpoint.
The contents of the token _must_ contain the `sub` claim which matches the SPIFFE identity of the Dapr client, in the same Dapr format `spiffe://<trust-domain>/ns/<namespace>/<app-id>`.
The audience of the token must be the SPIFFE ID of the Sentry identity. For example, `spiffe://cluster.local/ns/dapr-system/dapr-sentry`.
Other basic JWT rules regarding signature, expiry, etc. apply.
The `jwks` validator can accept either a remote source to fetch the public key list or a static array for public keys.
The configuration below enables the `jwks` token validator with a remote source.
This remote source uses HTTPS so the `caCertificate` field contains the root of trust for the remote source.
```yaml
kind: Configuration
apiVersion: dapr.io/v1alpha1
metadata:
  name: sentryconfig
spec:
  mtls:
    enabled: true
    tokenValidators:
      - name: jwks
        options:
          minRefreshInterval: 2m
          requestTimeout: 1m
          source: "https://localhost:1234/"
          caCertificate: "<optional ca certificate bundle string>"
```
The configuration below enables the `jwks` token validator with a static array of public keys.
```yaml
kind: Configuration
apiVersion: dapr.io/v1alpha1
metadata:
  name: sentryconfig
spec:
  mtls:
    enabled: true
    tokenValidators:
      - name: jwks
        options:
          minRefreshInterval: 2m
          requestTimeout: 1m
          source: |
            {"keys":[ "12345.." ]}
```

View File

@ -60,6 +60,13 @@ Terminate a running workflow instance with the given name and instance ID.
POST http://localhost:3500/v1.0-beta1/workflows/<workflowComponentName>/<instanceId>/terminate
```
{{% alert title="Note" color="primary" %}}
Terminating a workflow terminates all of the child workflows created by the workflow instance.
Terminating a workflow has no effect on any in-flight activity executions that were started by the terminated instance.
{{% /alert %}}
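For example (the workflow component name `dapr` refers to the built-in engine; the instance ID is illustrative):

```bash
curl -X POST http://localhost:3500/v1.0-beta1/workflows/dapr/12345678/terminate
```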
### URL parameters
Parameter | Description
@ -174,6 +181,11 @@ Purge the workflow state from your state store with the workflow's instance ID.
POST http://localhost:3500/v1.0-beta1/workflows/<workflowComponentName>/<instanceId>/purge
```
{{% alert title="Note" color="primary" %}}
Purging a workflow purges all of the child workflows created by the workflow instance.
{{% /alert %}}
### URL parameters
Parameter | Description

View File

@ -36,6 +36,8 @@ This table is meant to help users understand the equivalent options for running
| `--metrics-port` | `--metrics-port` | | `dapr.io/metrics-port` | Sets the port for the sidecar metrics server. Default is `9090` |
| `--mode` | not supported | | not supported | Runtime hosting option mode for Dapr, either `"standalone"` or `"kubernetes"` (default `"standalone"`). [Learn more.]({{< ref hosting >}}) |
| `--placement-host-address` | `--placement-host-address` | | `dapr.io/placement-host-address` | Comma separated list of addresses for Dapr Actor Placement servers. When no annotation is set, the default value is set by the Sidecar Injector. When the annotation is set and the value is empty, the sidecar does not connect to Placement server. This can be used when there are no actors running in the sidecar. When the annotation is set and the value is not empty, the sidecar connects to the configured address. For example: `127.0.0.1:50057,127.0.0.1:50058` |
| `--actors-service` | not supported | | not supported | Configuration for the service that offers actor placement information. The format is `<name>:<address>`. For example, setting this value to `placement:127.0.0.1:50057,127.0.0.1:50058` is an alternative to using the `--placement-host-address` flag. |
| `--reminders-service` | not supported | | not supported | Configuration for the service that enables actor reminders. The format is `<name>[:<address>]`. Currently, the only supported value is `"default"` (which is also the default value), which uses the built-in reminders subsystem in the Dapr sidecar. |
| `--profiling-port` | `--profiling-port` | | not supported | The port for the profile server (default `7777`) |
| `--app-protocol` | `--app-protocol` | `-P` | `dapr.io/app-protocol` | Configures the protocol Dapr uses to communicate with your app. Valid options are `http`, `grpc`, `https` (HTTP with TLS), `grpcs` (gRPC with TLS), `h2c` (HTTP/2 Cleartext). Note that Dapr does not validate TLS certificates presented by the app. Default is `http` |
| `--enable-app-health-check` | `--enable-app-health-check` | | `dapr.io/enable-app-health-check` | Boolean that enables the [health checks]({{< ref "app-health.md#configuring-app-health-checks" >}}). Default is `false`. |
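For example, using the addresses from the table above, the following two invocations are equivalent ways of pointing the sidecar at the Placement service (a sketch; `myapp` is a hypothetical app ID):

```bash
# Legacy flag: comma-separated list of Placement addresses
daprd --app-id myapp --placement-host-address 127.0.0.1:50057,127.0.0.1:50058

# Newer flag: <name>:<address> format
daprd --app-id myapp --actors-service placement:127.0.0.1:50057,127.0.0.1:50058
```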
View File
@ -3,12 +3,14 @@ type: docs
title: "Name resolution provider component specs"
linkTitle: "Name resolution"
weight: 8000
description: The supported name resolution providers that interface with Dapr service invocation
description: The supported name resolution providers to enable Dapr service invocation
no_list: true
---
The following components provide name resolution for the service invocation building block.
Name resolution components are configured via the [configuration]({{< ref configuration-overview.md >}}).
{{< partial "components/description.html" >}}
{{< partial "components/name-resolution.html" >}}
View File
@ -1,6 +1,6 @@
---
type: docs
title: "Kubernetes DNS name resolution provider spec"
title: "Kubernetes DNS"
linkTitle: "Kubernetes DNS"
description: Detailed information on the Kubernetes DNS name resolution component
---
View File
@ -1,6 +1,6 @@
---
type: docs
title: "mDNS name resolution provider spec"
title: "mDNS"
linkTitle: "mDNS"
description: Detailed information on the mDNS name resolution component
---
View File
@ -0,0 +1,54 @@
---
type: docs
title: "SQLite"
linkTitle: "SQLite"
description: Detailed information on the SQLite name resolution component
---
As an alternative to mDNS, the SQLite name resolution component can be used for running Dapr on single-node environments and for local development scenarios. Dapr sidecars that are part of the cluster store their information in a SQLite database on the local machine.
{{% alert title="Note" color="primary" %}}
This component is optimized to be used in scenarios where all Dapr instances are running on the same physical machine, where the database is accessed through the same, locally mounted disk.
Using the SQLite name resolver with a database file accessed over the network (including via SMB/NFS) can lead to issues such as data corruption, and is **not supported**.
{{% /alert %}}
## Configuration format
Name resolution is configured via the [Dapr Configuration]({{< ref configuration-overview.md >}}).
Within the Configuration YAML, set the `spec.nameResolution.component` property to `"sqlite"`, then pass configuration options in the `spec.nameResolution.configuration` dictionary.
Here is a basic example of a Configuration resource:
```yaml
apiVersion: dapr.io/v1alpha1
kind: Configuration
metadata:
name: appconfig
spec:
nameResolution:
component: "sqlite"
version: "v1"
configuration:
connectionString: "/home/user/.dapr/nr.db"
```
## Spec configuration fields
When using the SQLite name resolver component, the `spec.nameResolution.configuration` dictionary contains these options:
| Field | Required | Type | Details | Examples |
|--------------|:--------:|-----:|:---------|----------|
| `connectionString` | Y | `string` | The connection string for the SQLite database. Normally, this is the path to a file on disk, relative to the current working directory, or absolute. | `"nr.db"` (relative to the working directory), `"/home/user/.dapr/nr.db"` |
| `updateInterval` | N | [Go duration](https://pkg.go.dev/time#ParseDuration) (as a `string`) | Interval for active Dapr sidecars to update their status in the database, which is used as a healthcheck.<br>Smaller intervals reduce the likelihood of stale data being returned if an application goes offline, but increase the load on the database.<br>Must be at least 1s greater than `timeout`. Values with fractions of seconds are truncated (for example, `1500ms` becomes `1s`). Default: `5s` | `"2s"` |
| `timeout` | N | [Go duration](https://pkg.go.dev/time#ParseDuration) (as a `string`).<br>Must be at least 1s. | Timeout for operations on the database. Integers are interpreted as number of seconds. Defaults to `1s` | `"2s"`, `2` |
| `tableName` | N | `string` | Name of the table where the data is stored. If the table does not exist, the table is created by Dapr. Defaults to `hosts`. | `"hosts"` |
| `metadataTableName` | N | `string` | Name of the table used by Dapr to store metadata for the component. If the table does not exist, the table is created by Dapr. Defaults to `metadata`. | `"metadata"` |
| `cleanupInterval` | N | [Go duration](https://pkg.go.dev/time#ParseDuration) (as a `string`) | Interval to remove stale records from the database. Default: `1h` (1 hour) | `"10m"` |
| `busyTimeout` | N | [Go duration](https://pkg.go.dev/time#ParseDuration) (as a `string`) | Interval to wait, in case the SQLite database is currently busy serving another request, before returning a "database busy" error. This is an advanced setting.<br>`busyTimeout` controls how locking works in SQLite. With SQLite, writes are exclusive, so every time any app is writing, the database is locked. If another app tries to write, it waits up to `busyTimeout` before receiving a "database busy" error. However, the `timeout` setting controls the timeout for the entire operation. For example, if the query "hangs" after the database has acquired the lock (that is, after the busy timeout has cleared), then `timeout` takes effect. Default: `800ms` (800 milliseconds) | `"100ms"` |
| `disableWAL` | N | `bool` | If set to true, disables Write-Ahead Logging for journaling of the SQLite database. This is for advanced scenarios only | `true`, `false` |
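Combining a few of the optional fields, a sketch of a tuned Configuration (values are illustrative only):

```yaml
apiVersion: dapr.io/v1alpha1
kind: Configuration
metadata:
  name: appconfig
spec:
  nameResolution:
    component: "sqlite"
    version: "v1"
    configuration:
      connectionString: "/home/user/.dapr/nr.db"
      timeout: "2s"
      updateInterval: "5s"  # must be at least 1s greater than timeout
      cleanupInterval: "30m"
```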
## Related links
- [Service invocation building block]({{< ref service-invocation >}})
View File
@ -1,6 +1,6 @@
---
type: docs
title: "HashiCorp Consul name resolution provider spec"
title: "HashiCorp Consul"
linkTitle: "HashiCorp Consul"
description: Detailed information on the HashiCorp Consul name resolution component
---
View File
@ -73,7 +73,7 @@ spec:
| consumerID | N | Consumer ID (consumer tag) organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; for example, a message is processed only once by one of the consumers in the group. If the `consumerID` is not provided, the Dapr runtime sets it to the Dapr application ID (`appID`) value. If a value for `consumerGroup` is provided, any value for `consumerID` is ignored - a combination of the consumer group and a random unique identifier is set for the `consumerID` instead. | `"channel1"`
| clientID | N | A user-provided string sent with every request to the Kafka brokers for logging, debugging, and auditing purposes. Defaults to `"namespace.appID"` for Kubernetes mode or `"appID"` for Self-Hosted mode. | `"my-namespace.my-dapr-app"`, `"my-dapr-app"`
| authRequired | N | *Deprecated* Enable [SASL](https://en.wikipedia.org/wiki/Simple_Authentication_and_Security_Layer) authentication with the Kafka brokers. | `"true"`, `"false"`
| authType | Y | Configure or disable authentication. Supported values: `none`, `password`, `mtls`, or `oidc` | `"password"`, `"none"`
| authType | Y | Configure or disable authentication. Supported values: `none`, `password`, `mtls`, `oidc`, or `awsiam` | `"password"`, `"none"`
| saslUsername | N | The SASL username used for authentication. Only required if `authType` is set to `"password"`. | `"adminuser"`
| saslPassword | N | The SASL password used for authentication. Can be `secretKeyRef` to use a [secret reference]({{< ref component-secrets.md >}}). Only required if `authType is set to `"password"`. | `""`, `"KeFg23!"`
| saslMechanism | N | The SASL Authentication Mechanism you wish to use. Only required if `authType` is set to `"password"`. Defaults to `PLAINTEXT` | `"SHA-512", "SHA-256", "PLAINTEXT"`
@ -92,6 +92,12 @@ spec:
| oidcClientSecret | N | The OAuth2 client secret that has been provisioned in the identity provider: Required when `authType` is set to `oidc` | `"KeFg23!"` |
| oidcScopes | N | Comma-delimited list of OAuth2/OIDC scopes to request with the access token. Recommended when `authType` is set to `oidc`. Defaults to `"openid"` | `"openid,kafka-prod"` |
| oidcExtensions | N | String containing a JSON-encoded dictionary of OAuth2/OIDC extensions to request with the access token | `{"cluster":"kafka","poolid":"kafkapool"}` |
| awsRegion | N | The AWS region where the Kafka cluster is deployed. Required when `authType` is set to `awsiam` | `"us-west-1"` |
| awsAccessKey | N | AWS access key associated with an IAM account. | `"accessKey"`
| awsSecretKey | N | The secret key associated with the access key. | `"secretKey"`
| awsSessionToken | N | AWS session token to use. A session token is only required if you are using temporary security credentials. | `"sessionToken"`
| awsIamRoleArn | N | IAM role that has access to AWS Managed Streaming for Apache Kafka (MSK). This is another option for authenticating with MSK, aside from AWS credentials. | `"arn:aws:iam::123456789:role/mskRole"`
| awsStsSessionName | N | Represents the session name for assuming a role. | `"MSKSASLDefaultSession"`
| schemaRegistryURL | N | Required when using Schema Registry Avro serialization/deserialization. The Schema Registry URL. | `http://localhost:8081` |
| schemaRegistryAPIKey | N | When using Schema Registry Avro serialization/deserialization. The Schema Registry credentials API Key. | `XYAXXAZ` |
| schemaRegistryAPISecret | N | When using Schema Registry Avro serialization/deserialization. The Schema Registry credentials API Secret. | `ABCDEFGMEADFF` |
@ -107,7 +113,17 @@ The metadata `version` must be set to `1.0.0` when using Azure EventHubs with Ka
Kafka supports a variety of authentication schemes and Dapr supports several: SASL password, mTLS, OIDC/OAuth2. With the added authentication methods, the `authRequired` field has
been deprecated from the v1.6 release and instead the `authType` field should be used. If `authRequired` is set to `true`, Dapr will attempt to configure `authType` correctly
based on the value of `saslPassword`. There are four valid values for `authType`: `none`, `password`, `certificate`, `mtls`, and `oidc`. Note this is authentication only; authorization is still configured within Kafka.
based on the value of `saslPassword`. The valid values for `authType` are:
- `none`
- `password`
- `certificate`
- `mtls`
- `oidc`
- `awsiam`
{{% alert title="Note" color="primary" %}}
`authType` is _authentication_ only. _Authorization_ is still configured within Kafka, except for `awsiam`, which can also drive authorization decisions configured in AWS IAM.
{{% /alert %}}
#### None
@ -276,6 +292,44 @@ spec:
value: 0.10.2.0
```
#### AWS IAM
Authenticating with AWS IAM is supported with MSK. Setting `authType` to `awsiam` uses the AWS SDK to generate authentication tokens.
{{% alert title="Note" color="primary" %}}
The only required metadata field is `awsRegion`. If `awsAccessKey` and `awsSecretKey` are not provided, you can use AWS IAM roles for service accounts for password-less authentication to your Kafka cluster.
{{% /alert %}}
```yaml
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: kafka-pubsub-awsiam
spec:
type: pubsub.kafka
version: v1
metadata:
- name: brokers # Required. Kafka broker connection setting
value: "dapr-kafka.myapp.svc.cluster.local:9092"
- name: consumerGroup # Optional. Used for input bindings.
value: "group1"
- name: clientID # Optional. Used as client tracing ID by Kafka brokers.
value: "my-dapr-app-id"
- name: authType # Required.
value: "awsiam"
- name: awsRegion # Required.
value: "us-west-1"
- name: awsAccessKey # Optional.
value: <AWS_ACCESS_KEY>
- name: awsSecretKey # Optional.
value: <AWS_SECRET_KEY>
- name: awsSessionToken # Optional.
value: <AWS_SESSION_KEY>
- name: awsIamRoleArn # Optional.
value: "arn:aws:iam::123456789:role/mskRole"
- name: awsStsSessionName # Optional.
value: "MSKSASLDefaultSession"
```
### Communication using TLS
By default, TLS is enabled to secure the transport layer to Kafka. To disable TLS, set `disableTls` to `true`. When TLS is enabled, you can
View File
@ -1,133 +0,0 @@
---
type: docs
title: "NATS Streaming"
linkTitle: "NATS Streaming"
description: "Detailed documentation on the NATS Streaming pubsub component"
aliases:
- "/operations/components/setup-pubsub/supported-pubsub/setup-nats-streaming/"
---
## ⚠️ Deprecation notice
{{% alert title="Warning" color="warning" %}}
This component is **deprecated** because the [NATS Streaming Server](https://nats-io.gitbook.io/legacy-nats-docs/nats-streaming-server-aka-stan/developing-with-stan) was deprecated in June 2023 and no longer receives updates. Users are encouraged to switch to using [JetStream]({{< ref setup-jetstream >}}) as an alternative.
This component will be **removed in the Dapr v1.13 release**.
{{% /alert %}}
## Component format
To set up NATS Streaming pub/sub, create a component of type `pubsub.natsstreaming`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration.
```yaml
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: natsstreaming-pubsub
spec:
type: pubsub.natsstreaming
version: v1
metadata:
- name: natsURL
value: "nats://localhost:4222"
- name: natsStreamingClusterID
value: "clusterId"
- name: concurrencyMode
value: parallel
- name: consumerID # Optional. If not supplied, runtime will create one.
value: "channel1"
# below are subscription configuration.
- name: subscriptionType
value: <REPLACE-WITH-SUBSCRIPTION-TYPE> # Required. Allowed values: topic, queue.
- name: ackWaitTime
value: "" # Optional.
- name: maxInFlight
value: "" # Optional.
- name: durableSubscriptionName
value: "" # Optional.
# following subscription options - only one can be used
- name: deliverNew
value: <bool>
- name: startAtSequence
value: 1
- name: startWithLastReceived
value: false
- name: deliverAll
value: false
- name: startAtTimeDelta
value: ""
- name: startAtTime
value: ""
- name: startAtTimeFormat
value: ""
```
{{% alert title="Warning" color="warning" %}}
The above example uses secrets as plain strings. It is recommended to [use a secret store for the secrets]({{< ref component-secrets.md >}}).
{{% /alert %}}
## Spec metadata fields
| Field | Required | Details | Example |
|--------------------|:--------:|---------|---------|
| natsURL | Y | NATS server address URL | "`nats://localhost:4222`"|
| natsStreamingClusterID | Y | NATS cluster ID |`"clusterId"`|
| subscriptionType | Y | Subscription type. Allowed values `"topic"`, `"queue"` | `"topic"` |
| consumerID | N | Consumer ID (consumer tag) organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; for example, a message is processed only once by one of the consumers in the group. If the `consumerID` is not provided, the Dapr runtime sets it to the Dapr application ID (`appID`) value. | `"channel1"`
| ackWaitTime | N | See [here](https://docs.nats.io/developing-with-nats-streaming/acks#acknowledgements) | `"300ms"`|
| maxInFlight | N | See [here](https://docs.nats.io/developing-with-nats-streaming/acks#acknowledgements) | `"25"` |
| durableSubscriptionName | N | [Durable subscriptions](https://docs.nats.io/developing-with-nats-streaming/durables) identification name. | `"my-durable"`|
| deliverNew | N | Subscription Options. Only one can be used. Deliver new messages only | `"true"`, `"false"` |
| startAtSequence | N | Subscription Options. Only one can be used. Sets the desired start sequence position and state | `"100000"`, `"230420"` |
| startWithLastReceived | N | Subscription Options. Only one can be used. Sets the start position to last received. | `"true"`, `"false"` |
| deliverAll | N | Subscription Options. Only one can be used. Deliver all available messages | `"true"`, `"false"` |
| startAtTimeDelta | N | Subscription Options. Only one can be used. Sets the desired start time position and state using the delta | `"10m"`, `"23s"` |
| startAtTime | N | Subscription Options. Only one can be used. Sets the desired start time position and state | `"Feb 3, 2013 at 7:54pm (PST)"` |
| startAtTimeFormat | N | Must be used with `startAtTime`. Sets the format for the time | `"Jan 2, 2006 at 3:04pm (MST)"` |
| concurrencyMode | N | Call the subscriber sequentially (“single” message at a time), or concurrently (in “parallel”). Default: `"parallel"` | `"single"`, `"parallel"`
## Create a NATS server
{{< tabs "Self-Hosted" "Kubernetes">}}
{{% codetab %}}
Run a NATS server locally using Docker:
```bash
docker run -d --name nats-streaming -p 4222:4222 -p 8222:8222 nats-streaming
```
Interact with the server using the client port: `localhost:4222`.
{{% /codetab %}}
{{% codetab %}}
Install NATS on Kubernetes by using the [kubectl](https://docs.nats.io/running-a-nats-service/introduction/running/nats-kubernetes/):
```bash
# Single server NATS
kubectl apply -f https://raw.githubusercontent.com/nats-io/k8s/master/nats-server/single-server-nats.yml
kubectl apply -f https://raw.githubusercontent.com/nats-io/k8s/master/nats-streaming-server/single-server-stan.yml
```
This installs a single NATS Streaming server and a NATS server into the `default` namespace. To interact with NATS, find the service with:
```bash
kubectl get svc stan
```
For example, if installing using the example above, the NATS Streaming address would be:
`<YOUR-HOST>:4222`
{{% /codetab %}}
{{< /tabs >}}
## Related links
- [Basic schema for a Dapr component]({{< ref component-schema >}}).
- Read [this guide]({{< ref "howto-publish-subscribe.md#step-2-publish-a-topic" >}}) for instructions on configuring pub/sub components.
- [Pub/Sub building block]({{< ref pubsub >}}).
- [NATS Streaming Deprecation Notice](https://github.com/nats-io/nats-streaming-server/#warning--deprecation-notice-warning).
View File
@ -18,7 +18,9 @@ metadata:
name: <NAME>
spec:
type: state.azure.blobstorage
version: v1
# Supports v1 and v2. Users should always use v2 by default. There is no
# migration path from v1 to v2, see `versioning` below.
version: v2
metadata:
- name: accountName
value: "[your_account_name]"
@ -32,21 +34,32 @@ spec:
The above example uses secrets as plain strings. It is recommended to use a secret store for the secrets as described [here]({{< ref component-secrets.md >}}).
{{% /alert %}}
## Versioning
Dapr has 2 versions of the Azure Blob Storage state store component: `v1` and `v2`. It is recommended to use `v2` for all new applications. `v1` is considered legacy and is preserved for compatibility with existing applications only.
In `v1`, a longstanding implementation issue was identified, where the [key prefix]({{< ref howto-share-state.md >}}) was incorrectly stripped by the component, essentially behaving as if `keyPrefix` was always set to `none`.
The updated `v2` of the component fixes the incorrect behavior and makes the state store correctly respect the `keyPrefix` property.
While `v1` and `v2` have the same metadata fields, they are otherwise incompatible, with no automatic data migration path for `v1` to `v2`.
If you are already using `v1` of this component, you should continue to use `v1`; adopt `v2` only when creating a new state store.
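For example, an existing application would keep the component definition pinned to `v1` (a sketch based on the component format above):

```yaml
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: <NAME>
spec:
  type: state.azure.blobstorage
  # Existing applications with data written by v1 must stay on v1
  version: v1
  metadata:
  - name: accountName
    value: "[your_account_name]"
```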
## Spec metadata fields
| Field | Required | Details | Example |
| Field | Required | Details | Example |
|--------------------|:--------:|---------|---------|
| `accountName` | Y | The storage account name | `"mystorageaccount"`.
| `accountKey` | Y (unless using Microsoft Entra ID) | Primary or secondary storage key | `"key"`
| `containerName` | Y | The name of the container to be used for Dapr state. The container will be created for you if it doesn't exist | `"container"`
| `azureEnvironment` | N | Optional name for the Azure environment if using a different Azure cloud | `"AZUREPUBLICCLOUD"` (default value), `"AZURECHINACLOUD"`, `"AZUREUSGOVERNMENTCLOUD"`, `"AZUREGERMANCLOUD"`
| `accountName` | Y | The storage account name | `"mystorageaccount"`. |
| `accountKey` | Y (unless using Microsoft Entra ID) | Primary or secondary storage key | `"key"` |
| `containerName` | Y | The name of the container to be used for Dapr state. The container will be created for you if it doesn't exist | `"container"` |
| `azureEnvironment` | N | Optional name for the Azure environment if using a different Azure cloud | `"AZUREPUBLICCLOUD"` (default value), `"AZURECHINACLOUD"`, `"AZUREUSGOVERNMENTCLOUD"` |
| `endpoint` | N | Optional custom endpoint URL. This is useful when using the [Azurite emulator](https://github.com/Azure/azurite) or when using custom domains for Azure Storage (although this is not officially supported). The endpoint must be the full base URL, including the protocol (`http://` or `https://`), the IP or FQDN, and optional port. | `"http://127.0.0.1:10000"`
| `ContentType` | N | The blob's content type | `"text/plain"`
| `ContentMD5` | N | The blob's MD5 hash | `"vZGKbMRDAnMs4BIwlXaRvQ=="`
| `ContentEncoding` | N | The blob's content encoding | `"UTF-8"`
| `ContentLanguage` | N | The blob's content language | `"en-us"`
| `ContentDisposition` | N | The blob's content disposition. Conveys additional information about how to process the response payload | `"attachment"`
| `CacheControl` | N | The blob's cache control | `"no-cache"`
| `ContentType` | N | The blob's content type | `"text/plain"` |
| `ContentMD5` | N | The blob's MD5 hash | `"vZGKbMRDAnMs4BIwlXaRvQ=="` |
| `ContentEncoding` | N | The blob's content encoding | `"UTF-8"` |
| `ContentLanguage` | N | The blob's content language | `"en-us"` |
| `ContentDisposition` | N | The blob's content disposition. Conveys additional information about how to process the response payload | `"attachment"` |
| `CacheControl`| N | The blob's cache control | `"no-cache"` |
## Setup Azure Blob Storage
View File
@ -1,13 +1,23 @@
---
type: docs
title: "PostgreSQL"
linkTitle: "PostgreSQL"
description: Detailed information on the PostgreSQL state store component
title: "PostgreSQL v1"
linkTitle: "PostgreSQL v1"
description: Detailed information on the PostgreSQL v1 state store component
aliases:
- "/operations/components/setup-state-store/supported-state-stores/setup-postgresql/"
- "/operations/components/setup-state-store/supported-state-stores/setup-postgres/"
- "/operations/components/setup-state-store/supported-state-stores/setup-postgresql-v1/"
- "/operations/components/setup-state-store/supported-state-stores/setup-postgres-v1/"
---
This component allows using PostgreSQL (Postgres) as state store for Dapr. See [this guide]({{< ref "howto-get-save-state.md#step-1-setup-a-state-store" >}}) on how to create and apply a state store configuration.
{{% alert title="Note" color="primary" %}}
Starting with Dapr 1.13, you can leverage the [PostgreSQL v2]({{< ref setup-postgresql-v2.md >}}) state store component, which contains some improvements to performance and reliability.
The v2 component is not compatible with v1, and data cannot be migrated between the two components. The v2 component does not offer support for state store query APIs.
There are no plans to deprecate the v1 component.
{{% /alert %}}
This component allows using PostgreSQL (Postgres) as a state store for Dapr, using the "v1" component. See [this guide]({{< ref "howto-get-save-state.md#step-1-setup-a-state-store" >}}) on how to create and apply a state store configuration.
```yaml
apiVersion: dapr.io/v1alpha1
@ -21,8 +31,8 @@ spec:
# Connection string
- name: connectionString
value: "<CONNECTION STRING>"
# Timeout for database operations, in seconds (optional)
#- name: timeoutInSeconds
# Timeout for database operations, as a Go duration or number of seconds (optional)
#- name: timeout
# value: 20
# Name of the table where to store the state (optional)
#- name: tableName
@ -31,8 +41,8 @@ spec:
#- name: metadataTableName
# value: "dapr_metadata"
# Cleanup interval in seconds, to remove expired rows (optional)
#- name: cleanupIntervalInSeconds
# value: 3600
#- name: cleanupInterval
# value: "1h"
# Maximum number of connections pooled by this component (optional)
#- name: maxConns
# value: 0
@ -59,7 +69,7 @@ The following metadata options are **required** to authenticate using a PostgreS
| Field | Required | Details | Example |
|--------|:--------:|---------|---------|
| `connectionString` | Y | The connection string for the PostgreSQL database. See the PostgreSQL [documentation on database connections](https://www.postgresql.org/docs/current/libpq-connect.html) for information on how to define a connection string. | `"host=localhost user=postgres password=example port=5432 connect_timeout=10 database=my_db"`
| `connectionString` | Y | The connection string for the PostgreSQL database. See the PostgreSQL [documentation on database connections](https://www.postgresql.org/docs/current/libpq-connect.html) for information on how to define a connection string. | `"host=localhost user=postgres password=example port=5432 connect_timeout=10 database=my_db"` |
### Authenticate using Microsoft Entra ID
@ -77,10 +87,10 @@ Authenticating with Microsoft Entra ID is supported with Azure Database for Post
| Field | Required | Details | Example |
|--------------------|:--------:|---------|---------|
| `timeoutInSeconds` | N | Timeout, in seconds, for all database operations. Defaults to `20` | `30`
| `tableName` | N | Name of the table where the data is stored. Defaults to `state`. Can optionally have the schema name as prefix, such as `public.state` | `"state"`, `"public.state"`
| `metadataTableName` | N | Name of the table Dapr uses to store a few metadata properties. Defaults to `dapr_metadata`. Can optionally have the schema name as prefix, such as `public.dapr_metadata` | `"dapr_metadata"`, `"public.dapr_metadata"`
| `cleanupIntervalInSeconds` | N | Interval, in seconds, to clean up rows with an expired TTL. Default: `3600` (i.e. 1 hour). Setting this to values <=0 disables the periodic cleanup. | `1800`, `-1`
| `timeout` | N | Timeout for operations on the database, as a [Go duration](https://pkg.go.dev/time#ParseDuration). Integers are interpreted as number of seconds. Defaults to `20s` | `"30s"`, `30` |
| `cleanupInterval` | N | Interval, as a Go duration or number of seconds, to clean up rows with an expired TTL. Default: `1h` (1 hour). Setting this to values <=0 disables the periodic cleanup. | `"30m"`, `1800`, `-1`
| `maxConns` | N | Maximum number of connections pooled by this component. Set to 0 or lower to use the default value, which is the greater of 4 or the number of CPUs. | `"4"`
| `connectionMaxIdleTime` | N | Max idle time before unused connections are automatically closed in the connection pool. By default, there's no value and this is left to the database driver to choose. | `"5m"`
| `queryExecMode` | N | Controls the default mode for executing queries. By default, Dapr uses the extended protocol and automatically prepares and caches prepared statements. However, this may be incompatible with proxies such as PGBouncer. In this case, it may be preferable to use `exec` or `simple_protocol`. | `"simple_protocol"`
@ -100,8 +110,8 @@ Authenticating with Microsoft Entra ID is supported with Azure Database for Post
> This example does not describe a production configuration because it sets the password in plain text and the user name is left as the PostgreSQL default of "postgres".
2. Create a database for state data.
Either the default "postgres" database can be used, or create a new database for storing state data.
1. Create a database for state data.
Either the default "postgres" database can be used, or create a new database for storing state data.
To create a new database in PostgreSQL, run the following SQL command:
@ -121,10 +131,10 @@ This state store supports [Time-To-Live (TTL)]({{< ref state-store-ttl.md >}}) f
Because PostgreSQL doesn't have built-in support for TTLs, this is implemented in Dapr by adding a column in the state table indicating when the data is to be considered "expired". Records that are "expired" are not returned to the caller, even if they're still physically stored in the database. A background "garbage collector" periodically scans the state table for expired rows and deletes them.
The interval at which the deletion of expired records happens is set with the `cleanupIntervalInSeconds` metadata property, which defaults to 3600 seconds (that is, 1 hour).
You can set the deletion interval of expired records with the `cleanupInterval` metadata property, which defaults to 3600 seconds (that is, 1 hour).
- Longer intervals require less frequent scans for expired rows, but can require storing expired records for longer, potentially requiring more storage space. If you plan to store many records in your state table, with short TTLs, consider setting `cleanupIntervalInSeconds` to a smaller value, for example `300` (300 seconds, or 5 minutes).
- If you do not plan to use TTLs with Dapr and the PostgreSQL state store, you should consider setting `cleanupIntervalInSeconds` to a value <= 0 (e.g. `0` or `-1`) to disable the periodic cleanup and reduce the load on the database.
- Longer intervals require less frequent scans for expired rows, but can require storing expired records for longer, potentially requiring more storage space. If you plan to store many records in your state table, with short TTLs, consider setting `cleanupInterval` to a smaller value; for example, `5m` (5 minutes).
- If you do not plan to use TTLs with Dapr and the PostgreSQL state store, you should consider setting `cleanupInterval` to a value <= 0 (for example, `0` or `-1`) to disable the periodic cleanup and reduce the load on the database.
The column in the state table where the expiration date for records is stored, `expiredate`, **does not have an index by default**, so each periodic cleanup must perform a full-table scan. If you have a table with a very large number of records, and only some of them use a TTL, you may find it useful to create an index on that column. Assuming that your state table name is `state` (the default), you can use this query:
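The query itself falls outside this hunk; a plausible form, assuming the default table name `state` (the index name is hypothetical), would be:

```sql
-- Hypothetical index name; assumes the default "state" table
CREATE INDEX expiredate_idx ON state (expiredate);
```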
View File
@ -0,0 +1,165 @@
---
type: docs
title: "PostgreSQL"
linkTitle: "PostgreSQL"
description: Detailed information on the PostgreSQL state store component
aliases:
- "/operations/components/setup-state-store/supported-state-stores/setup-postgresql-v2/"
- "/operations/components/setup-state-store/supported-state-stores/setup-postgres-v2/"
---
{{% alert title="Note" color="primary" %}}
This is the v2 of the PostgreSQL state store component, which contains some improvements to performance and reliability. New applications are encouraged to use v2.
The PostgreSQL v2 state store component is not compatible with the [v1 component]({{< ref setup-postgresql-v1.md >}}), and data cannot be migrated between the two components. The v2 component does not offer support for state store query APIs.
There are no plans to deprecate the v1 component.
{{% /alert %}}
This component allows using PostgreSQL (Postgres) as a state store for Dapr, using the "v2" component. See [this guide]({{< ref "howto-get-save-state.md#step-1-setup-a-state-store" >}}) on how to create and apply a state store configuration.
```yaml
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: <NAME>
spec:
type: state.postgresql
# Note: setting "version" to "v2" is required to use the v2 of the component
version: v2
metadata:
# Connection string
- name: connectionString
value: "<CONNECTION STRING>"
# Timeout for database operations, as a Go duration or number of seconds (optional)
#- name: timeout
# value: 20
# Prefix for the table where the data is stored (optional)
#- name: tablePrefix
# value: ""
# Name of the table where to store metadata used by Dapr (optional)
#- name: metadataTableName
# value: "dapr_metadata"
# Cleanup interval in seconds, to remove expired rows (optional)
#- name: cleanupInterval
# value: "1h"
# Maximum number of connections pooled by this component (optional)
#- name: maxConns
# value: 0
# Max idle time for connections before they're closed (optional)
#- name: connectionMaxIdleTime
# value: 0
# Controls the default mode for executing queries. (optional)
#- name: queryExecMode
# value: ""
# Uncomment this if you wish to use PostgreSQL as a state store for actors (optional)
#- name: actorStateStore
# value: "true"
```
{{% alert title="Warning" color="warning" %}}
The above example uses secrets as plain strings. It is recommended to use a secret store for the secrets as described [here]({{< ref component-secrets.md >}}).
{{% /alert %}}
## Spec metadata fields
### Authenticate using a connection string
The following metadata options are **required** to authenticate using a PostgreSQL connection string.
| Field | Required | Details | Example |
|--------|:--------:|---------|---------|
| `connectionString` | Y | The connection string for the PostgreSQL database. See the PostgreSQL [documentation on database connections](https://www.postgresql.org/docs/current/libpq-connect.html) for information on how to define a connection string. | `"host=localhost user=postgres password=example port=5432 connect_timeout=10 database=my_db"` |
### Authenticate using Microsoft Entra ID
Authenticating with Microsoft Entra ID is supported with Azure Database for PostgreSQL. All authentication methods supported by Dapr can be used, including client credentials ("service principal") and Managed Identity.
| Field | Required | Details | Example |
|--------|:--------:|---------|---------|
| `useAzureAD` | Y | Must be set to `true` to enable the component to retrieve access tokens from Microsoft Entra ID. | `"true"` |
| `connectionString` | Y | The connection string for the PostgreSQL database.<br>This must contain the user, which corresponds to the name of the user created inside PostgreSQL that maps to the Microsoft Entra ID identity. This is often the name of the corresponding principal (for example, the name of the Microsoft Entra ID application). This connection string should not contain any password. | `"host=mydb.postgres.database.azure.com user=myapplication port=5432 database=my_db sslmode=require"` |
| `azureTenantId` | N | ID of the Microsoft Entra ID tenant | `"cd4b2887-304c-…"` |
| `azureClientId` | N | Client ID (application ID) | `"c7dd251f-811f-…"` |
| `azureClientSecret` | N | Client secret (application password) | `"Ecy3X…"` |
### Other metadata options
| Field | Required | Details | Example |
|--------------------|:--------:|---------|---------|
| `tablePrefix` | N | Prefix for the table where the data is stored. Can optionally have the schema name as prefix, such as `public.prefix_` | `"prefix_"`, `"public.prefix_"` |
| `metadataTableName` | N | Name of the table Dapr uses to store a few metadata properties. Defaults to `dapr_metadata`. Can optionally have the schema name as prefix, such as `public.dapr_metadata` | `"dapr_metadata"`, `"public.dapr_metadata"` |
| `timeout` | N | Timeout for operations on the database, as a [Go duration](https://pkg.go.dev/time#ParseDuration). Integers are interpreted as number of seconds. Defaults to `20s` | `"30s"`, `30` |
| `cleanupInterval` | N | Interval, as a Go duration or number of seconds, to clean up rows with an expired TTL. Default: `1h` (1 hour). Setting this to values <=0 disables the periodic cleanup. | `"30m"`, `1800`, `-1` |
| `maxConns` | N | Maximum number of connections pooled by this component. Set to 0 or lower to use the default value, which is the greater of 4 or the number of CPUs. | `"4"` |
| `connectionMaxIdleTime` | N | Max idle time before unused connections are automatically closed in the connection pool. By default, there's no value and this is left to the database driver to choose. | `"5m"` |
| `queryExecMode` | N | Controls the default mode for executing queries. By default, Dapr uses the extended protocol and automatically prepares and caches prepared statements. However, this may be incompatible with proxies such as PGBouncer. In this case, it may be preferable to use `exec` or `simple_protocol`. | `"simple_protocol"` |
| `actorStateStore` | N | Consider this state store for actors. Defaults to `"false"` | `"true"`, `"false"` |
## Setup PostgreSQL
{{< tabs "Self-Hosted" >}}
{{% codetab %}}
1. Run an instance of PostgreSQL. You can run a local instance of PostgreSQL in Docker with the following command:
```bash
docker run -p 5432:5432 -e POSTGRES_PASSWORD=example postgres
```
> This example does not describe a production configuration because it sets the password in plain text and the user name is left as the PostgreSQL default of "postgres".
2. Create a database for state data.
Either the default "postgres" database can be used, or create a new database for storing state data.
To create a new database in PostgreSQL, run the following SQL command:
```sql
CREATE DATABASE my_dapr;
```
{{% /codetab %}}
{{% /tabs %}}
## Advanced
### Differences between v1 and v2
The PostgreSQL state store v2 was introduced in Dapr 1.13. The [pre-existing v1]({{< ref setup-postgresql-v1.md >}}) remains available and is not deprecated.
In the v2 component, the table schema has been changed significantly, with the goal of increasing performance and reliability. Most notably, the value stored by Dapr is now of type _BYTEA_, which allows faster queries and, in some cases, is more space-efficient than the previously used _JSONB_ column.
However, due to this change, the v2 component does not support the [Dapr state store query APIs]({{< ref howto-state-query-api.md >}}).
Also, in the v2 component, ETags are now random UUIDs, which ensures better compatibility with other PostgreSQL-compatible databases, such as CockroachDB.
Because of these changes, v1 and v2 components are not able to read or write data from the same table. At this stage, it's also impossible to migrate data between the two versions of the component.
### Displaying the data in human-readable format
The PostgreSQL v2 component stores the state's value in the `value` column, which is of type _BYTEA_. Most PostgreSQL tools, including pgAdmin, consider the value as binary and do not display it in human-readable form by default.
If you want to inspect the value in the state store, and you know it's not binary (for example, JSON data), you can have the value displayed in human-readable form using a query like the following:
```sql
-- Replace "state" with the name of the state table in your environment
SELECT *, convert_from(value, 'utf-8') FROM state;
```
### TTLs and cleanups
This state store supports [Time-To-Live (TTL)]({{< ref state-store-ttl.md >}}) for records stored with Dapr. When storing data using Dapr, you can set the `ttlInSeconds` metadata property to indicate after how many seconds the data should be considered "expired".
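For example, a sketch of saving a record with a two-minute TTL through the state API, assuming a state store component named `statestore`:

```bash
curl -X POST http://localhost:3500/v1.0/state/statestore \
  -H "Content-Type: application/json" \
  -d '[{"key": "mykey", "value": "myvalue", "metadata": {"ttlInSeconds": "120"}}]'
```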
Because PostgreSQL doesn't have built-in support for TTLs, this is implemented in Dapr by adding a column in the state table indicating when the data is to be considered "expired". Records that are "expired" are not returned to the caller, even if they're still physically stored in the database. A background "garbage collector" periodically scans the state table for expired rows and deletes them.
You can set the deletion interval of expired records with the `cleanupInterval` metadata property, which defaults to 3600 seconds (that is, 1 hour).
- Longer intervals require less frequent scans for expired rows, but can require storing expired records for longer, potentially requiring more storage space. If you plan to store many records in your state table, with short TTLs, consider setting `cleanupInterval` to a smaller value; for example, `5m` (5 minutes).
- If you do not plan to use TTLs with Dapr and the PostgreSQL state store, you should consider setting `cleanupInterval` to a value <= 0 (for example, `0` or `-1`) to disable the periodic cleanup and reduce the load on the database.
## Related links
- [Basic schema for a Dapr component]({{< ref component-schema >}})
- Read [this guide]({{< ref "howto-get-save-state.md#step-2-save-and-retrieve-a-single-state" >}}) for instructions on configuring state store components
- [State management building block]({{< ref state-management >}})
View File
@ -50,14 +50,14 @@ spec:
| Field | Required | Details | Example |
|--------------------|:--------:|---------|---------|
| `connectionString` | Y | The connection string for the SQLite database. See below for more details. | `"path/to/data.db"`, `"file::memory:?cache=shared"`
| `timeoutInSeconds` | N | Timeout, in seconds, for all database operations. Defaults to `20` | `30`
| `tableName` | N | Name of the table where the data is stored. Defaults to `state`. | `"state"`
| `metadataTableName` | N | Name of the table used by Dapr to store metadata for the component. Defaults to `metadata`. | `"metadata"`
| `cleanupInterval` | N | Interval, as a [Go duration](https://pkg.go.dev/time#ParseDuration), to clean up rows with an expired TTL. Setting this to values <=0 disables the periodic cleanup. Default: `0` (i.e. disabled) | `"2h"`, `"30m"`, `-1`
| `busyTimeout` | N | Interval, as a [Go duration](https://pkg.go.dev/time#ParseDuration), to wait in case the SQLite database is currently busy serving another request, before returning a "database busy" error. Default: `2s` | `"100ms"`, `"5s"`
| `disableWAL` | N | If set to true, disables Write-Ahead Logging for journaling of the SQLite database. You should set this to `false` if the database is stored on a network file system (e.g. a folder mounted as a SMB or NFS share). This option is ignored for read-only or in-memory databases. | `"100ms"`, `"5s"`
| `actorStateStore` | N | Consider this state store for actors. Defaults to `"false"` | `"true"`, `"false"`
| `connectionString` | Y | The connection string for the SQLite database. See below for more details. | `"path/to/data.db"`, `"file::memory:?cache=shared"` |
| `timeout` | N | Timeout for operations on the database, as a [Go duration](https://pkg.go.dev/time#ParseDuration). Integers are interpreted as number of seconds. Defaults to `20s` | `"30s"`, `30` |
| `tableName` | N | Name of the table where the data is stored. Defaults to `state`. | `"state"` |
| `metadataTableName` | N | Name of the table used by Dapr to store metadata for the component. Defaults to `metadata`. | `"metadata"` |
| `cleanupInterval` | N | Interval, as a [Go duration](https://pkg.go.dev/time#ParseDuration), to clean up rows with an expired TTL. Setting this to values <=0 disables the periodic cleanup. Default: `0` (i.e. disabled) | `"2h"`, `"30m"`, `-1` |
| `busyTimeout` | N | Interval, as a [Go duration](https://pkg.go.dev/time#ParseDuration), to wait in case the SQLite database is currently busy serving another request, before returning a "database busy" error. Default: `2s` | `"100ms"`, `"5s"` |
| `disableWAL` | N | If set to true, disables Write-Ahead Logging for journaling of the SQLite database. You should set this to `false` if the database is stored on a network file system (for example, a folder mounted as a SMB or NFS share). This option is ignored for read-only or in-memory databases. | `"true"`, `"false"` |
| `actorStateStore` | N | Consider this state store for actors. Defaults to `"false"` | `"true"`, `"false"` |
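A sketch of a component definition using the renamed `timeout` field (values are illustrative):

```yaml
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: statestore
spec:
  type: state.sqlite
  version: v1
  metadata:
  - name: connectionString
    value: "data.db"
  # "timeout" replaces the older "timeoutInSeconds" field
  - name: timeout
    value: "30s"
```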
The **`connectionString`** parameter configures how to open the SQLite database.
View File
@ -27,8 +27,17 @@ spec:
stdout: true
otel:
endpointAddress: <REPLACE-WITH-ENDPOINT-ADDRESS>
isSecure: false
isSecure: <TRUE-OR-FALSE>
protocol: <HTTP-OR-GRPC>
metrics:
enabled: <TRUE-OR-FALSE>
rules:
- name: <METRIC-NAME>
labels:
- name: <LABEL-NAME>
regex: {}
http:
increasedCardinality: <TRUE-OR-FALSE>
httpPipeline: # for incoming http calls
handlers:
- name: <HANDLER-NAME>
@ -37,6 +46,11 @@ spec:
handlers:
- name: <HANDLER-NAME>
type: <HANDLER-TYPE>
nameResolution:
component: <NAME-OF-NAME-RESOLUTION-COMPONENT>
version: <NAME-RESOLUTION-COMPONENT-VERSION>
configuration:
<NAME-RESOLUTION-COMPONENT-METADATA-CONFIGURATION>
secrets:
scopes:
- storeName: <NAME-OF-SCOPED-STORE>
View File
@ -3,3 +3,8 @@
state: Alpha
version: v1
since: "1.2"
- component: SQLite
link: nr-sqlite
state: Alpha
version: v1
since: "1.13"
View File
@ -46,14 +46,6 @@
features:
bulkPublish: false
bulkSubscribe: false
- component: NATS Streaming
link: setup-nats-streaming
state: Deprecated
version: v1
since: "1.11"
features:
bulkPublish: false
bulkSubscribe: false
- component: RabbitMQ
link: setup-rabbitmq
state: Stable
View File
@ -1,8 +1,8 @@
- component: Azure Blob Storage
link: setup-azure-blobstorage
state: Stable
version: v1
since: "1.0"
version: v2
since: "1.13"
features:
crud: true
transactions: false
View File
@ -141,8 +141,8 @@
etag: true
ttl: true
query: false
- component: PostgreSQL
link: setup-postgresql
- component: PostgreSQL v1
link: setup-postgresql-v1
state: Stable
version: v1
since: "1.0"
@ -152,6 +152,17 @@
etag: true
ttl: true
query: true
- component: PostgreSQL v2
link: setup-postgresql-v2
state: Stable
version: v2
since: "1.13"
features:
crud: true
transactions: true
etag: true
ttl: true
query: false
- component: Redis
link: setup-redis
state: Stable