Merge branch 'v1.10' into issue_2798b

Signed-off-by: Hannah Hunter <94493363+hhunter-ms@users.noreply.github.com>
Hannah Hunter 2023-02-02 16:40:15 -06:00 committed by GitHub
commit 80025e2a86
31 changed files with 1376 additions and 501 deletions

View File

@ -23,7 +23,11 @@ The sidecar APIs are called from your application over local http or gRPC endpoi
## Self-hosted with `dapr run`
When Dapr is installed in [self-hosted mode]({{<ref self-hosted>}}), the `daprd` binary is downloaded and placed under the user home directory (`$HOME/.dapr/bin` for Linux/MacOS or `%USERPROFILE%\.dapr\bin\` for Windows). In self-hosted mode, running the Dapr CLI [`run` command]({{< ref dapr-run.md >}}) launches the `daprd` executable together with the provided application executable. This is the recommended way of running the Dapr sidecar when working locally in scenarios such as development and testing. The various arguments the CLI exposes to configure the sidecar can be found in the [Dapr run command reference]({{<ref dapr-run>}}).
When Dapr is installed in [self-hosted mode]({{<ref self-hosted>}}), the `daprd` binary is downloaded and placed under the user home directory (`$HOME/.dapr/bin` for Linux/macOS or `%USERPROFILE%\.dapr\bin\` for Windows).
In self-hosted mode, running the Dapr CLI [`run` command]({{< ref dapr-run.md >}}) launches the `daprd` executable with the provided application executable. This is the recommended way of running the Dapr sidecar when working locally in scenarios such as development and testing.
You can find the various arguments that the CLI exposes to configure the sidecar in the [Dapr run command reference]({{<ref dapr-run>}}).
## Kubernetes with `dapr-sidecar-injector`
@ -37,7 +41,9 @@ For a detailed list of all available arguments run `daprd --help` or see this [t
### Examples
1. Start a sidecar along with an application by specifying its unique ID. Note `--app-id` is a required field:
1. Start a sidecar alongside an application by specifying its unique ID.
**Note:** `--app-id` is a required field and cannot contain dots.
```bash
daprd --app-id myapp

View File

@ -37,3 +37,10 @@ Dapr provides a way to determine its health using an [HTTP `/healthz` endpoint](
- Determined for readiness and liveness
Read more about how to apply [Dapr health checks]({{< ref sidecar-health >}}) to your application.
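For example, a minimal sketch of Kubernetes liveness and readiness probes pointing at this endpoint, assuming the sidecar's default HTTP port of 3500:
```yaml
livenessProbe:
  httpGet:
    path: /v1.0/healthz
    port: 3500
  initialDelaySeconds: 5
  periodSeconds: 10
readinessProbe:
  httpGet:
    path: /v1.0/healthz
    port: 3500
  initialDelaySeconds: 5
  periodSeconds: 10
```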
## Next steps
- [Learn more about resiliency]({{< ref resiliency-overview.md >}})
- Try out one of the Resiliency quickstarts:
- [Resiliency: Service-to-service]({{< ref resiliency-serviceinvo-quickstart.md >}})
- [Resiliency: State Management]({{< ref resiliency-state-quickstart.md >}})

View File

@ -0,0 +1,285 @@
---
type: docs
title: "Publish and subscribe to bulk messages"
linkTitle: "Publish and subscribe to bulk messages"
weight: 7100
description: "Learn how to use the bulk publish and subscribe APIs in Dapr."
---
{{% alert title="alpha" color="warning" %}}
The bulk publish and subscribe APIs are in **alpha** stage.
{{% /alert %}}
With the bulk publish and subscribe APIs, you can publish and subscribe to multiple messages in a single request. When writing applications that need to send or receive a large number of messages, using bulk operations allows you to achieve high throughput by reducing the overall number of requests between the Dapr sidecar, the application, and the underlying pub/sub broker.
## Publishing messages in bulk
### Restrictions when publishing messages in bulk
The bulk publish API allows you to publish multiple messages to a topic in a single request. It is *non-transactional*: within a single bulk request, some messages can succeed and some can fail. If any of the messages fail to publish, the bulk publish operation returns a list of the failed messages.
The bulk publish operation also does not guarantee any ordering of messages.
### Example
{{< tabs Java Javascript Dotnet Python Go "HTTP API (Bash)" "HTTP API (PowerShell)" >}}
{{% codetab %}}
```java
import io.dapr.client.DaprClientBuilder;
import io.dapr.client.DaprPreviewClient;
import io.dapr.client.domain.BulkPublishResponse;
import io.dapr.client.domain.BulkPublishResponseFailedEntry;

import java.util.ArrayList;
import java.util.List;

class BulkPublisher {
  private static final String PUBSUB_NAME = "my-pubsub-name";
  private static final String TOPIC_NAME = "topic-a";

  public void publishMessages() throws Exception {
    try (DaprPreviewClient client = (new DaprClientBuilder()).buildPreviewClient()) {
      // Create a list of messages to publish
      List<String> messages = new ArrayList<>();
      for (int i = 0; i < 10; i++) {
        String message = String.format("This is message #%d", i);
        messages.add(message);
      }

      // Publish the list of messages using the bulk publish API;
      // the response contains any entries that failed to publish
      BulkPublishResponse<String> res = client.publishEvents(PUBSUB_NAME, TOPIC_NAME, "text/plain", messages).block();
    }
  }
}
```
{{% /codetab %}}
{{% codetab %}}
```typescript
import { DaprClient } from "@dapr/dapr";

const pubSubName = "my-pubsub-name";
const topic = "topic-a";

async function start() {
  const client = new DaprClient();

  // Publish multiple messages to a topic.
  await client.pubsub.publishBulk(pubSubName, topic, ["message 1", "message 2", "message 3"]);

  // Publish multiple messages to a topic with explicit bulk publish messages.
  const bulkPublishMessages = [
    {
      entryID: "entry-1",
      contentType: "application/json",
      event: { hello: "foo message 1" },
    },
    {
      entryID: "entry-2",
      contentType: "application/cloudevents+json",
      event: {
        specversion: "1.0",
        source: "/some/source",
        type: "example",
        id: "1234",
        data: "foo message 2",
        datacontenttype: "text/plain",
      },
    },
    {
      entryID: "entry-3",
      contentType: "text/plain",
      event: "foo message 3",
    },
  ];
  await client.pubsub.publishBulk(pubSubName, topic, bulkPublishMessages);
}

start().catch((e) => {
  console.error(e);
  process.exit(1);
});
```
{{% /codetab %}}
{{% codetab %}}
```csharp
using System;
using System.Collections.Generic;
using Dapr.Client;

const string PubsubName = "my-pubsub-name";
const string TopicName = "topic-a";

IReadOnlyList<object> BulkPublishData = new List<object>() {
    new { Id = "17", Amount = 10m },
    new { Id = "18", Amount = 20m },
    new { Id = "19", Amount = 30m }
};

using var client = new DaprClientBuilder().Build();

var res = await client.BulkPublishEventAsync(PubsubName, TopicName, BulkPublishData);
if (res == null) {
    throw new Exception("null response from dapr");
}

if (res.FailedEntries.Count > 0)
{
    Console.WriteLine("Some events failed to be published!");
    foreach (var failedEntry in res.FailedEntries)
    {
        Console.WriteLine("EntryId: " + failedEntry.Entry.EntryId + " Error message: " +
                          failedEntry.ErrorMessage);
    }
}
else
{
    Console.WriteLine("Published all events!");
}
```
{{% /codetab %}}
{{% codetab %}}
```python
import requests

base_url = "http://localhost:3500/v1.0-alpha1/publish/bulk/{}/{}"
pubsub_name = "my-pubsub-name"
topic_name = "topic-a"

payload = [
    {
        "entryId": "ae6bf7c6-4af2-11ed-b878-0242ac120002",
        "event": "first text message",
        "contentType": "text/plain"
    },
    {
        "entryId": "b1f40bd6-4af2-11ed-b878-0242ac120002",
        "event": {
            "message": "second JSON message"
        },
        "contentType": "application/json"
    }
]

# Send the bulk publish request to the Dapr sidecar
response = requests.post(base_url.format(pubsub_name, topic_name), json=payload)
print(response.status_code)
```
{{% /codetab %}}
{{% codetab %}}
```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
)

const (
	pubsubName = "my-pubsub-name"
	topicName  = "topic-a"
	baseUrl    = "http://localhost:3500/v1.0-alpha1/publish/bulk/%s/%s"
)

func main() {
	url := fmt.Sprintf(baseUrl, pubsubName, topicName)
	method := "POST"

	payload := strings.NewReader(`[
        {
            "entryId": "ae6bf7c6-4af2-11ed-b878-0242ac120002",
            "event": "first text message",
            "contentType": "text/plain"
        },
        {
            "entryId": "b1f40bd6-4af2-11ed-b878-0242ac120002",
            "event": {
                "message": "second JSON message"
            },
            "contentType": "application/json"
        }
    ]`)

	client := &http.Client{}
	req, err := http.NewRequest(method, url, payload)
	if err != nil {
		fmt.Println(err)
		return
	}
	req.Header.Add("Content-Type", "application/json")

	// Send the bulk publish request and print the response
	res, err := client.Do(req)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer res.Body.Close()

	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(res.StatusCode, string(body))
}
```
{{% /codetab %}}
{{% codetab %}}
```bash
curl -X POST http://localhost:3500/v1.0-alpha1/publish/bulk/my-pubsub-name/topic-a \
  -H 'Content-Type: application/json' \
  -d '[
        {
            "entryId": "ae6bf7c6-4af2-11ed-b878-0242ac120002",
            "event": "first text message",
            "contentType": "text/plain"
        },
        {
            "entryId": "b1f40bd6-4af2-11ed-b878-0242ac120002",
            "event": {
                "message": "second JSON message"
            },
            "contentType": "application/json"
        }
      ]'
```
{{% /codetab %}}
{{% codetab %}}
```powershell
Invoke-RestMethod -Method Post -ContentType 'application/json' -Uri 'http://localhost:3500/v1.0-alpha1/publish/bulk/my-pubsub-name/topic-a' `
-Body '[
        {
            "entryId": "ae6bf7c6-4af2-11ed-b878-0242ac120002",
            "event": "first text message",
            "contentType": "text/plain"
        },
        {
            "entryId": "b1f40bd6-4af2-11ed-b878-0242ac120002",
            "event": {
                "message": "second JSON message"
            },
            "contentType": "application/json"
        }
]'
```
{{% /codetab %}}
{{< /tabs >}}
## How components handle publishing and subscribing to bulk messages
Some pub/sub brokers support sending and receiving multiple messages in a single request. When a component supports bulk publish or subscribe operations, the Dapr runtime uses them to further optimize the communication between the Dapr sidecar and the underlying pub/sub broker.
For components that do not support bulk publish or subscribe operations, the Dapr runtime falls back to the regular publish and subscribe APIs and sends or receives messages one by one. Using the bulk APIs is still more efficient in this case, because applications can still send or receive multiple messages in a single request to or from Dapr.
## Supported components
Refer to the [component reference]({{< ref supported-pubsub >}}) to see which components support bulk publish and subscribe operations.
## Related links
- List of [supported pub/sub components]({{< ref supported-pubsub >}})
- Read the [API reference]({{< ref pubsub_api.md >}})

View File

@ -119,6 +119,10 @@ By default, all topic messages associated with an instance of a pub/sub componen
Dapr can set a timeout on a per-message basis, meaning that if the message is not read from the pub/sub component, the message is discarded. This timeout prevents a buildup of unread messages. If a message has been in the queue longer than the configured TTL, it is marked as dead. For more information, read [pub/sub message TTL]({{< ref pubsub-message-ttl.md >}}).
### Publish and subscribe to bulk messages
Dapr supports sending and receiving multiple messages in a single request. When writing applications that need to send or receive a large number of messages, using bulk operations allows you to achieve high throughput by reducing the overall number of requests. For more information, read [pub/sub bulk messages]({{< ref pubsub-bulk.md >}}).
## Try out pub/sub
### Quickstarts and tutorials

View File

@ -191,25 +191,24 @@ using System.Threading;
//code
namespace EventService
{
    class Program
    {
        static async Task Main(string[] args)
        {
            while(true) {
                System.Threading.Thread.Sleep(5000);
                Random random = new Random();
                int orderId = random.Next(1,1000);
                CancellationTokenSource source = new CancellationTokenSource();
                CancellationToken cancellationToken = source.Token;
                using var client = new DaprClientBuilder().Build();
                //Using Dapr SDK to invoke a method
                var result = client.CreateInvokeMethodRequest(HttpMethod.Get, "checkout", "checkout/" + orderId, cancellationToken);
                await client.InvokeMethodAsync(result);
                Console.WriteLine("Order requested: " + orderId);
                Console.WriteLine("Result: " + result);
            }
        }
    }
    class Program
    {
        static async Task Main(string[] args)
        {
            while(true) {
                System.Threading.Thread.Sleep(5000);
                Random random = new Random();
                int orderId = random.Next(1,1000);
                using var client = new DaprClientBuilder().Build();
                //Using Dapr SDK to invoke a method
                var result = client.CreateInvokeMethodRequest(HttpMethod.Get, "checkout", "checkout/" + orderId);
                await client.InvokeMethodAsync(result);
                Console.WriteLine("Order requested: " + orderId);
                Console.WriteLine("Result: " + result);
            }
        }
    }
}
```

View File

@ -11,27 +11,36 @@ aliases:
Dapr allows custom processing pipelines to be defined by chaining a series of middleware components. In this guide, you'll learn how to create a middleware component. To learn how to configure an existing middleware component, see [Configure middleware components]({{< ref middleware.md >}}).
## Writing a custom middleware
## Writing a custom HTTP middleware
Dapr uses [FastHTTP](https://github.com/valyala/fasthttp) to implement its HTTP server. Hence, your HTTP middleware needs to be written as a FastHTTP handler. Your middleware needs to implement a middleware interface, which defines a **GetHandler** method that returns **fasthttp.RequestHandler** and **error**:
HTTP middlewares in Dapr wrap standard Go [net/http](https://pkg.go.dev/net/http) handler functions.
Your middleware needs to implement a middleware interface, which defines a **GetHandler** method that returns a [**http.Handler**](https://pkg.go.dev/net/http#Handler) callback and an **error**:
```go
type Middleware interface {
	GetHandler(metadata Metadata) (func(h fasthttp.RequestHandler) fasthttp.RequestHandler, error)
	GetHandler(metadata middleware.Metadata) (func(next http.Handler) http.Handler, error)
}
```
Your handler implementation can include any inbound logic, outbound logic, or both:
The handler receives a `next` callback that should be invoked to continue processing the request.
Your handler implementation can include inbound logic, outbound logic, or both:
```go
func (m *customMiddleware) GetHandler(metadata Metadata) (func(fasthttp.RequestHandler) fasthttp.RequestHandler, error) {
func (m *customMiddleware) GetHandler(metadata middleware.Metadata) (func(next http.Handler) http.Handler, error) {
	var err error
	return func(h fasthttp.RequestHandler) fasthttp.RequestHandler {
		return func(ctx *fasthttp.RequestCtx) {
			// inbound logic
			h(ctx) // call the downstream handler
			// outbound logic
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			// Inbound logic
			// ...

			// Call the next handler
			next.ServeHTTP(w, r)

			// Outbound logic
			// ...
		})
	}, err
}

View File

@ -6,10 +6,6 @@ weight: 120
description: "Get started with Dapr's resiliency capabilities via the service invocation API"
---
{{% alert title="Note" color="primary" %}}
Resiliency is currently a preview feature.
{{% /alert %}}
Observe Dapr resiliency capabilities by simulating a system failure. In this Quickstart, you will:
- Run two microservice applications: `checkout` and `order-processor`. `checkout` will continuously make Dapr service invocation requests to `order-processor`.
@ -59,10 +55,10 @@ pip3 install -r requirements.txt
Run the `order-processor` service alongside a Dapr sidecar.
```bash
dapr run --app-port 8001 --app-id order-processor --config ../config.yaml --components-path ../../../components/ --app-protocol http --dapr-http-port 3501 -- python3 app.py
dapr run --app-port 8001 --app-id order-processor --resources-path ../../../resources/ --app-protocol http --dapr-http-port 3501 -- python3 app.py
```
### Step 3: Run the `checkout` service application with resiliency enabled
### Step 3: Run the `checkout` service application
In a new terminal window, from the root of the Quickstart directory, navigate to the `checkout` directory.
@ -76,13 +72,13 @@ Install dependencies:
pip3 install -r requirements.txt
```
Run the `checkout` service alongside a Dapr sidecar. The `--config` parameter applies a Dapr configuration that enables the resiliency feature.
Run the `checkout` service alongside a Dapr sidecar.
```bash
dapr run --app-id checkout --config ../config.yaml --components-path ../../../components/ --app-protocol http --dapr-http-port 3500 -- python3 app.py
dapr run --app-id checkout --resources-path ../../../resources/ --app-protocol http --dapr-http-port 3500 -- python3 app.py
```
By enabling resiliency, the resiliency spec located in the components directory is detected and loaded by the Dapr sidecar:
The Dapr sidecar then loads the resiliency spec located in the resources directory:
```yaml
apiVersion: dapr.io/v1alpha1
@ -287,10 +283,10 @@ npm install
Run the `order-processor` service alongside a Dapr sidecar.
```bash
dapr run --app-port 5001 --app-id order-processor --config ../config.yaml --components-path ../../../components/ --app-protocol http --dapr-http-port 3501 -- npm start
dapr run --app-port 5001 --app-id order-processor --resources-path ../../../resources/ --app-protocol http --dapr-http-port 3501 -- npm start
```
### Step 3: Run the `checkout` service application with resiliency enabled
### Step 3: Run the `checkout` service application
In a new terminal window, from the root of the Quickstart directory,
navigate to the `checkout` directory.
@ -305,13 +301,14 @@ Install dependencies:
npm install
```
Run the `checkout` service alongside a Dapr sidecar. The `--config` parameter applies a Dapr configuration that enables the resiliency feature.
Run the `checkout` service alongside a Dapr sidecar.
```bash
dapr run --app-id checkout --config ../config.yaml --components-path ../../../components/ --app-protocol http --dapr-http-port 3500 -- npm start
dapr run --app-id checkout --resources-path ../../../resources/ --app-protocol http --dapr-http-port 3500 -- npm start
```
By enabling resiliency, the resiliency spec located in the components directory is detected and loaded by the Dapr sidecar:
The Dapr sidecar then loads the resiliency spec located in the resources directory:
```yaml
apiVersion: dapr.io/v1alpha1
@ -450,7 +447,7 @@ Once you restart the `order-processor` service, the application will recover sea
In the `order-processor` service terminal, restart the application:
```bash
dapr run --app-port 5001 --app-id order-processor --config ../config.yaml --components-path ../../../components/ --app-protocol http --dapr-http-port 3501 -- npm start
dapr run --app-port 5001 --app-id order-processor --resources-path ../../../resources/ --app-protocol http --dapr-http-port 3501 -- npm start
```
`checkout` service output:
@ -518,10 +515,10 @@ dotnet build
Run the `order-processor` service alongside a Dapr sidecar.
```bash
dapr run --app-port 7001 --app-id order-processor --config ../config.yaml --components-path ../../../components/ --app-protocol http --dapr-http-port 3501 -- dotnet run
dapr run --app-port 7001 --app-id order-processor --resources-path ../../../resources/ --app-protocol http --dapr-http-port 3501 -- dotnet run
```
### Step 3: Run the `checkout` service application with resiliency enabled
### Step 3: Run the `checkout` service application
In a new terminal window, from the root of the Quickstart directory,
navigate to the `checkout` directory.
@ -537,13 +534,13 @@ dotnet restore
dotnet build
```
Run the `checkout` service alongside a Dapr sidecar. The `--config` parameter applies a Dapr configuration that enables the resiliency feature.
Run the `checkout` service alongside a Dapr sidecar.
```bash
dapr run --app-id checkout --config ../config.yaml --components-path ../../../components/ --app-protocol http --dapr-http-port 3500 -- dotnet run
dapr run --app-id checkout --resources-path ../../../resources/ --app-protocol http --dapr-http-port 3500 -- dotnet run
```
By enabling resiliency, the resiliency spec located in the components directory is detected and loaded by the Dapr sidecar:
The Dapr sidecar then loads the resiliency spec located in the resources directory:
```yaml
apiVersion: dapr.io/v1alpha1
@ -751,10 +748,10 @@ mvn clean install
Run the `order-processor` service alongside a Dapr sidecar.
```bash
dapr run --app-id order-processor --config ../config.yaml --components-path ../../../components/ --app-port 9001 --app-protocol http --dapr-http-port 3501 -- java -jar target/OrderProcessingService-0.0.1-SNAPSHOT.jar
dapr run --app-id order-processor --resources-path ../../../resources/ --app-port 9001 --app-protocol http --dapr-http-port 3501 -- java -jar target/OrderProcessingService-0.0.1-SNAPSHOT.jar
```
### Step 3: Run the `checkout` service application with resiliency enabled
### Step 3: Run the `checkout` service application
In a new terminal window, from the root of the Quickstart directory,
navigate to the `checkout` directory.
@ -769,13 +766,14 @@ Install dependencies:
mvn clean install
```
Run the `checkout` service alongside a Dapr sidecar. The `--config` parameter applies a Dapr configuration that enables the resiliency feature.
Run the `checkout` service alongside a Dapr sidecar.
```bash
dapr run --app-id checkout --config ../config.yaml --components-path ../../../components/ --app-protocol http --dapr-http-port 3500 -- java -jar target/CheckoutService-0.0.1-SNAPSHOT.jar
dapr run --app-id checkout --resources-path ../../../resources/ --app-protocol http --dapr-http-port 3500 -- java -jar target/CheckoutService-0.0.1-SNAPSHOT.jar
```
By enabling resiliency, the resiliency spec located in the components directory is detected and loaded by the Dapr sidecar:
The Dapr sidecar then loads the resiliency spec located in the resources directory:
```yaml
apiVersion: dapr.io/v1alpha1
@ -914,7 +912,7 @@ Once you restart the `order-processor` service, the application will recover sea
In the `order-processor` service terminal, restart the application:
```bash
dapr run --app-id order-processor --config ../config.yaml --components-path ../../../components/ --app-port 9001 --app-protocol http --dapr-http-port 3501 -- java -jar target/OrderProcessingService-0.0.1-SNAPSHOT.jar
dapr run --app-id order-processor --resources-path ../../../resources/ --app-port 9001 --app-protocol http --dapr-http-port 3501 -- java -jar target/OrderProcessingService-0.0.1-SNAPSHOT.jar
```
`checkout` service output:
@ -980,10 +978,10 @@ go build .
Run the `order-processor` service alongside a Dapr sidecar.
```bash
dapr run --app-port 6001 --app-id order-processor --config ../config.yaml --components-path ../../../components/ --app-protocol http --dapr-http-port 3501 -- go run .
dapr run --app-port 6001 --app-id order-processor --resources-path ../../../resources/ --app-protocol http --dapr-http-port 3501 -- go run .
```
### Step 3: Run the `checkout` service application with resiliency enabled
### Step 3: Run the `checkout` service application
In a new terminal window, from the root of the Quickstart directory,
navigate to the `checkout` directory.
@ -998,13 +996,14 @@ Install dependencies:
go build .
```
Run the `checkout` service alongside a Dapr sidecar. The `--config` parameter applies a Dapr configuration that enables the resiliency feature.
Run the `checkout` service alongside a Dapr sidecar.
```bash
dapr run --app-id checkout --config ../config.yaml --components-path ../../../components/ --app-protocol http --dapr-http-port 3500 -- go run .
dapr run --app-id checkout --resources-path ../../../resources/ --app-protocol http --dapr-http-port 3500 -- go run .
```
By enabling resiliency, the resiliency spec located in the components directory is detected and loaded by the Dapr sidecar:
The Dapr sidecar then loads the resiliency spec located in the resources directory:
```yaml
apiVersion: dapr.io/v1alpha1
@ -1143,7 +1142,7 @@ Once you restart the `order-processor` service, the application will recover sea
In the `order-processor` service terminal, restart the application:
```bash
dapr run --app-port 6001 --app-id order-processor --config ../config.yaml --components-path ../../../components/ --app-protocol http --dapr-http-port 3501 -- go run .
dapr run --app-port 6001 --app-id order-processor --resources-path ../../../resources/ --app-protocol http --dapr-http-port 3501 -- go run .
```
`checkout` service output:

View File

@ -6,13 +6,9 @@ weight: 110
description: "Get started with Dapr's resiliency capabilities via the state management API"
---
{{% alert title="Note" color="primary" %}}
Resiliency is currently a preview feature.
{{% /alert %}}
Observe Dapr resiliency capabilities by simulating a system failure. In this Quickstart, you will:
- Execute a microservice application with resiliency enabled that continuously persists and retrieves state via Dapr's state management API.
- Execute a microservice application that continuously persists and retrieves state via Dapr's state management API.
- Trigger resiliency policies by simulating a system failure.
- Resolve the failure and the microservice application will resume.
@ -54,9 +50,10 @@ Install dependencies
pip3 install -r requirements.txt
```
### Step 2: Run the application with resiliency enabled
### Step 2: Run the application
Run the `order-processor` service alongside a Dapr sidecar. The Dapr sidecar then loads the resiliency spec located in the resources directory:
Run the `order-processor` service alongside a Dapr sidecar. In the `dapr run` command below, the `--config` parameter applies a Dapr configuration that enables the resiliency feature. By enabling resiliency, the resiliency spec located in the components directory is loaded by the `order-processor` sidecar. The resilency spec is:
```yaml
apiVersion: dapr.io/v1alpha1
@ -89,7 +86,7 @@ Run the `order-processor` service alongside a Dapr sidecar. In the `dapr run` co
```bash
dapr run --app-id order-processor --config ../config.yaml --components-path ../../../components/ -- python3
dapr run --app-id order-processor --resources-path ../../../resources/ -- python3
```
Once the application has started, the `order-processor` service writes and reads `orderId` key/value pairs to the `statestore` Redis instance [defined in the `statestore.yaml` component]({{< ref "statemanagement-quickstart.md#statestoreyaml-component-file" >}}).
@ -132,7 +129,7 @@ Once Redis is stopped, the requests begin to fail and the retry policy titled `r
INFO[0006] Error processing operation component[statestore] output. Retrying...
```
As per the `retryFroever` policy, retries will continue for each failed request indefinitely, in 5 second intervals.
As per the `retryForever` policy, retries will continue for each failed request indefinitely, in 5 second intervals.
```yaml
retryForever:
@ -223,9 +220,10 @@ Install dependencies
npm install
```
### Step 2: Run the application with resiliency enabled
### Step 2: Run the application
Run the `order-processor` service alongside a Dapr sidecar. The Dapr sidecar then loads the resiliency spec located in the resources directory:
Run the `order-processor` service alongside a Dapr sidecar. In the `dapr run` command below, the `--config` parameter applies a Dapr configuration that enables the resiliency feature. By enabling resiliency, the resiliency spec located in the components directory is loaded by the `order-processor` sidecar. The resilency spec is:
```yaml
apiVersion: dapr.io/v1alpha1
@ -257,7 +255,7 @@ Run the `order-processor` service alongside a Dapr sidecar. In the `dapr run` co
```
```bash
dapr run --app-id order-processor --config ../config.yaml --components-path ../../../components/ -- npm start
dapr run --app-id order-processor --resources-path ../../../resources/ -- npm start
```
Once the application has started, the `order-processor` service writes and reads `orderId` key/value pairs to the `statestore` Redis instance [defined in the `statestore.yaml` component]({{< ref "statemanagement-quickstart.md#statestoreyaml-component-file" >}}).
@ -300,7 +298,7 @@ Once Redis is stopped, the requests begin to fail and the retry policy titled `r
INFO[0006] Error processing operation component[statestore] output. Retrying...
```
As per the `retryFroever` policy, retries will continue for each failed request indefinitely, in 5 second intervals.
As per the `retryForever` policy, retries will continue for each failed request indefinitely, in 5 second intervals.
```yaml
retryForever:
@ -392,9 +390,9 @@ dotnet restore
dotnet build
```
### Step 2: Run the application with resiliency enabled
### Step 2: Run the application
Run the `order-processor` service alongside a Dapr sidecar. In the `dapr run` command below, the `--config` parameter applies a Dapr configuration that enables the resiliency feature. By enabling resiliency, the resiliency spec located in the components directory is loaded by the `order-processor` sidecar. The resilency spec is:
Run the `order-processor` service alongside a Dapr sidecar. The Dapr sidecar then loads the resiliency spec located in the resources directory:
```yaml
apiVersion: dapr.io/v1alpha1
@ -426,7 +424,7 @@ Run the `order-processor` service alongside a Dapr sidecar. In the `dapr run` co
```
```bash
dapr run --app-id order-processor --config ../config.yaml --components-path ../../../components/ -- dotnet run
dapr run --app-id order-processor --resources-path ../../../resources/ -- dotnet run
```
Once the application has started, the `order-processor` service writes and reads `orderId` key/value pairs to the `statestore` Redis instance [defined in the `statestore.yaml` component]({{< ref "statemanagement-quickstart.md#statestoreyaml-component-file" >}}).
@ -469,7 +467,7 @@ Once Redis is stopped, the requests begin to fail and the retry policy titled `r
INFO[0006] Error processing operation component[statestore] output. Retrying...
```
As per the `retryFroever` policy, retries will continue for each failed request indefinitely, in 5 second intervals.
As per the `retryForever` policy, retries will continue for each failed request indefinitely, in 5 second intervals.
```yaml
retryForever:
@ -563,9 +561,9 @@ Install dependencies
mvn clean install
```
### Step 2: Run the application with resiliency enabled
### Step 2: Run the application
Run the `order-processor` service alongside a Dapr sidecar. In the `dapr run` command below, the `--config` parameter applies a Dapr configuration that enables the resiliency feature. By enabling resiliency, the resiliency spec located in the components directory is loaded by the `order-processor` sidecar. The resilency spec is:
Run the `order-processor` service alongside a Dapr sidecar. The Dapr sidecar then loads the resiliency spec located in the resources directory:
```yaml
apiVersion: dapr.io/v1alpha1
@ -597,7 +595,7 @@ Run the `order-processor` service alongside a Dapr sidecar. In the `dapr run` co
```
```bash
dapr run --app-id order-processor --config ../config.yaml --components-path ../../../components/ -- java -jar target/OrderProcessingService-0.0.1-SNAPSHOT.jar
dapr run --app-id order-processor --resources-path ../../../resources/ -- java -jar target/OrderProcessingService-0.0.1-SNAPSHOT.jar
```
Once the application has started, the `order-processor` service writes and reads `orderId` key/value pairs to the `statestore` Redis instance [defined in the `statestore.yaml` component]({{< ref "statemanagement-quickstart.md#statestoreyaml-component-file" >}}).
@ -640,7 +638,7 @@ Once Redis is stopped, the requests begin to fail and the retry policy titled `r
INFO[0006] Error processing operation component[statestore] output. Retrying...
```
As per the `retryFroever` policy, retries will continue for each failed request indefinitely, in 5 second intervals.
As per the `retryForever` policy, retries will continue for each failed request indefinitely, in 5 second intervals.
```yaml
retryForever:
@ -731,9 +729,9 @@ Install dependencies
go build .
```
### Step 2: Run the application with resiliency enabled
### Step 2: Run the application
Run the `order-processor` service alongside a Dapr sidecar. In the `dapr run` command below, the `--config` parameter applies a Dapr configuration that enables the resiliency feature. By enabling resiliency, the resiliency spec located in the components directory is loaded by the `order-processor` sidecar. The resilency spec is:
Run the `order-processor` service alongside a Dapr sidecar. The Dapr sidecar then loads the resiliency spec located in the resources directory:
```yaml
apiVersion: dapr.io/v1alpha1
@ -765,7 +763,7 @@ Run the `order-processor` service alongside a Dapr sidecar. In the `dapr run` co
```
```bash
dapr run --app-id order-processor --config ../config.yaml --components-path ../../../components -- go run .
dapr run --app-id order-processor --resources-path ../../../resources -- go run .
```
Once the application has started, the `order-processor` service writes and reads `orderId` key/value pairs to the `statestore` Redis instance [defined in the `statestore.yaml` component]({{< ref "statemanagement-quickstart.md#statestoreyaml-component-file" >}}).
@ -808,7 +806,7 @@ Once Redis is stopped, the requests begin to fail and the retry policy titled `r
INFO[0006] Error processing operation component[statestore] output. Retrying...
```
As per the `retryFroever` policy, retries will continue for each failed request indefinitely, in 5 second intervals.
As per the `retryForever` policy, retries will continue for each failed request indefinitely, in 5 second intervals.
```yaml
retryForever:

View File

@ -6,22 +6,22 @@ weight: 2000
description: "Customize processing pipelines by adding middleware components"
---
Dapr allows custom processing pipelines to be defined by chaining a series of middleware components. There are two places that you can use a middleware pipeline;
Dapr allows custom processing pipelines to be defined by chaining a series of middleware components. There are two places that you can use a middleware pipeline:
1) Building block APIs - HTTP middleware components are executed when invoking any Dapr HTTP APIs.
2) Service-to-Service invocation - HTTP middleware components are applied to service-to-service invocation calls.
1. Building block APIs - HTTP middleware components are executed when invoking any Dapr HTTP APIs.
2. Service-to-Service invocation - HTTP middleware components are applied to service-to-service invocation calls.
## Configure API middleware pipelines
When launched, a Dapr sidecar constructs a middleware processing pipeline for incoming HTTP calls. By default, the pipeline consists of [tracing middleware]({{< ref tracing-overview.md >}}) and CORS middleware. Additional middleware, configured by a Dapr [configuration]({{< ref configuration-concept.md >}}), can be added to the pipeline in the order they are defined. The pipeline applies to all Dapr API endpoints, including state, pub/sub, service invocation, bindings, secrets, configuration, distributed lock, and others.
When launched, a Dapr sidecar constructs a middleware processing pipeline for incoming HTTP calls. By default, the pipeline consists of the [tracing]({{< ref tracing-overview.md >}}) and CORS middlewares. Additional middlewares, configured by a Dapr [Configuration]({{< ref configuration-concept.md >}}), can be added to the pipeline in the order they are defined. The pipeline applies to all Dapr API endpoints, including state, pub/sub, service invocation, bindings, secrets, configuration, distributed lock, etc.
A request goes through all the defined middleware components before it's routed to user code, and then goes through the defined middleware, in reverse order, before it's returned to the client, as shown in the following diagram.
<img src="/images/middleware.png" width=800>
<img src="/images/middleware.png" width="800" alt="Diagram showing the flow of a request and a response through the middlewares, as described in the paragraph above" />
HTTP middleware components are executed when invoking Dapr HTTP APIs using the `httpPipeline` configuration.
The following configuration example defines a custom pipeline that uses a [OAuth 2.0 middleware]({{< ref middleware-oauth2.md >}}) and an [uppercase middleware component]({{< ref middleware-uppercase.md >}}). In this case, all requests are authorized through the OAuth 2.0 protocol, and transformed to uppercase text, before they are forwarded to user code.
The following configuration example defines a custom pipeline that uses an [OAuth 2.0 middleware]({{< ref middleware-oauth2.md >}}) and an [uppercase middleware component]({{< ref middleware-uppercase.md >}}). In this case, all requests are authorized through the OAuth 2.0 protocol, and transformed to uppercase text, before they are forwarded to user code.
```yaml
apiVersion: dapr.io/v1alpha1
@ -38,19 +38,19 @@ spec:
type: middleware.http.uppercase
```
As with other components, middleware components can be found in the [supported Middleware reference]({{< ref supported-middleware >}}) and in the [components-contrib repo](https://github.com/dapr/components-contrib/tree/master/middleware/http).
As with other components, middleware components can be found in the [supported Middleware reference]({{< ref supported-middleware >}}) and in the [`dapr/components-contrib` repo](https://github.com/dapr/components-contrib/tree/master/middleware/http).
{{< button page="supported-middleware" text="See all middleware components">}}
## Configure app middleware pipelines
You can also use any middleware components when making service-to-service invocation calls. For example, for token validation in a zero-trust environment, a request transformation for a specific app endpoint, or to apply OAuth policies.
You can also use any middleware component when making service-to-service invocation calls. For example, to add token validation in a zero-trust environment, to transform a request for a specific app endpoint, or to apply OAuth policies.
Service-to-service invocation middleware components apply to all outgoing calls from Dapr sidecar to the receiving application (service) as shown in the diagram below.
Service-to-service invocation middleware components apply to all **outgoing** calls from a Dapr sidecar to the receiving application (service), as shown in the diagram below.
<img src="/images/app-middleware.png" width=800>
<img src="/images/app-middleware.png" width="800" alt="Diagram showing the flow of a service invocation request. Requests from the callee Dapr sidecar to the callee application go through the app middleware pipeline as described in the paragraph above." />
Any middleware component that can be applied to HTTP middleware can also be applied to service-to-service invocation calls as a middleware component using `appHttpPipeline` configuration. The example below adds the `uppercase` middleware component for all outgoing calls from the Dapr sidecar to the application that this configuration is applied to.
Any middleware component that can be used as HTTP middleware can also be applied to service-to-service invocation calls as a middleware component using the `appHttpPipeline` configuration. The example below adds the `uppercase` middleware component for all outgoing calls from the Dapr sidecar (target of service invocation) to the application that this configuration is applied to.
```yaml
apiVersion: dapr.io/v1alpha1

View File

@ -57,7 +57,7 @@ Since you are running Dapr in the same host as the component, verify this folder
Define your component using a [component spec]({{< ref component-schema.md >}}). Your component's `type` is derived from the socket name, without the file extension.
Save the component YAML file in the components-path, replacing:
Save the component YAML file in the resources-path, replacing:
- `your_socket_goes_here` with your component socket name (no extension)
- `your_component_type` with your component type
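For illustration, a minimal sketch of such a component spec, assuming a hypothetical state store component listening on a socket named `my-component.sock`:
```yaml
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: my-production-state-store
spec:
  type: state.my-component
  version: v1
```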

View File

@ -196,6 +196,20 @@ It is recommended that a production-ready deployment includes the following sett
6. Dapr also supports **scoping components for certain applications**. This is not a required practice, and can be enabled according to your security needs. See [here]({{< ref "component-scopes.md" >}}) for more info.
## Service account tokens
By default, Kubernetes mounts a volume containing a [Service Account token](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) in each container. Applications can use this token, whose permissions vary depending on the configuration of the cluster and namespace, among other things, to perform API calls against the Kubernetes control plane.
When creating a new Pod (or a Deployment, StatefulSet, Job, etc), you can disable auto-mounting the Service Account token by setting `automountServiceAccountToken: false` in your pod's spec.
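For example, a minimal sketch of a pod spec with the token auto-mount disabled (the app name and image are hypothetical):
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: myapp
spec:
  # Disable auto-mounting of the Service Account token
  automountServiceAccountToken: false
  containers:
    - name: myapp
      image: example.com/myapp:latest
```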
It is recommended that you consider deploying your apps with `automountServiceAccountToken: false` to improve the security posture of your pods, unless your apps depend on having a Service Account token. For example, you may need a Service Account token if:
- You are using Dapr components that interact with the Kubernetes APIs, for example the [Kubernetes secret store]({{< ref "kubernetes-secret-store.md" >}}) or the [Kubernetes Events binding]({{< ref "kubernetes-binding.md" >}}).
Note that initializing Dapr components using [component secrets]({{< ref "component-secrets.md" >}}) stored as Kubernetes secrets does **not** require a Service Account token, so you can still set `automountServiceAccountToken: false` in this case. Only calling the Kubernetes secret store at runtime, using the [Secrets management]({{< ref "secrets-overview.md" >}}) building block, is impacted.
- Your own application needs to interact with the Kubernetes APIs.
Because of the reasons above, Dapr does not set `automountServiceAccountToken: false` automatically for you. However, in all situations where your solution does not require the Service Account token, it is recommended that you set this option in the pod's spec.
## Tracing and metrics configuration
Dapr has tracing and metrics enabled by default. It is *recommended* that you set up distributed tracing and metrics for your applications and the Dapr control plane in production.
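For example, a sketch of a Dapr Configuration that keeps tracing enabled and exports to a Zipkin backend; the endpoint address assumes a Zipkin service deployed in the `default` namespace:
```yaml
apiVersion: dapr.io/v1alpha1
kind: Configuration
metadata:
  name: tracing
  namespace: default
spec:
  tracing:
    samplingRate: "1"
    zipkin:
      endpointAddress: "http://zipkin.default.svc.cluster.local:9411/api/v2/spans"
```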

View File

@ -3,10 +3,10 @@ type: docs
title: "Policies"
linkTitle: "Policies"
weight: 4500
description: "Configure resiliency policies for timeouts, retries and circuit breakers"
description: "Configure resiliency policies for timeouts, retries, and circuit breakers"
---
You define timeouts, retries and circuit breaker policies under `policies`. Each policy is given a name so you can refer to them from the `targets` section in the resiliency spec.
Define timeouts, retries, and circuit breaker policies under `policies`. Each policy is given a name so you can refer to them from the `targets` section in the resiliency spec.
> Note: Dapr offers default retries for specific APIs. [See here]({{< ref "#override-default-retries" >}}) to learn how you can override the default retry logic with user-defined retry policies.
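For illustration, a minimal sketch of a `policies` section; the policy names (`general`, `important`, `pubsubCB`) are hypothetical placeholders:
```yaml
spec:
  policies:
    timeouts:
      general: 5s
    retries:
      important:
        policy: constant
        duration: 5s
        maxRetries: 30
    circuitBreakers:
      pubsubCB:
        maxRequests: 1
        interval: 8s
        timeout: 45s
        trip: consecutiveFailures > 8
```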
@ -285,4 +285,10 @@ The table below is a break down of which policies are applied when attempting to
| statestore | DefaultStatestoreComponentOutboundRetryPolicy |
| actorstore | fastRetries |
| EventActor | retryForever |
| SummaryActor | DefaultActorRetryPolicy |
## Next steps
Try out one of the Resiliency quickstarts:
- [Resiliency: Service-to-service]({{< ref resiliency-serviceinvo-quickstart.md >}})
- [Resiliency: State Management]({{< ref resiliency-state-quickstart.md >}})

View File

@ -5,11 +5,8 @@ linkTitle: "Overview"
weight: 4500
description: "Configure Dapr retries, timeouts, and circuit breakers"
---
{{% alert title="Note" color="primary" %}}
Resiliency is currently a preview feature. Before you can utilize a resiliency spec, you must first [enable the resiliency preview feature]({{< ref support-preview-features >}}).
{{% /alert %}}
Dapr provides a capability for defining and applying fault tolerance resiliency policies via a [resiliency spec]({{< ref "resiliency-overview.md#complete-example-policy" >}}). Resiliency specs are saved in the same location as component specs and are applied when the Dapr sidecar starts. The sidecar determines how to apply resiliency policies to your Dapr API calls. In self-hosted mode, the resiliency spec must be named `resiliency.yaml`. In Kubernetes, Dapr finds the named resiliency specs used by your application. Within the resiliency spec, you can define policies for popular resiliency patterns, such as:
- [Timeouts]({{< ref "policies.md#timeouts" >}})
- [Retries/back-offs]({{< ref "policies.md#retries" >}})
@ -171,3 +168,9 @@ Watch this video for how to use [resiliency](https://www.youtube.com/watch?t=184
- [Policies]({{< ref "policies.md" >}})
- [Targets]({{< ref "targets.md" >}})
## Next steps
Try out one of the Resiliency quickstarts:
- [Resiliency: Service-to-service]({{< ref resiliency-serviceinvo-quickstart.md >}})
- [Resiliency: State Management]({{< ref resiliency-state-quickstart.md >}})

View File

@ -7,6 +7,7 @@ description: "Apply resiliency policies to apps, components and actors"
---
### Targets
Named policies are applied to targets. Dapr supports three target types that apply to all Dapr building block APIs:
- `apps`
- `components`
@ -129,4 +130,10 @@ spec:
circuitBreaker: general
circuitBreakerScope: both
circuitBreakerCacheSize: 5000
```
## Next steps
Try out one of the Resiliency quickstarts:
- [Resiliency: Service-to-service]({{< ref resiliency-serviceinvo-quickstart.md >}})
- [Resiliency: State Management]({{< ref resiliency-state-quickstart.md >}})

View File

@ -16,8 +16,7 @@ For CLI there is no explicit opt-in, just the version that this was first made a
| Feature | Description | Setting | Documentation | Version introduced |
| ---------- |-------------|---------|---------------|-----------------|
| **`--image-registry`** flag in Dapr CLI| In self-hosted mode, you can set this flag to specify any private registry to pull the container images required to install Dapr| N/A | [CLI `init` command reference]({{< ref "dapr-init.md#self-hosted-environment" >}}) | v1.7 |
| **Resiliency** | Allows configuring of fine-grained policies for retries, timeouts, and circuitbreaking. | `Resiliency` | [Configure Resiliency policies]({{< ref "resiliency-overview">}}) | v1.7|
| **App Middleware** | Allow middleware components to be executed when making service-to-service calls | N/A | [App Middleware]({{< ref "middleware.md#app-middleware" >}}) | v1.9 |
| **App health checks** | Allows configuring app health checks | `AppHealthCheck` | [App health checks]({{< ref "app-health.md" >}}) | v1.9 |
| **Pluggable components** | Allows creating self-hosted gRPC-based components written in any language that supports gRPC. The following component APIs are supported: State stores, Pub/sub, Bindings | N/A | [Pluggable components concept]({{< ref "components-concept#pluggable-components" >}})| v1.9 |
| **Workflows** | Author workflows as code to automate and orchestrate tasks within your application, like messaging, state management, and failure handling | N/A | [Workflows concept]({{< ref "components-concept#workflows" >}})| v1.10 |

View File

@ -64,6 +64,90 @@ Parameter | Description
> Additional metadata parameters are available based on each pubsub component.
## Publish multiple messages to a given topic
This endpoint lets you publish multiple messages to consumers who are listening on a `topic`.
### HTTP Request
```
POST http://localhost:<daprPort>/v1.0-alpha1/publish/bulk/<pubsubname>/<topic>[?<metadata>]
```
The request body should contain a JSON array of entries with:
- Unique entry IDs
- The event to publish
- The content type of the event
If the content type for an event is not `application/cloudevents+json`, it is auto-wrapped as a CloudEvent (unless `metadata.rawPayload` is set to `true`).
Example:
```bash
curl -X POST http://localhost:3500/v1.0-alpha1/publish/bulk/pubsubName/deathStarStatus \
  -H 'Content-Type: application/json' \
  -d '[
        {
            "entryId": "ae6bf7c6-4af2-11ed-b878-0242ac120002",
            "event": "first text message",
            "contentType": "text/plain"
        },
        {
            "entryId": "b1f40bd6-4af2-11ed-b878-0242ac120002",
            "event": {
                "message": "second JSON message"
            },
            "contentType": "application/json"
        }
      ]'
```
### Headers
The `Content-Type` header should always be set to `application/json` since the request body is a JSON array.
### URL Parameters
|**Parameter**|**Description**|
|--|--|
|`daprPort`|The Dapr port|
|`pubsubname`|The name of pub/sub component|
|`topic`|The name of the topic|
|`metadata`|Query parameters for [metadata]({{< ref "pubsub_api.md#metadata" >}})|
### Metadata
Metadata can be sent via query parameters in the request's URL. It must be prefixed with `metadata.`, as shown in the table below.
|**Parameter**|**Description**|
|--|--|
|`metadata.rawPayload`|Boolean to determine if Dapr should publish the messages without wrapping them as CloudEvents.|
|`metadata.maxBulkPubBytes`|Maximum bytes to publish in a bulk publish request.|
### HTTP Response
|**HTTP Status**|**Description**|
|--|--|
|204|All messages delivered|
|400|Pub/sub does not exist|
|403|Forbidden by access controls|
|500|At least one message failed to be delivered|
In case of a 500 status code, the response body contains a JSON object with the list of entries that failed to be delivered. For example, from the request above, if the entry with event `"first text message"` failed to be delivered, the response would contain its entry ID and an error message from the underlying pub/sub component.
```json
{
    "failedEntries": [
        {
            "entryId": "ae6bf7c6-4af2-11ed-b878-0242ac120002",
            "error": "some error message"
        }
    ],
    "errorCode": "ERR_PUBSUB_PUBLISH_MESSAGE"
}
```
## Optional Application (User Code) Routes
### Provide a route for Dapr to discover topic subscriptions

View File

@ -23,18 +23,17 @@ dapr run [flags] [command]
| Name | Environment Variable | Default | Description |
| ------------------------------ | -------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------- |
| `--app-id`, `-a` | `APP_ID` | | The id for your application, used for service discovery |
| `--app-id`, `-a` | `APP_ID` | | The id for your application, used for service discovery. Cannot contain dots. |
| `--app-max-concurrency` | | `unlimited` | The concurrency level of the application; default is unlimited |
| `--app-port`, `-p` | `APP_PORT` | | The port your application is listening on |
| `--app-protocol`, `-P` | | `http` | The protocol Dapr uses to talk to the application. Valid values are: `http` or `grpc` |
| `--app-ssl` | | `false` | Enable https when Dapr invokes the application |
| `--components-path`, `-d` | | Linux/Mac: `$HOME/.dapr/components` <br/>Windows: `%USERPROFILE%\.dapr\components` | **Deprecated** in favor of `--resources-path` |
| `--resources-path`, `-d` | | Linux/Mac: `$HOME/.dapr/components` <br/>Windows: `%USERPROFILE%\.dapr\components` | The path for components directory |
| `--config`, `-c` | | Linux/Mac: `$HOME/.dapr/config.yaml` <br/>Windows: `%USERPROFILE%\.dapr\config.yaml` | Dapr configuration file |
| `--dapr-grpc-port` | `DAPR_GRPC_PORT` | `50001` | The gRPC port for Dapr to listen on |
| `--dapr-http-port` | `DAPR_HTTP_PORT` | `3500` | The HTTP port for Dapr to listen on |
| `--enable-profiling` | | `false` | Enable "pprof" profiling via an HTTP endpoint |
| `--help`, `-h` | | | Print the help message |
| `--image` | | | Use a custom Docker image. Format is `repository/image` for Docker Hub, or `example.com/repository/image` for a custom registry. |
| `--log-level` | | `info` | The log verbosity. Valid values are: `debug`, `info`, `warn`, `error`, `fatal`, or `panic` |
| `--enable-api-logging` | | `false` | Enable the logging of all API calls from application to Dapr |
@ -46,8 +45,9 @@ dapr run [flags] [command]
| `--app-health-probe-timeout` | | | Timeout for app health probes in milliseconds |
| `--app-health-threshold` | | | Number of consecutive failures for the app to be considered unhealthy |
| `--unix-domain-socket`, `-u` | | | Path to a unix domain socket dir mount. If specified, communication with the Dapr sidecar uses unix domain sockets for lower latency and greater throughput when compared to using TCP ports. Not available on Windows. |
| `--dapr-http-max-request-size` | | `4` | Max size of the request body in MB. |
| `--dapr-http-read-buffer-size` | | `4` | Max size of the HTTP read buffer in KB. This also limits the maximum size of HTTP headers. The default is 4 KB |
| `--components-path`, `-d` | | Linux/Mac: `$HOME/.dapr/components` <br/>Windows: `%USERPROFILE%\.dapr\components` | **Deprecated** in favor of `--resources-path` |
### Examples

View File

@ -9,7 +9,7 @@ aliases:
## Component format
To setup Azure Event Hubs binding create a component of type `bindings.azure.eventhubs`. See [this guide]({{< ref "howto-bindings.md#1-create-a-binding" >}}) on how to create and apply a binding configuration.
To setup an Azure Event Hubs binding, create a component of type `bindings.azure.eventhubs`. See [this guide]({{< ref "howto-bindings.md#1-create-a-binding" >}}) on how to create and apply a binding configuration.
See [this guide](https://docs.microsoft.com/azure/event-hubs/event-hubs-dotnet-framework-getstarted-send) for instructions on how to set up an Event Hub.
@ -22,18 +22,39 @@ spec:
type: bindings.azure.eventhubs
version: v1
metadata:
  - name: connectionString      # Azure EventHubs connection string
    value: "Endpoint=sb://****"
  - name: consumerGroup         # EventHubs consumer group
    value: "group1"
  - name: storageAccountName    # Azure Storage Account Name
    value: "accountName"
  - name: storageAccountKey     # Azure Storage Account Key
    value: "accountKey"
  - name: storageContainerName  # Azure Storage Container Name
    value: "containerName"
  - name: partitionID           # (Optional) PartitionID to send and receive events
    value: 0
    # Hub name ("topic")
  - name: eventHub
    value: "mytopic"
  - name: consumerGroup
    value: "myapp"
    # Either connectionString or eventHubNamespace is required
    # Use connectionString when *not* using Azure AD
  - name: connectionString
    value: "Endpoint=sb://{EventHubNamespace}.servicebus.windows.net/;SharedAccessKeyName={PolicyName};SharedAccessKey={Key};EntityPath={EventHub}"
    # Use eventHubNamespace when using Azure AD
  - name: eventHubNamespace
    value: "namespace"
  - name: enableEntityManagement
    value: "false"
    # The following four properties are needed only if enableEntityManagement is set to true
  - name: resourceGroupName
    value: "test-rg"
  - name: subscriptionID
    value: "value of Azure subscription ID"
  - name: partitionCount
    value: "1"
  - name: messageRetentionInDays
    value: "3"
    # Checkpoint store attributes
  - name: storageAccountName
    value: "myeventhubstorage"
  - name: storageAccountKey
    value: "112233445566778899"
  - name: storageContainerName
    value: "myeventhubstoragecontainer"
    # Alternative to passing storageAccountKey
  - name: storageConnectionString
    value: "DefaultEndpointsProtocol=https;AccountName=<account>;AccountKey=<account-key>"
```
{{% alert title="Warning" color="warning" %}}
@ -42,25 +63,31 @@ The above example uses secrets as plain strings. It is recommended to use a secr
## Spec metadata fields
| Field | Required | Binding support | Details | Example |
|--------------------|:--------:|------------|-----|---------|
| connectionString | Y | Output | The [EventHubs connection string](https://docs.microsoft.com/azure/event-hubs/authorize-access-shared-access-signature). Note that this is the EventHub itself and not the EventHubs namespace. Make sure to use the child EventHub shared access policy connection string | `"Endpoint=sb://****"` |
| consumerGroup | Y | Output | The name of an [EventHubs Consumer Group](https://docs.microsoft.com/azure/event-hubs/event-hubs-features#consumer-groups) to listen on | `"group1"` |
| storageAccountName | Y | Output | The name of the account of the Azure Storage account to persist checkpoints data on | `"accountName"` |
| storageAccountKey | Y* | Output | The account key for the Azure Storage account to persist checkpoints data on. ***Not required if using AAD authentication.** | `"accountKey"` |
| storageContainerName | Y | Output | The name of the container in the Azure Storage account to persist checkpoints data on | `"containerName"` |
| partitionID | N | Output | ID of the partition to send and receive events | `0` |
| eventHub | N | Output | The name of the EventHubs hub. **Required if using AAD authentication.** | `eventHubsNamespace-hubName` |
| eventHubNamespace | N | Output | The name of the EventHubs namespace. **Required if using AAD authentication.** | `eventHubsNamespace` |
| `eventHub` | Y* | Input/Output | The name of the Event Hubs hub ("topic"). Required if using Azure AD authentication or if the connection string doesn't contain an `EntityPath` value | `mytopic` |
| `connectionString` | Y* | Input/Output | Connection string for the Event Hub or the Event Hub namespace.<br>* Mutually exclusive with `eventHubNamespace` field.<br>* Required when not using [Azure AD Authentication]({{< ref "authenticating-azure.md" >}}) | `"Endpoint=sb://{EventHubNamespace}.servicebus.windows.net/;SharedAccessKeyName={PolicyName};SharedAccessKey={Key};EntityPath={EventHub}"` or `"Endpoint=sb://{EventHubNamespace}.servicebus.windows.net/;SharedAccessKeyName={PolicyName};SharedAccessKey={Key}"`
| `eventHubNamespace` | Y* | Input/Output | The Event Hub Namespace name.<br>* Mutually exclusive with `connectionString` field.<br>* Required when using [Azure AD Authentication]({{< ref "authenticating-azure.md" >}}) | `"namespace"`
| `enableEntityManagement` | N | Input/Output | Boolean value to allow management of the EventHub namespace and storage account. Default: `false` | `"true", "false"`
| `resourceGroupName` | N | Input/Output | Name of the resource group the Event Hub namespace is part of. Required when entity management is enabled | `"test-rg"`
| `subscriptionID` | N | Input/Output | Azure subscription ID value. Required when entity management is enabled | `"azure subscription id"`
| `partitionCount` | N | Input/Output | Number of partitions for the new Event Hub namespace. Used only when entity management is enabled. Default: `"1"` | `"2"`
| `messageRetentionInDays` | N | Input/Output | Number of days to retain messages for in the newly created Event Hub namespace. Used only when entity management is enabled. Default: `"1"` | `"90"`
| `consumerGroup` | Y | Input | The name of the [Event Hubs Consumer Group](https://docs.microsoft.com/azure/event-hubs/event-hubs-features#consumer-groups) to listen on | `"group1"` |
| `storageAccountName` | Y | Input | Storage account name to use for the checkpoint store. |`"myeventhubstorage"`
| `storageAccountKey` | Y* | Input | Storage account key for the checkpoint store account.<br>* When using Azure AD, it's possible to omit this if the service principal has access to the storage account too. | `"112233445566778899"`
| `storageConnectionString` | Y* | Input | Connection string for the checkpoint store, alternative to specifying `storageAccountKey` | `"DefaultEndpointsProtocol=https;AccountName=myeventhubstorage;AccountKey=<account-key>"`
| `storageContainerName` | Y | Input | Storage container name for the storage account name. | `"myeventhubstoragecontainer"`
### Azure Active Directory (AAD) authentication
The Azure Event Hubs pubsub component supports authentication using all Azure Active Directory mechanisms. For further information and the relevant component metadata fields to provide depending on the choice of AAD authentication mechanism, see the [docs for authenticating to Azure]({{< ref authenticating-azure.md >}}).
The Azure Event Hubs pub/sub component supports authentication using all Azure Active Directory mechanisms. For further information and the relevant component metadata fields to provide depending on the choice of AAD authentication mechanism, see the [docs for authenticating to Azure]({{< ref authenticating-azure.md >}}).
## Binding support
This component supports **output binding** with the following operations:
- `create`
- `create`: publishes a new message to Azure Event Hubs
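
For example, a hedged sketch of invoking the `create` operation through Dapr's bindings API; the component name `eventhubs-binding` and the default port `3500` are placeholder assumptions:

```bash
# Publish a message through the Event Hubs output binding
curl -X POST http://localhost:3500/v1.0/bindings/eventhubs-binding \
  -H "Content-Type: application/json" \
  -d '{ "operation": "create", "data": { "message": "Hello Event Hubs" } }'
```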
## Input Binding to Azure IoT Hub Events
@ -79,7 +106,7 @@ The device-to-cloud events created by Azure IoT Hub devices will contain additio
For example, the headers of an HTTP `Read()` response would contain:
```nodejs
```js
{
'user-agent': 'fasthttp',
'host': '127.0.0.1:3000',


@ -20,6 +20,18 @@ spec:
metadata:
- name: url
value: http://something.com
- name: MTLSRootCA
value: /Users/somepath/root.pem # OPTIONAL <path to root CA> or <pem encoded string>
- name: MTLSClientCert
value: /Users/somepath/client.pem # OPTIONAL <path to client cert> or <pem encoded string>
- name: MTLSClientKey
value: /Users/somepath/client.key # OPTIONAL <path to client key> or <pem encoded string>
- name: securityToken # OPTIONAL <token to include as a header on HTTP requests>
secretKeyRef:
name: mysecret
key: mytoken
- name: securityTokenHeader
value: "Authorization: Bearer" # OPTIONAL <header name for the security token>
```
## Spec metadata fields
@ -27,6 +39,11 @@ spec:
| Field | Required | Binding support | Details | Example |
|--------------------|:--------:|--------|--------|---------|
| url | Y | Output |The base URL of the HTTP endpoint to invoke | `http://host:port/path`, `http://myservice:8000/customers`
| MTLSRootCA | N | Output |Path to the root CA certificate, or a PEM-encoded string |
| MTLSClientCert | N | Output |Path to the client certificate, or a PEM-encoded string |
| MTLSClientKey | N | Output |Path to the client private key, or a PEM-encoded string |
| securityToken | N | Output |The value of a token to be added to an HTTP request as a header. Used together with `securityTokenHeader` |
| securityTokenHeader| N | Output |The name of the header for `securityToken` on an HTTP request |
## Binding support
@ -292,6 +309,17 @@ curl -d '{ "operation": "get" }' \
{{< /tabs >}}
## Using mTLS or enabling client TLS authentication along with HTTPS
You can configure the HTTP binding to use mTLS or client TLS authentication along with HTTPS by providing the `MTLSRootCA`, `MTLSClientCert`, and `MTLSClientKey` metadata fields in the binding component.
These fields can be passed as a file path or as a PEM-encoded string.
- If a file path is provided, the file is read and its contents are used.
- If a PEM-encoded string is provided, it is used as is.
When these fields are configured, the Dapr sidecar uses the provided certificate to authenticate itself with the server during the TLS handshake process.
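
For illustration, a minimal sketch of the inline PEM form (the certificate contents below are placeholders):

```yaml
  - name: MTLSRootCA
    value: |
      -----BEGIN CERTIFICATE-----
      ...contents of the root CA certificate...
      -----END CERTIFICATE-----
```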
### When to use:
Use this when the server that the HTTP binding communicates with requires mTLS or client TLS authentication.
## Related links


@ -105,14 +105,21 @@ Using SQS FIFO (`fifo` metadata field set to `"true"`) per AWS specifications pr
Specifying `fifoMessageGroupID` limits the number of concurrent consumers of the FIFO queue used to only one but guarantees global ordering of messages published by the app's Dapr sidecars. See [this AWS blog post](https://aws.amazon.com/blogs/compute/solving-complex-ordering-challenges-with-amazon-sqs-fifo-queues/) to better understand the topic of Message Group IDs and FIFO queues.
To avoid losing the order of messages delivered to consumers, the FIFO configuration for the SQS Component requires the `concurrencyMode` metadata field set to `"single"`.
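
A minimal metadata sketch of that FIFO setup, assuming an illustrative message group ID:

```yaml
  - name: fifo
    value: "true"
  - name: fifoMessageGroupID
    value: "app1-group"
  - name: concurrencyMode
    value: "single"
```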
#### Default parallel `concurrencyMode`
Since v1.8.0, the component supports the `"parallel"` `concurrencyMode` as its default mode. In prior versions, the component's default behavior was to deliver a single message at a time to the subscriber and wait for its response.
#### SQS dead-letter Queues
When configuring the PubSub component with SQS dead-letter queues, the metadata fields `messageReceiveLimit` and `sqsDeadLettersQueueName` must both be set: `messageReceiveLimit` must be greater than `0`, and `sqsDeadLettersQueueName` must not be an empty string.
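
As a sketch, assuming an illustrative dead-letter queue name:

```yaml
  - name: messageReceiveLimit
    value: "10"
  - name: sqsDeadLettersQueueName
    value: "myapp-dead-letters"
```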
{{% alert title="Important" color="warning" %}}
When running the Dapr sidecar (`daprd`) with your application on an EKS (AWS Kubernetes) node/pod already attached to an IAM policy defining access to AWS resources, you **must not** provide an AWS access key, secret key, or tokens in the definition of the component spec.
{{% /alert %}}
## Create an SNS/SQS instance
{{< tabs "Self-Hosted" "Kubernetes" "AWS" >}}


@ -8,7 +8,8 @@ aliases:
---
## Component format
To setup Azure Event Hubs pubsub create a component of type `pubsub.azure.eventhubs`. See [this guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pubsub configuration.
To setup an Azure Event Hubs pub/sub, create a component of type `pubsub.azure.eventhubs`. See [this guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration.
Apart from the configuration metadata fields shown below, Azure Event Hubs also supports [Azure Authentication]({{< ref "authenticating-azure.md" >}}) mechanisms.
```yaml
@ -20,29 +21,34 @@ spec:
type: pubsub.azure.eventhubs
version: v1
metadata:
- name: connectionString # Either connectionString or eventHubNamespace. Should not be used when
# Azure Authentication mechanism is used.
value: "Endpoint=sb://{EventHubNamespace}.servicebus.windows.net/;SharedAccessKeyName={PolicyName};SharedAccessKey={Key};EntityPath={EventHub}"
- name: eventHubNamespace # Either connectionString or eventHubNamespace. Should be used when
# Azure Authentication mechanism is used.
value: "namespace"
- name: enableEntityManagement
value: "false"
## The following four properties are needed only if enableEntityManagement is set to true
- name: resourceGroupName
value: "test-rg"
- name: subscriptionID
value: "value of Azure subscription ID"
- name: partitionCount
value: "1"
- name: messageRetentionInDays
## Subscriber attributes
- name: storageAccountName
value: "myeventhubstorage"
- name: storageAccountKey
value: "112233445566778899"
- name: storageContainerName
value: "myeventhubstoragecontainer"
# Either connectionString or eventHubNamespace is required
# Use connectionString when *not* using Azure AD
- name: connectionString
value: "Endpoint=sb://{EventHubNamespace}.servicebus.windows.net/;SharedAccessKeyName={PolicyName};SharedAccessKey={Key};EntityPath={EventHub}"
# Use eventHubNamespace when using Azure AD
- name: eventHubNamespace
value: "namespace"
- name: enableEntityManagement
value: "false"
# The following four properties are needed only if enableEntityManagement is set to true
- name: resourceGroupName
value: "test-rg"
- name: subscriptionID
value: "value of Azure subscription ID"
- name: partitionCount
value: "1"
- name: messageRetentionInDays
value: "3"
# Checkpoint store attributes
- name: storageAccountName
value: "myeventhubstorage"
- name: storageAccountKey
value: "112233445566778899"
- name: storageContainerName
value: "myeventhubstoragecontainer"
# Alternative to passing storageAccountKey
- name: storageConnectionString
value: "DefaultEndpointsProtocol=https;AccountName=<account>;AccountKey=<account-key>"
```
{{% alert title="Warning" color="warning" %}}
@ -53,21 +59,24 @@ The above example uses secrets as plain strings. It is recommended to use a secr
| Field | Required | Details | Example |
|--------------------|:--------:|---------|---------|
| connectionString | Y* | Connection-string for the Event Hub or the Event Hub namespace. *Mutally exclusive with `eventHubNamespace` field. *Not to be used when [Azure Authentication]({{< ref "authenticating-azure.md" >}}) is used | `"Endpoint=sb://{EventHubNamespace}.servicebus.windows.net/;SharedAccessKeyName={PolicyName};SharedAccessKey={Key};EntityPath={EventHub}"` or `"Endpoint=sb://{EventHubNamespace}.servicebus.windows.net/;SharedAccessKeyName={PolicyName};SharedAccessKey={Key}"`
| eventHubNamespace | N* | The Event Hub Namespace name. *Mutally exclusive with `connectionString` field. *To be used when [Azure Authentication]({{< ref "authenticating-azure.md" >}}) is used | `"namespace"`
| storageAccountName | Y | Storage account name to use for the EventProcessorHost |`"myeventhubstorage"`
| storageAccountKey | Y* | Storage account key to use for the EventProcessorHost. Can be `secretKeyRef` to use a secret reference. *Omit if using [Azure Authentication]({{< ref "authenticating-azure.md" >}}) and AAD authentication to the storage account is preferred. | `"112233445566778899"`
| storageContainerName | Y | Storage container name for the storage account name. | `"myeventhubstoragecontainer"`
| enableEntityManagement | N | Boolean value to allow management of EventHub namespace. Default: `false` | `"true", "false"`
| resourceGroupName | N | Name of the resource group the event hub namespace is a part of. Needed when entity management is enabled | `"test-rg"`
| subscriptionID | N | Azure subscription ID value. Needed when entity management is enabled | `"azure subscription id"`
| partitionCount | N | Number of partitions for the new event hub. Only used when entity management is enabled. Default: `"1"` | `"2"`
| messageRetentionInDays | N | Number of days to retain messages for in the newly created event hub. Used only when entity management is enabled. Default: `"1"` | `"90"`
| `connectionString` | Y* | Connection string for the Event Hub or the Event Hub namespace.<br>* Mutually exclusive with `eventHubNamespace` field.<br>* Required when not using [Azure AD Authentication]({{< ref "authenticating-azure.md" >}}) | `"Endpoint=sb://{EventHubNamespace}.servicebus.windows.net/;SharedAccessKeyName={PolicyName};SharedAccessKey={Key};EntityPath={EventHub}"` or `"Endpoint=sb://{EventHubNamespace}.servicebus.windows.net/;SharedAccessKeyName={PolicyName};SharedAccessKey={Key}"`
| `eventHubNamespace` | Y* | The Event Hub Namespace name.<br>* Mutually exclusive with `connectionString` field.<br>* Required when using [Azure AD Authentication]({{< ref "authenticating-azure.md" >}}) | `"namespace"`
| `storageAccountName` | Y | Storage account name to use for the checkpoint store. |`"myeventhubstorage"`
| `storageAccountKey` | Y* | Storage account key for the checkpoint store account.<br>* When using Azure AD, it's possible to omit this if the service principal has access to the storage account too. | `"112233445566778899"`
| `storageConnectionString` | Y* | Connection string for the checkpoint store, alternative to specifying `storageAccountKey` | `"DefaultEndpointsProtocol=https;AccountName=myeventhubstorage;AccountKey=<account-key>"`
| `storageContainerName` | Y | Storage container name for the storage account name. | `"myeventhubstoragecontainer"`
| `enableEntityManagement` | N | Boolean value to allow management of the EventHub namespace and storage account. Default: `false` | `"true", "false"`
| `resourceGroupName` | N | Name of the resource group the Event Hub namespace is part of. Required when entity management is enabled | `"test-rg"`
| `subscriptionID` | N | Azure subscription ID value. Required when entity management is enabled | `"azure subscription id"`
| `partitionCount` | N | Number of partitions for the new Event Hub namespace. Used only when entity management is enabled. Default: `"1"` | `"2"`
| `messageRetentionInDays` | N | Number of days to retain messages for in the newly created Event Hub namespace. Used only when entity management is enabled. Default: `"1"` | `"90"`
### Azure Active Directory (AAD) authentication
The Azure Event Hubs pubsub component supports authentication using all Azure Active Directory mechanisms. For further information and the relevant component metadata fields to provide depending on the choice of AAD authentication mechanism, see the [docs for authenticating to Azure]({{< ref authenticating-azure.md >}}).
The Azure Event Hubs pub/sub component supports authentication using all Azure Active Directory mechanisms. For further information and the relevant component metadata fields to provide depending on the choice of AAD authentication mechanism, see the [docs for authenticating to Azure]({{< ref authenticating-azure.md >}}).
#### Example Configuration
```yaml
apiVersion: dapr.io/v1alpha1
kind: Component
@ -77,32 +86,31 @@ spec:
type: pubsub.azure.eventhubs
version: v1
metadata:
# Azure Authentication Used
- name: azureTenantId
value: "***"
- name: azureClientId
value: "***"
- name: azureClientSecret
value: "***"
- name: eventHubNamespace
value: "namespace"
- name: enableEntityManagement
value: "false"
## The following four properties are needed only if enableEntityManagement is set to true
- name: resourceGroupName
value: "test-rg"
- name: subscriptionID
value: "value of Azure subscription ID"
- name: partitionCount
value: "1"
- name: messageRetentionInDays
## Subscriber attributes
- name: storageAccountName
value: "myeventhubstorage"
- name: storageAccountKey
value: "112233445566778899"
- name: storageContainerName
value: "myeventhubstoragecontainer"
# Azure Authentication Used
- name: azureTenantId
value: "***"
- name: azureClientId
value: "***"
- name: azureClientSecret
value: "***"
- name: eventHubNamespace
value: "namespace"
- name: enableEntityManagement
value: "false"
# The following four properties are needed only if enableEntityManagement is set to true
- name: resourceGroupName
value: "test-rg"
- name: subscriptionID
value: "value of Azure subscription ID"
- name: partitionCount
value: "1"
- name: messageRetentionInDays
# Checkpoint store attributes
# In this case, we're using Azure AD to access the storage account too
- name: storageAccountName
value: "myeventhubstorage"
- name: storageContainerName
value: "myeventhubstoragecontainer"
```
## Sending multiple messages
@ -115,27 +123,27 @@ Azure Event Hubs natively supports sending multiple messages in a single operati
## Create an Azure Event Hub
Follow the instructions [here](https://docs.microsoft.com/azure/event-hubs/event-hubs-create) on setting up Azure Event Hubs.
Since this implementation uses the Event Processor Host, you will also need an [Azure Storage Account](https://docs.microsoft.com/azure/storage/common/storage-account-create?tabs=azure-portal). Follow the instructions [here](https://docs.microsoft.com/azure/storage/common/storage-account-keys-manage) to manage the storage account access keys.
Follow the instructions in the [documentation](https://docs.microsoft.com/azure/event-hubs/event-hubs-create) to set up Azure Event Hubs.
See [here](https://docs.microsoft.com/azure/event-hubs/authorize-access-shared-access-signature) on how to get the Event Hubs connection string. Note this is not the Event Hubs namespace.
Because this component uses Azure Storage as the checkpoint store, you will also need an [Azure Storage Account](https://docs.microsoft.com/azure/storage/common/storage-account-create?tabs=azure-portal). Follow the instructions in the [documentation](https://docs.microsoft.com/azure/storage/common/storage-account-keys-manage) to manage the storage account access keys.
See the [documentation](https://docs.microsoft.com/azure/event-hubs/authorize-access-shared-access-signature) on how to get the Event Hubs connection string (note this is not for the Event Hubs namespace).
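
As an illustrative sketch using the Azure CLI (all resource names below are placeholders):

```bash
# Create an Event Hubs namespace and a hub ("topic")
az eventhubs namespace create --resource-group my-rg --name my-eventhub-ns --location eastus
az eventhubs eventhub create --resource-group my-rg --namespace-name my-eventhub-ns --name mytopic

# Create the storage account and container used as the checkpoint store
az storage account create --resource-group my-rg --name myeventhubstorage --location eastus
az storage container create --account-name myeventhubstorage --name myeventhubstoragecontainer
```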
### Create consumer groups for each subscriber
For every Dapr app that wants to subscribe to events, create an Event Hubs consumer group with the name of the `dapr id`.
For example, a Dapr app running on Kubernetes with `dapr.io/app-id: "myapp"` will need an Event Hubs consumer group named `myapp`.
For every Dapr app that wants to subscribe to events, create an Event Hubs consumer group with the name of the Dapr app ID. For example, a Dapr app running on Kubernetes with `dapr.io/app-id: "myapp"` will need an Event Hubs consumer group named `myapp`.
Note: Dapr passes the name of the Consumer group to the EventHub and so this is not supplied in the metadata.
Note: Dapr passes the name of the consumer group to the Event Hub, so this is not supplied in the metadata.
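
For example, a hedged Azure CLI sketch that creates a consumer group named after the app ID `myapp` (the other resource names are placeholders):

```bash
az eventhubs eventhub consumer-group create --resource-group my-rg \
  --namespace-name my-eventhub-ns --eventhub-name mytopic --name myapp
```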
## Entity Management
When entity management is enabled in configuration, as long as the application has the right role and permissions to manipulate the Event Hub namespace, creation of Event Hubs and consumer groups can be done on the fly.
When entity management is enabled in the metadata, as long as the application has the right role and permissions to manipulate the Event Hub namespace, Dapr can automatically create the Event Hub and consumer group for you.
The Evet Hub name is the `topic` field in the incoming request to publish or subscribe to, while the consumer group name is the name of the `dapr app` which subscribes to a given Event Hub. For example, a Dapr app running on Kubernetes with name `dapr.io/app-id: "myapp"` requires an Event Hubs consumer group named `myapp`.
The Event Hub name is the `topic` field in the incoming request to publish or subscribe to, while the consumer group name is the name of the Dapr app which subscribes to a given Event Hub. For example, a Dapr app running on Kubernetes with name `dapr.io/app-id: "myapp"` requires an Event Hubs consumer group named `myapp`.
Entity management is only possible when using [Azure Authentication]({{< ref "authenticating-azure.md" >}}) mechanisms and not via `connectionString`.
Entity management is only possible when using [Azure AD Authentication]({{< ref "authenticating-azure.md" >}}) and not using a connection string.
Note: Dapr passes the name of the Consumer group to the EventHub and this is not supplied in the metadata.
> Dapr passes the name of the consumer group to the Event Hub, so this is not supplied in the metadata.
## Subscribing to Azure IoT Hub Events
@ -154,7 +162,7 @@ The device-to-cloud events created by Azure IoT Hub devices will contain additio
For example, the headers of a delivered HTTP subscription message would contain:
```nodejs
```js
{
'user-agent': 'fasthttp',
'host': '127.0.0.1:3000',
@ -174,6 +182,7 @@ For example, the headers of a delivered HTTP subscription message would contain:
```
## Related links
- [Basic schema for a Dapr component]({{< ref component-schema >}})
- Read [this guide]({{< ref "howto-publish-subscribe.md#step-2-publish-a-topic" >}}) for instructions on configuring pub/sub components
- [Pub/Sub building block]({{< ref pubsub >}})


@ -96,9 +96,9 @@
output: true
- component: Redis
link: redis
state: Beta
state: Stable
version: v1
since: "1.7"
since: "1.9"
features:
input: false
output: true


@ -1,5 +1,8 @@
- component: AWS SNS/SQS
link: setup-aws-snssqs
state: Beta
state: Stable
version: v1
since: "1.6"
features:
bulkPublish: false
bulkSubscribe: false


@ -3,8 +3,14 @@
state: Stable
version: v1
since: "1.8"
features:
bulkPublish: true
bulkSubscribe: false
- component: Azure Service Bus
link: setup-azure-servicebus
state: Stable
version: v1
since: "1.0"
features:
bulkPublish: true
bulkSubscribe: true


@ -3,3 +3,6 @@
state: Alpha
version: v1
since: "1.0"
features:
bulkPublish: false
bulkSubscribe: false


@ -3,53 +3,86 @@
state: Deprecated
version: v1
since: "1.9"
- component: In Memory
features:
bulkPublish: false
bulkSubscribe: false
- component: In-memory
link: setup-inmemory
state: Beta
version: v1
since: "1.7"
features:
bulkPublish: false
bulkSubscribe: false
- component: Apache Kafka
link: setup-apache-kafka
state: Stable
version: v1
since: "1.5"
features:
bulkPublish: true
bulkSubscribe: true
- component: Redis Streams
link: setup-redis-pubsub
state: Stable
version: v1
since: "1.0"
features:
bulkPublish: false
bulkSubscribe: false
- component: JetStream
link: setup-jetstream
state: Alpha
version: v1
since: "1.4"
- component: Pulsar
link: setup-pulsar
state: Beta
version: v1
since: "1.7"
since: "1.10"
features:
bulkPublish: false
bulkSubscribe: false
- component: Pulsar
link: setup-pulsar
state: Stable
version: v1
since: "1.10"
features:
bulkPublish: false
bulkSubscribe: false
- component: MQTT3
link: setup-mqtt3
state: Stable
version: v1
since: "1.7"
features:
bulkPublish: false
bulkSubscribe: false
- component: NATS Streaming
link: setup-nats-streaming
state: Beta
version: v1
since: "1.0"
features:
bulkPublish: false
bulkSubscribe: false
- component: RabbitMQ
link: setup-rabbitmq
state: Stable
version: v1
since: "1.7"
features:
bulkPublish: false
bulkSubscribe: false
- component: RocketMQ
link: setup-rocketmq
state: Alpha
version: v1
since: "1.8"
features:
bulkPublish: false
bulkSubscribe: false
- component: Solace-AMQP
link: setup-solace-amqp
state: Alpha
state: Beta
version: v1
since: "1.10"
features:
bulkPublish: false
bulkSubscribe: false


@ -33,9 +33,9 @@
query: false
- component: Azure Table Storage
link: setup-azure-tablestorage
state: Beta
state: Stable
version: v1
since: "1.7"
since: "1.9"
features:
crud: true
transactions: false


@ -22,9 +22,9 @@
query: false
- component: CockroachDB
link: setup-cockroachdb
state: Beta
state: Stable
version: v1
since: "1.7"
since: "1.10"
features:
crud: true
transactions: true
@ -64,7 +64,7 @@
etag: false
ttl: false
query: false
- component: In Memory
- component: In-memory
link: setup-inmemory
state: Developer-only
version: v1
@ -110,9 +110,9 @@
query: true
- component: MySQL
link: setup-mysql
state: Beta
state: Stable
version: v1
since: "1.7"
since: "1.10"
features:
crud: true
transactions: true


@ -10,6 +10,8 @@
<table width="100%">
<tr>
<th>Component</th>
<th>Bulk Publish</th>
<th>Bulk Subscribe</th>
<th>Status</th>
<th>Component version</th>
<th>Since runtime version</th>
@ -18,6 +20,8 @@
<tr>
<td><a href="/reference/components-reference/supported-pubsub/{{ .link }}/">{{ .component }}</a>
</td>
<td align="center">{{ if .features.bulkPublish }}✅{{else}}<img src="/images/emptybox.png">{{ end }}</td>
<td align="center">{{ if .features.bulkSubscribe }}✅{{else}}<img src="/images/emptybox.png">{{ end }}</td>
<td>{{ .state }}</td>
<td>{{ .version }}</td>
<td>{{ .since }}</td>

File diff suppressed because it is too large

@ -1 +1 @@
Subproject commit e87b9ad6eefaa05390144d82642df13c5b4bed17
Subproject commit 52b82d7ce6599822a37d2528379f5ca146e286bb