mirror of https://github.com/dapr/docs.git

Commit ea9c810953: Merge branch 'dapr:v1.6' into mfussell-sec-patch
@@ -3,11 +3,11 @@ name: Azure Static Web App Root
 on:
   push:
     branches:
-      - v1.5
+      - v1.6
   pull_request:
     types: [opened, synchronize, reopened, closed]
    branches:
-      - v1.5
+      - v1.6

 jobs:
   build_and_deploy_job:

@@ -1,13 +1,13 @@
-name: Azure Static Web App v1.5
+name: Azure Static Web App v1.6

 on:
   push:
     branches:
-      - v1.5
+      - v1.6
   pull_request:
     types: [opened, synchronize, reopened, closed]
     branches:
-      - v1.5
+      - v1.6

 jobs:
   build_and_deploy_job:

@@ -27,7 +27,7 @@ jobs:
       HUGO_ENV: production
       HUGO_VERSION: "0.74.3"
     with:
-      azure_static_web_apps_api_token: ${{ secrets.AZURE_STATIC_WEB_APPS_V1_5 }}
+      azure_static_web_apps_api_token: ${{ secrets.AZURE_STATIC_WEB_APPS_V1_6 }}
       repo_token: ${{ secrets.GITHUB_TOKEN }} # Used for Github integrations (i.e. PR comments)
       skip_deploy_on_missing_secrets: true
       action: "upload"

@@ -48,6 +48,6 @@ jobs:
     id: closepullrequest
     uses: Azure/static-web-apps-deploy@v0.0.1-preview
     with:
-      azure_static_web_apps_api_token: ${{ secrets.AZURE_STATIC_WEB_APPS_V1_5 }}
+      azure_static_web_apps_api_token: ${{ secrets.AZURE_STATIC_WEB_APPS_V1_6 }}
       skip_deploy_on_missing_secrets: true
       action: "close"

@@ -5,3 +5,4 @@ node_modules/
 daprdocs/public
 daprdocs/resources/_gen
 .venv/
+.hugo_build.lock

@@ -14,8 +14,8 @@ The following branches are currently maintained:

 | Branch | Website | Description |
 | ------ | ------- | ----------- |
-| [v1.5](https://github.com/dapr/docs) (primary) | https://docs.dapr.io | Latest Dapr release documentation. Typo fixes, clarifications, and most documentation goes here. |
-| [v1.6](https://github.com/dapr/docs/tree/v1.6) (pre-release) | https://v1-6.docs.dapr.io/ | Pre-release documentation. Doc updates that are only applicable to v1.6+ go here. |
+| [v1.6](https://github.com/dapr/docs) (primary) | https://docs.dapr.io | Latest Dapr release documentation. Typo fixes, clarifications, and most documentation goes here. |
+| [v1.7](https://github.com/dapr/docs/tree/v1.7) (pre-release) | https://v1-7.docs.dapr.io/ | Pre-release documentation. Doc updates that are only applicable to v1.7+ go here. |

 For more information visit the [Dapr branch structure](https://docs.dapr.io/contributing/contributing-docs/#branch-guidance) document.

@@ -160,20 +160,23 @@ offlineSearch = false
 github_repo = "https://github.com/dapr/docs"
 github_project_repo = "https://github.com/dapr/dapr"
 github_subdir = "daprdocs"
-github_branch = "v1.5"
+github_branch = "v1.6"

 # Versioning
-version_menu = "v1.5 (latest)"
-version = "v1.5"
+version_menu = "v1.6 (latest)"
+version = "v1.6"
 archived_version = false
 url_latest_version = "https://docs.dapr.io"

 [[params.versions]]
-  version = "v1.6 (preview)"
-  url = "https://v1-6.docs.dapr.io"
+  version = "v1.7 (preview)"
+  url = "https://v1-7.docs.dapr.io"
 [[params.versions]]
-  version = "v1.5 (latest)"
+  version = "v1.6 (latest)"
   url = "#"
+[[params.versions]]
+  version = "v1.5"
+  url = "https://v1-5.docs.dapr.io"
 [[params.versions]]
   version = "v1.4"
   url = "https://v1-4.docs.dapr.io"

@@ -21,7 +21,7 @@ Today we are experiencing a wave of cloud adoption. Developers are comfortable w

 This is where Dapr comes in. Dapr codifies the *best practices* for building microservice applications into open, independent APIs called building blocks, that enable you to build portable applications with the language and framework of your choice. Each building block is completely independent and you can use one, some, or all of them in your application.

-Using Dapr you can incrementally migrate your existing applications to a microserivces architecture, thereby adopting cloud native patterns such scale out/in, resilency and independent deployments.
+Using Dapr you can incrementally migrate your existing applications to a microservices architecture, thereby adopting cloud native patterns such as scale out/in, resiliency and independent deployments.

 In addition, Dapr is platform agnostic, meaning you can run your applications locally, on any Kubernetes cluster, on virtual or physical machines and in other hosting environments that Dapr integrates with. This enables you to build microservice applications that can run on the cloud and edge.

@@ -20,6 +20,7 @@ We welcome community members giving presentations on Dapr and spreading the word

 | Presentation | Recording | Deck |
 |--------------|-----------|------|
 | Running Event-Driven Pub/Sub Microservices In Kubernetes With Dapr | [Link](https://youtu.be/-4sHUvfk2Eg) | N/A
 | Ignite 2019: Mark Russinovich Presents the Future of Cloud Native Applications | [Link](https://www.youtube.com/watch?v=LAUDVk8PaCY) | [Link](/presentations/2019IgniteCloudNativeApps.pdf)
 | Azure Community Live: Build microservice applications using DAPR with Mark Fussell | [Link](https://www.youtube.com/watch?v=CgqI7nen-Ng) | N/A
+| Ready 2020: Mark Russinovich Presents Cloud Native Applications | [Link](https://youtu.be/eJCu6a-x9uo?t=1614) | [Link](/presentations/2020ReadyCloudNativeApps.pdf)

@@ -11,6 +11,12 @@ This HowTo uses the Redis configuration store component as an example on how to

 *This API is currently in `Alpha` state and only available on gRPC. An HTTP1.1 supported version with this URL syntax `/v1.0/configuration` will be available before the API is certified into `Stable` state.*

+## Example
+
+The below code examples loosely describe an application that processes orders. In the examples, there is an order processing service which has a Dapr sidecar. The order processing service uses Dapr to retrieve the configuration from a Redis configuration store.
+
+<img src="/images/building-block-configuration-example.png" width=1000 alt="Diagram showing get configuration of example service">
+
 ## Step 1: Create a configuration item in store

 First, create a configuration item in a supported configuration store. This can be a simple key-value item, with any key of your choice. For this example, we'll use the Redis configuration store component.

@@ -31,7 +37,7 @@ redis-cli -p 6379
 Save a configuration item:

 ```
-set myconfig "wookie"
+MSET orderId1 "101||1" orderId2 "102||1"
 ```

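The `"101||1"` values follow the `<value>||<version>` convention that the Redis configuration store component appears to use for versioned items; reading a key back shows the raw encoding (a sketch, run in the same `redis-cli` session):

```
GET orderId1
"101||1"
```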
 ### Configure a Dapr configuration store

@@ -42,7 +48,7 @@ Save the following component file, for example to the [default components folder
 apiVersion: dapr.io/v1alpha1
 kind: Component
 metadata:
-  name: redisconfigstore
+  name: configstore
 spec:
   type: configuration.redis
   metadata:

@@ -52,6 +58,104 @@ spec:
     value: <PASSWORD>
 ```

+### Get configuration items using Dapr SDKs
+
+{{< tabs Dotnet Java Python >}}
+
+{{% codetab %}}
+```csharp
+//dependencies
+using System;
+using System.Collections.Generic;
+using System.Threading.Tasks;
+using Dapr.Client;
+
+//code
+namespace ConfigurationApi
+{
+    public class Program
+    {
+        private static readonly string CONFIG_STORE_NAME = "configstore";
+
+        [Obsolete]
+        public static async Task Main(string[] args)
+        {
+            using var client = new DaprClientBuilder().Build();
+            var configuration = await client.GetConfiguration(CONFIG_STORE_NAME, new List<string>() { "orderId1", "orderId2" });
+            Console.WriteLine($"Got key=\n{configuration[0].Key} -> {configuration[0].Value}\n{configuration[1].Key} -> {configuration[1].Value}");
+        }
+    }
+}
+```
+
+Navigate to the directory containing the above code and run the following command to launch the application along with a Dapr sidecar:
+
+```bash
+dapr run --app-id orderprocessing --components-path ./components -- dotnet run
+```
+{{% /codetab %}}
+
+{{% codetab %}}
+```java
+//dependencies
+import io.dapr.client.DaprClientBuilder;
+import io.dapr.client.DaprPreviewClient;
+import io.dapr.client.domain.ConfigurationItem;
+import io.dapr.client.domain.GetConfigurationRequest;
+import io.dapr.client.domain.SubscribeConfigurationRequest;
+import java.util.ArrayList;
+import java.util.List;
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
+
+//code
+private static final String CONFIG_STORE_NAME = "configstore";
+
+public static void main(String[] args) throws Exception {
+    try (DaprPreviewClient client = (new DaprClientBuilder()).buildPreviewClient()) {
+        List<String> keys = new ArrayList<>();
+        keys.add("orderId1");
+        keys.add("orderId2");
+        GetConfigurationRequest req = new GetConfigurationRequest(CONFIG_STORE_NAME, keys);
+        try {
+            Mono<List<ConfigurationItem>> items = client.getConfiguration(req);
+            items.block().forEach(ConfigurationClient::print);
+        } catch (Exception ex) {
+            System.out.println(ex.getMessage());
+        }
+    }
+}
+```
+
+Navigate to the directory containing the above code and run the following command to launch the application along with a Dapr sidecar:
+
+```bash
+dapr run --app-id orderprocessing --components-path ./components -- mvn spring-boot:run
+```
+{{% /codetab %}}
+
+{{% codetab %}}
+```python
+#dependencies
+from dapr.clients import DaprClient
+
+#code
+with DaprClient() as d:
+    CONFIG_STORE_NAME = 'configstore'
+    keys = ['orderId1', 'orderId2']
+    #Wait for the Dapr sidecar to start up
+    d.wait(20)
+    configuration = d.get_configuration(store_name=CONFIG_STORE_NAME, keys=keys, config_metadata={})
+    print(f"Got key={configuration.items[0].key} value={configuration.items[0].value} version={configuration.items[0].version}")
+```
+
+Navigate to the directory containing the above code and run the following command to launch the application along with a Dapr sidecar:
+
+```bash
+dapr run --app-id orderprocessing --components-path ./components -- python3 OrderProcessingService.py
+```
+
+{{% /codetab %}}
+
+{{< /tabs >}}
+
 ### Get configuration items using gRPC API

 Using your [favorite language](https://grpc.io/docs/languages/), create a Dapr gRPC client from the [Dapr proto](https://github.com/dapr/dapr/blob/master/dapr/proto/runtime/v1/dapr.proto). The following examples show Java, C#, Python and Javascript clients.

@@ -87,7 +191,7 @@ client.GetConfigurationAlpha1({ StoreName: 'redisconfigstore', Keys = ['myconfig

 {{< /tabs >}}

-### Watch configuration items
+##### Watch configuration items

 Create a Dapr gRPC client from the [Dapr proto](https://github.com/dapr/dapr/blob/master/dapr/proto/runtime/v1/dapr.proto) using your [preferred language](https://grpc.io/docs/languages/). Then use the proto method `SubscribeConfigurationAlpha1` on your client stub to start subscribing to events. The method accepts the following request object:

@@ -108,5 +212,23 @@ message SubscribeConfigurationRequest {

 Using this method, you can subscribe to changes in specific keys for a given configuration store. gRPC streaming varies widely based on language - see the [gRPC examples here](https://grpc.io/docs/languages/) for usage.

+##### Stop watching configuration items
+
+After you have subscribed to watch configuration items, the gRPC server stream starts. This stream thread does not close itself; you have to close it by explicitly calling the `UnSubscribeConfigurationRequest` API. This method accepts the following request object:
+
+```proto
+// UnSubscribeConfigurationRequest is the message to stop watching the key-value configuration.
+message UnSubscribeConfigurationRequest {
+  // The name of configuration store.
+  string store_name = 1;
+  // Optional. The keys of the configuration item to stop watching.
+  // Store_name and keys should match a previous SubscribeConfigurationRequest's keys and store_name.
+  // Once invoked, the subscription that is watching updates for the key-value event is stopped.
+  repeated string keys = 2;
+}
+```
+
+Using this unsubscribe method, you can stop watching configuration update events. Dapr locates the subscription stream based on the `store_name` and any optional keys supplied and closes it.
+
 ## Next steps
 * Read [configuration API overview]({{< ref configuration-api-overview.md >}})

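As an illustration only, such an unsubscribe call could be issued from the command line with `grpcurl`. The RPC name (assumed here to mirror the request message), the default sidecar gRPC port 50001, and the local proto paths are all assumptions, not confirmed API details:

```bash
# Hypothetical sketch: verify the RPC name and proto path against the Dapr proto before use.
grpcurl -plaintext \
  -import-path . -proto dapr/proto/runtime/v1/dapr.proto \
  -d '{"store_name": "configstore", "keys": ["orderId1", "orderId2"]}' \
  localhost:50001 dapr.proto.runtime.v1.Dapr/UnSubscribeConfigurationAlpha1
```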
@@ -84,7 +84,7 @@ This is much more unusual. There may be occasions where you specifically chose t
 These are the specific trace context headers that are generated and propagated by Dapr for HTTP and gRPC.

 ### Trace context HTTP headers format
-When propogating a trace context header from an HTTP response to an HTTP request, these are the headers that you need to copy.
+When propagating a trace context header from an HTTP response to an HTTP request, these are the headers that you need to copy.

 #### Traceparent Header
 The traceparent header represents the incoming request in a tracing system in a common format, understood by all vendors.

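For reference, a traceparent value consists of four dash-separated fields: version, trace ID, parent span ID, and trace flags. The value below is the illustrative example from the W3C Trace Context specification:

```
traceparent: 00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-01
```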
@@ -93,7 +93,7 @@ Similarly, if two different applications (different app-IDs) subscribe to the sa

 ### Topic scoping

-By default, all topics backing the Dapr pub/sub component (e.g. Kafka, Redis Stream, RabbitMQ) are available to every application configured with that component. To limit which application can publish or subscribe to topics, Dapr provides topic scoping. This enables to you say which topics an application is allowed to publish and which topics an application is allowed to subscribe to. For more information read [publish/subscribe topic scoping]({{< ref pubsub-scopes.md >}}).
+By default, all topics backing the Dapr pub/sub component (e.g. Kafka, Redis Stream, RabbitMQ) are available to every application configured with that component. To limit which application can publish or subscribe to topics, Dapr provides topic scoping. This enables you to say which topics an application is allowed to publish to and which topics an application is allowed to subscribe to. For more information read [publish/subscribe topic scoping]({{< ref pubsub-scopes.md >}}).

 ### Message Time-to-Live (TTL)
 Dapr can set a timeout on a per-message basis, meaning that if the message is not read from the pub/sub component, then the message is discarded. This is to prevent the build up of messages that are not read. A message that has been in the queue for longer than the configured TTL is said to be dead. For more information read [publish/subscribe message time-to-live]({{< ref pubsub-message-ttl.md >}}).

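To make scoping concrete, below is a sketch of a scoped pub/sub component using the `publishingScopes` and `subscriptionScopes` metadata fields described in the linked guide; the app IDs and topic names are purely illustrative:

```yaml
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: pubsub
spec:
  type: pubsub.redis
  metadata:
  - name: redisHost
    value: "localhost:6379"
  # app1 may only publish to the orders topic
  - name: publishingScopes
    value: "app1=orders"
  # app2 may only subscribe to the orders and invoices topics
  - name: subscriptionScopes
    value: "app2=orders,invoices"
```

Message TTL, by contrast, is applied per message rather than in the component definition, for example by publishing with a `metadata.ttlInSeconds` query parameter.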
@@ -90,7 +90,7 @@ spec:
 Run the sidecar and the Go server:

 ```bash
-dapr run --app-id server --app-protocol grpc --app-port 50051 --config config.yaml -- go run main.go
+dapr run --app-id server --app-port 50051 --config config.yaml -- go run main.go
 ```

 Using the Dapr CLI, we're assigning a unique id to the app, `server`, using the `--app-id` flag.

@@ -107,7 +107,7 @@ namespace EventService
             //Using Dapr SDK to save and get state
-            await client.SaveStateAsync(DAPR_STORE_NAME, "order_1", orderId.ToString());
-            var result = await client.GetStateAsync<string>(DAPR_STORE_NAME, orderId.ToString());
+            await client.SaveStateAsync(DAPR_STORE_NAME, "order_2", orderId.ToString());
+            var result = await client.GetStateAsync<string>(DAPR_STORE_NAME, "order_1");
             Console.WriteLine("Result after get: " + result);
         }
     }

@@ -17,15 +17,20 @@ The state query API provides a way of querying the key/value data stored in stat
 Even though the state store is a key/value store, the `value` might be a JSON document with its own hierarchy, keys, and values.
 The query API allows you to use those keys and values to retrieve corresponding documents.

-This query API does not support querying of actor state stored in a state store. For that you need to use the query API for the specific database.
-See [querying actor state]({{< ref "state-management-overview.md#querying-actor-state" >}}).
+### Limitations
+The state query API has the following limitations:
+
+- The API does not support querying of actor state stored in a state store. For that you need to use the query API for the specific database. See [querying actor state]({{< ref "state-management-overview.md#querying-actor-state" >}}).
+- The API does not work with Dapr's [encrypted state stores]({{<ref howto-encrypt-state>}}) capability. Since the encryption is done by the Dapr runtime and the data is stored encrypted, this effectively prevents server-side querying.
+
 You can find additional information in the [related links]({{< ref "#related-links" >}}) section.

 ## Querying the state

 You submit query requests via HTTP POST/PUT or gRPC.
-The body of the request is the JSON map with 3 entries: `filter`, `sort`, and `pagination`.
+The body of the request is a JSON map with 3 entries: `filter`, `sort`, and `page`.

 The `filter` is an optional section. It specifies the query conditions in the form of a tree of key/value operations, where the key is the operator and the value is the operands.

@@ -42,7 +47,7 @@ If `filter` section is omitted, the query returns all entries.

 The `sort` is an optional section and is an ordered array of `key:order` pairs, where `key` is a key in the state store, and the `order` is an optional string indicating sorting order: `"ASC"` for ascending and `"DESC"` for descending. If omitted, ascending order is the default.

-The `pagination` is an optional section containing `limit` and `token` parameters. `limit` sets the page size. `token` is an iteration token returned by the component, and is used in subsequent queries.
+The `page` is an optional section containing `limit` and `token` parameters. `limit` sets the page size. `token` is an iteration token returned by the component, and is used in subsequent queries.

 For some background understanding, this query request is translated into the native query language and executed by the state store component.

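Putting the three sections together, a minimal request body has the following shape (a sketch consistent with the worked examples below):

```json
{
  "filter": {
    "EQ": { "value.state": "CA" }
  },
  "sort": [
    { "key": "value.person.id", "order": "DESC" }
  ],
  "page": {
    "limit": 10
  }
}
```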
@@ -87,17 +92,15 @@ First, let's find all employees in the state of California and sort them by thei
 This is the [query](../query-api-examples/query1.json):
 ```json
 {
-    "query": {
-        "filter": {
-            "EQ": { "value.state": "CA" }
-        },
-        "sort": [
-            {
-                "key": "value.person.id",
-                "order": "DESC"
-            }
-        ]
-    }
+    "filter": {
+        "EQ": { "value.state": "CA" }
+    },
+    "sort": [
+        {
+            "key": "value.person.id",
+            "order": "DESC"
+        }
+    ]
 }
 ```

@@ -186,10 +189,8 @@ Let's now find all employees from the "Dev Ops" and "Hardware" organizations.
 This is the [query](../query-api-examples/query2.json):
 ```json
 {
-    "query": {
-        "filter": {
-            "IN": { "value.person.org": [ "Dev Ops", "Hardware" ] }
-        }
-    }
+    "filter": {
+        "IN": { "value.person.org": [ "Dev Ops", "Hardware" ] }
+    }
 }
 ```

@@ -228,36 +229,34 @@ This is the [query](../query-api-examples/query3.json):

 ```json
 {
-    "query": {
-        "filter": {
-            "OR": [
-                {
-                    "EQ": { "value.person.org": "Dev Ops" }
-                },
-                {
-                    "AND": [
-                        {
-                            "EQ": { "value.person.org": "Finance" }
-                        },
-                        {
-                            "IN": { "value.state": [ "CA", "WA" ] }
-                        }
-                    ]
-                }
-            ]
-        },
-        "sort": [
-            {
-                "key": "value.state",
-                "order": "DESC"
-            },
-            {
-                "key": "value.person.id"
-            }
-        ],
-        "pagination": {
-            "limit": 3
-        }
-    }
+    "filter": {
+        "OR": [
+            {
+                "EQ": { "value.person.org": "Dev Ops" }
+            },
+            {
+                "AND": [
+                    {
+                        "EQ": { "value.person.org": "Finance" }
+                    },
+                    {
+                        "IN": { "value.state": [ "CA", "WA" ] }
+                    }
+                ]
+            }
+        ]
+    },
+    "sort": [
+        {
+            "key": "value.state",
+            "order": "DESC"
+        },
+        {
+            "key": "value.person.id"
+        }
+    ],
+    "page": {
+        "limit": 3
+    }
 }
 ```

@@ -336,40 +335,52 @@ The pagination token is used "as is" in the [subsequent query](../query-api-exam

 ```json
 {
-    "query": {
-        "filter": {
-            "OR": [
-                {
-                    "EQ": { "value.person.org": "Dev Ops" }
-                },
-                {
-                    "AND": [
-                        {
-                            "EQ": { "value.person.org": "Finance" }
-                        },
-                        {
-                            "IN": { "value.state": [ "CA", "WA" ] }
-                        }
-                    ]
-                }
-            ]
-        },
-        "sort": [
-            {
-                "key": "value.state",
-                "order": "DESC"
-            },
-            {
-                "key": "value.person.id"
-            }
-        ],
-        "pagination": {
-            "limit": 3,
-            "token": "3"
-        }
-    }
+    "filter": {
+        "OR": [
+            {
+                "EQ": { "value.person.org": "Dev Ops" }
+            },
+            {
+                "AND": [
+                    {
+                        "EQ": { "value.person.org": "Finance" }
+                    },
+                    {
+                        "IN": { "value.state": [ "CA", "WA" ] }
+                    }
+                ]
+            }
+        ]
+    },
+    "sort": [
+        {
+            "key": "value.state",
+            "order": "DESC"
+        },
+        {
+            "key": "value.person.id"
+        }
+    ],
+    "page": {
+        "limit": 3,
+        "token": "3"
+    }
 }
 ```

+{{< tabs "HTTP API (Bash)" "HTTP API (PowerShell)" >}}
+{{% codetab %}}
+```bash
+curl -s -X POST -H "Content-Type: application/json" -d @query-api-examples/query3-token.json http://localhost:3500/v1.0-alpha1/state/statestore/query | jq .
+```
+{{% /codetab %}}
+{{% codetab %}}
+```powershell
+Invoke-RestMethod -Method Post -ContentType 'application/json' -InFile query-api-examples/query3-token.json -Uri 'http://localhost:3500/v1.0-alpha1/state/statestore/query'
+```
+{{% /codetab %}}
+{{< /tabs >}}
+
 And the result of this query is:
 ```json
 {

@@ -1,13 +1,11 @@
 {
-    "query": {
-        "filter": {
-            "EQ": { "value.state": "CA" }
-        },
-        "sort": [
-            {
-                "key": "value.person.id",
-                "order": "DESC"
-            }
-        ]
-    }
+    "filter": {
+        "EQ": { "value.state": "CA" }
+    },
+    "sort": [
+        {
+            "key": "value.person.id",
+            "order": "DESC"
+        }
+    ]
 }

@@ -1,7 +1,5 @@
 {
-    "query": {
-        "filter": {
-            "IN": { "value.person.org": [ "Dev Ops", "Hardware" ] }
-        }
-    }
+    "filter": {
+        "IN": { "value.person.org": [ "Dev Ops", "Hardware" ] }
+    }
 }

@@ -1,34 +1,32 @@
 {
-    "query": {
-        "filter": {
-            "OR": [
-                {
-                    "EQ": { "value.person.org": "Dev Ops" }
-                },
-                {
-                    "AND": [
-                        {
-                            "EQ": { "value.person.org": "Finance" }
-                        },
-                        {
-                            "IN": { "value.state": [ "CA", "WA" ] }
-                        }
-                    ]
-                }
-            ]
-        },
-        "sort": [
-            {
-                "key": "value.state",
-                "order": "DESC"
-            },
-            {
-                "key": "value.person.id"
-            }
-        ],
-        "pagination": {
-            "limit": 3,
-            "token": "3"
-        }
-    }
+    "filter": {
+        "OR": [
+            {
+                "EQ": { "value.person.org": "Dev Ops" }
+            },
+            {
+                "AND": [
+                    {
+                        "EQ": { "value.person.org": "Finance" }
+                    },
+                    {
+                        "IN": { "value.state": [ "CA", "WA" ] }
+                    }
+                ]
+            }
+        ]
+    },
+    "sort": [
+        {
+            "key": "value.state",
+            "order": "DESC"
+        },
+        {
+            "key": "value.person.id"
+        }
+    ],
+    "page": {
+        "limit": 3,
+        "token": "3"
+    }
 }

@@ -1,33 +1,31 @@
 {
-    "query": {
-        "filter": {
-            "OR": [
-                {
-                    "EQ": { "value.person.org": "Dev Ops" }
-                },
-                {
-                    "AND": [
-                        {
-                            "EQ": { "value.person.org": "Finance" }
-                        },
-                        {
-                            "IN": { "value.state": [ "CA", "WA" ] }
-                        }
-                    ]
-                }
-            ]
-        },
-        "sort": [
-            {
-                "key": "value.state",
-                "order": "DESC"
-            },
-            {
-                "key": "value.person.id"
-            }
-        ],
-        "pagination": {
-            "limit": 3
-        }
-    }
+    "filter": {
+        "OR": [
+            {
+                "EQ": { "value.person.org": "Dev Ops" }
+            },
+            {
+                "AND": [
+                    {
+                        "EQ": { "value.person.org": "Finance" }
+                    },
+                    {
+                        "IN": { "value.state": [ "CA", "WA" ] }
+                    }
+                ]
+            }
+        ]
+    },
+    "sort": [
+        {
+            "key": "value.state",
+            "order": "DESC"
+        },
+        {
+            "key": "value.person.id"
+        }
+    ],
+    "page": {
+        "limit": 3
+    }
 }

@@ -0,0 +1,89 @@
+---
+type: docs
+title: "Dapr extension for Azure Kubernetes Service (AKS)"
+linkTitle: "Dapr extension for Azure Kubernetes Service (AKS)"
+description: "Provision Dapr on your Azure Kubernetes Service (AKS) cluster with the Dapr extension"
+weight: 4000
+---
+
+# Prerequisites
+- [Azure subscription](https://azure.microsoft.com/free/?WT.mc_id=A261C142F)
+- [Azure CLI](https://docs.microsoft.com/cli/azure/install-azure-cli-windows?tabs=azure-cli) and the ***aks-preview*** extension.
+- [Azure Kubernetes Service (AKS) cluster](https://docs.microsoft.com/azure/aks/tutorial-kubernetes-deploy-cluster?tabs=azure-cli)
+
+## Install Dapr using the AKS Dapr extension
+The recommended approach for installing Dapr on AKS is to use the AKS Dapr extension. The extension offers support for all native Dapr configuration capabilities through command-line arguments via the Azure CLI and offers the option of opting into automatic minor version upgrades of the Dapr runtime.
+
+{{% alert title="Note" color="warning" %}}
+If you install Dapr through the AKS extension, our recommendation is to continue using the extension for future management of Dapr instead of the Dapr CLI. Combining the two tools can cause conflicts and result in undesired behavior.
+{{% /alert %}}
+
+### How the extension works
+The Dapr extension works by provisioning the Dapr control plane on your AKS cluster through the Azure CLI. The Dapr control plane consists of:
+
+- **dapr-operator**: Manages component updates and Kubernetes services endpoints for Dapr (state stores, pub/subs, etc.)
+- **dapr-sidecar-injector**: Injects Dapr into annotated deployment pods and adds the environment variables `DAPR_HTTP_PORT` and `DAPR_GRPC_PORT`. This enables user-defined applications to communicate with Dapr without the need to hard-code Dapr port values.
+- **dapr-placement**: Used for actors only. Creates mapping tables that map actor instances to pods.
+- **dapr-sentry**: Manages mTLS between services and acts as a certificate authority. For more information read the security overview.
+
+### Extension prerequisites
+In order to use the AKS Dapr extension, you must first enable the `AKS-ExtensionManager` and `AKS-Dapr` feature flags on your Azure subscription.
+
+The below command registers the `AKS-ExtensionManager` and `AKS-Dapr` feature flags on your Azure subscription:
+
+```bash
+az feature register --namespace "Microsoft.ContainerService" --name "AKS-ExtensionManager"
+az feature register --namespace "Microsoft.ContainerService" --name "AKS-Dapr"
+```
+
+After a few minutes, check that the status shows `Registered`. Confirm the registration status by using the az feature list command:
+
+```bash
+az feature list -o table --query "[?contains(name, 'Microsoft.ContainerService/AKS-ExtensionManager')].{Name:name,State:properties.state}"
+az feature list -o table --query "[?contains(name, 'Microsoft.ContainerService/AKS-Dapr')].{Name:name,State:properties.state}"
+```
+
+Next, refresh the registration of the `Microsoft.KubernetesConfiguration` and `Microsoft.ContainerService` resource providers by using the az provider register command:
+
+```bash
+az provider register --namespace Microsoft.KubernetesConfiguration
+az provider register --namespace Microsoft.ContainerService
+```
+
+#### Enable the Azure CLI extension for cluster extensions
+You will also need the `k8s-extension` Azure CLI extension. Install it by running the following command:
+
+```bash
+az extension add --name k8s-extension
+```
+
+If the `k8s-extension` extension is already present, you can update it to the latest version using the below command:
+
+```bash
+az extension update --name k8s-extension
+```
+
+#### Create the extension and install Dapr on your AKS cluster
+After your subscription is registered to use Kubernetes extensions, install Dapr on your cluster by creating the Dapr extension. For example:
+
+```bash
+az k8s-extension create --cluster-type managedClusters \
+--cluster-name myAKSCluster \
+--resource-group myResourceGroup \
+--name myDaprExtension \
+--extension-type Microsoft.Dapr
+```
+
+Additionally, Dapr can automatically update its minor version. To enable this, set the `--auto-upgrade-minor-version` parameter to true:
+
+```bash
+--auto-upgrade-minor-version true
+```
+
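For example, combining the creation command above with the auto-upgrade flag (the cluster, resource group, and extension names are the same placeholders used earlier):

```bash
az k8s-extension create --cluster-type managedClusters \
--cluster-name myAKSCluster \
--resource-group myResourceGroup \
--name myDaprExtension \
--extension-type Microsoft.Dapr \
--auto-upgrade-minor-version true
```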
+Once the k8s-extension finishes provisioning, you can confirm that the Dapr control plane is installed on your AKS cluster by running:
+
+```bash
+kubectl get pods -n dapr-system
+```
+
+For further information such as configuration options and targeting specific versions of Dapr, please see the official [AKS Dapr Extension Docs](https://docs.microsoft.com/azure/aks/dapr).

@@ -36,7 +36,7 @@ The Dapr SDKs are the easiest way for you to get Dapr into your application. Cho
 | [Java]({{< ref java >}}) | Stable | ✔ | Spring Boot | ✔ |
 | [Go]({{< ref go >}}) | Stable | ✔ | ✔ | ✔ |
 | [PHP]({{< ref php >}}) | Stable | ✔ | ✔ | ✔ |
-| [Javascript](https://github.com/dapr/js-sdk) | Stable| ✔ | | ✔ |
+| [Javascript]({{< ref js >}}) | Stable| ✔ | | ✔ |
 | [C++](https://github.com/dapr/cpp-sdk) | In development | ✔ | |
 | [Rust](https://github.com/dapr/rust-sdk) | In development | ✔ | | |

@@ -17,11 +17,11 @@ The [Dapr Quickstarts](https://github.com/dapr/quickstarts/tree/v1.5.0) are a co

 | Quickstart | Description |
 |------------|-------------|
-| [Hello World](https://github.com/dapr/quickstarts/tree/v1.5.0/hello-world) | Demonstrates how to run Dapr locally. Highlights service invocation and state management. |
-| [Hello Kubernetes](https://github.com/dapr/quickstarts/tree/v1.5.0/hello-kubernetes) | Demonstrates how to run Dapr in Kubernetes. Highlights service invocation and state management. |
-| [Distributed Calculator](https://github.com/dapr/quickstarts/tree/v1.5.0/distributed-calculator) | Demonstrates a distributed calculator application that uses Dapr services to power a React web app. Highlights polyglot (multi-language) programming, service invocation and state management. |
-| [Pub/Sub](https://github.com/dapr/quickstarts/tree/v1.5.0/pub-sub) | Demonstrates how to use Dapr to enable pub-sub applications. Uses Redis as a pub-sub component. |
-| [Bindings](https://github.com/dapr/quickstarts/tree/v1.5.0/bindings) | Demonstrates how to use Dapr to create input and output bindings to other components. Uses bindings to Kafka. |
-| [Observability](https://github.com/dapr/quickstarts/tree/v1.5.0/observability) | Demonstrates Dapr tracing capabilities. Uses Zipkin as a tracing component. |
-| [Secret Store](https://github.com/dapr/quickstarts/tree/v1.5.0/secretstore) | Demonstrates the use of Dapr Secrets API to access secret stores. |
+| [Hello World](https://github.com/dapr/quickstarts/tree/v1.6.0/hello-world) | Demonstrates how to run Dapr locally. Highlights service invocation and state management. |
+| [Hello Kubernetes](https://github.com/dapr/quickstarts/tree/v1.6.0/hello-kubernetes) | Demonstrates how to run Dapr in Kubernetes. Highlights service invocation and state management. |
+| [Distributed Calculator](https://github.com/dapr/quickstarts/tree/v1.6.0/distributed-calculator) | Demonstrates a distributed calculator application that uses Dapr services to power a React web app. Highlights polyglot (multi-language) programming, service invocation and state management. |
+| [Pub/Sub](https://github.com/dapr/quickstarts/tree/v1.6.0/pub-sub) | Demonstrates how to use Dapr to enable pub-sub applications. Uses Redis as a pub-sub component. |
+| [Bindings](https://github.com/dapr/quickstarts/tree/v1.6.0/bindings) | Demonstrates how to use Dapr to create input and output bindings to other components. Uses bindings to Kafka. |
+| [Observability](https://github.com/dapr/quickstarts/tree/v1.6.0/observability) | Demonstrates Dapr tracing capabilities. Uses Zipkin as a tracing component. |
+| [Secret Store](https://github.com/dapr/quickstarts/tree/v1.6.0/secretstore) | Demonstrates the use of Dapr Secrets API to access secret stores. |

@@ -2,7 +2,7 @@
 type: docs
 title: "Kubernetes cluster setup"
 linkTitle: "How-to: Setup clusters"
-weight: 80000
+weight: 15000
 description: >
   How to create a Kubernetes cluster
 ---

@@ -22,9 +22,10 @@ For more information on what is deployed to your Kubernetes cluster read the [Ku

 You can install Dapr on any Kubernetes cluster. Here are some helpful links:

+- [Setup KiNd Cluster]({{< ref setup-kind.md >}})
 - [Setup Minikube Cluster]({{< ref setup-minikube.md >}})
 - [Setup Azure Kubernetes Service Cluster]({{< ref setup-aks.md >}})
-- [Setup Google Cloud Kubernetes Engine](https://cloud.google.com/kubernetes-engine/docs/quickstart)
+- [Setup Google Cloud Kubernetes Engine](https://docs.dapr.io/operations/hosting/kubernetes/cluster/setup-gke/)
 - [Setup Amazon Elastic Kubernetes Service](https://docs.aws.amazon.com/eks/latest/userguide/getting-started.html)

 {{% alert title="Hybrid clusters" color="primary" %}}

@@ -75,4 +75,4 @@ By default, tailing is set to /var/log/containers/*.log. To change this setting,
 * [Telemetry Data Platform](https://newrelic.com/platform/telemetry-data-platform)
 * [New Relic Logging](https://github.com/newrelic/helm-charts/tree/master/charts/newrelic-logging)
 * [Types of New Relic API keys](https://docs.newrelic.com/docs/apis/intro-apis/new-relic-api-keys/)
-* [Alerts and Applied Intelligence](https://docs.newrelic.com/docs/alerts-applied-intelligence)
+* [Alerts and Applied Intelligence](https://docs.newrelic.com/docs/alerts-applied-intelligence/new-relic-alerts/learn-alerts/alerts-ai-transition-guide-2022/)

@@ -40,4 +40,4 @@ This document explains how to install it in your cluster, either using a Helm ch
 * [Telemetry Data Platform](https://newrelic.com/platform/telemetry-data-platform)
 * [New Relic Prometheus OpenMetrics Integration](https://github.com/newrelic/helm-charts/tree/master/charts/nri-prometheus)
 * [Types of New Relic API keys](https://docs.newrelic.com/docs/apis/intro-apis/new-relic-api-keys/)
-* [Alerts and Applied Intelligence](https://docs.newrelic.com/docs/alerts-applied-intelligence)
+* [Alerts and Applied Intelligence](https://docs.newrelic.com/docs/alerts-applied-intelligence/new-relic-alerts/learn-alerts/alerts-ai-transition-guide-2022/)

@@ -83,7 +83,7 @@ spec:

 #### Production

-Jaeger uses Elasticsearch as the backend storage, and you can create a secret in k8s cluster to access Elasticsearch server with access control. See [Configuring and Deploying Jaeger](https://docs.openshift.com/container-platform/4.9/distr_tracing/distr_tracing_install/distr-tracing-deploying.html)
+Jaeger uses Elasticsearch as the backend storage, and you can create a secret in the k8s cluster to access the Elasticsearch server with access control.

 ```shell
 kubectl create secret generic jaeger-secret --from-literal=ES_PASSWORD='xxx' --from-literal=ES_USERNAME='xxx' -n ${NAMESPACE}

@@ -53,7 +53,7 @@ Similarly to the OpenTelemetry instrumentation, you can also leverage a New Reli

 In case Dapr and your applications run in the context of a Kubernetes environment, you can enable additional metrics and logs.

-The easiest way to install the New Relic Kubernetes integration is to use the [automated installer](https://one.newrelic.com/launcher/nr1-core.settings?pane=eyJuZXJkbGV0SWQiOiJrOHMtY2x1c3Rlci1leHBsb3Jlci1uZXJkbGV0Lms4cy1zZXR1cCJ9) to generate a manifest. It bundles not just the integration DaemonSets, but also other New Relic Kubernetes configurations, like [Kubernetes events](https://docs.newrelic.com/docs/integrations/kubernetes-integration/kubernetes-events/install-kubernetes-events-integration), [Prometheus OpenMetrics](https://docs.newrelic.com/docs/integrations/prometheus-integrations/get-started/send-prometheus-metric-data-new-relic/), and [New Relic log monitoring](https://docs.newrelic.com/docs/logs).
+The easiest way to install the New Relic Kubernetes integration is to use the [automated installer](https://one.newrelic.com/launcher/nr1-core.settings?pane=eyJuZXJkbGV0SWQiOiJrOHMtY2x1c3Rlci1leHBsb3Jlci1uZXJkbGV0Lms4cy1zZXR1cCJ9) to generate a manifest. It bundles not just the integration DaemonSets, but also other New Relic Kubernetes configurations, like [Kubernetes events](https://docs.newrelic.com/docs/integrations/kubernetes-integration/kubernetes-events/install-kubernetes-events-integration), [Prometheus OpenMetrics](https://docs.newrelic.com/docs/integrations/prometheus-integrations/get-started/send-prometheus-metric-data-new-relic/), and [New Relic log monitoring](https://docs.newrelic.com/docs/logs/ui-data/use-logs-ui/).

 ### New Relic Kubernetes Cluster Explorer

@@ -101,7 +101,7 @@ And the exact same dashboard templates from Dapr can be imported to visualize Da

 ## New Relic Alerts

-All the data that is collected from Dapr, Kubernetes or any services that run on top of can be used to set-up alerts and notifications into the preferred channel of your choice. See [Alerts and Applied Intelligence](https://docs.newrelic.com/docs/alerts-applied-intelligence).
+All the data that is collected from Dapr, Kubernetes or any services that run on top of them can be used to set up alerts and notifications in the preferred channel of your choice. See [Alerts and Applied Intelligence](https://docs.newrelic.com/docs/alerts-applied-intelligence/new-relic-alerts/learn-alerts/alerts-ai-transition-guide-2022/).

 ## Related Links/References

@@ -111,4 +111,4 @@ All the data that is collected from Dapr, Kubernetes or any services that run on
 * [New Relic Trace API](https://docs.newrelic.com/docs/distributed-tracing/trace-api/introduction-trace-api/)
 * [Types of New Relic API keys](https://docs.newrelic.com/docs/apis/intro-apis/new-relic-api-keys/)
 * [New Relic OpenTelemetry User Experience](https://blog.newrelic.com/product-news/opentelemetry-user-experience/)
-* [Alerts and Applied Intelligence](https://docs.newrelic.com/docs/alerts-applied-intelligence)
+* [Alerts and Applied Intelligence](https://docs.newrelic.com/docs/alerts-applied-intelligence/new-relic-alerts/learn-alerts/alerts-ai-transition-guide-2022/)

@@ -62,6 +62,7 @@ kubectl rollout restart statefulsets/dapr-placement-server -n <DAPR_NAMESPACE>
 *Note: the control plane Sidecar Injector service does not need to be redeployed*
+
 ### Disabling mTLS with Helm
 *The control plane will continue to use mTLS*

 ```bash
 kubectl create ns dapr-system

@@ -74,6 +75,7 @@ helm install \
 ```
+
 ### Disabling mTLS with the CLI
 *The control plane will continue to use mTLS*

 ```
 dapr init --kubernetes --enable-mtls=false

@@ -86,11 +88,14 @@ In order to view the Sentry service logs, run the following command:
 ```
 kubectl logs --selector=app=dapr-sentry --namespace <DAPR_NAMESPACE>
 ```

 ### Bringing your own certificates

 Using Helm, you can provide the PEM encoded root cert, issuer cert and private key that will be populated into the Kubernetes secret used by the Sentry service.

+{{% alert title="Avoiding downtime" color="warning" %}}
+To avoid downtime when rotating expiring certificates, always sign your certificates with the same private root key.
+{{% /alert %}}
 _Note: This example uses the OpenSSL command line tool, which is a widely distributed package, easily installed on Linux via the package manager. On Windows OpenSSL can be installed [using chocolatey](https://community.chocolatey.org/packages/openssl). On MacOS it can be installed using brew: `brew install openssl`_

 Create config files for generating the certificates; this is necessary for generating v3 certificates with the SAN (Subject Alt Name) extension fields. First save the following to a file named `root.conf`:

@@ -125,6 +130,7 @@ basicConstraints = critical, CA:true, pathlen:0
 Run the following to generate the root cert and key
+
 ```bash
 # skip the following line to reuse an existing root key, required for rotating expiring certificates
 openssl ecparam -genkey -name prime256v1 | openssl ec -out root.key
 openssl req -new -nodes -sha256 -key root.key -out root.csr -config root.conf -extensions v3_req
 openssl x509 -req -sha256 -days 365 -in root.csr -signkey root.key -outform PEM -out root.pem -extfile root.conf -extensions v3_req

@@ -133,6 +139,7 @@ openssl x509 -req -sha256 -days 365 -in root.csr -signkey root.key -outform PEM
 Next run the following to generate the issuer cert and key:
+
 ```bash
 # skip the following line to reuse an existing issuer key, required for rotating expiring certificates
 openssl ecparam -genkey -name prime256v1 | openssl ec -out issuer.key
 openssl req -new -sha256 -key issuer.key -out issuer.csr -config issuer.conf -extensions v3_req
 openssl x509 -req -in issuer.csr -CA root.pem -CAkey root.key -CAcreateserial -outform PEM -out issuer.pem -days 365 -sha256 -extfile issuer.conf -extensions v3_req

@@ -156,24 +163,99 @@ helm install \

 If the Root or Issuer certs are about to expire, you can update them and restart the required system services.

+{{% alert title="Avoiding downtime when rotating certificates" color="warning" %}}
+To avoid downtime when rotating expiring certificates your new certificates must be signed with the same private root key as the previous certificates. This is not currently possible using self-signed certificates generated by Dapr.
+{{% /alert %}}
+
+#### Dapr-generated self-signed certificates
+
+1. Clear the existing Dapr Trust Bundle secret by saving the following YAML to a file (e.g. `clear-trust-bundle.yaml`) and applying this secret.
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: dapr-trust-bundle
+  labels:
+    app: dapr-sentry
+data:
+```
+
+```bash
+kubectl apply -f clear-trust-bundle.yaml -n <DAPR_NAMESPACE>
+```
+
+2. Restart the Dapr Sentry service. This will generate a new certificate bundle and update the `dapr-trust-bundle` Kubernetes secret.
+
+```bash
+kubectl rollout restart -n <DAPR_NAMESPACE> deployment/dapr-sentry
+```
+
+3. Once the Sentry service has been restarted, restart the rest of the Dapr control plane to pick up the new Dapr Trust Bundle.
+
+```bash
+kubectl rollout restart deploy/dapr-operator -n <DAPR_NAMESPACE>
+kubectl rollout restart statefulsets/dapr-placement-server -n <DAPR_NAMESPACE>
+```
+
+4. Restart your Dapr applications to pick up the latest trust bundle.
+
+{{% alert title="Potential application downtime with mTLS enabled." color="warning" %}}
+Restarts of deployments that use service-to-service invocation with mTLS will fail until the callee service has also been restarted (thereby loading the new Dapr Trust Bundle). Additionally, the placement service will not be able to assign new actors (while existing actors remain unaffected) until applications have been restarted to load the new Dapr Trust Bundle.
+{{% /alert %}}
+
+```bash
+kubectl rollout restart deployment/mydaprservice1 deployment/myotherdaprservice2
+```
+
+#### Custom certificates (bring your own)
+
+First, issue new certificates using the steps above in [Bringing your own certificates](#bringing-your-own-certificates).
+
-Now that you have the new certificates, you can update the Kubernetes secret that holds them.
-Edit the Kubernetes secret:
+Now that you have the new certificates, use Helm to upgrade the certificates:

 ```bash
+helm upgrade \
+  --set-file dapr_sentry.tls.issuer.certPEM=issuer.pem \
+  --set-file dapr_sentry.tls.issuer.keyPEM=issuer.key \
+  --set-file dapr_sentry.tls.root.certPEM=root.pem \
+  --namespace dapr-system \
+  dapr \
+  dapr/dapr
+```
+
+Alternatively, you can update the Kubernetes secret that holds them:
+
+```bash
 kubectl edit secret dapr-trust-bundle -n <DAPR_NAMESPACE>
 ```

 Replace the `ca.crt`, `issuer.crt` and `issuer.key` keys in the Kubernetes secret with their corresponding values from the new certificates.
 *__Note: The values must be base64 encoded__*

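For example, the encoded values can be produced with the `base64` tool (a sketch; `-w 0` disables line wrapping on GNU base64 and is not needed on macOS):

```bash
base64 -w 0 root.pem     # value for ca.crt
base64 -w 0 issuer.pem   # value for issuer.crt
base64 -w 0 issuer.key   # value for issuer.key
```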
-If you signed the new cert root with a different private key, restart all Dapr-enabled pods.
+If you signed the new cert root with the **same private key** the Dapr Sentry service will pick up the new certificates automatically. You can restart your application deployments using `kubectl rollout restart` with zero downtime. It is not necessary to restart all deployments at once, as long as deployments are restarted before the original certificate expiration.
+
+If you signed the new cert root with a **different private key**, you must restart the Dapr Sentry service, followed by the remainder of the Dapr control plane services.
+
+```bash
+kubectl rollout restart deploy/dapr-sentry -n <DAPR_NAMESPACE>
+```
+
+Once Sentry has been completely restarted, run:
+
+```bash
+kubectl rollout restart deploy/dapr-operator -n <DAPR_NAMESPACE>
+kubectl rollout restart statefulsets/dapr-placement-server -n <DAPR_NAMESPACE>
+```
+
+Next, you must restart all Dapr-enabled pods.
+The recommended way to do this is to perform a rollout restart of your deployment:
+
+```
+kubectl rollout restart deploy/myapp
+```
+
+You will experience potential downtime due to mismatching certificates until all deployments have successfully been restarted (and hence loaded the new Dapr certificates).
+
 ### Kubernetes video demo
 Watch this video to see how to update mTLS certificates on Kubernetes

@@ -310,9 +392,15 @@ Place `ca.crt`, `issuer.crt` and `issuer.key` in a desired path (`$HOME/.dapr/ce

 If the Root or Issuer certs are about to expire, you can update them and restart the required system services.

-First, issue new certificates using the step above in [Bringing your own certificates](#bringing-your-own-certificates).
+To have Dapr generate new certificates, delete the existing certificates at `$HOME/.dapr/certs` and restart the sentry service to generate new certificates.
+
+```bash
+./sentry --issuer-credentials $HOME/.dapr/certs --trust-domain cluster.local --config=./config.yaml
+```
+
+To replace with your own certificates, first generate new certificates using the step above in [Bringing your own certificates](#bringing-your-own-certificates).

 Copy `ca.crt`, `issuer.crt` and `issuer.key` to the filesystem path of every configured system service, and restart the process or container.
-By default, system services will look for the credentials in `/var/run/dapr/credentials`.
+By default, system services will look for the credentials in `/var/run/dapr/credentials`. The examples above use `$HOME/.dapr/certs` as a custom location.

-*Note:If you signed the cert root with a different private key, restart the Dapr instances.*
+*Note: If you signed the cert root with a different private key, restart the Dapr instances.*

@@ -41,13 +41,14 @@ The table below shows the versions of Dapr releases that have been tested togeth
 | Jun 16th 2021 | 1.2.2</br> | 1.2.0 | Java 1.1.0 </br>Go 1.1.0 </br>PHP 1.1.0 </br>Python 1.1.0 </br>.NET 1.2.0 | 0.6.0 | Unsupported |
 | Jul 26th 2021 | 1.3</br> | 1.3.0 | Java 1.2.0 </br>Go 1.2.0 </br>PHP 1.1.0 </br>Python 1.2.0 </br>.NET 1.3.0 | 0.7.0 | Unsupported |
 | Sep 14th 2021 | 1.3.1</br> | 1.3.0 | Java 1.2.0 </br>Go 1.2.0 </br>PHP 1.1.0 </br>Python 1.2.0 </br>.NET 1.3.0 | 0.7.0 | Unsupported |
-| Sep 15th 2021 | 1.4</br> | 1.4.0 | Java 1.3.0 </br>Go 1.2.0 </br>PHP 1.1.0 </br>Python 1.3.0 </br>.NET 1.4.0 | 0.8.0 | Supported |
-| Sep 22nd 2021 | 1.4.1</br> | 1.4.0 | Java 1.3.0 </br>Go 1.2.0 </br>PHP 1.1.0 </br>Python 1.3.0 </br>.NET 1.4.0 | 0.8.0 | Supported
-| Sep 24th 2021 | 1.4.2</br> | 1.4.0 | Java 1.3.0 </br>Go 1.2.0 </br>PHP 1.1.0 </br>Python 1.3.0 </br>.NET 1.4.0 | 0.8.0 | Supported |
-| Oct 7th 2021 | 1.4.3</br> | 1.4.0 | Java 1.3.0 </br>Go 1.2.0 </br>PHP 1.1.0 </br>Python 1.3.0 </br>.NET 1.4.0 | 0.8.0 | Supported |
-| Dev 6th 2021 | 1.4.4</br> | 1.4.0 | Java 1.3.0 </br>Go 1.2.0 </br>PHP 1.1.0 </br>Python 1.3.0 </br>.NET 1.4.0 | 0.8.0 | Supported |
-| Nov 11th 2021 | 1.5.0</br> | 1.5.0 | Java 1.3.0 </br>Go 1.3.0 </br>PHP 1.1.0 </br>Python 1.4.0 </br>.NET 1.5.0 </br>JS 1.0.2 | 0.9.0 | Supported (current) |
-| Dec 6th 2021 | 1.5.1</br> | 1.5.1 | Java 1.3.0 </br>Go 1.3.0 </br>PHP 1.1.0 </br>Python 1.4.0 </br>.NET 1.5.0 </br>JS 1.0.2 | 0.9.0 | Supported (current) |
+| Sep 15th 2021 | 1.4</br> | 1.4.0 | Java 1.3.0 </br>Go 1.2.0 </br>PHP 1.1.0 </br>Python 1.3.0 </br>.NET 1.4.0 | 0.8.0 | Unsupported |
+| Sep 22nd 2021 | 1.4.1</br> | 1.4.0 | Java 1.3.0 </br>Go 1.2.0 </br>PHP 1.1.0 </br>Python 1.3.0 </br>.NET 1.4.0 | 0.8.0 | Unsupported |
+| Sep 24th 2021 | 1.4.2</br> | 1.4.0 | Java 1.3.0 </br>Go 1.2.0 </br>PHP 1.1.0 </br>Python 1.3.0 </br>.NET 1.4.0 | 0.8.0 | Unsupported |
+| Oct 7th 2021 | 1.4.3</br> | 1.4.0 | Java 1.3.0 </br>Go 1.2.0 </br>PHP 1.1.0 </br>Python 1.3.0 </br>.NET 1.4.0 | 0.8.0 | Unsupported |
+| Dec 6th 2021 | 1.4.4</br> | 1.4.0 | Java 1.3.0 </br>Go 1.2.0 </br>PHP 1.1.0 </br>Python 1.3.0 </br>.NET 1.4.0 | 0.8.0 | Unsupported |
+| Nov 11th 2021 | 1.5.0</br> | 1.5.0 | Java 1.3.0 </br>Go 1.3.0 </br>PHP 1.1.0 </br>Python 1.4.0 </br>.NET 1.5.0 </br>JS 1.0.2 | 0.9.0 | Supported |
+| Dec 6th 2021 | 1.5.1</br> | 1.5.1 | Java 1.3.0 </br>Go 1.3.0 </br>PHP 1.1.0 </br>Python 1.4.0 </br>.NET 1.5.0 </br>JS 1.0.2 | 0.9.0 | Supported |
+| Jan 25th 2022 | 1.6.0</br> | 1.6.0 | Java 1.4.0 </br>Go 1.3.1 </br>PHP 1.1.0 </br>Python 1.5.0 </br>.NET 1.6.0 </br>JS 2.0.0 | 0.9.0 | Supported (current) |

 ## Upgrade paths
 After the 1.0 release of the runtime there may be situations where it is necessary to explicitly upgrade through an additional release to reach the desired target. For example an upgrade from v1.0 to v1.2 may need to pass through v1.1

@@ -63,20 +64,27 @@ General guidance on upgrading can be found for [self hosted mode]({{<ref self-ho
 | | 1.2.2 | 1.3.1 |
 | | 1.3.1 | 1.4.4 |
 | | 1.4.4 | 1.5.1 |
+| | 1.5.1 | 1.6.0 |
 | 1.1.0 to 1.1.2 | N/A | 1.2.2 |
 | | 1.2.2 | 1.3.1 |
 | | 1.3.1 | 1.4.4 |
 | | 1.4.4 | 1.5.1 |
+| | 1.5.1 | 1.6.0 |
 | 1.2.0 to 1.2.2 | N/A | 1.3.1 |
 | | 1.3.1 | 1.4.4 |
 | | 1.4.4 | 1.5.1 |
+| | 1.5.1 | 1.6.0 |
 | 1.3.0 | N/A | 1.3.1 |
 | | 1.3.1 | 1.4.4 |
 | | 1.4.4 | 1.5.1 |
+| | 1.5.1 | 1.6.0 |
 | 1.3.1 | N/A | 1.4.4 |
-| | 1.4.4 | 1.5.0 |
+| | 1.4.4 | 1.5.1 |
+| | 1.5.1 | 1.6.0 |
 | 1.4.0 to 1.4.2 | N/A | 1.4.4 |
 | | 1.4.4 | 1.5.1 |
+| | 1.5.1 | 1.6.0 |
+| 1.5.0 to 1.5.1 | N/A | 1.6.0 |

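For instance, moving a Kubernetes installation from 1.3.1 to 1.6.0 means upgrading through 1.4.4 and 1.5.1 first. With Helm, each hop is a single upgrade command (a sketch; the release name `dapr` and namespace `dapr-system` are assumptions):

```bash
helm repo update
helm upgrade dapr dapr/dapr --version 1.4.4 --namespace dapr-system --wait
helm upgrade dapr dapr/dapr --version 1.5.1 --namespace dapr-system --wait
helm upgrade dapr dapr/dapr --version 1.6.0 --namespace dapr-system --wait
```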
 ## Feature and deprecations
 There is a process for announcing feature deprecations. Deprecations are applied two (2) releases after the release in which they were announced. For example Feature X is announced to be deprecated in the 1.0.0 release notes and will then be removed in 1.2.0.

@@ -95,9 +103,14 @@ After announcing a future breaking change, the change will happen in 2 releases
 ## Upgrade on Hosting platforms
 Dapr can support multiple hosting platforms for production. With the 1.0 release the two supported platforms are Kubernetes and physical machines. For Kubernetes upgrades see [Production guidelines on Kubernetes]({{< ref kubernetes-production.md >}})

-### Supported Kubernetes versions
+### Supported versions of dependencies
+Below is a list of software that the latest version of Dapr (v{{% dapr-latest-version long="true" %}}) has been tested against.

-Dapr follows [Kubernetes Version Skew Policy](https://kubernetes.io/releases/version-skew-policy).
+| Dependency | Supported Version |
+|------------|-------------------|
+| Kubernetes | Dapr support for Kubernetes is aligned with [Kubernetes Version Skew Policy](https://kubernetes.io/releases/version-skew-policy/) |
+| [Open Telemetry collector (OTEL)](https://github.com/open-telemetry/opentelemetry-collector/releases) | v0.4.0 |
+| [Prometheus](https://prometheus.io/download/) | v2.28 |

 ## Related links
 * Read the [Versioning policy]({{< ref support-versioning.md >}})

@ -64,6 +64,32 @@ In order to further diagnose any issue, check the logs of the Dapr sidecar injec
*Note: If you installed Dapr to a different namespace, replace dapr-system above with the desired namespace*

If you are deploying Dapr on Amazon EKS and using an overlay network such as Calico, you will need to set the `hostNetwork` parameter to `true`; this is a limitation of EKS with such CNIs.

You can set this parameter using a Helm `values.yaml` file:

```
helm upgrade --install dapr dapr/dapr \
  --namespace dapr-system \
  --create-namespace \
  --values values.yaml
```

`values.yaml`
```yaml
dapr_sidecar_injector:
  hostNetwork: true
```

or using the command line:

```
helm upgrade --install dapr dapr/dapr \
  --namespace dapr-system \
  --create-namespace \
  --set dapr_sidecar_injector.hostNetwork=true
```

## My pod is in CrashLoopBackoff or another failed state due to the daprd sidecar

If the Dapr sidecar (`daprd`) is taking too long to initialize, this might be surfaced as a failing health check by Kubernetes.
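A quick way to confirm this is to inspect the sidecar's logs and the pod's events directly (a sketch; replace the pod name with your own — `daprd` is the standard name of the Dapr sidecar container):

```shell
# Tail the Dapr sidecar logs for the failing pod
kubectl logs <pod-name> -c daprd

# Look for failing health probes in the pod's events
kubectl describe pod <pod-name>
```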
@ -549,7 +549,7 @@ curl -X POST http://localhost:3000/actors/stormtrooper/50/method/performAction \
### Invoke reminder

Invokes a reminder for an actor with the specified reminderName. If the actor is not already running, the app side should [activate](#activating-an-actor) it.

#### HTTP Request
@ -587,7 +587,7 @@ curl -X POST http://localhost:3000/actors/stormtrooper/50/method/remind/checkReb
### Invoke timer

Invokes a timer for an actor with the specified timerName. If the actor is not already running, the app side should [activate](#activating-an-actor) it.

#### HTTP Request
@ -51,7 +51,7 @@ The `Content-Type` header tells Dapr which content type your data adheres to whe
The value of the `Content-Type` header populates the `datacontenttype` field in the CloudEvent.
Unless specified, Dapr assumes `text/plain`. If your content type is JSON, use a `Content-Type` header with the value of `application/json`.

If you want to send your own custom CloudEvent, use the `application/cloudevents+json` value for the `Content-Type` header.
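As an illustration, a publish request carrying a custom CloudEvent could look like the following sketch (`mypubsub` and `orders` are placeholder component and topic names, and the event fields are example values):

```shell
curl -X POST http://localhost:3500/v1.0/publish/mypubsub/orders \
  -H "Content-Type: application/cloudevents+json" \
  -d '{
        "specversion": "1.0",
        "type": "com.example.order.created",
        "source": "checkout-service",
        "id": "7f2c8b3a",
        "datacontenttype": "application/json",
        "data": { "orderId": "100" }
      }'
```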
#### Metadata
@ -333,36 +333,34 @@ An array of JSON-encoded values
curl http://localhost:3500/v1.0-alpha1/state/myStore/query \
  -H "Content-Type: application/json" \
  -d '{
    "query": {
      "filter": {
        "OR": [
          {
            "EQ": { "value.person.org": "Dev Ops" }
          },
          {
            "AND": [
              {
                "EQ": { "value.person.org": "Finance" }
              },
              {
                "IN": { "value.state": [ "CA", "WA" ] }
              }
            ]
          }
        ]
      },
      "sort": [
        {
          "key": "value.state",
          "order": "DESC"
        },
        {
          "key": "value.person.id"
        }
      ],
      "page": {
        "limit": 3
      }
    }
  }'
```
@ -21,7 +21,7 @@ This table is meant to help users understand the equivalent options for running
| `--control-plane-address` | not supported | | not supported | Address for a Dapr control plane |
| `--dapr-grpc-port` | `--dapr-grpc-port` | | not supported | gRPC port for the Dapr API to listen on (default "50001") |
| `--dapr-http-port` | `--dapr-http-port` | | not supported | The HTTP port for the Dapr API |
| `--dapr-http-max-request-size` | `--dapr-http-max-request-size` | | `dapr.io/http-max-request-size` | Increases the maximum request body size, in MB, for the HTTP and gRPC servers to handle uploading of large files. Default is `4` MB |
| not supported | `--image` | | `dapr.io/sidecar-image` | Dapr sidecar image. Default is `daprio/daprd:latest` |
| `--internal-grpc-port` | not supported | | not supported | gRPC port for the Dapr Internal API to listen on |
| `--enable-metrics` | not supported | | configuration spec | Enable prometheus metrics (default true) |
@ -38,6 +38,7 @@ This table is meant to help users understand the equivalent options for running
| `--app-protocol` | `--app-protocol` | `-P` | `dapr.io/app-protocol` | Tells Dapr which protocol your application is using. Valid options are `http` and `grpc`. Default is `http` |
| `--sentry-address` | `--sentry-address` | | not supported | Address for the Sentry CA service |
| `--version` | `--version` | `-v` | not supported | Prints the runtime version |
| `--dapr-graceful-shutdown-seconds` | not supported | | `dapr.io/graceful-shutdown-seconds` | Graceful shutdown duration in seconds for Dapr, the maximum duration before forced shutdown when waiting for all in-progress requests to complete. Defaults to `5`. If you are running in Kubernetes mode, this value should not be larger than the Kubernetes termination grace period, whose default value is `30`. |
| not supported | not supported | | `dapr.io/enabled` | Setting this parameter to true injects the Dapr sidecar into the pod |
| not supported | not supported | | `dapr.io/api-token-secret` | Tells Dapr which Kubernetes secret to use for token based API authentication. By default this is not set |
| `--dapr-listen-addresses` | not supported | | `dapr.io/sidecar-listen-addresses` | Comma separated list of IP addresses that the sidecar will listen to. Defaults to all in standalone mode. Defaults to `[::1],127.0.0.1` in Kubernetes. To listen to all IPv4 addresses, use `0.0.0.0`. To listen to all IPv6 addresses, use `[::]`. |
@ -74,12 +74,12 @@ Table captions:
| Name | Input<br>Binding | Output<br>Binding | Status | Component version | Since |
|------|:----------------:|:-----------------:|--------| --------- | ---------- |
| [Azure Blob Storage]({{< ref blobstorage.md >}}) | | ✅ | Beta | v1 | 1.0 |
| [Azure CosmosDB]({{< ref cosmosdb.md >}}) | | ✅ | Beta | v1 | 1.0 |
| [Azure CosmosDBGremlinAPI]({{< ref cosmosdbgremlinapi.md >}}) | | ✅ | Alpha | v1 | 1.5 |
| [Azure Event Grid]({{< ref eventgrid.md >}}) | ✅ | ✅ | Alpha | v1 | 1.0 |
| [Azure Event Hubs]({{< ref eventhubs.md >}}) | ✅ | ✅ | Beta | v1 | 1.0 |
| [Azure Service Bus Queues]({{< ref servicebusqueues.md >}}) | ✅ | ✅ | Beta | v1 | 1.0 |
| [Azure SignalR]({{< ref signalr.md >}}) | | ✅ | Alpha | v1 | 1.0 |
| [Azure Storage Queues]({{< ref storagequeues.md >}}) | ✅ | ✅ | Stable | v1 | 1.0 |
@ -38,7 +38,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr
|--------------------|:--------:|--------|---------|---------|
| url | Y | Output | The CosmosDBGremlinAPI url | `"wss://******.gremlin.cosmos.azure.com:443/"` |
| masterKey | Y | Output | The CosmosDBGremlinAPI account master key | `"masterKey"` |
| database | Y | Output | The database of the CosmosDBGremlinAPI | `"/dbs/<database_name>/colls/<graph_name>"` |
| username | Y | Output | The username of the CosmosDBGremlinAPI database | `"username"` |

For more information see [Quickstart: Azure Cosmos Graph DB using Gremlin](https://docs.microsoft.com/azure/cosmos-db/graph/create-graph-console).
@ -48,6 +48,17 @@ This component supports **output binding** with the following operations:
- `query`

## Request payload sample

```json
{
  "data": {
    "gremlin": "g.V().count()"
  },
  "operation": "query"
}
```
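To try the payload end to end, it can be posted to Dapr's output bindings API (a sketch; `mygremlin` is a placeholder component name):

```shell
curl -X POST http://localhost:3500/v1.0/bindings/mygremlin \
  -H "Content-Type: application/json" \
  -d '{
        "data": { "gremlin": "g.V().count()" },
        "operation": "query"
      }'
```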
## Related links

- [Basic schema for a Dapr component]({{< ref component-schema >}})
@ -24,19 +24,19 @@ Table captions:
|-------------------------------------------------------|--------| -----| ------------- |
| [Apache Kafka]({{< ref setup-apache-kafka.md >}}) | Stable | v1 | 1.5 |
| [Hazelcast]({{< ref setup-hazelcast.md >}}) | Alpha | v1 | 1.0 |
| [MQTT]({{< ref setup-mqtt.md >}}) | Beta | v1 | 1.6 |
| [NATS Streaming]({{< ref setup-nats-streaming.md >}}) | Beta | v1 | 1.0 |
| [In Memory]({{< ref setup-inmemory.md >}}) | Alpha | v1 | 1.4 |
| [JetStream]({{< ref setup-jetstream.md >}}) | Alpha | v1 | 1.4 |
| [Pulsar]({{< ref setup-pulsar.md >}}) | Alpha | v1 | 1.0 |
| [RabbitMQ]({{< ref setup-rabbitmq.md >}}) | Beta | v1 | 1.6 |
| [Redis Streams]({{< ref setup-redis-pubsub.md >}}) | Stable | v1 | 1.0 |

### Amazon Web Services (AWS)

| Name | Status | Component version | Since |
|---------------------------------------------------|--------| ---- | --------------|
| [AWS SNS/SQS]({{< ref setup-aws-snssqs.md >}}) | Beta | v1 | 1.6 |

### Google Cloud Platform (GCP)
@ -48,5 +48,5 @@ Table captions:
| Name | Status | Component version | Since |
|-----------------------------------------------------------|--------| ----------------| -- |
| [Azure Event Hubs]({{< ref setup-azure-eventhubs.md >}}) | Beta | v1 | 1.6 |
| [Azure Service Bus]({{< ref setup-azure-servicebus.md >}})| Stable | v1 | 1.0 |
@ -27,11 +27,11 @@ spec:
    value: "group1"
  - name: clientID # Optional. Used as client tracing ID by Kafka brokers.
    value: "my-dapr-app-id"
  - name: authType # Required.
    value: "password"
  - name: saslUsername # Required if authType is `password`.
    value: "adminuser"
  - name: saslPassword # Required if authType is `password`.
    secretKeyRef:
      name: kafka-secrets
      key: saslPasswordSecret
@ -50,22 +50,159 @@ spec:
| brokers | Y | A comma-separated list of Kafka brokers. | `"localhost:9092,dapr-kafka.myapp.svc.cluster.local:9093"`
| consumerGroup | N | A kafka consumer group to listen on. Each record published to a topic is delivered to one consumer within each consumer group subscribed to the topic. | `"group1"`
| clientID | N | A user-provided string sent with every request to the Kafka brokers for logging, debugging, and auditing purposes. Defaults to `"sarama"`. | `"my-dapr-app"`
| authRequired | N | *Deprecated* Enable [SASL](https://en.wikipedia.org/wiki/Simple_Authentication_and_Security_Layer) authentication with the Kafka brokers. | `"true"`, `"false"`
| authType | Y | Configure or disable authentication. Supported values: `none`, `password`, `mtls`, or `oidc` | `"password"`, `"none"`
| saslUsername | N | The SASL username used for authentication. Only required if `authType` is set to `"password"`. | `"adminuser"`
| saslPassword | N | The SASL password used for authentication. Can be `secretKeyRef` to use a [secret reference]({{< ref component-secrets.md >}}). Only required if `authType` is set to `"password"`. | `""`, `"KeFg23!"`
| initialOffset | N | The initial offset to use if no offset was previously committed. Should be "newest" or "oldest". Defaults to "newest". | `"oldest"`
| maxMessageBytes | N | The maximum size in bytes allowed for a single Kafka message. Defaults to 1024. | `2048`
| consumeRetryInterval | N | The interval between retries when attempting to consume topics. Treats numbers without suffix as milliseconds. Defaults to 100ms. | `200ms`
| version | N | Kafka cluster version. Defaults to 2.0.0.0 | `0.10.2.0`
| caCert | N | Certificate authority certificate, required for using TLS. Can be `secretKeyRef` to use a secret reference | `"-----BEGIN CERTIFICATE-----\n<base64-encoded DER>\n-----END CERTIFICATE-----"`
| clientCert | N | Client certificate, required for `authType` `mtls`. Can be `secretKeyRef` to use a secret reference | `"-----BEGIN CERTIFICATE-----\n<base64-encoded DER>\n-----END CERTIFICATE-----"`
| clientKey | N | Client key, required for `authType` `mtls`. Can be `secretKeyRef` to use a secret reference | `"-----BEGIN RSA PRIVATE KEY-----\n<base64-encoded PKCS8>\n-----END RSA PRIVATE KEY-----"`
| skipVerify | N | Skip TLS verification; this is not recommended for use in production. Defaults to `"false"` | `"true"`, `"false"` |
| disableTls | N | Disable TLS for transport security. This is not recommended for use in production. Defaults to `"false"` | `"true"`, `"false"` |
| oidcTokenEndpoint | N | Full URL to an OAuth2 identity provider access token endpoint. Required when `authType` is set to `oidc` | `"https://identity.example.com/v1/token"` |
| oidcClientID | N | The OAuth2 client ID that has been provisioned in the identity provider. Required when `authType` is set to `oidc` | `"dapr-kafka"` |
| oidcClientSecret | N | The OAuth2 client secret that has been provisioned in the identity provider. Required when `authType` is set to `oidc` | `"KeFg23!"` |
| oidcScopes | N | Comma-delimited list of OAuth2/OIDC scopes to request with the access token. Recommended when `authType` is set to `oidc`. Defaults to `"openid"` | `"openid,kafka-prod"` |
### Authentication

Kafka supports a variety of authentication schemes and Dapr supports several: SASL password, mTLS, and OIDC/OAuth2. With the added authentication methods, the `authRequired` field has been deprecated from the v1.6 release; the `authType` field should be used instead. If `authRequired` is set to `true`, Dapr will attempt to configure `authType` correctly based on the value of `saslPassword`. There are four valid values for `authType`: `none`, `password`, `mtls`, and `oidc`. Note this is authentication only; authorization is still configured within Kafka.
#### None

Setting `authType` to `none` will disable any authentication. This is *NOT* recommended in production.

```yaml
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: kafka-pubsub-noauth
  namespace: default
spec:
  type: pubsub.kafka
  version: v1
  metadata:
  - name: brokers # Required. Kafka broker connection setting
    value: "dapr-kafka.myapp.svc.cluster.local:9092"
  - name: consumerGroup # Optional. Used for input bindings.
    value: "group1"
  - name: clientID # Optional. Used as client tracing ID by Kafka brokers.
    value: "my-dapr-app-id"
  - name: authType # Required.
    value: "none"
  - name: maxMessageBytes # Optional.
    value: 1024
  - name: consumeRetryInterval # Optional.
    value: 200ms
  - name: version # Optional.
    value: 0.10.2.0
  - name: disableTls
    value: "true"
```
#### SASL Password

Setting `authType` to `password` enables [SASL](https://en.wikipedia.org/wiki/Simple_Authentication_and_Security_Layer) authentication using the **PLAIN** mechanism. This requires setting the `saslUsername` and `saslPassword` fields.

```yaml
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: kafka-pubsub-sasl
  namespace: default
spec:
  type: pubsub.kafka
  version: v1
  metadata:
  - name: brokers # Required. Kafka broker connection setting
    value: "dapr-kafka.myapp.svc.cluster.local:9092"
  - name: consumerGroup # Optional. Used for input bindings.
    value: "group1"
  - name: clientID # Optional. Used as client tracing ID by Kafka brokers.
    value: "my-dapr-app-id"
  - name: authType # Required.
    value: "password"
  - name: saslUsername # Required if authType is `password`.
    value: "adminuser"
  - name: saslPassword # Required if authType is `password`.
    secretKeyRef:
      name: kafka-secrets
      key: saslPasswordSecret
  - name: maxMessageBytes # Optional.
    value: 1024
  - name: consumeRetryInterval # Optional.
    value: 200ms
  - name: version # Optional.
    value: 0.10.2.0
  - name: caCert
    secretKeyRef:
      name: kafka-tls
      key: caCert
```
#### Mutual TLS

Setting `authType` to `mtls` uses an x509 client certificate (the `clientCert` field) and key (the `clientKey` field) to authenticate. Note that mTLS as an authentication mechanism is distinct from using TLS to secure the transport layer via encryption. mTLS requires TLS transport (meaning `disableTls` must be `false`), but securing the transport layer does not require using mTLS. See [Communication using TLS](#communication-using-tls) for configuring underlying TLS transport.

```yaml
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: kafka-pubsub-mtls
  namespace: default
spec:
  type: pubsub.kafka
  version: v1
  metadata:
  - name: brokers # Required. Kafka broker connection setting
    value: "dapr-kafka.myapp.svc.cluster.local:9092"
  - name: consumerGroup # Optional. Used for input bindings.
    value: "group1"
  - name: clientID # Optional. Used as client tracing ID by Kafka brokers.
    value: "my-dapr-app-id"
  - name: authType # Required.
    value: "mtls"
  - name: caCert
    secretKeyRef:
      name: kafka-tls
      key: caCert
  - name: clientCert
    secretKeyRef:
      name: kafka-tls
      key: clientCert
  - name: clientKey
    secretKeyRef:
      name: kafka-tls
      key: clientKey
  - name: maxMessageBytes # Optional.
    value: 1024
  - name: consumeRetryInterval # Optional.
    value: 200ms
  - name: version # Optional.
    value: 0.10.2.0
```
#### OAuth2 or OpenID Connect

Setting `authType` to `oidc` enables SASL authentication via the **OAUTHBEARER** mechanism. This supports specifying a bearer token from an external OAuth2 or [OIDC](https://en.wikipedia.org/wiki/OpenID) identity provider. Currently only the **client_credentials** grant is supported. Configure `oidcTokenEndpoint` to the full URL for the identity provider access token endpoint. Set `oidcClientID` and `oidcClientSecret` to the client credentials provisioned in the identity provider. If `caCert` is specified in the component configuration, the certificate is appended to the system CA trust for verifying the identity provider certificate. Similarly, if `skipVerify` is specified in the component configuration, verification will also be skipped when accessing the identity provider. By default, the only scope requested for the token is `openid`; it is **highly** recommended that additional scopes be specified via `oidcScopes` in a comma-separated list and validated by the Kafka broker. If additional scopes are not used to narrow the validity of the access token, a compromised Kafka broker could replay the token to access other services as the Dapr clientID.

```yaml
apiVersion: dapr.io/v1alpha1
@ -83,9 +220,57 @@ spec:
    value: "group1"
  - name: clientID # Optional. Used as client tracing ID by Kafka brokers.
    value: "my-dapr-app-id"
  - name: authType # Required.
    value: "oidc"
  - name: oidcTokenEndpoint # Required if authType is `oidc`.
    value: "https://identity.example.com/v1/token"
  - name: oidcClientID # Required if authType is `oidc`.
    value: "dapr-myapp"
  - name: oidcClientSecret # Required if authType is `oidc`.
    secretKeyRef:
      name: kafka-secrets
      key: oidcClientSecret
  - name: oidcScopes # Recommended if authType is `oidc`.
    value: "openid,kafka-dev"
  - name: caCert # Also applied to verifying OIDC provider certificate
    secretKeyRef:
      name: kafka-tls
      key: caCert
  - name: maxMessageBytes # Optional.
    value: 1024
  - name: consumeRetryInterval # Optional.
    value: 200ms
  - name: version # Optional.
    value: 0.10.2.0
```
### Communication using TLS

By default TLS is enabled to secure the transport layer to Kafka. To disable TLS, set `disableTls` to `true`. When TLS is enabled, you can control server certificate verification using `skipVerify` to disable verification (*NOT* recommended in production environments) and `caCert` to specify a trusted TLS certificate authority (CA). If no `caCert` is specified, the system CA trust will be used. To also configure mTLS authentication, see the section under _Authentication_.
Below is an example of a Kafka pubsub component configured to use transport layer TLS:
```yaml
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: kafka-pubsub
  namespace: default
spec:
  type: pubsub.kafka
  version: v1
  metadata:
  - name: brokers # Required. Kafka broker connection setting
    value: "dapr-kafka.myapp.svc.cluster.local:9092"
  - name: consumerGroup # Optional. Used for input bindings.
    value: "group1"
  - name: clientID # Optional. Used as client tracing ID by Kafka brokers.
    value: "my-dapr-app-id"
  - name: authType # Required.
    value: "password"
  - name: saslUsername # Required if authType is `password`.
    value: "adminuser"
  - name: consumeRetryInterval # Optional.
    value: 200ms
@ -101,21 +286,10 @@ spec:
    secretKeyRef:
      name: kafka-tls
      key: caCert
auth:
  secretStore: <SECRET_STORE_NAME>
```

The `secretKeyRef` above is referencing a [kubernetes secrets store]({{< ref kubernetes-secret-store.md >}}) to access the TLS information. Visit [here]({{< ref setup-secret-store.md >}}) to learn more about how to configure a secret store component.
## Per-call metadata fields

### Partition Key
@ -136,6 +310,20 @@ curl -X POST http://localhost:3500/v1.0/publish/myKafka/myTopic?metadata.partiti
}'
```

### Message headers

All other metadata key/value pairs (that are not `partitionKey`) are set as headers in the Kafka message. Here is an example setting a `correlationId` for the message.

```shell
curl -X POST "http://localhost:3500/v1.0/publish/myKafka/myTopic?metadata.correlationId=myCorrelationID&metadata.partitionKey=key1" \
  -H "Content-Type: application/json" \
  -d '{
        "data": {
          "message": "Hi"
        }
      }'
```

## Create a Kafka instance

{{< tabs "Self-Hosted" "Kubernetes">}}
@ -154,4 +342,4 @@ To run Kafka on Kubernetes, you can use any Kafka operator, such as [Strimzi](ht
## Related links

- [Basic schema for a Dapr component]({{< ref component-schema >}})
- Read [this guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) for instructions on configuring pub/sub components
- [Pub/Sub building block]({{< ref pubsub >}})
@ -26,16 +26,35 @@ spec:
    value: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
  - name: region
    value: "us-east-1"
  # - name: endpoint # Optional.
  #   value: "http://localhost:4566"
  # - name: sessionToken # Optional (mandatory if using AssignedRole, i.e. temporary accessKey and secretKey)
  #   value: "TOKEN"
  # - name: messageVisibilityTimeout # Optional
  #   value: 10
  # - name: messageRetryLimit # Optional
  #   value: 10
  # - name: messageReceiveLimit # Optional
  #   value: 10
  # - name: sqsDeadLettersQueueName # Optional
  #   value: "myapp-dlq"
  # - name: messageWaitTimeSeconds # Optional
  #   value: 1
  # - name: messageMaxNumber # Optional
  #   value: 10
  # - name: fifo # Optional
  #   value: "true"
  # - name: fifoMessageGroupID # Optional
  #   value: "app1-mgi"
  # - name: disableEntityManagement # Optional
  #   value: "false"
  # - name: disableDeleteOnRetryLimit # Optional
  #   value: "false"
  # - name: assetsManagementTimeoutSeconds # Optional
  #   value: 5
```
{{% alert title="Warning" color="warning" %}}
@ -46,22 +65,42 @@ The above example uses secrets as plain strings. It is recommended to use a secr
| Field | Required | Details | Example |
|--------------------|:--------:|---------|---------|
| accessKey | Y | ID of the AWS account/role with appropriate permissions to SNS and SQS (see below) | `"AKIAIOSFODNN7EXAMPLE"`
| secretKey | Y | Secret for the AWS user/role. If using an `AssumeRole` access, you will also need to provide a `sessionToken` |`"wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"`
| region | Y | The AWS region where the SNS/SQS assets are located or will be created. See [this page](https://aws.amazon.com/about-aws/global-infrastructure/regional-product-services/?p=ugi&l=na) for valid regions. Ensure that SNS and SQS are available in that region | `"us-east-1"`
| endpoint | N | AWS endpoint for the component to use. Only used for local development with, for example, [localstack](https://github.com/localstack/localstack). The `endpoint` is unnecessary when running against production AWS | `"http://localhost:4566"`
| sessionToken | N | AWS session token to use. A session token is only required if you are using temporary security credentials | `"TOKEN"`
| messageReceiveLimit | N | Number of times a message can be received, after processing of that message fails, before the message is removed from the queue. If `sqsDeadLettersQueueName` is specified, `messageReceiveLimit` is instead the number of failed receives after which the message is moved to the SQS dead-letters queue. Default: `10` | `10`
| sqsDeadLettersQueueName | N | Name of the dead letters queue for this application | `"myapp-dlq"`
| messageVisibilityTimeout | N | Amount of time in seconds that a message is hidden from receive requests after it is sent to a subscriber. Default: `10` | `10`
| messageRetryLimit | N | Number of times to resend a message after processing of that message fails before removing that message from the queue. Default: `10` | `10`
| messageWaitTimeSeconds | N | The duration (in seconds) for which the call waits for a message to arrive in the queue before returning. If a message is available, the call returns sooner than `messageWaitTimeSeconds`. If no messages are available and the wait time expires, the call returns successfully with an empty list of messages. Default: `1` | `1`
| messageMaxNumber | N | Maximum number of messages to receive from the queue at a time. Default: `10`, Maximum: `10` | `10`
| fifo | N | Use SQS FIFO queues to provide message ordering and deduplication. Default: `"false"`. See further details about [SQS FIFO](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html) | `"true"`, `"false"`
| fifoMessageGroupID | N | If `fifo` is enabled, instructs Dapr to use a custom [Message Group ID](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/using-messagegroupid-property.html) for the pubsub deployment. This is not mandatory as Dapr creates a custom Message Group ID for each producer, thus ensuring ordering of messages per Dapr producer. Default: `""` | `"app1-mgi"`
| disableEntityManagement | N | When set to true, SNS topics, SQS queues and the SQS subscriptions to SNS do not get created automatically. Default: `"false"` | `"true"`, `"false"`
| disableDeleteOnRetryLimit | N | When set to true, after retrying and failing `messageRetryLimit` times to process a message, reset the message visibility timeout so that other consumers can try processing, instead of deleting the message from SQS (the default behavior). Default: `"false"` | `"true"`, `"false"`
| assetsManagementTimeoutSeconds | N | Amount of time in seconds for an AWS asset management operation before it times out and is cancelled. Asset management operations are any operations performed on STS, SNS and SQS, except message publish and consume operations that implement the default Dapr component retry behavior. The value can be set to any non-negative float/integer. Default: `5` | `0.5`, `10`

* Dapr-created SNS topic and SQS queue names conform with [AWS specifications](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/quotas-queues.html). By default, Dapr creates an SQS queue name based on the consumer `app-id`, therefore Dapr might perform name standardization to meet AWS specifications.
* Using SQS FIFO (`fifo` metadata field set to `"true"`), per AWS specifications, provides message ordering and deduplication, but incurs lower SQS processing throughput, among other caveats.
* Be aware that specifying `fifoMessageGroupID` limits the number of concurrent consumers of the FIFO queue to only one, but guarantees global ordering of messages published by the app's Dapr sidecars. See [this post](https://aws.amazon.com/blogs/compute/solving-complex-ordering-challenges-with-amazon-sqs-fifo-queues/) to better understand the topic of Message Group IDs and FIFO queues.
## Create an SNS/SQS instance

{{< tabs "Self-Hosted" "Kubernetes" "AWS" >}}

{{% codetab %}}
For local development, the [localstack project](https://github.com/localstack/localstack) is used to integrate AWS SNS/SQS. Follow the instructions [here](https://github.com/localstack/localstack#running) to run localstack.

To run localstack locally from the command line using Docker, use the following command:
```shell
docker run --rm -it -p 4566:4566 -p 4571:4571 -e SERVICES="sts,sns,sqs" -e AWS_DEFAULT_REGION="us-east-1" localstack/localstack
```

In order to use localstack with your pubsub binding, you need to provide the `endpoint` configuration in the component metadata. The `endpoint` is unnecessary when running against production AWS.
@ -77,9 +116,13 @@ spec:
  type: pubsub.snssqs
  version: v1
  metadata:
  - name: accessKey
    value: "anyString"
  - name: secretKey
    value: "anyString"
  - name: endpoint
    value: http://localhost:4566
  # Use us-east-1 or any other region if provided to localstack as defined by "AWS_DEFAULT_REGION" envvar
  - name: region
    value: us-east-1
```
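Once the component is applied, publishing through the Dapr API should route to localstack (a sketch; `snssqs-pubsub` and `mytopic` are placeholder component and topic names):

```shell
curl -X POST http://localhost:3500/v1.0/publish/snssqs-pubsub/mytopic \
  -H "Content-Type: application/json" \
  -d '{ "status": "completed" }'
```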
@ -133,8 +176,71 @@ spec:
{{% /codetab %}}

{{% codetab %}}
In order to run in AWS, you should create or assign an IAM user with permissions to the SNS and SQS services, with a policy such as:

```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Sid": "YOUR_POLICY_NAME",
      "Effect": "Allow",
      "Action": [
        "sqs:CreateQueue",
        "sqs:DeleteMessage",
        "sqs:ReceiveMessage",
        "sqs:ChangeMessageVisibility",
        "sqs:GetQueueUrl",
        "sqs:GetQueueAttributes",
        "sqs:SetQueueAttributes",
        "sns:CreateTopic",
        "sns:ListSubscriptionsByTopic",
        "sns:Publish",
        "sns:Subscribe",
        "sns:GetTopicAttributes"
      ],
      "Resource": [
        "arn:aws:sns:AWS_REGION:AWS_ACCOUNT_ID:*",
        "arn:aws:sqs:AWS_REGION:AWS_ACCOUNT_ID:*"
      ]
    }
  ]
}
```
Use the `AWS account ID` and `AWS account secret` and plug them into the `accessKey` and `secretKey` in the component metadata using Kubernetes secrets and `secretKeyRef`, as sketched below.
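As a sketch of that wiring, the credentials can be stored in a Kubernetes secret and then referenced from the component via `secretKeyRef` (the secret name and keys here are placeholders):

```shell
kubectl create secret generic aws-secret \
  --from-literal=accessKey=AKIAIOSFODNN7EXAMPLE \
  --from-literal=secretKey=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
```

The component metadata would then reference `name: aws-secret` with `key: accessKey` and `key: secretKey` instead of plain-text values.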
Alternatively, if you want to provision the SNS and SQS assets using your own tool of choice (e.g. Terraform), while preventing Dapr from doing so dynamically, you need to enable `disableEntityManagement` and assign your Dapr-using application an IAM Role with a policy such as:

```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Sid": "YOUR_POLICY_NAME",
      "Effect": "Allow",
      "Action": [
        "sqs:DeleteMessage",
        "sqs:ReceiveMessage",
        "sqs:ChangeMessageVisibility",
        "sqs:GetQueueUrl",
        "sqs:GetQueueAttributes",
        "sns:Publish",
        "sns:ListSubscriptionsByTopic",
        "sns:GetTopicAttributes"
      ],
      "Resource": [
        "arn:aws:sns:AWS_REGION:AWS_ACCOUNT_ID:APP_TOPIC_NAME",
        "arn:aws:sqs:AWS_REGION:AWS_ACCOUNT_ID:APP_ID"
      ]
    }
  ]
}
```

If you are running your applications on an EKS cluster with dynamic assets creation (the default Dapr behavior), make sure the IAM identity available to your application's pods carries the first, creation-permitting policy above.
{{% /codetab %}}

{{< /tabs >}}
@ -9,6 +9,7 @@ aliases:
## Component format

To setup Azure Event Hubs pubsub create a component of type `pubsub.azure.eventhubs`. See [this guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pubsub configuration.
Apart from the configuration metadata fields shown below, Azure Event Hubs also supports [Azure Authentication]({{< ref "authenticating-azure.md" >}}) mechanisms.

```yaml
apiVersion: dapr.io/v1alpha1
@ -20,8 +21,23 @@ spec:
  type: pubsub.azure.eventhubs
  version: v1
  metadata:
  - name: connectionString    # Either connectionString or eventHubNamespace. Should not be used when
                              # Azure Authentication mechanism is used.
    value: "Endpoint=sb://{EventHubNamespace}.servicebus.windows.net/;SharedAccessKeyName={PolicyName};SharedAccessKey={Key};EntityPath={EventHub}"
  - name: eventHubNamespace   # Either connectionString or eventHubNamespace. Should be used when
                              # Azure Authentication mechanism is used.
    value: "namespace"
  - name: enableEntityManagement
    value: "false"
  ## The following four properties are needed only if enableEntityManagement is set to true
  - name: resourceGroupName
    value: "test-rg"
  - name: subscriptionID
    value: "value of Azure subscription ID"
  - name: partitionCount
    value: "1"
  - name: messageRetentionInDays
  ## Subscriber attributes
  - name: storageAccountName
    value: "myeventhubstorage"
  - name: storageAccountKey
@ -38,10 +54,16 @@ The above example uses secrets as plain strings. It is recommended to use a secr
| Field | Required | Details | Example |
|--------------------|:--------:|---------|---------|
| connectionString | Y | Connection string for the Event Hub or the Event Hub namespace. Mutually exclusive with the `eventHubNamespace` field. Not to be used when [Azure Authentication]({{< ref "authenticating-azure.md" >}}) is used | `"Endpoint=sb://{EventHubNamespace}.servicebus.windows.net/;SharedAccessKeyName={PolicyName};SharedAccessKey={Key};EntityPath={EventHub}"` or `"Endpoint=sb://{EventHubNamespace}.servicebus.windows.net/;SharedAccessKeyName={PolicyName};SharedAccessKey={Key}"`
| eventHubNamespace | Y | The Event Hub Namespace name. Mutually exclusive with the `connectionString` field. To be used when [Azure Authentication]({{< ref "authenticating-azure.md" >}}) is used | `"namespace"`
| storageAccountName | Y | Storage account name to use for the EventProcessorHost | `"myeventhubstorage"`
| storageAccountKey | Y | Storage account key to use for the EventProcessorHost. Can be `secretKeyRef` to use a secret reference | `"112233445566778899"`
| storageContainerName | Y | Storage container name for the storage account name. | `"myeventhubstoragecontainer"`
| enableEntityManagement | N | Boolean value to allow management of the EventHub namespace. Default: `false` | `"true"`, `"false"`
| resourceGroupName | N | Name of the resource group the event hub namespace is a part of. Needed when entity management is enabled | `"test-rg"`
| subscriptionID | N | Azure subscription ID value. Needed when entity management is enabled | `"azure subscription id"`
| partitionCount | N | Number of partitions for the new event hub. Only used when entity management is enabled. Default: `"1"` | `"2"`
| messageRetentionInDays | N | Number of days to retain messages for in the newly created event hub. Used only when entity management is enabled. Default: `"1"` | `"90"`
## Create an Azure Event Hub
@ -58,6 +80,16 @@ For example, a Dapr app running on Kubernetes with `dapr.io/app-id: "myapp"` wil
Note: Dapr passes the name of the consumer group to the Event Hub, so this is not supplied in the metadata.

## Entity Management

When entity management is enabled in the configuration, as long as the application has the right role and permissions to manipulate the Event Hub namespace, Dapr can create Event Hubs and consumer groups on the fly.

The Event Hub name is the `topic` field in the incoming request to publish or subscribe to, while the consumer group name is the name of the Dapr app which subscribes to a given Event Hub. For example, a Dapr app running on Kubernetes with name `dapr.io/app-id: "myapp"` requires an Event Hubs consumer group named `myapp`.

Entity management is only possible when using [Azure Authentication]({{< ref "authenticating-azure.md" >}}) mechanisms, and not via `connectionString`.

Note: Dapr passes the name of the consumer group to the Event Hub, and this is not supplied in the metadata.
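With entity management enabled, publishing to a topic that does not exist yet should create the corresponding Event Hub on the fly. As a sketch (`eventhubs-pubsub` and `mytopic` are placeholder component and topic names):

```shell
curl -X POST http://localhost:3500/v1.0/publish/eventhubs-pubsub/mytopic \
  -H "Content-Type: application/json" \
  -d '{ "status": "completed" }'
```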
## Subscribing to Azure IoT Hub Events

Azure IoT Hub provides an [endpoint that is compatible with Event Hubs](https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messages-read-builtin#read-from-the-built-in-endpoint), so the Azure Event Hubs pubsub component can also be used to subscribe to Azure IoT Hub events.
@ -98,3 +130,4 @@ For example, the headers of a delivered HTTP subscription message would contain:
- [Basic schema for a Dapr component]({{< ref component-schema >}})
- Read [this guide]({{< ref "howto-publish-subscribe.md#step-2-publish-a-topic" >}}) for instructions on configuring pub/sub components
- [Pub/Sub building block]({{< ref pubsub >}})
- [Authentication to Azure]({{< ref "authenticating-azure.md" >}})
@ -82,6 +82,14 @@ with NATS, find the service with: `kubectl get svc my-nats`.
{{< /tabs >}}

## Create JetStream

It is essential to create a NATS JetStream stream for a specific subject. For example, for a NATS server running locally use:

```bash
nats -s localhost:4222 stream add myStream --subjects mySubject
```
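To verify the stream was created, it can be listed with the same `nats` CLI (a sketch):

```shell
nats -s localhost:4222 stream ls
```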
## Related links
- [Basic schema for a Dapr component]({{< ref component-schema >}})
- Read [this guide]({{< ref "howto-publish-subscribe.md#step-2-publish-a-topic" >}}) for instructions on configuring pub/sub components
@ -8,7 +8,7 @@ aliases:
---

## Component format
To setup Apache Pulsar pubsub create a component of type `pubsub.pulsar`. See [this guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pubsub configuration. For more information on Apache Pulsar, [read the docs](https://pulsar.apache.org/docs/en/concepts-overview/).

```yaml
apiVersion: dapr.io/v1alpha1
@ -24,15 +24,40 @@ spec:
    value: "localhost:6650"
  - name: enableTLS
    value: "false"
  - name: tenant
    value: "public"
  - name: token
    value: "eyJrZXlJZCI6InB1bHNhci1wajU0cXd3ZHB6NGIiLCJhbGciOiJIUzI1NiJ9.eyJzd"
  - name: namespace
    value: "default"
  - name: persistent
    value: "true"
  - name: backOffPolicy
    value: "constant"
  - name: backOffMaxRetries
    value: "-1"
  - name: disableBatching
    value: "false"
```

## Spec metadata fields

| Field | Required | Details | Example |
|--------------------|:--------:|---------|---------|
| host | Y | Address of the Pulsar broker. Default is `"localhost:6650"` | `"localhost:6650"` OR `"http://pulsar-pj54qwwdpz4b-pulsar.ap-sg.public.pulsar.com:8080"`|
| enableTLS | N | Enable TLS. Default: `"false"` | `"true"`, `"false"` |
| token | N | Enable Authentication. | [How to create a Pulsar token](https://pulsar.apache.org/docs/en/security-jwt/#generate-tokens)|
| tenant | N | The topic tenant within the instance. Tenants are essential to multi-tenancy in Pulsar, and are spread across clusters. Default: `"public"` | `"public"` |
| namespace | N | The administrative unit of the topic, which acts as a grouping mechanism for related topics. Default: `"default"` | `"default"`
| persistent | N | Pulsar supports two kinds of topics: [persistent](https://pulsar.apache.org/docs/en/concepts-architecture-overview#persistent-storage) and [non-persistent](https://pulsar.apache.org/docs/en/concepts-messaging/#non-persistent-topics). With persistent topics, all messages are durably persisted on disks (if the broker is not standalone, messages are durably persisted on multiple disks), whereas data for non-persistent topics is not persisted to storage disks. Note: the default retry behavior is to retry until it succeeds, so when you use a non-persistent topic, you can reduce or prohibit retries by setting `backOffMaxRetries` to `0`. Default: `"true"` | `"true"`, `"false"`
| backOffPolicy | N | Retry policy. `"constant"` is a backoff policy that always returns the same backoff delay. `"exponential"` is a backoff policy that increases the backoff period for each retry attempt using a randomization function that grows exponentially. Defaults to `"constant"`. | `"constant"`, `"exponential"` |
| backOffDuration | N | The fixed interval; only takes effect when the `backOffPolicy` is `"constant"`. There are two valid formats: a number with a unit suffix, or a pure number that is processed as milliseconds. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". Defaults to `"5s"`. | `"5s"`, `"5000"` |
| backOffInitialInterval | N | The backoff initial interval on retry. Only takes effect when the `backOffPolicy` is `"exponential"`. There are two valid formats: a number with a unit suffix, or a pure number that is processed as milliseconds. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". Defaults to `"500"` | `"50"` |
| backOffMaxInterval | N | The maximum backoff interval on retry. Only takes effect when the `backOffPolicy` is `"exponential"`. There are two valid formats: a number with a unit suffix, or a pure number that is processed as milliseconds. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". Defaults to `"60s"` | `"60000"` |
| backOffMaxRetries | N | The maximum number of retries to process the message before returning an error. Defaults to `"0"`, which means the component will not retry processing the message. `"-1"` will retry indefinitely until the message is processed or the application is shut down. Any positive number is treated as the maximum retry count. | `"3"` |
| backOffRandomizationFactor | N | Randomization factor, between 0 and 1, including 0 but not 1. Randomized interval = RetryInterval * (1 ± backOffRandomizationFactor). Defaults to `"0.5"`. | `"0.5"` |
| backOffMultiplier | N | Backoff multiplier for the policy. Increments the interval by multiplying it with the multiplier. Defaults to `"1.5"` | `"1.5"` |
| backOffMaxElapsedTime | N | After MaxElapsedTime the ExponentialBackOff returns Stop. There are two valid formats: a number with a unit suffix, or a pure number that is processed as milliseconds. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". Defaults to `"15m"` | `"15m"` |
| disableBatching | N | Disable batching. Default: `"false"` | `"true"`, `"false"`|

### Delay queue
@ -21,34 +21,36 @@ spec:
  metadata:
  - name: host
    value: "amqp://localhost:5672"
  - name: consumerID
    value: myapp
  - name: durable
    value: false
  - name: deletedWhenUnused
    value: false
  - name: autoAck
    value: false
  - name: deliveryMode
    value: 0
  - name: requeueInFailure
    value: false
  - name: prefetchCount
    value: 0
  - name: reconnectWait
    value: 0
  - name: concurrencyMode
    value: parallel
  - name: backOffPolicy
    value: exponential
  - name: backOffInitialInterval
    value: 100
  - name: backOffMaxRetries
    value: 16
  - name: enableDeadLetter # Optional enable dead Letter or not
    value: true
  - name: maxLen # Optional max message count in a queue
    value: 3000
  - name: maxLenBytes # Optional maximum length in bytes of a queue.
    value: 10485760
```
{{% alert title="Warning" color="warning" %}}
The above example uses secrets as plain strings. It is recommended to use a secret store for the secrets as described [here]({{< ref component-secrets.md >}}).
@ -59,6 +61,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr
| Field | Required | Details | Example |
|--------------------|:--------:|---------|---------|
| host | Y | Connection string for the RabbitMQ host | `amqp://user:pass@localhost:5672`
| consumerID | N | Consumer ID, a.k.a. consumer tag, organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; i.e., a message is processed only once by one of the consumers in the group. If the consumer ID is not set, the Dapr runtime will set it to the Dapr application ID. |
| durable | N | Whether or not to use [durable](https://www.rabbitmq.com/queues.html#durability) queues. Defaults to `"false"` | `"true"`, `"false"`
| deletedWhenUnused | N | Whether or not the queue should be configured to [auto-delete](https://www.rabbitmq.com/queues.html). Defaults to `"true"` | `"true"`, `"false"`
| autoAck | N | Whether or not the queue consumer should [auto-ack](https://www.rabbitmq.com/confirms.html) messages. Defaults to `"false"` | `"true"`, `"false"`
@ -46,3 +46,9 @@ Table captions:
| Name | Status | Component version | Since |
|---------------------------------------------------------------------------------------|--------| ---- |--------------|
| [Azure Key Vault]({{< ref azure-keyvault.md >}}) | Stable | v1 | 1.0 |

### Alibaba Cloud

| Name | Status | Component version | Since |
|---------------------------------------------------------------------------------------|--------| ---- |--------------|
| [AlibabaCloud OOS Parameter Store]({{< ref alicloud-oos-parameter-store.md >}}) | Alpha | v1 | 1.6 |
@ -0,0 +1,56 @@
---
type: docs
title: "AlibabaCloud OOS Parameter Store"
linkTitle: "AlibabaCloud OOS Parameter Store"
description: Detailed information on the AlibabaCloud OOS Parameter Store - secret store component
aliases:
  - "/operations/components/setup-secret-store/supported-secret-stores/alibabacloud-oos-parameter-store/"
---

## Component format

To setup AlibabaCloud OOS Parameter Store secret store create a component of type `secretstores.alicloud.parameterstore`. See [this guide]({{< ref "setup-secret-store.md#apply-the-configuration" >}}) on how to create and apply a secretstore configuration. See this guide on [referencing secrets]({{< ref component-secrets.md >}}) to retrieve and use the secret with Dapr components.

```yaml
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: alibabacloudparameterstore
  namespace: default
spec:
  type: secretstores.alicloud.parameterstore
  version: v1
  metadata:
  - name: regionId
    value: "[alicloud_region_id]"
  - name: accessKeyId
    value: "[alicloud_access_key_id]"
  - name: accessKeySecret
    value: "[alicloud_access_key_secret]"
  - name: securityToken
    value: "[alicloud_security_token]"
```

{{% alert title="Warning" color="warning" %}}
The above example uses secrets as plain strings. It is recommended to use a local secret store such as [Kubernetes secret store]({{< ref kubernetes-secret-store.md >}}) or a [local file]({{< ref file-secret-store.md >}}) to bootstrap secure key storage.
{{% /alert %}}

## Spec metadata fields

| Field | Required | Details | Example |
|--------------------|:--------:|-------------------------------------------------------------------------|---------------------|
| regionId | Y | The specific region the AlibabaCloud OOS Parameter Store instance is deployed in | `"cn-hangzhou"` |
| accessKeyId | Y | The AlibabaCloud Access Key ID to access this resource | `"accessKeyId"` |
| accessKeySecret | Y | The AlibabaCloud Access Key Secret to access this resource | `"accessKeySecret"` |
| securityToken | N | The AlibabaCloud Security Token to use | `"securityToken"` |
## Create an AlibabaCloud OOS Parameter Store instance
|
||||
|
||||
Setup AlibabaCloud OOS Parameter Store using the AlibabaCloud documentation: https://www.alibabacloud.com/help/en/doc-detail/186828.html.
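
Once the component is applied, secrets can be retrieved through the Dapr secrets API. A minimal sketch, assuming a parameter named `mysecret` has been created in the store (the name is a placeholder, not part of the component definition):

```shell
# retrieve the parameter named "mysecret" through the Dapr sidecar;
# the path segment "alibabacloudparameterstore" is the component's metadata.name
curl http://localhost:3500/v1.0/secrets/alibabacloudparameterstore/mysecret
```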

## Related links

- [Secrets building block]({{< ref secrets >}})
- [How-To: Retrieve a secret]({{< ref "howto-secrets.md" >}})
- [How-To: Reference secrets in Dapr components]({{< ref component-secrets.md >}})
- [Secrets API reference]({{< ref secrets_api.md >}})
@ -60,3 +60,9 @@ The following stores are supported, at various levels, by the Dapr state managem
| [Azure CosmosDB]({{< ref setup-azure-cosmosdb.md >}}) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | Stable | v1 | 1.0 |
| [Azure SQL Server]({{< ref setup-sqlserver.md >}}) | ✅ | ✅ | ✅ | ❌ | ✅ | ❌ | Stable | v1 | 1.5 |
| [Azure Table Storage]({{< ref setup-azure-tablestorage.md >}}) | ✅ | ❌ | ✅ | ❌ | ❌ | ❌ | Alpha | v1 | 1.0 |

### Oracle Cloud Infrastructure (OCI)

| Name | CRUD | Transactional | ETag | [TTL]({{< ref state-store-ttl.md >}}) | [Actors]({{< ref howto-actors.md >}}) | [Query]({{< ref howto-state-query-api.md >}}) | Status | Component version | Since |
|------|------|---------------|------|------|------|------|--------|------|------|
| [OCI Object Storage]({{< ref setup-oci-objectstorage.md >}}) | ✅ | ❌ | ✅ | ✅ | ❌ | ❌ | Alpha | v1 | 1.6 |
@ -0,0 +1,193 @@
---
type: docs
title: "OCI Object Storage"
linkTitle: "OCI Object Storage"
description: Detailed information on the OCI Object Storage state store component
aliases:
  - "/operations/components/setup-state-store/supported-state-stores/setup-oci-objectstorage/"
---

## Component format

To set up the OCI Object Storage state store, create a component of type `state.oci.objectstorage`. See [this guide]({{< ref "howto-get-save-state.md#step-1-setup-a-state-store" >}}) on how to create and apply a state store configuration.

```yaml
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: <NAME>
  namespace: <NAMESPACE>
spec:
  type: state.oci.objectstorage
  version: v1
  metadata:
  - name: instancePrincipalAuthentication
    value: <"true" or "false">  # Optional. Default: "false"
  - name: configFileAuthentication
    value: <"true" or "false">  # Optional. Default: "false". Not used when instancePrincipalAuthentication == "true"
  - name: configFilePath
    value: <REPLACE-WITH-FULLY-QUALIFIED-PATH-OF-CONFIG-FILE>  # Optional. Default: the operating system specific default location for the OCI config file, e.g. "~/.oci/config" on Linux. Only used when configFileAuthentication == "true"
  - name: configFileProfile
    value: <REPLACE-WITH-NAME-OF-PROFILE-IN-CONFIG-FILE>  # Optional. Default: "DEFAULT". Only used when configFileAuthentication == "true"
  - name: tenancyOCID
    value: <REPLACE-WITH-TENANCY-OCID>  # Not used when configFileAuthentication == "true" or instancePrincipalAuthentication == "true"
  - name: userOCID
    value: <REPLACE-WITH-USER-OCID>  # Not used when configFileAuthentication == "true" or instancePrincipalAuthentication == "true"
  - name: fingerPrint
    value: <REPLACE-WITH-FINGERPRINT>  # Not used when configFileAuthentication == "true" or instancePrincipalAuthentication == "true"
  - name: privateKey  # Not used when configFileAuthentication == "true" or instancePrincipalAuthentication == "true"
    value: |
      -----BEGIN RSA PRIVATE KEY-----
      REPLACE-WITH-PRIVATE-KEY-AS-IN-PEM-FILE
      -----END RSA PRIVATE KEY-----
  - name: region
    value: <REPLACE-WITH-OCI-REGION>  # Not used when configFileAuthentication == "true" or instancePrincipalAuthentication == "true"
  - name: bucketName
    value: <REPLACE-WITH-BUCKET-NAME>
  - name: compartmentOCID
    value: <REPLACE-WITH-COMPARTMENT-OCID>
```

{{% alert title="Warning" color="warning" %}}
The above example uses secrets as plain strings. It is recommended to use a secret store for the secrets, as described [here]({{< ref component-secrets.md >}}).
{{% /alert %}}

## Spec metadata fields

| Field | Required | Details | Example |
|--------------------|:--------:|---------|---------|
| instancePrincipalAuthentication | N | Boolean to indicate whether instance principal based authentication is used. Default: `"false"` | `"true"` or `"false"` |
| configFileAuthentication | N | Boolean to indicate whether identity credential details are provided through a configuration file. Default: `"false"`. Not required nor used when instancePrincipalAuthentication is true. | `"true"` or `"false"` |
| configFilePath | N | Full path name to the OCI configuration file. Default: the default location on your operating system for the OCI config file, for example `"~/.oci/config"` on Linux. Not used when instancePrincipalAuthentication is true. | `"/home/apps/configuration-files/myOCIConfig.txt"` |
| configFileProfile | N | Name of the profile in the configuration file to use. Default: `"DEFAULT"`. Not used when instancePrincipalAuthentication is true. | `"DEFAULT"` or `"PRODUCTION"` |
| tenancyOCID | Y | The OCI tenancy identifier. Not required nor used when instancePrincipalAuthentication is true. | `"ocid1.tenancy.oc1..aaaaaaaag7c7sljhsdjhsdyuwe723"` |
| userOCID | Y | The OCID for an OCI account (this account requires permissions to access OCI Object Storage). Not required nor used when instancePrincipalAuthentication is true. | `"ocid1.user.oc1..aaaaaaaaby4oyyyuqwy7623yuwe76"` |
| fingerPrint | Y | Fingerprint of the public key. Not required nor used when instancePrincipalAuthentication is true. | `"02:91:6c:49:e2:94:21:15:a7:6b:0e:a7:34:e1:3d:1b"` |
| privateKey | Y | Private key of the RSA key pair. Not required nor used when instancePrincipalAuthentication is true. | `"MIIEoyuweHAFGFG2727as+7BTwQRAIW4V"` |
| region | Y | OCI region. Not required nor used when instancePrincipalAuthentication is true. | `"us-ashburn-1"` |
| bucketName | Y | Name of the bucket written to and read from (and, if necessary, created) | `"application-state-store-bucket"` |
| compartmentOCID | Y | The OCID for the compartment that contains the bucket | `"ocid1.compartment.oc1..aaaaaaaacsssekayyuq7asjh78"` |

## Setup OCI Object Storage

The OCI Object Storage state store needs to interact with Oracle Cloud Infrastructure. The state store supports two approaches to authentication: one is based on an identity (a user or service account), and the other is instance principal authentication, which leverages the permissions granted to the compute instance that runs the application workload. Note: resource principal authentication - used for resources that are not instances, such as serverless functions - is not currently supported.

Dapr applications running on Oracle Cloud Infrastructure - in a compute instance or as a container on Kubernetes - can leverage instance principal authentication. See the [OCI documentation on calling OCI services from instances](https://docs.oracle.com/en-us/iaas/Content/Identity/Tasks/callingservicesfrominstances.htm) for more background. In short: the instance needs to be a member of a dynamic group, and this dynamic group needs to be granted permissions for interacting with the Object Storage service through IAM policies. For such instance principal authentication, set the property instancePrincipalAuthentication to `"true"`. You do not need to configure the properties tenancyOCID, userOCID, region, fingerPrint, and privateKey; any values you define for them are ignored.
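
As an illustration, a minimal component definition for instance principal authentication might look like the following sketch. The component name is a placeholder, and the bucket and compartment values reuse the examples from the spec metadata table above:

```yaml
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: oci-objectstorage-state   # hypothetical component name
  namespace: default
spec:
  type: state.oci.objectstorage
  version: v1
  metadata:
  - name: instancePrincipalAuthentication
    value: "true"
  # tenancyOCID, userOCID, fingerPrint, privateKey and region are ignored in this mode
  - name: bucketName
    value: "application-state-store-bucket"
  - name: compartmentOCID
    value: "ocid1.compartment.oc1..aaaaaaaacsssekayyuq7asjh78"
```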

Identity-based authentication interacts with OCI through an OCI account that has permissions to create, read, and delete objects through OCI Object Storage in the indicated bucket, and that is allowed to create a bucket in the specified compartment if the bucket does not exist beforehand. The OCI documentation [describes how to create an OCI account](https://docs.oracle.com/en-us/iaas/Content/GSG/Tasks/addingusers.htm#Adding_Users). The interaction by the state store is performed using the public key's fingerprint and a private key from an RSA key pair generated for the OCI account. The [instructions for generating the key pair and getting hold of the required information](https://docs.oracle.com/en-us/iaas/Content/API/Concepts/apisigningkey.htm) are available in the OCI documentation.

Details for the identity and the identity's credentials to be used for interaction with OCI can be provided directly in the Dapr component properties file - using the properties tenancyOCID, userOCID, fingerPrint, privateKey, and region - or can be provided from a configuration file, as is common for many OCI-related tools (such as the CLI and Terraform) and SDKs. In the latter case, a default configuration file can be assumed (such as ~/.oci/config on Linux), or the exact file name and path can be provided through the property configFilePath. A configuration file can contain multiple profiles; the desired profile can be specified through the property configFileProfile. If no value is provided, DEFAULT is used as the name of the profile. Note: if the indicated profile is not found, the DEFAULT profile (if it exists) is used instead. The OCI SDK documentation gives [details about the definition of the configuration file](https://docs.oracle.com/en-us/iaas/Content/API/Concepts/sdkconfig.htm).
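
Similarly, here is a sketch of a component that takes its credentials from an OCI configuration file; the component name is a placeholder, and the file path and profile name are the illustrative examples from the spec metadata table:

```yaml
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: oci-objectstorage-state   # hypothetical component name
  namespace: default
spec:
  type: state.oci.objectstorage
  version: v1
  metadata:
  - name: configFileAuthentication
    value: "true"
  - name: configFilePath   # optional; defaults to the OS-specific location, e.g. ~/.oci/config on Linux
    value: "/home/apps/configuration-files/myOCIConfig.txt"
  - name: configFileProfile   # optional; defaults to "DEFAULT"
    value: "PRODUCTION"
  - name: bucketName
    value: "application-state-store-bucket"
  - name: compartmentOCID
    value: "ocid1.compartment.oc1..aaaaaaaacsssekayyuq7asjh78"
```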

If you wish to create the bucket for Dapr to use, you can do so beforehand. However, the Object Storage state provider will automatically create one for you - in the specified compartment - if it doesn't exist.

To set up OCI Object Storage as a state store, you need the following properties:

- **instancePrincipalAuthentication**: The flag that indicates whether instance principal based authentication should be used.
- **configFileAuthentication**: The flag that indicates whether the OCI identity credential details are provided through a configuration file. Not used when **instancePrincipalAuthentication** is true.
- **configFilePath**: Full path name to the OCI configuration file. Not used when **instancePrincipalAuthentication** is true or **configFileAuthentication** is not true.
- **configFileProfile**: Name of the profile in the configuration file to use. Default: `"DEFAULT"`. Not used when **instancePrincipalAuthentication** is true or **configFileAuthentication** is not true. When the specified profile is not found in the configuration file, the DEFAULT profile is used if it exists.
- **tenancyOCID**: The identifier for the OCI cloud tenancy in which the state is to be stored. Not used when **instancePrincipalAuthentication** is true or **configFileAuthentication** is true.
- **userOCID**: The identifier for the account used by the state store component to connect to OCI; this must be an account with appropriate permissions on the OCI Object Storage service in the specified compartment and bucket. Not used when **instancePrincipalAuthentication** is true or **configFileAuthentication** is true.
- **fingerPrint**: The fingerprint for the public key in the RSA key pair generated for the account indicated by **userOCID**. Not used when **instancePrincipalAuthentication** is true or **configFileAuthentication** is true.
- **privateKey**: The private key in the RSA key pair generated for the account indicated by **userOCID**. Not used when **instancePrincipalAuthentication** is true or **configFileAuthentication** is true.
- **region**: The OCI region - for example **us-ashburn-1**, **eu-amsterdam-1**, **ap-mumbai-1**. Not used when **instancePrincipalAuthentication** is true.
- **bucketName**: The name of the bucket on OCI Object Storage in which state will be created. This bucket can already exist when the state store is initialized, or it will be created during initialization of the state store. Note that bucket names are unique within a namespace.
- **compartmentOCID**: The identifier of the compartment within the tenancy in which the bucket exists or will be created.

## What Happens at Runtime?

Every state entry is represented by an object in OCI Object Storage. The OCI Object Storage state store uses the `key` property provided in requests to the Dapr API to determine the name of the object. The `value` is stored as the (literal) content of the object. Each object is assigned a unique ETag value whenever it is created or updated (that is, overwritten); this is native behavior of OCI Object Storage. The state store assigns a metadata tag to every object it writes; the tag is __category__ and its value is __dapr-state-store__. This allows the objects created as state for Daprized applications to be identified.

For example, the following operation

```shell
curl -X POST http://localhost:3500/v1.0/state/<statestore-name> \
  -H "Content-Type: application/json" \
  -d '[
        {
          "key": "nihilus",
          "value": "darth"
        }
      ]'
```

creates the following object:

| Bucket | Directory | Object Name | Object Content | Meta Tags |
| ------ | --------- | ----------- | -------------- | --------- |
| as specified with **bucketName** in components.yaml | - (root) | nihilus | darth | category: dapr-state-store |

Dapr uses a fixed key scheme with *composite keys* to partition state across applications. For general states, the key format is `App-ID||state key`. The OCI Object Storage state store maps the first key segment (the App-ID) to a directory within the bucket, using the [prefixes and hierarchy for simulating a directory structure described in the OCI Object Storage documentation](https://docs.oracle.com/en-us/iaas/Content/Object/Tasks/managingobjects.htm#nameprefix).

The following operation (notice the composite key)

```shell
curl -X POST http://localhost:3500/v1.0/state/<statestore-name> \
  -H "Content-Type: application/json" \
  -d '[
        {
          "key": "myApplication||nihilus",
          "value": "darth"
        }
      ]'
```

will create the following object:

| Bucket | Directory | Object Name | Object Content | Meta Tags |
| ------ | --------- | ----------- | -------------- | --------- |
| as specified with **bucketName** in components.yaml | myApplication | nihilus | darth | category: dapr-state-store |

You can inspect all state stored through the OCI Object Storage state store by examining the contents of the bucket through the console, the APIs, the CLI, or the SDKs. By going directly to the bucket, you can prepare state that will be available to your application at runtime.
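
For instance, a stored entry can be verified through the Dapr API with a plain Get request; `<statestore-name>` stands for the component's `metadata.name`:

```shell
# retrieve the value stored under the key "nihilus"
curl http://localhost:3500/v1.0/state/<statestore-name>/nihilus
# expected response body, given the earlier Set operation: "darth"
```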

## Time To Live and State Expiration

The OCI Object Storage state store supports Dapr's Time To Live logic, which ensures that state cannot be retrieved after it has expired. See [this How-To on setting state Time To Live]({{< ref "state-store-ttl.md" >}}) for details.

OCI Object Storage does not have native support for a Time To Live setting. The implementation in this component uses a metadata tag put on each object for which a TTL has been specified. The tag is called **expiry-time-from-ttl** and contains a string in ISO date-time format with the UTC-based expiry time. When state is retrieved through a call to Get, this component checks whether **expiry-time-from-ttl** is set and, if so, whether it is in the past. In that case, no state is returned.

The following operation (notice the TTL metadata)

```shell
curl -X POST http://localhost:3500/v1.0/state/<statestore-name> \
  -H "Content-Type: application/json" \
  -d '[
        {
          "key": "temporary",
          "value": "ephemeral",
          "metadata": {"ttlInSeconds": "120"}
        }
      ]'
```

creates the following object:

| Bucket | Directory | Object Name | Object Content | Meta Tags |
| ------ | --------- | ----------- | -------------- | --------- |
| as specified with **bucketName** in components.yaml | - (root) | temporary | ephemeral | category: dapr-state-store, expiry-time-from-ttl: 2022-01-06T08:34:32 |

The exact value of the expiry-time-from-ttl tag depends on the time at which the state was created; it will be 120 seconds after that moment.

Note that expired state is not removed from the state store by this component. An application operator may decide to run a periodic job that performs a form of garbage collection, explicitly removing all state that has an **expiry-time-from-ttl** label with a timestamp in the past.

## Concurrency

OCI Object Storage state concurrency is achieved by using `ETag`s. Each object in OCI Object Storage is assigned a unique ETag when it is created or updated (that is, replaced). When `Set` and `Delete` requests for this state store specify the FirstWrite concurrency policy, the request needs to provide the actual ETag value of the state to be written or removed in order to be successful.
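
As a sketch, a `Set` request under the FirstWrite policy passes the ETag obtained from an earlier read; the ETag placeholder below is illustrative:

```shell
curl -X POST http://localhost:3500/v1.0/state/<statestore-name> \
  -H "Content-Type: application/json" \
  -d '[
        {
          "key": "nihilus",
          "value": "darth",
          "etag": "<etag-from-earlier-read>",
          "options": {
            "concurrency": "first-write"
          }
        }
      ]'
```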

## Consistency

OCI Object Storage state does not support transactions.

## Query

OCI Object Storage state does not support the Query API.

## Related links

- [Basic schema for a Dapr component]({{< ref component-schema >}})
- Read [this guide]({{< ref "howto-get-save-state.md#step-2-save-and-retrieve-a-single-state" >}}) for instructions on configuring state store components
- [State management building block]({{< ref state-management >}})
@ -1 +1 @@
{{- if .Get "short" }}1.5{{ else if .Get "long" }}1.5.1{{ else if .Get "cli" }}1.5.1{{ else }}1.5.1{{ end -}}
{{- if .Get "short" }}1.6{{ else if .Get "long" }}1.6.0{{ else if .Get "cli" }}1.6.0{{ else }}1.6.0{{ end -}}
@ -71,7 +71,7 @@ spec:
spec:
  containers:
  - name: otel-collector
    image: otel/opentelemetry-collector-contrib-dev:latest
    image: otel/opentelemetry-collector-contrib:0.40.0
    command:
    - "/otelcontribcol"
    - "--config=/conf/otel-collector-config.yaml"
Binary file not shown. (Image added: 113 KiB)
@ -1 +1 @@
Subproject commit 2ffbb113e7b5186a96ee38426a2c08526e83b0e0
Subproject commit b47c63ac140845b178a1b29b1e988e30e4c7b579
@ -1 +1 @@
Subproject commit d3df194bad3826069b7c9cda5178196e92dacad1
Subproject commit db33b48fd4af80f638d4fa8713b557e43cabec49
@ -1 +1 @@
Subproject commit 1e23f32eafdebe571db6e19717cf5317f09a5402
Subproject commit 18a72819a6b620e889ae4b5beecba100ee65ee34
@ -1 +1 @@
Subproject commit 058cfcf4d603823c5916bb5ae533bb9f5bb862fd
Subproject commit 6d7d9400736d2c58901c7b49f666a159f987e789