Merge branch 'v1.15' into resiliency-error-code-retries-docs

This commit is contained in:
Anton Troshin 2024-11-26 11:21:08 -06:00 committed by GitHub
commit d6d83dd441
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
47 changed files with 577 additions and 166 deletions

View File

@ -1,109 +0,0 @@
name: Azure Static Web App Root
on:
workflow_dispatch:
push:
branches:
- v1.14
pull_request:
types: [opened, synchronize, reopened, closed]
branches:
- v1.14
concurrency:
# Cancel the previously triggered build for only PR build.
group: website-${{ github.event.pull_request.number || github.sha }}
cancel-in-progress: true
jobs:
build_and_deploy_job:
name: Build Hugo Website
if: github.event.action != 'closed'
runs-on: ubuntu-latest
env:
SWA_BASE: 'proud-bay-0e9e0e81e'
HUGO_ENV: production
steps:
- name: Checkout docs repo
uses: actions/checkout@v3
with:
submodules: true
- name: Setup Node
uses: actions/setup-node@v2
with:
node-version: '14'
- name: Setup Hugo
uses: peaceiris/actions-hugo@v2.5.0
with:
hugo-version: 0.102.3
extended: true
- name: Setup Docsy
run: |
cd daprdocs
git submodule update --init --recursive
sudo npm install -D --save autoprefixer
sudo npm install -D --save postcss-cli
- name: Build Hugo Website
run: |
cd daprdocs
git config --global --add safe.directory /github/workspace
if [ $GITHUB_EVENT_NAME == 'pull_request' ]; then
STAGING_URL="https://${SWA_BASE}-${{github.event.number}}.westus2.azurestaticapps.net/"
fi
hugo ${STAGING_URL+-b "$STAGING_URL"}
- name: Deploy docs site
uses: Azure/static-web-apps-deploy@v1
with:
azure_static_web_apps_api_token: ${{ secrets.AZURE_STATIC_WEB_APPS_API_TOKEN_PROUD_BAY_0E9E0E81E }}
repo_token: ${{ secrets.GITHUB_TOKEN }}
action: "upload"
app_location: "daprdocs/public/"
api_location: "daprdocs/public/"
output_location: ""
skip_app_build: true
skip_deploy_on_missing_secrets: true
- name: Upload Hugo artifacts
uses: actions/upload-artifact@v3
with:
name: hugo_build
path: ./daprdocs/public/
if-no-files-found: error
close_staging_site:
if: github.event_name == 'pull_request' && github.event.action == 'closed'
runs-on: ubuntu-latest
name: Close Pull Request Job
steps:
- name: Close Pull Request
id: closepullrequest
uses: Azure/static-web-apps-deploy@v1
with:
azure_static_web_apps_api_token: ${{ secrets.AZURE_STATIC_WEB_APPS_API_TOKEN_PROUD_BAY_0E9E0E81E }}
action: "close"
skip_deploy_on_missing_secrets: true
algolia_index:
name: Index site for Algolia
if: github.event_name == 'push'
needs: ['build_and_deploy_job']
runs-on: ubuntu-latest
env:
ALGOLIA_APP_ID: ${{ secrets.ALGOLIA_APP_ID }}
ALGOLIA_API_WRITE_KEY: ${{ secrets.ALGOLIA_API_WRITE_KEY }}
ALGOLIA_INDEX_NAME: daprdocs
steps:
- name: Checkout docs repo
uses: actions/checkout@v2
with:
submodules: false
- name: Download Hugo artifacts
uses: actions/download-artifact@v4.1.7
with:
name: hugo_build
path: site/
- name: Install Python packages
run: |
pip install --upgrade bs4
pip install --upgrade 'algoliasearch>=2.0,<3.0'
- name: Index site
run: python ./.github/scripts/algolia.py ./site

View File

@ -1,14 +1,14 @@
name: Azure Static Web App v1.14
name: Azure Static Web App v1.15
on:
workflow_dispatch:
push:
branches:
- v1.14
- v1.15
pull_request:
types: [opened, synchronize, reopened, closed]
branches:
- v1.14
- v1.15
jobs:
build_and_deploy_job:
@ -29,7 +29,7 @@ jobs:
HUGO_ENV: production
HUGO_VERSION: "0.100.2"
with:
azure_static_web_apps_api_token: ${{ secrets.AZURE_STATIC_WEB_APPS_V1_14 }}
azure_static_web_apps_api_token: ${{ secrets.AZURE_STATIC_WEB_APPS_V1_15 }}
repo_token: ${{ secrets.GITHUB_TOKEN }} # Used for Github integrations (i.e. PR comments)
skip_deploy_on_missing_secrets: true
action: "upload"
@ -50,6 +50,6 @@ jobs:
id: closepullrequest
uses: Azure/static-web-apps-deploy@v1
with:
azure_static_web_apps_api_token: ${{ secrets.AZURE_STATIC_WEB_APPS_V1_14 }}
azure_static_web_apps_api_token: ${{ secrets.AZURE_STATIC_WEB_APPS_V1_15 }}
skip_deploy_on_missing_secrets: true
action: "close"

View File

@ -1,5 +1,5 @@
# Site Configuration
baseURL = "https://docs.dapr.io"
baseURL = "https://v1-15.docs.dapr.io"
title = "Dapr Docs"
theme = "docsy"
disableFastRender = true
@ -196,17 +196,17 @@ offlineSearch = false
github_repo = "https://github.com/dapr/docs"
github_project_repo = "https://github.com/dapr/dapr"
github_subdir = "daprdocs"
github_branch = "v1.14"
github_branch = "v1.15"
# Versioning
version_menu = "v1.14 (latest)"
version = "v1.14"
version_menu = "v1.15 (preview)"
version = "v1.15"
archived_version = false
url_latest_version = "https://docs.dapr.io"
[[params.versions]]
version = "v1.15 (preview)"
url = "https://v1-15.docs.dapr.io"
url = "#"
[[params.versions]]
version = "v1.14 (latest)"
url = "https://docs.dapr.io"

View File

@ -87,6 +87,13 @@ you tackle the challenges that come with building microservices and keeps your c
<a href="{{< ref contributing >}}" class="stretched-link"></a>
</div>
</div>
<div class="card">
<div class="card-body">
<h5 class="card-title"><b>Roadmap</b></h5>
<p class="card-text">Learn about Dapr's roadmap and change process.</p>
<a href="{{< ref roadmap.md >}}" class="stretched-link"></a>
</div>
</div>
</div>

View File

@ -108,7 +108,7 @@ Deploying and running a Dapr-enabled application into your Kubernetes cluster is
### Clusters of physical or virtual machines
The Dapr control plane services can be deployed in high availability (HA) mode to clusters of physical or virtual machines in production. In the diagram below, the Actor `Placement` and security `Sentry` services are started on three different VMs to provide HA control plane. In order to provide name resolution using DNS for the applications running in the cluster, Dapr uses [Hashicorp Consul service]({{< ref setup-nr-consul >}}), also running in HA mode.
The Dapr control plane services can be deployed in high availability (HA) mode to clusters of physical or virtual machines in production. In the diagram below, the Actor `Placement` and security `Sentry` services are started on three different VMs to provide an HA control plane. To provide name resolution using DNS for the applications running in the cluster, Dapr uses multicast DNS by default, but can optionally use the [Hashicorp Consul service]({{< ref setup-nr-consul >}}).
<img src="/images/overview-vms-hosting.png" width=1200 alt="Architecture diagram of Dapr control plane and Consul deployed to VMs in high availability mode">

View File

@ -18,7 +18,7 @@ See the [Dapr community repository](https://github.com/dapr/community) for more
1. **Docs**: This [repository](https://github.com/dapr/docs) contains the documentation for Dapr. You can contribute by updating existing documentation, fixing errors, or adding new content to improve user experience and clarity. Please see the specific guidelines for [docs contributions]({{< ref contributing-docs >}}).
2. **Quickstarts**: The Quickstarts [repository](https://github.com/dapr/quickstarts) provides simple, step-by-step guides to help users get started with Dapr quickly. Contributions in this repository involve creating new quickstarts, improving existing ones, or ensuring they stay up-to-date with the latest features.
2. **Quickstarts**: The Quickstarts [repository](https://github.com/dapr/quickstarts) provides simple, step-by-step guides to help users get started with Dapr quickly. [Contributions in this repository](https://github.com/dapr/quickstarts/blob/master/CONTRIBUTING.md) involve creating new quickstarts, improving existing ones, or ensuring they stay up-to-date with the latest features.
3. **Runtime**: The Dapr runtime [repository](https://github.com/dapr/dapr) houses the core runtime components. Here, you can contribute by fixing bugs, optimizing performance, implementing new features, or enhancing existing ones.

View File

@ -2,7 +2,7 @@
type: docs
title: "Dapr bot reference"
linkTitle: "Dapr bot"
weight: 15
weight: 70
description: "List of Dapr bot capabilities."
---

View File

@ -41,15 +41,18 @@ Style and tone conventions should be followed throughout all Dapr documentation
## Diagrams and images
Diagrams and images are invaluable visual aids for documentation pages. Diagrams are kept in a [Dapr Diagrams Deck](https://github.com/dapr/docs/tree/v1.11/daprdocs/static/presentations), which includes guidance on style and icons.
Diagrams and images are invaluable visual aids for documentation pages. Use the diagram style and icons in the [Dapr Diagrams template deck](https://github.com/dapr/docs/tree/v1.14/daprdocs/static/presentations).
As you create diagrams for your documentation:
The process for creating diagrams for your documentation:
- Save them as high-res PNG files into the [images folder](https://github.com/dapr/docs/tree/v1.11/daprdocs/static/images).
- Name your PNG files using the convention of a concept or building block so that they are grouped.
1. Download the [Dapr Diagrams template deck](https://github.com/dapr/docs/tree/v1.14/daprdocs/static/presentations) to use the icons and colors.
1. Add a new slide and create your diagram.
1. Screen capture the diagram as high-res PNG file and save in the [images folder](https://github.com/dapr/docs/tree/v1.14/daprdocs/static/images).
1. Name your PNG files using the convention of a concept or building block so that they are grouped.
- For example: `service-invocation-overview.png`.
- For more information on calling out images using shortcode, see the [Images guidance](#images) section below.
- Add the diagram to the correct section in the `Dapr-Diagrams.pptx` deck so that they can be amended and updated during routine refresh.
1. Add the diagram to the appropriate section in your documentation using the HTML `<image>` tag.
1. In your PR, include the diagram slide (not just the screen capture) in a comment so that maintainers can review it and add it to the diagram deck.
## Contributing a new docs page

View File

@ -94,6 +94,75 @@ In this example, at trigger time, which is `@every 1s` according to the `Schedul
At the trigger time, the `prodDBBackupHandler` function is called, executing the desired business logic for this job at trigger time. For example:
#### HTTP
When you create a job using Dapr's Jobs API, Dapr will automatically assume there is an endpoint available at
`/job/<job-name>`. For instance, if you schedule a job named `test`, Dapr expects your application to listen for job
events at `/job/test`. Ensure your application has a handler set up for this endpoint to process the job when it is
triggered. For example:
*Note: The following example is in Go but applies to any programming language.*
```go
func main() {
...
http.HandleFunc("/job/", handleJob)
http.HandleFunc("/job/<job-name>", specificJob)
...
}
func specificJob(w http.ResponseWriter, r *http.Request) {
// Handle specific triggered job
}
func handleJob(w http.ResponseWriter, r *http.Request) {
// Handle the triggered jobs
}
```
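For illustration, here is a minimal sketch of what a catch-all handler might do, assuming the job payload (if any) arrives in the request body; the exact payload shape depends on how the job was scheduled:

```go
import (
	"io"
	"log"
	"net/http"
	"strings"
)

// handleJob recovers the job name from the URL and reads the payload.
func handleJob(w http.ResponseWriter, r *http.Request) {
	jobName := strings.TrimPrefix(r.URL.Path, "/job/")
	payload, err := io.ReadAll(r.Body)
	if err != nil {
		http.Error(w, "failed to read job payload", http.StatusInternalServerError)
		return
	}
	defer r.Body.Close()
	log.Printf("job %q triggered with payload: %s", jobName, payload)
	w.WriteHeader(http.StatusOK) // acknowledge the trigger
}
```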
#### gRPC
When a job reaches its scheduled trigger time, the triggered job is sent back to the application via the following
callback function:
*Note: The following example is in Go but applies to any programming language with gRPC support.*
```go
import rtv1 "github.com/dapr/dapr/pkg/proto/runtime/v1"
...
func (s *JobService) OnJobEventAlpha1(ctx context.Context, in *rtv1.JobEventRequest) (*rtv1.JobEventResponse, error) {
// Handle the triggered job
}
```
This function processes the triggered jobs within the context of your gRPC server. When you set up the server, ensure that
you register the callback server, which will invoke this function when a job is triggered:
```go
...
js := &JobService{}
rtv1.RegisterAppCallbackAlphaServer(server, js)
```
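Putting the pieces together, a minimal end-to-end sketch might look like the following. The listener port is an example and should match the `--app-port` your app is started with; the `UnimplementedAppCallbackAlphaServer` embedding and the `JobEventRequest` field accessors are assumptions based on the generated proto code:

```go
package main

import (
	"context"
	"log"
	"net"

	rtv1 "github.com/dapr/dapr/pkg/proto/runtime/v1"
	"google.golang.org/grpc"
)

// JobService embeds the generated unimplemented server for forward compatibility.
type JobService struct {
	rtv1.UnimplementedAppCallbackAlphaServer
}

func (s *JobService) OnJobEventAlpha1(ctx context.Context, in *rtv1.JobEventRequest) (*rtv1.JobEventResponse, error) {
	log.Printf("job triggered: %s", in.GetName()) // handle the triggered job here
	return &rtv1.JobEventResponse{}, nil
}

func main() {
	lis, err := net.Listen("tcp", ":6005") // example port; match your --app-port
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	server := grpc.NewServer()
	rtv1.RegisterAppCallbackAlphaServer(server, &JobService{})
	if err := server.Serve(lis); err != nil {
		log.Fatalf("failed to serve: %v", err)
	}
}
```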
In this setup, you have full control over how triggered jobs are received and processed, as they are routed directly
through this gRPC method.
#### SDKs
For SDK users, handling triggered jobs is simpler. When a job is triggered, Dapr will automatically route the job to the
event handler you set up during the server initialization. For example, in Go, you'd register the event handler like this:
```go
...
if err = server.AddJobEventHandler("prod-db-backup", prodDBBackupHandler); err != nil {
log.Fatalf("failed to register job event handler: %v", err)
}
```
Dapr takes care of the underlying routing. When the job is triggered, your `prodDBBackupHandler` function is called with
the triggered job data. Here's an example of handling the triggered job:
```go
// ...
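// A hedged sketch of such a handler (the common.JobEvent type and its fields
// are assumptions based on the Go SDK; treat the exact shape as illustrative):
func prodDBBackupHandler(ctx context.Context, job *common.JobEvent) error {
	// job.Data carries the payload registered when the job was scheduled.
	log.Printf("handling %s with data: %s", job.JobType, job.Data)
	// Run the backup logic here; returning an error signals failure to Dapr.
	return nil
}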
@ -144,4 +213,4 @@ dapr run --app-id=distributed-scheduler \
## Next steps
- [Learn more about the Scheduler control plane service]({{< ref "concepts/dapr-services/scheduler.md" >}})
- [Jobs API reference]({{< ref jobs_api.md >}})

View File

@ -37,17 +37,16 @@ metadata:
spec:
topic: orders
routes:
default: /checkout
default: /orders
pubsubname: pubsub
scopes:
- orderprocessing
- checkout
```
Here the subscription called `order`:
- Uses the pub/sub component called `pubsub` to subscribe to the topic called `orders`.
- Sets the `route` field to send all topic messages to the `/checkout` endpoint in the app.
- Sets `scopes` field to scope this subscription for access only by apps with IDs `orderprocessing` and `checkout`.
- Sets the `route` field to send all topic messages to the `/orders` endpoint in the app.
- Sets the `scopes` field to scope this subscription for access only by apps with ID `orderprocessing`.
When running Dapr, set the YAML component file path to point Dapr to the component.
@ -113,7 +112,7 @@ In your application code, subscribe to the topic specified in the Dapr pub/sub c
```csharp
//Subscribe to a topic
[HttpPost("checkout")]
[HttpPost("orders")]
public void getCheckout([FromBody] int orderId)
{
Console.WriteLine("Subscriber received : " + orderId);
@ -128,7 +127,7 @@ public void getCheckout([FromBody] int orderId)
import io.dapr.client.domain.CloudEvent;
//Subscribe to a topic
@PostMapping(path = "/checkout")
@PostMapping(path = "/orders")
public Mono<Void> getCheckout(@RequestBody(required = false) CloudEvent<String> cloudEvent) {
return Mono.fromRunnable(() -> {
try {
@ -146,7 +145,7 @@ public Mono<Void> getCheckout(@RequestBody(required = false) CloudEvent<String>
from cloudevents.sdk.event import v1
#Subscribe to a topic
@app.route('/checkout', methods=['POST'])
@app.route('/orders', methods=['POST'])
def checkout(event: v1.Event) -> None:
data = json.loads(event.Data())
logging.info('Subscriber received: ' + str(data))
@ -163,7 +162,7 @@ const app = express()
app.use(bodyParser.json({ type: 'application/*+json' }));
// listen to the declarative route
app.post('/checkout', (req, res) => {
app.post('/orders', (req, res) => {
console.log(req.body);
res.sendStatus(200);
});
@ -178,7 +177,7 @@ app.post('/checkout', (req, res) => {
var sub = &common.Subscription{
PubsubName: "pubsub",
Topic: "orders",
Route: "/checkout",
Route: "/orders",
}
func eventHandler(ctx context.Context, e *common.TopicEvent) (retry bool, err error) {
@ -191,7 +190,7 @@ func eventHandler(ctx context.Context, e *common.TopicEvent) (retry bool, err er
{{< /tabs >}}
The `/checkout` endpoint matches the `route` defined in the subscriptions and this is where Dapr sends all topic messages to.
The `/orders` endpoint matches the `route` defined in the subscription, and this is where Dapr sends all topic messages.
### Streaming subscriptions
@ -325,7 +324,7 @@ In the example below, you define the values found in the [declarative YAML subsc
```csharp
[Topic("pubsub", "orders")]
[HttpPost("/checkout")]
[HttpPost("/orders")]
public async Task<ActionResult<Order>>Checkout(Order order, [FromServices] DaprClient daprClient)
{
// Logic
@ -337,7 +336,7 @@ or
```csharp
// Dapr subscription in [Topic] routes orders topic to this route
app.MapPost("/checkout", [Topic("pubsub", "orders")] (Order order) => {
app.MapPost("/orders", [Topic("pubsub", "orders")] (Order order) => {
Console.WriteLine("Subscriber received : " + order);
return Results.Ok(order);
});
@ -359,7 +358,7 @@ app.UseEndpoints(endpoints =>
```java
private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
@Topic(name = "checkout", pubsubName = "pubsub")
@Topic(name = "orders", pubsubName = "pubsub")
@PostMapping(path = "/orders")
public Mono<Void> handleMessage(@RequestBody(required = false) CloudEvent<String> cloudEvent) {
return Mono.fromRunnable(() -> {
@ -370,6 +369,7 @@ public Mono<Void> handleMessage(@RequestBody(required = false) CloudEvent<String
throw new RuntimeException(e);
}
});
}
```
{{% /codetab %}}
@ -382,7 +382,7 @@ def subscribe():
subscriptions = [
{
'pubsubname': 'pubsub',
'topic': 'checkout',
'topic': 'orders',
'routes': {
'rules': [
{
@ -418,7 +418,7 @@ app.get('/dapr/subscribe', (req, res) => {
res.json([
{
pubsubname: "pubsub",
topic: "checkout",
topic: "orders",
routes: {
rules: [
{
@ -480,7 +480,7 @@ func configureSubscribeHandler(w http.ResponseWriter, _ *http.Request) {
t := []subscription{
{
PubsubName: "pubsub",
Topic: "checkout",
Topic: "orders",
Routes: routes{
Rules: []rule{
{

View File

@ -10,8 +10,6 @@ State management is one of the most common needs of any new, legacy, monolith, o
In this guide, you'll learn the basics of using the key/value state API to allow an application to save, get, and delete state.
## Example
The code example below _loosely_ describes an application that processes orders with an order processing service which has a Dapr sidecar. The order processing service uses Dapr to store state in a Redis state store.
<img src="/images/building-block-state-management-example.png" width=1000 alt="Diagram showing state management of example service">
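For a quick sense of the flow, here is a minimal Go SDK sketch of saving, getting, and deleting state (assuming the Go SDK client API and a state store component named `statestore`, matching the examples in this guide):

```go
package main

import (
	"context"
	"fmt"
	"log"

	dapr "github.com/dapr/go-sdk/client"
)

func main() {
	client, err := dapr.NewClient()
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
	ctx := context.Background()

	// Save, read back, and delete a key in the configured state store.
	if err := client.SaveState(ctx, "statestore", "order_1", []byte("Cappuccino"), nil); err != nil {
		log.Fatal(err)
	}
	item, err := client.GetState(ctx, "statestore", "order_1", nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("order_1 = %s\n", item.Value)
	if err := client.DeleteState(ctx, "statestore", "order_1", nil); err != nil {
		log.Fatal(err)
	}
}
```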
@ -554,7 +552,7 @@ namespace EventService
string DAPR_STORE_NAME = "statestore";
//Using Dapr SDK to retrieve multiple states
using var client = new DaprClientBuilder().Build();
IReadOnlyList<BulkStateItem> mulitpleStateResult = await client.GetBulkStateAsync(DAPR_STORE_NAME, new List<string> { "order_1", "order_2" }, parallelism: 1);
IReadOnlyList<BulkStateItem> multipleStateResult = await client.GetBulkStateAsync(DAPR_STORE_NAME, new List<string> { "order_1", "order_2" }, parallelism: 1);
}
}
}

View File

@ -135,7 +135,7 @@ Because workflow retry policies are configured in code, the exact developer expe
| --- | --- |
| **Maximum number of attempts** | The maximum number of times to execute the activity or child workflow. |
| **First retry interval** | The amount of time to wait before the first retry. |
| **Backoff coefficient** | The amount of time to wait before each subsequent retry. |
| **Backoff coefficient** | The coefficient used to determine the rate of increase of backoff. For example, a coefficient of 2 doubles the wait between each subsequent retry. |
| **Maximum retry interval** | The maximum amount of time to wait before each subsequent retry. |
| **Retry timeout** | The overall timeout for retries, regardless of any configured max number of attempts. |
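As a rough illustration of how these parameters combine (the names here are illustrative, not an SDK API), the wait before retry `n` is approximately the first retry interval multiplied by the backoff coefficient raised to `n-1`, capped at the maximum retry interval:

```go
package main

import (
	"fmt"
	"math"
	"time"
)

// retryDelay computes the wait before retry attempt n (1-based):
// first * coefficient^(n-1), capped at max (if max > 0).
func retryDelay(first time.Duration, coefficient float64, max time.Duration, n int) time.Duration {
	d := time.Duration(float64(first) * math.Pow(coefficient, float64(n-1)))
	if max > 0 && d > max {
		d = max
	}
	return d
}

func main() {
	// First interval 2s, coefficient 2, max interval 30s:
	// retries wait 2s, 4s, 8s, 16s, 30s (capped), ...
	for n := 1; n <= 5; n++ {
		fmt.Printf("retry %d waits %v\n", n, retryDelay(2*time.Second, 2, 30*time.Second, n))
	}
}
```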

View File

@ -10,6 +10,10 @@ description: Get started with the Dapr Workflow building block
Dapr Workflow is currently in beta. [See known limitations for {{% dapr-latest-version cli="true" %}}]({{< ref "workflow-overview.md#limitations" >}}).
{{% /alert %}}
{{% alert title="Note" color="primary" %}}
Redis is currently used as the state store component for Workflows in the Quickstarts. However, Redis does not support transaction rollbacks and should not be used in production as an actor state store.
{{% /alert %}}
Let's take a look at the Dapr [Workflow building block]({{< ref workflow-overview.md >}}). In this Quickstart, you'll create a simple console application to demonstrate Dapr's workflow programming model and the workflow management APIs.
In this guide, you'll:
@ -1356,4 +1360,4 @@ Join the discussion in our [discord channel](https://discord.com/channels/778680
- Walk through a more in-depth [.NET SDK example workflow](https://github.com/dapr/dotnet-sdk/tree/master/examples/Workflow)
- Learn more about [Workflow as a Dapr building block]({{< ref workflow-overview >}})
{{< button text="Explore Dapr tutorials >>" page="getting-started/tutorials/_index.md" >}}

View File

@ -0,0 +1,122 @@
---
type: docs
title: "How-To: Configure Environment Variables from Secrets for Dapr sidecar"
linkTitle: "Environment Variables from Secrets"
weight: 7500
description: "Inject Environment Variables from Kubernetes Secrets into Dapr sidecar"
---
In special cases, the Dapr sidecar needs an environment variable injected into it. This may be required by a component, a third-party library, or a module that uses environment variables to configure that component or customize its behavior. This can be useful for both production and non-production environments.
## Overview
In Dapr 1.15, the new `dapr.io/env-from-secret` annotation was introduced, [similar to `dapr.io/env`]({{< ref arguments-annotations-overview >}}).
With this annotation, you can inject an environment variable into the Dapr sidecar, with a value from a secret.
### Annotation format
The values of this annotation are formatted like so:
- Single key secret: `<ENV_VAR_NAME>=<SECRET_NAME>`
- Multi key/value secret: `<ENV_VAR_NAME>=<SECRET_NAME>:<SECRET_KEY>`
`<ENV_VAR_NAME>` must follow the `C_IDENTIFIER` format, as captured by the `[A-Za-z_][A-Za-z0-9_]*` regex:
- Must start with a letter or underscore
- The rest of the identifier contains letters, digits, or underscores
The `name` field is required due to a restriction of `secretKeyRef`, so both `name` and `key` must be set. [Learn more from the "env.valueFrom.secretKeyRef.name" section in this Kubernetes documentation.](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#environment-variables)
For single-key secrets, Dapr sets both to the same value.
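For illustration, the same identifier rule can be checked with that regex (a standalone sketch, not Dapr code):

```go
package main

import (
	"fmt"
	"regexp"
)

// envName mirrors the C_IDENTIFIER rule quoted above.
var envName = regexp.MustCompile(`^[A-Za-z_][A-Za-z0-9_]*$`)

func main() {
	for _, name := range []string{"AUTH_TOKEN", "_private", "9lives", "BAD-NAME"} {
		fmt.Printf("%-10s valid: %v\n", name, envName.MatchString(name))
	}
}
```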
## Configuring single key secret environment variable
In the following example, the `dapr.io/env-from-secret` annotation is added to the Deployment.
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: nodeapp
spec:
template:
metadata:
annotations:
dapr.io/enabled: "true"
dapr.io/app-id: "nodeapp"
dapr.io/app-port: "3000"
dapr.io/env-from-secret: "AUTH_TOKEN=auth-headers-secret"
spec:
containers:
- name: node
image: dapriosamples/hello-k8s-node:latest
ports:
- containerPort: 3000
imagePullPolicy: Always
```
The `dapr.io/env-from-secret` annotation with a value of `"AUTH_TOKEN=auth-headers-secret"` is injected as:
```yaml
env:
- name: AUTH_TOKEN
valueFrom:
secretKeyRef:
name: auth-headers-secret
key: auth-headers-secret
```
This requires the secret to have both `name` and `key` fields with the same value, "auth-headers-secret".
**Example secret**
> **Note:** The following example is for demo purposes only. It's not recommended to store secrets in plain text.
```yaml
apiVersion: v1
kind: Secret
metadata:
name: auth-headers-secret
type: Opaque
stringData:
auth-headers-secret: "AUTH=mykey"
```
## Configuring multi-key secret environment variable
In the following example, the `dapr.io/env-from-secret` annotation is added to the Deployment.
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: nodeapp
spec:
template:
metadata:
annotations:
dapr.io/enabled: "true"
dapr.io/app-id: "nodeapp"
dapr.io/app-port: "3000"
dapr.io/env-from-secret: "AUTH_TOKEN=auth-headers-secret:auth-header-value"
spec:
containers:
- name: node
image: dapriosamples/hello-k8s-node:latest
ports:
- containerPort: 3000
imagePullPolicy: Always
```
The `dapr.io/env-from-secret` annotation with a value of `"AUTH_TOKEN=auth-headers-secret:auth-header-value"` is injected as:
```yaml
env:
- name: AUTH_TOKEN
valueFrom:
secretKeyRef:
name: auth-headers-secret
key: auth-header-value
```
**Example secret**
> **Note:** The following example is for demo purposes only. It's not recommended to store secrets in plain text.
```yaml
apiVersion: v1
kind: Secret
metadata:
name: auth-headers-secret
type: Opaque
stringData:
auth-header-value: "AUTH=mykey"
```

View File

@ -4,10 +4,15 @@ title: "How-To: Limit the secrets that can be read from secret stores"
linkTitle: "Limit secret store access"
weight: 3000
description: "Define secret scopes by augmenting the existing configuration resource with restrictive permissions."
description: "Define secret scopes by augmenting the existing configuration resource with restrictive permissions."
---
In addition to [scoping which applications can access a given component]({{< ref "component-scopes.md">}}), you can also scope a named secret store component to one or more secrets for an application. By defining `allowedSecrets` and/or `deniedSecrets` lists, you restrict applications to access only specific secrets.
For more information about configuring a Configuration resource:
- [Configuration overview]({{< ref configuration-overview.md >}})
- [Configuration schema]({{< ref configuration-schema.md >}})
@ -40,7 +45,7 @@ When an `allowedSecrets` list is present with at least one element, only those s
## Permission priority
The `allowedSecrets` and `deniedSecrets` list values take priorty over the `defaultAccess`. See how this works in the following example scenarios:
The `allowedSecrets` and `deniedSecrets` list values take priority over the `defaultAccess`. See how this works in the following example scenarios:
| | Scenarios | `defaultAccess` | `allowedSecrets` | `deniedSecrets` | `permission`
|--| ----- | ------- | -----------| ----------| ------------
@ -55,8 +60,10 @@ The `allowedSecrets` and `deniedSecrets` list values take priorty over the `defa
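The documented rules can be sketched in Go as follows (illustrative only, not Dapr's actual implementation):

```go
package main

import "fmt"

// isSecretAllowed mirrors the documented priority: a non-empty allowedSecrets
// list alone decides; otherwise an explicit deny wins over defaultAccess.
func isSecretAllowed(defaultAccess string, allowed, denied []string, key string) bool {
	contains := func(list []string, k string) bool {
		for _, v := range list {
			if v == k {
				return true
			}
		}
		return false
	}
	if len(allowed) > 0 {
		return contains(allowed, key)
	}
	if contains(denied, key) {
		return false
	}
	return defaultAccess != "deny"
}

func main() {
	fmt.Println(isSecretAllowed("deny", []string{"secret1"}, nil, "secret1"))   // true: allowlisted
	fmt.Println(isSecretAllowed("allow", nil, []string{"secret1"}, "secret1")) // false: denied
	fmt.Println(isSecretAllowed("deny", nil, nil, "secret1"))                  // false: default deny
}
```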
### Scenario 1: Deny access to all secrets for a secret store
In a Kubernetes cluster, the native Kubernetes secret store is added to your Dapr application by default. In some scenarios, it may be necessary to deny access to Dapr secrets for a given application. To add this configuration:
1. Define the following `appconfig.yaml`.
```yaml
@ -70,6 +77,17 @@ In a Kubernetes cluster, the native Kubernetes secret store is added to your Dap
- storeName: kubernetes
defaultAccess: deny
```
```yaml
apiVersion: dapr.io/v1alpha1
kind: Configuration
metadata:
name: appconfig
spec:
secrets:
scopes:
- storeName: kubernetes
defaultAccess: deny
```
1. Apply it to the Kubernetes cluster using the following command:
@ -77,6 +95,13 @@ In a Kubernetes cluster, the native Kubernetes secret store is added to your Dap
```bash
kubectl apply -f appconfig.yaml
```
For applications that need to be denied access to the Kubernetes secret store, follow [the Kubernetes instructions]({{< ref kubernetes-overview >}}), adding the following annotation to the application pod.
```yaml
@ -85,6 +110,7 @@ dapr.io/config: appconfig
With this defined, the application no longer has access to the Kubernetes secret store.
### Scenario 2: Allow access to only certain secrets in a secret store
To allow a Dapr application to have access to only certain secrets, define the following `config.yaml`:
@ -102,6 +128,7 @@ spec:
allowedSecrets: ["secret1", "secret2"]
```
This example defines the configuration for a secret store named `vault`. The default access to the secret store is `deny`. Meanwhile, some secrets are accessible by the application based on the `allowedSecrets` list. Follow [the Sidecar configuration instructions]({{< ref "configuration-overview.md#sidecar-configuration" >}}) to apply configuration to the sidecar.
### Scenario 3: Deny access to certain sensitive secrets in a secret store
@ -126,3 +153,8 @@ This configuration explicitly denies access to `secret1` and `secret2` from the
This configuration explicitly denies access to `secret1` and `secret2` from the secret store named `vault`, while allowing access to all other secrets. Follow [the Sidecar configuration instructions]({{< ref "configuration-overview.md#sidecar-configuration" >}}) to apply configuration to the sidecar.
## Next steps
{{< button text="Service invocation access control" page="invoke-allowlist" >}}

View File

@ -260,6 +260,22 @@ Verify your production-ready deployment includes the following settings:
1. Dapr supports and is enabled to **scope components for certain applications**. This is not a required practice. [Learn more about component scopes]({{< ref "component-scopes.md" >}}).
## Recommended Placement service configuration
The [Placement service]({{< ref "placement.md" >}}) is a component in Dapr responsible for disseminating information about actor addresses to all Dapr sidecars via a placement table (more information can be found in the [actor placement service docs]({{< ref "actors-features-concepts.md#actor-placement-service" >}})).
When running in production, it's recommended to configure the Placement service with the following values:
1. **High availability**. Ensure the Placement service is highly available (three replicas) and can survive individual node failures. Helm chart value: `dapr_placement.ha=true`
2. **In-memory logs**. Use in-memory Raft log store for faster writes. The tradeoff is more placement table disseminations (and thus, network traffic) in an eventual Placement service pod failure. Helm chart value: `dapr_placement.cluster.forceInMemoryLog=true`
3. **No metadata endpoint**. Disable the unauthenticated `/placement/state` endpoint which exposes placement table information for the Placement service. Helm chart value: `dapr_placement.metadataEnabled=false`
4. **Timeouts**. Control the sensitivity of network connectivity between the Placement service and the sidecars using the timeout values below. Default values are set, but you can adjust these based on your network conditions.
1. `dapr_placement.keepAliveTime` sets the interval at which the Placement service sends [keep alive](https://grpc.io/docs/guides/keepalive/) pings to Dapr sidecars on the gRPC stream to check if the connection is still alive. Lower values will lead to shorter actor rebalancing time in case of pod loss/restart, but higher network traffic during normal operation. Accepts values between `1s` and `10s`. Default is `2s`.
2. `dapr_placement.keepAliveTimeout` sets the timeout period for Dapr sidecars to respond to the Placement service's [keep alive](https://grpc.io/docs/guides/keepalive/) pings before the Placement service closes the connection. Lower values will lead to shorter actor rebalancing time in case of pod loss/restart, but higher network traffic during normal operation. Accepts values between `1s` and `10s`. Default is `3s`.
3. `dapr_placement.disseminateTimeout` sets the timeout period for dissemination to be delayed after actor membership change (usually related to pod restarts) to avoid excessive dissemination during multiple pod restarts. Higher values will reduce the frequency of dissemination, but delay the table dissemination. Accepts values between `1s` and `3s`. Default is `2s`.
## Service account tokens
By default, Kubernetes mounts a volume containing a [Service Account token](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) in each container. Applications can use this token, whose permissions vary depending on the configuration of the cluster and namespace, among other things, to perform API calls against the Kubernetes control plane.

View File

@ -24,7 +24,7 @@ A supported release means:
From the 1.8.0 release onwards, three (3) versions of Dapr are supported; the current and previous two (2) versions. Typically these are `MINOR` release updates. This means that there is a rolling window that moves forward for supported releases and it is your operational responsibility to remain up to date with these supported versions. If you have an older version of Dapr you may have to do intermediate upgrades to get to a supported version.
There will be at least 6 weeks between major.minor version releases giving users a 12 week (3 month) rolling window for upgrading.
There will be at least 13 weeks (3 months) between major.minor version releases, giving users at least a 9 month rolling window for upgrading from a non-supported version (three supported releases × 13 weeks ≈ 39 weeks). For more details on the release process, read [release cycle and cadence](https://github.com/dapr/community/blob/master/release-process.md).
Patch support is for supported versions (current and previous).

View File

@ -16,15 +16,17 @@ This table is meant to help users understand the equivalent options for running
| `--app-id` | `--app-id` | `-i` | `dapr.io/app-id` | The unique ID of the application. Used for service discovery, state encapsulation and the pub/sub consumer ID |
| `--app-port` | `--app-port` | `-p` | `dapr.io/app-port` | This parameter tells Dapr which port your application is listening on |
| `--components-path` | `--components-path` | `-d` | not supported | **Deprecated** in favor of `--resources-path` |
| `--resources-path` | `--resources-path` | `-d` | not supported | Path for components directory. If empty, components will not be loaded. |
| `--resources-path` | `--resources-path` | `-d` | not supported | Path for components directory. If empty, components will not be loaded |
| `--config` | `--config` | `-c` | `dapr.io/config` | Tells Dapr which Configuration resource to use |
| `--control-plane-address` | not supported | | not supported | Address for a Dapr control plane |
| `--dapr-grpc-port` | `--dapr-grpc-port` | | not supported | gRPC port for the Dapr API to listen on (default "50001") |
| `--dapr-http-port` | `--dapr-http-port` | | not supported | The HTTP port for the Dapr API |
| `--dapr-http-max-request-size` | --dapr-http-max-request-size | | `dapr.io/http-max-request-size` | Increasing max size of request body http and grpc servers parameter in MB to handle uploading of big files. Default is `4` MB |
| `--dapr-http-read-buffer-size` | --dapr-http-read-buffer-size | | `dapr.io/http-read-buffer-size` | Increasing max size of http header read buffer in KB to handle when sending multi-KB headers. The default 4 KB. When sending bigger than default 4KB http headers, you should set this to a larger value, for example 16 (for 16KB) |
| `--dapr-grpc-port` | `--dapr-grpc-port` | | `dapr.io/grpc-port` | Sets the Dapr API gRPC port (default `50001`); all cluster services must use the same port for communication |
| `--dapr-http-port` | `--dapr-http-port` | | not supported | HTTP port for the Dapr API to listen on (default `3500`) |
| `--dapr-http-max-request-size` | `--dapr-http-max-request-size` | | `dapr.io/http-max-request-size` | **Deprecated** in favor of `--max-body-size`. Increases the request max body size to handle large file uploads using the http and grpc protocols. Default is `4` MB |
| `--max-body-size` | not supported | | `dapr.io/max-body-size` | Increases the request max body size to handle large file uploads using the http and grpc protocols. Set the value using size units (e.g., `16Mi` for 16MB). The default is `4Mi` |
| `--dapr-http-read-buffer-size` | `--dapr-http-read-buffer-size` | | `dapr.io/http-read-buffer-size` | **Deprecated** in favor of `--read-buffer-size`. Increases the max size of the http header read buffer in KB to support larger header values, for example `16` to support headers up to 16KB. Default is `16` for 16KB |
| `--read-buffer-size` | not supported | | `dapr.io/read-buffer-size` | Increases the max size of the http header read buffer in KB to support larger header values. Set the value using size units, for example `32Ki` to support headers up to 32KB. Default is `4` for 4KB |
| not supported | `--image` | | `dapr.io/sidecar-image` | Dapr sidecar image. Default is `daprio/daprd:latest`. The Dapr sidecar uses this image instead of the latest default image. Use this when building your own custom image of Dapr and/or [using an alternative stable Dapr image]({{< ref "support-release-policy.md#build-variations" >}}) |
| `--internal-grpc-port` | not supported | | not supported | gRPC port for the Dapr Internal API to listen on |
| `--internal-grpc-port` | not supported | | `dapr.io/internal-grpc-port` | Sets the internal Dapr gRPC port (default `50002`); all cluster services must use the same port for communication |
| `--enable-metrics` | not supported | | configuration spec | Enable [prometheus metric]({{< ref prometheus >}}) (default true) |
| `--enable-mtls` | not supported | | configuration spec | Enables automatic mTLS for daprd to daprd communication channels |
| `--enable-profiling` | `--enable-profiling` | | `dapr.io/enable-profiling` | [Enable profiling]({{< ref profiling-debugging >}}) |
@ -67,6 +69,7 @@ This table is meant to help users understand the equivalent options for running
| not supported | not supported | | `dapr.io/sidecar-readiness-probe-period-seconds` | How often (in seconds) to perform the sidecar readiness probe. Read more [here](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes). Default is `6`|
| not supported | not supported | | `dapr.io/sidecar-readiness-probe-threshold` | When the sidecar readiness probe fails, Kubernetes will try N times before giving up. In this case, the Pod will be marked Unready. Read more about `failureThreshold` [here](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes). Default is `3`|
| not supported | not supported | | `dapr.io/env` | List of environment variables to be injected into the sidecar. Strings consisting of key=value pairs separated by a comma.|
| not supported | not supported | | `dapr.io/env-from-secret` | List of environment variables to be injected into the sidecar from secrets. Strings consisting of `"key=secret-name:secret-key"` pairs separated by a comma. |
| not supported | not supported | | `dapr.io/volume-mounts` | List of [pod volumes to be mounted to the sidecar container]({{< ref "kubernetes-volume-mounts" >}}) in read-only mode. Strings consisting of `volume:path` pairs separated by a comma. Example, `"volume-1:/tmp/mount1,volume-2:/home/root/mount2"`. |
| not supported | not supported | | `dapr.io/volume-mounts-rw` | List of [pod volumes to be mounted to the sidecar container]({{< ref "kubernetes-volume-mounts" >}}) in read-write mode. Strings consisting of `volume:path` pairs separated by a comma. Example, `"volume-1:/tmp/mount1,volume-2:/home/root/mount2"`. |
| `--disable-builtin-k8s-secret-store` | not supported | | `dapr.io/disable-builtin-k8s-secret-store` | Disables BuiltIn Kubernetes secret store. Default value is false. See [Kubernetes secret store component]({{< ref "kubernetes-secret-store.md" >}}) for details. |

View File

@ -63,6 +63,10 @@ This component supports **output binding** with the following operations:
- `delete` : [Delete blob](#delete-blob)
- `list`: [List blobs](#list-blobs)
The Blob storage component's **input binding** triggers and pushes events using [Azure Event Grid]({{< ref eventgrid.md >}}).
Refer to the [Reacting to Blob storage events](https://learn.microsoft.com/azure/storage/blobs/storage-blob-event-overview) guide for setup instructions and more information.
### Create blob
To perform a create blob operation, invoke the Azure Blob Storage binding with a `POST` method and the following JSON body:

View File

@ -90,6 +90,21 @@ This component supports **output binding** with the following operations:
- `create`: publishes a message on the Event Grid topic
## Receiving events
You can use the Event Grid binding to receive events from a variety of sources and actions. [Learn more about all of the available event sources and handlers that work with Event Grid.](https://learn.microsoft.com/azure/event-grid/overview)
In the following table, you can find the list of Dapr components that can raise events.
| Event sources | Dapr components |
| ------------- | --------------- |
| [Azure Blob Storage](https://learn.microsoft.com/azure/storage/blobs/) | [Azure Blob Storage binding]({{< ref blobstorage.md >}}) <br/>[Azure Blob Storage state store]({{< ref setup-azure-blobstorage.md >}}) |
| [Azure Cache for Redis](https://learn.microsoft.com/azure/azure-cache-for-redis/cache-overview) | [Redis binding]({{< ref redis.md >}}) <br/>[Redis pub/sub]({{< ref setup-redis-pubsub.md >}}) |
| [Azure Event Hubs](https://learn.microsoft.com/azure/event-hubs/event-hubs-about) | [Azure Event Hubs pub/sub]({{< ref setup-azure-eventhubs.md >}}) <br/>[Azure Event Hubs binding]({{< ref eventhubs.md >}}) |
| [Azure IoT Hub](https://learn.microsoft.com/azure/iot-hub/iot-concepts-and-iot-hub) | [Azure Event Hubs pub/sub]({{< ref setup-azure-eventhubs.md >}}) <br/>[Azure Event Hubs binding]({{< ref eventhubs.md >}}) |
| [Azure Service Bus](https://learn.microsoft.com/azure/service-bus-messaging/service-bus-messaging-overview) | [Azure Service Bus binding]({{< ref servicebusqueues.md >}}) <br/>[Azure Service Bus pub/sub topics]({{< ref setup-azure-servicebus-topics.md >}}) and [queues]({{< ref setup-azure-servicebus-queues.md >}}) |
| [Azure SignalR Service](https://learn.microsoft.com/azure/azure-signalr/signalr-overview) | [SignalR binding]({{< ref signalr.md >}}) |
## Microsoft Entra ID credentials
The Azure Event Grid binding requires a Microsoft Entra ID application and service principal for two reasons:
@ -142,7 +157,7 @@ Connect-MgGraph -Scopes "Application.Read.All","Application.ReadWrite.All"
> Note: if your directory does not have a Service Principal for the application "Microsoft.EventGrid", you may need to run the command `Connect-MgGraph` and sign in as an admin for the Microsoft Entra ID tenant (this is related to permissions on the Microsoft Entra ID directory, and not the Azure subscription). Otherwise, please ask your tenant's admin to sign in and run this PowerShell command: `New-MgServicePrincipal -AppId "4962773b-9cdb-44cf-a8bf-237846a00ab7"` (the UUID is a constant)
### Testing locally
## Testing locally
- Install [ngrok](https://ngrok.com/download)
- Run locally using a custom port, for example `9000`, for handshakes
@ -160,7 +175,7 @@ ngrok http --host-header=localhost 9000
dapr run --app-id dotnetwebapi --app-port 5000 --dapr-http-port 3500 dotnet run
```
### Testing on Kubernetes
## Testing on Kubernetes
Azure Event Grid requires a valid HTTPS endpoint for custom webhooks; self-signed certificates aren't accepted. In order to enable traffic from the public internet to your app's Dapr sidecar you need an ingress controller enabled with Dapr. There's a good article on this topic: [Kubernetes NGINX ingress controller with Dapr](https://carlos.mendible.com/2020/04/05/kubernetes-nginx-ingress-controller-with-dapr/).

View File

@ -0,0 +1,231 @@
---
type: docs
title: "SFTP binding spec"
linkTitle: "SFTP"
description: "Detailed documentation on the Secure File Transfer Protocol (SFTP) binding component"
aliases:
- "/operations/components/setup-bindings/supported-bindings/sftp/"
---
## Component format
To set up the SFTP binding, create a component of type `bindings.sftp`. See [this guide]({{< ref bindings-overview.md >}}) on how to create and apply a binding configuration.
```yaml
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: <NAME>
spec:
type: bindings.sftp
version: v1
metadata:
- name: rootPath
value: "<string>"
- name: address
value: "<string>"
- name: username
value: "<string>"
- name: password
value: "*****************"
- name: privateKey
value: "*****************"
- name: privateKeyPassphrase
value: "*****************"
- name: hostPublicKey
value: "*****************"
- name: knownHostsFile
value: "<string>"
- name: insecureIgnoreHostKey
value: "<bool>"
```
## Spec metadata fields
| Field | Required | Binding support | Details | Example |
|--------------------|:--------:|------------|-----|---------|
| `rootPath` | Y | Output | Root path for default working directory | `"/path"` |
| `address` | Y | Output | Address of SFTP server | `"localhost:22"` |
| `username` | Y | Output | Username for authentication | `"username"` |
| `password` | N | Output | Password for username/password authentication | `"password"` |
| `privateKey` | N | Output | Private key for public key authentication | <pre>"\|-<br>-----BEGIN OPENSSH PRIVATE KEY-----<br>*****************<br>-----END OPENSSH PRIVATE KEY-----"</pre> |
| `privateKeyPassphrase` | N | Output | Private key passphrase for public key authentication | `"passphrase"` |
| `hostPublicKey` | N | Output | Host public key for host validation | `"ecdsa-sha2-nistp256 *** root@openssh-server"` |
| `knownHostsFile` | N | Output | Known hosts file for host validation | `"/path/file"` |
| `insecureIgnoreHostKey` | N | Output | Allows skipping host validation. Defaults to `"false"` | `"true"`, `"false"` |
## Binding support
This component supports **output binding** with the following operations:
- `create` : [Create file](#create-file)
- `get` : [Get file](#get-file)
- `list` : [List files](#list-files)
- `delete` : [Delete file](#delete-file)
### Create file
To perform a create file operation, invoke the SFTP binding with a `POST` method and the following JSON body:
```json
{
"operation": "create",
"data": "<YOUR_BASE_64_CONTENT>",
"metadata": {
"fileName": "<filename>",
}
}
```
#### Example
{{< tabs Windows Linux >}}
{{% codetab %}}
```bash
curl -d "{ \"operation\": \"create\", \"data\": \"YOUR_BASE_64_CONTENT\", \"metadata\": { \"fileName\": \"my-test-file.jpg\" } }" http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
```
{{% /codetab %}}
{{% codetab %}}
```bash
curl -d '{ "operation": "create", "data": "YOUR_BASE_64_CONTENT", "metadata": { "fileName": "my-test-file.jpg" } }' \
http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
```
{{% /codetab %}}
{{< /tabs >}}
#### Response
The response body contains the following JSON:
```json
{
"fileName": "<filename>"
}
```
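Equivalently, here is a hedged Go SDK sketch of the create operation. The component name `sftp` and the base64-encoded data follow the HTTP example above; the `InvokeBinding` call is assumed from the Go SDK client:

```go
package main

import (
	"context"
	"encoding/base64"
	"log"

	dapr "github.com/dapr/go-sdk/client"
)

func main() {
	client, err := dapr.NewClient()
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// As in the HTTP example, the file content is base64-encoded.
	content := base64.StdEncoding.EncodeToString([]byte("hello sftp"))

	resp, err := client.InvokeBinding(context.Background(), &dapr.InvokeBindingRequest{
		Name:      "sftp", // assumed component name
		Operation: "create",
		Data:      []byte(content),
		Metadata:  map[string]string{"fileName": "my-test-file.txt"},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("created: %s", resp.Data)
}
```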
### Get file
To perform a get file operation, invoke the SFTP binding with a `POST` method and the following JSON body:
```json
{
"operation": "get",
"metadata": {
"fileName": "<filename>"
}
}
```
#### Example
{{< tabs Windows Linux >}}
{{% codetab %}}
```bash
curl -d '{ \"operation\": \"get\", \"metadata\": { \"fileName\": \"filename\" }}' http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
```
{{% /codetab %}}
{{% codetab %}}
```bash
curl -d '{ "operation": "get", "metadata": { "fileName": "filename" }}' \
http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
```
{{% /codetab %}}
{{< /tabs >}}
#### Response
The response body contains the value stored in the file.
### List files
To perform a list files operation, invoke the SFTP binding with a `POST` method and the following JSON body:
```json
{
"operation": "list"
}
```
If you only want to list the files beneath a particular directory below the `rootPath`, specify the relative directory name as the `fileName` in the metadata.
```json
{
"operation": "list",
"metadata": {
"fileName": "my/cool/directory"
}
}
```
#### Example
{{< tabs Windows Linux >}}
{{% codetab %}}
```bash
curl -d '{ \"operation\": \"list\", \"metadata\": { \"fileName\": \"my/cool/directory\" }}' http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
```
{{% /codetab %}}
{{% codetab %}}
```bash
curl -d '{ "operation": "list", "metadata": { "fileName": "my/cool/directory" }}' \
http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
```
{{% /codetab %}}
{{< /tabs >}}
#### Response
The response is a JSON array of file names.
### Delete file
To perform a delete file operation, invoke the SFTP binding with a `POST` method and the following JSON body:
```json
{
"operation": "delete",
"metadata": {
"fileName": "myfile"
}
}
```
#### Example
{{< tabs Windows Linux >}}
{{% codetab %}}
```bash
curl -d '{ \"operation\": \"delete\", \"metadata\": { \"fileName\": \"myfile\" }}' http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
```
{{% /codetab %}}
{{% codetab %}}
```bash
curl -d '{ "operation": "delete", "metadata": { "fileName": "myfile" }}' \
http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
```
{{% /codetab %}}
{{< /tabs >}}
#### Response
An HTTP 204 (No Content) response with an empty body is returned if successful.
## Related links
- [Basic schema for a Dapr component]({{< ref component-schema >}})
- [Bindings building block]({{< ref bindings >}})
- [How-To: Use bindings to interface with external resources]({{< ref howto-bindings.md >}})
- [Bindings API reference]({{< ref bindings_api.md >}})

View File

@ -53,6 +53,12 @@ spec:
value: 2.0.0
- name: disableTls # Optional. Disable TLS. This is not safe for production!! You should read the `Mutual TLS` section for how to use TLS.
value: "true"
- name: consumerFetchMin # Optional. Advanced setting. The minimum number of message bytes to fetch in a request - the broker will wait until at least this many are available.
value: 1
- name: consumerFetchDefault # Optional. Advanced setting. The default number of message bytes to fetch from the broker in each request.
value: 2097152
- name: channelBufferSize # Optional. Advanced setting. The number of events to buffer in internal and external channels.
value: 512
- name: schemaRegistryURL # Optional. When using Schema Registry Avro serialization/deserialization. The Schema Registry URL.
value: http://localhost:8081
- name: schemaRegistryAPIKey # Optional. When using Schema Registry Avro serialization/deserialization. The Schema Registry API Key.
@ -111,7 +117,9 @@ spec:
| schemaLatestVersionCacheTTL | N | When using Schema Registry Avro serialization/deserialization. The TTL for schema caching when publishing a message with latest schema available. Default is 5 min | `5m` |
| clientConnectionTopicMetadataRefreshInterval | N | The interval for the client connection's topic metadata to be refreshed with the broker as a Go duration. Defaults to `9m`. | `"4m"` |
| clientConnectionKeepAliveInterval | N | The maximum time for the client connection to be kept alive with the broker, as a Go duration, before closing the connection. A zero value (default) means keeping alive indefinitely. | `"4m"` |
| consumerFetchMin | N | The minimum number of message bytes to fetch in a request - the broker will wait until at least this many are available. The default is `1`, as `0` causes the consumer to spin when no messages are available. Equivalent to the JVM's `fetch.min.bytes`. | `"2"` |
| consumerFetchDefault | N | The default number of message bytes to fetch from the broker in each request. Default is `"1048576"` bytes. | `"2097152"` |
| channelBufferSize | N | The number of events to buffer in internal and external channels. This permits the producer and consumer to continue processing some messages in the background while user code is working, greatly improving throughput. Defaults to `256`. | `"512"` |
| heartbeatInterval | N | The interval between heartbeats to the consumer coordinator. At most, the value should be set to a 1/3 of the `sessionTimeout` value. Defaults to "3s". | `"5s"` |
| sessionTimeout | N | The timeout used to detect client failures when using Kafka's group management facility. If the broker fails to receive any heartbeats from the consumer before the expiration of this session timeout, then the consumer is removed and initiates a rebalance. Defaults to "10s". | `"20s"` |
| escapeHeaders | N | Enables URL escaping of the message header values received by the consumer. Allows receiving content with special characters that are usually not allowed in HTTP headers. Default is `false`. | `true` |

View File

@ -134,6 +134,14 @@
features:
input: true
output: false
- component: SFTP
link: sftp
state: Alpha
version: v1
since: "1.15"
features:
input: false
output: true
- component: SMTP
link: smtp
state: Alpha

View File

@ -1,8 +1,8 @@
- component: AWS Secrets Manager
link: aws-secret-manager
state: Alpha
state: Beta
version: v1
since: "1.0"
since: "1.15"
- component: AWS SSM Parameter Store
link: aws-parameter-store
state: Alpha

Binary file not shown.

Before

Width:  |  Height:  |  Size: 131 KiB

After

Width:  |  Height:  |  Size: 107 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 152 KiB

After

Width:  |  Height:  |  Size: 160 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 117 KiB

After

Width:  |  Height:  |  Size: 78 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 215 KiB

After

Width:  |  Height:  |  Size: 108 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 201 KiB

After

Width:  |  Height:  |  Size: 176 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 80 KiB

After

Width:  |  Height:  |  Size: 36 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 258 KiB

After

Width:  |  Height:  |  Size: 84 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 221 KiB

After

Width:  |  Height:  |  Size: 150 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 274 KiB

After

Width:  |  Height:  |  Size: 202 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 136 KiB

After

Width:  |  Height:  |  Size: 163 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 71 KiB

After

Width:  |  Height:  |  Size: 66 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 123 KiB

After

Width:  |  Height:  |  Size: 77 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 228 KiB

After

Width:  |  Height:  |  Size: 185 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 183 KiB

After

Width:  |  Height:  |  Size: 122 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 137 KiB

After

Width:  |  Height:  |  Size: 64 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 348 KiB

After

Width:  |  Height:  |  Size: 222 KiB

@ -1 +1 @@
Subproject commit b8e276728935c66b0a335b5aa2ca4102c560dd3d
Subproject commit 03038fa519670b583eabcef1417eacd55c3e44c8

@ -1 +1 @@
Subproject commit 7c03c7ce58d100a559ac1881bc0c80d6dedc5ab9
Subproject commit dd9a2d5a3c4481b8a6bda032df8f44f5eaedb370

@ -1 +1 @@
Subproject commit a98327e7d9a81611b0d7e91e59ea23ad48271948
Subproject commit 0b7a051b79c7a394e9bd4f57bd40778fb5f29897

@ -1 +1 @@
Subproject commit 7350742b6869cc166633d1f4d17d76fbdbb12921
Subproject commit 76866c878a6e79bb889c83f3930172ddb20f1624

@ -1 +1 @@
Subproject commit 64a4f2f6658e9023e8ea080eefdb019645cae802
Subproject commit 6e90e84b166ac7ea603b78894e9e1b92dc456014