Merge branch 'v1.2' into patch-2

Donovan Brown 2021-04-09 07:35:36 -05:00 committed by GitHub
commit e4fa7c82aa
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
32 changed files with 644 additions and 155 deletions


@ -1,52 +0,0 @@
name: Azure Static Web Apps CI/CD
on:
push:
branches:
- v1.0
pull_request:
types: [opened, synchronize, reopened, closed]
branches:
- v1.0
jobs:
build_and_deploy_job:
if: github.event_name == 'push' || (github.event_name == 'pull_request' && github.event.action != 'closed')
runs-on: ubuntu-latest
name: Build and Deploy Job
steps:
- uses: actions/checkout@v2
with:
submodules: recursive
- name: Setup Docsy
run: cd daprdocs && git submodule update --init --recursive && sudo npm install -D --save autoprefixer && sudo npm install -D --save postcss-cli
- name: Build And Deploy
id: builddeploy
uses: Azure/static-web-apps-deploy@v0.0.1-preview
env:
HUGO_ENV: production
HUGO_VERSION: "0.74.3"
with:
azure_static_web_apps_api_token: ${{ secrets.AZURE_STATIC_WEB_APPS_API_TOKEN_BLACK_WATER_03A7CE11E }}
repo_token: ${{ secrets.GITHUB_TOKEN }} # Used for Github integrations (i.e. PR comments)
skip_deploy_on_missing_secrets: true
action: "upload"
###### Repository/Build Configurations - These values can be configured to match you app requirements. ######
app_location: "daprdocs" # App source code path
api_location: "api" # Api source code path - optional
app_artifact_location: 'public' # Built app content directory - optional
app_build_command: "hugo"
###### End of Repository/Build Configurations ######
close_pull_request_job:
if: github.event_name == 'pull_request' && github.event.action == 'closed'
runs-on: ubuntu-latest
name: Close Pull Request Job
steps:
- name: Close Pull Request
id: closepullrequest
uses: Azure/static-web-apps-deploy@v0.0.1-preview
with:
azure_static_web_apps_api_token: ${{ secrets.AZURE_STATIC_WEB_APPS_API_TOKEN_BLACK_WATER_03A7CE11E }}
skip_deploy_on_missing_secrets: true
action: "close"


@ -1,52 +0,0 @@
name: Azure Static Web Apps CI/CD
on:
push:
branches:
- v1.0-rc3
pull_request:
types: [opened, synchronize, reopened, closed]
branches:
- v1.0-rc3
jobs:
build_and_deploy_job:
if: github.event_name == 'push' || (github.event_name == 'pull_request' && github.event.action != 'closed')
runs-on: ubuntu-latest
name: Build and Deploy Job
steps:
- uses: actions/checkout@v2
with:
submodules: recursive
- name: Setup Docsy
run: cd daprdocs && git submodule update --init --recursive && sudo npm install -D --save autoprefixer && sudo npm install -D --save postcss-cli
- name: Build And Deploy
id: builddeploy
uses: Azure/static-web-apps-deploy@v0.0.1-preview
env:
HUGO_ENV: production
HUGO_VERSION: "0.74.3"
with:
azure_static_web_apps_api_token: ${{ secrets.AZURE_STATIC_WEB_APPS_API_TOKEN_KIND_POND_0F48CBE1E }}
repo_token: ${{ secrets.GITHUB_TOKEN }} # Used for Github integrations (i.e. PR comments)
skip_deploy_on_missing_secrets: true
action: "upload"
###### Repository/Build Configurations - These values can be configured to match you app requirements. ######
app_location: "daprdocs" # App source code path
api_location: "api" # Api source code path - optional
app_artifact_location: 'public' # Built app content directory - optional
app_build_command: "hugo"
###### End of Repository/Build Configurations ######
close_pull_request_job:
if: github.event_name == 'pull_request' && github.event.action == 'closed'
runs-on: ubuntu-latest
name: Close Pull Request Job
steps:
- name: Close Pull Request
id: closepullrequest
uses: Azure/static-web-apps-deploy@v0.0.1-preview
with:
azure_static_web_apps_api_token: ${{ secrets.AZURE_STATIC_WEB_APPS_API_TOKEN_KIND_POND_0F48CBE1E }}
skip_deploy_on_missing_secrets: true
action: "close"


@ -3,11 +3,11 @@ name: Azure Static Web Apps CI/CD
on:
push:
branches:
- v1.0-rc2
- v1.2
pull_request:
types: [opened, synchronize, reopened, closed]
branches:
- v1.0-rc2
- v1.2
jobs:
build_and_deploy_job:
@ -27,15 +27,15 @@ jobs:
HUGO_ENV: production
HUGO_VERSION: "0.74.3"
with:
azure_static_web_apps_api_token: ${{ secrets.AZURE_STATIC_WEB_APPS_API_TOKEN_POLITE_BUSH_0F42B0A1E }}
azure_static_web_apps_api_token: ${{ secrets.AZURE_STATIC_WEB_APPS_API_TOKEN_WONDERFUL_ISLAND_07C05FD1E }}
repo_token: ${{ secrets.GITHUB_TOKEN }} # Used for Github integrations (i.e. PR comments)
skip_deploy_on_missing_secrets: true
action: "upload"
###### Repository/Build Configurations - These values can be configured to match you app requirements. ######
###### Repository/Build Configurations - These values can be configured to match your app requirements. ######
# For more information regarding Static Web App workflow configurations, please visit: https://aka.ms/swaworkflowconfig
app_location: "daprdocs" # App source code path
app_location: "/daprdocs" # App source code path
api_location: "api" # Api source code path - optional
app_artifact_location: 'public' # Built app content directory - optional
output_location: "public" # Built app content directory - optional
app_build_command: "hugo"
###### End of Repository/Build Configurations ######
@ -48,7 +48,6 @@ jobs:
id: closepullrequest
uses: Azure/static-web-apps-deploy@v0.0.1-preview
with:
azure_static_web_apps_api_token: ${{ secrets.AZURE_STATIC_WEB_APPS_API_TOKEN_POLITE_BUSH_0F42B0A1E }}
azure_static_web_apps_api_token: ${{ secrets.AZURE_STATIC_WEB_APPS_API_TOKEN_WONDERFUL_ISLAND_07C05FD1E }}
skip_deploy_on_missing_secrets: true
action: "close"


@ -3,11 +3,11 @@ name: Azure Static Web Apps CI/CD
on:
push:
branches:
- v0.11
- v1.1
pull_request:
types: [opened, synchronize, reopened, closed]
branches:
- v0.11
- v1.1
jobs:
build_and_deploy_job:
@ -27,15 +27,15 @@ jobs:
HUGO_ENV: production
HUGO_VERSION: "0.74.3"
with:
azure_static_web_apps_api_token: ${{ secrets.AZURE_STATIC_WEB_APPS_API_TOKEN_GREEN_HILL_0D7377310 }}
azure_static_web_apps_api_token: ${{ secrets.AZURE_STATIC_WEB_APPS_API_TOKEN_YELLOW_RIVER_084FE4E1E }}
repo_token: ${{ secrets.GITHUB_TOKEN }} # Used for Github integrations (i.e. PR comments)
skip_deploy_on_missing_secrets: true
action: "upload"
###### Repository/Build Configurations - These values can be configured to match you app requirements. ######
# For more information regarding Static Web App workflow configurations, please visit: https://aka.ms/swaworkflowconfig
app_location: "daprdocs" # App source code path
app_location: "/daprdocs" # App source code path
api_location: "api" # Api source code path - optional
app_artifact_location: 'public' # Built app content directory - optional
output_location: "public" # Built app content directory - optional
app_build_command: "hugo"
###### End of Repository/Build Configurations ######
@ -48,6 +48,6 @@ jobs:
id: closepullrequest
uses: Azure/static-web-apps-deploy@v0.0.1-preview
with:
azure_static_web_apps_api_token: ${{ secrets.AZURE_STATIC_WEB_APPS_API_TOKEN_GREEN_HILL_0D7377310 }}
azure_static_web_apps_api_token: ${{ secrets.AZURE_STATIC_WEB_APPS_API_TOKEN_YELLOW_RIVER_084FE4E1E }}
skip_deploy_on_missing_secrets: true
action: "close"


@ -14,8 +14,8 @@ The following branches are currently maintained:
| Branch | Website | Description |
|--------|---------|-------------|
| [v1.0](https://github.com/dapr/docs) (primary) | https://docs.dapr.io | Latest Dapr release documentation. Typo fixes, clarifications, and most documentation goes here.
| [v1.1](https://github.com/dapr/docs/tree/v1.1) (pre-release) | https://v1-1.docs.dapr.io/ | Pre-release documentation. Doc updates that are only applicable to v1.1+ go here.
| [v1.1](https://github.com/dapr/docs) (primary) | https://docs.dapr.io | Latest Dapr release documentation. Typo fixes, clarifications, and most documentation goes here.
| [v1.2](https://github.com/dapr/docs/tree/v1.2) (pre-release) | https://v1-2.docs.dapr.io/ | Pre-release documentation. Doc updates that are only applicable to v1.2+ go here.
For more information visit the [Dapr branch structure](https://docs.dapr.io/contributing/contributing-docs/#branch-guidance) document.


@ -1,5 +1,5 @@
# Site Configuration
baseURL = "https://docs.dapr.io/"
baseURL = "https://v1-2.docs.dapr.io/"
title = "Dapr Docs"
theme = "docsy"
disableFastRender = true
@ -133,20 +133,23 @@ offlineSearch = false
github_repo = "https://github.com/dapr/docs"
github_project_repo = "https://github.com/dapr/dapr"
github_subdir = "daprdocs"
github_branch = "v1.0"
github_branch = "v1.2"
# Versioning
version_menu = "v1.0"
version = "v1.0"
archived_version = true
version_menu = "v1.2 (preview)"
version = "v1.2"
archived_version = false
url_latest_version = "https://docs.dapr.io"
[[params.versions]]
version = "v1.2 (preview)"
url = "#"
[[params.versions]]
version = "v1.1 (latest)"
url = "https://docs.dapr.io"
[[params.versions]]
version = "v1.0"
url = "#"
url = "https://v1-0.docs.dapr.io"
[[params.versions]]
version = "v0.11"
url = "https://v0-11.docs.dapr.io"


@ -52,8 +52,8 @@ dapr --version
Output should look like this:
```
CLI version: 1.0.1
Runtime version: 1.0.1
CLI version: 1.1.0
Runtime version: 1.1.0
```
### Step 4: Verify containers are running


@ -29,6 +29,7 @@ Table captions:
| [InfluxDB]({{< ref influxdb.md >}}) | | ✅ | Alpha | v1 | 1.0 |
| [Kafka]({{< ref kafka.md >}}) | ✅ | ✅ | Alpha | v1 | 1.0 |
| [Kubernetes Events]({{< ref "kubernetes-binding.md" >}}) | ✅ | | Alpha | v1 | 1.0 |
| [Local Storage]({{< ref localstorage.md >}}) | | ✅ | Alpha | v1 | 1.1 |
| [MQTT]({{< ref mqtt.md >}}) | ✅ | ✅ | Alpha | v1 | 1.0 |
| [MySQL]({{< ref mysql.md >}}) | | ✅ | Alpha | v1 | 1.0 |
| [PostgreSql]({{< ref postgres.md >}}) | | ✅ | Alpha | v1 | 1.0 |


@ -0,0 +1,266 @@
---
type: docs
title: "Local Storage binding spec"
linkTitle: "Local Storage"
description: "Detailed documentation on the Local Storage binding component"
---
## Component format
To set up the Local Storage binding, create a component of type `bindings.localstorage`. See [this guide]({{< ref "howto-bindings.md#1-create-a-binding" >}}) on how to create and apply a binding configuration.
```yaml
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: <NAME>
  namespace: <NAMESPACE>
spec:
  type: bindings.localstorage
  version: v1
  metadata:
  - name: rootPath
    value: <string>
```
## Spec metadata fields
| Field | Required | Binding support | Details | Example |
|--------------------|:--------:|--------|---------|---------|
| rootPath | Y | Input / Output | The root path anchor to which files can be read / saved | `"/temp/files"` |
## Binding support
This component supports **output binding** with the following operations:
- `create` : [Create file](#create-file)
- `get` : [Get file](#get-file)
- `list` : [List files](#list-files)
- `delete` : [Delete file](#delete-file)
### Create file
To perform a create file operation, invoke the Local Storage binding with a `POST` method and the following JSON body:
> Note: by default, a random UUID is generated as the file name. See the Metadata information section below for how to set the file name.
```json
{
  "operation": "create",
  "data": "YOUR_CONTENT"
}
```
#### Examples
##### Save text to a randomly generated UUID file
{{< tabs Windows Linux >}}
{{% codetab %}}
On Windows, use the command prompt (PowerShell has a different escaping mechanism)
```bash
curl -d "{ \"operation\": \"create\", \"data\": \"Hello World\" }" http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
```
{{% /codetab %}}
{{% codetab %}}
```bash
curl -d '{ "operation": "create", "data": "Hello World" }' \
http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
```
{{% /codetab %}}
{{< /tabs >}}
##### Save text to a specific file
{{< tabs Windows Linux >}}
{{% codetab %}}
```bash
curl -d "{ \"operation\": \"create\", \"data\": \"Hello World\", \"metadata\": { \"fileName\": \"my-test-file.txt\" } }" \
http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
```
{{% /codetab %}}
{{% codetab %}}
```bash
curl -d '{ "operation": "create", "data": "Hello World", "metadata": { "fileName": "my-test-file.txt" } }' \
http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
```
{{% /codetab %}}
{{< /tabs >}}
##### Save a binary file
To upload a file, encode it as Base64. The binding should automatically detect the Base64 encoding.
{{< tabs Windows Linux >}}
{{% codetab %}}
```bash
curl -d "{ \"operation\": \"create\", \"data\": \"YOUR_BASE_64_CONTENT\", \"metadata\": { \"fileName\": \"my-test-file.jpg\" } }" http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
```
{{% /codetab %}}
{{% codetab %}}
```bash
curl -d '{ "operation": "create", "data": "YOUR_BASE_64_CONTENT", "metadata": { "fileName": "my-test-file.jpg" } }' \
http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
```
{{% /codetab %}}
{{< /tabs >}}
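The Base64 payload itself can be produced with any standard tool before it is sent. A minimal sketch on Linux (the file name `my-test-file.jpg` is only illustrative; on macOS use `base64 -i` instead of `-w 0`):

```bash
# Encode the local file without line wrapping, then pass it as the binding's data field
BASE64_CONTENT=$(base64 -w 0 my-test-file.jpg)
curl -d "{ \"operation\": \"create\", \"data\": \"$BASE64_CONTENT\", \"metadata\": { \"fileName\": \"my-test-file.jpg\" } }" \
      http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
```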
#### Response
The response body will contain the following JSON:
```json
{
  "fileName": "<filename>"
}
```
### Get file
To perform a get file operation, invoke the Local Storage binding with a `POST` method and the following JSON body:
```json
{
  "operation": "get",
  "metadata": {
    "fileName": "myfile"
  }
}
```
#### Example
{{< tabs Windows Linux >}}
{{% codetab %}}
```bash
curl -d '{ \"operation\": \"get\", \"metadata\": { \"fileName\": \"myfile\" }}' http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
```
{{% /codetab %}}
{{% codetab %}}
```bash
curl -d '{ "operation": "get", "metadata": { "fileName": "myfile" }}' \
http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
```
{{% /codetab %}}
{{< /tabs >}}
#### Response
The response body contains the value stored in the file.
### List files
To perform a list files operation, invoke the Local Storage binding with a `POST` method and the following JSON body:
```json
{
  "operation": "list"
}
```
If you only want to list the files beneath a particular directory below the `rootPath`, specify the relative directory name as the `fileName` in the metadata.
```json
{
  "operation": "list",
  "metadata": {
    "fileName": "my/cool/directory"
  }
}
```
#### Example
{{< tabs Windows Linux >}}
{{% codetab %}}
```bash
curl -d '{ \"operation\": \"list\", \"metadata\": { \"fileName\": \"my/cool/directory\" }}' http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
```
{{% /codetab %}}
{{% codetab %}}
```bash
curl -d '{ "operation": "list", "metadata": { "fileName": "my/cool/directory" }}' \
http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
```
{{% /codetab %}}
{{< /tabs >}}
#### Response
The response is a JSON array of file names.
### Delete file
To perform a delete file operation, invoke the Local Storage binding with a `POST` method and the following JSON body:
```json
{
  "operation": "delete",
  "metadata": {
    "fileName": "myfile"
  }
}
```
#### Example
{{< tabs Windows Linux >}}
{{% codetab %}}
```bash
curl -d '{ \"operation\": \"delete\", \"metadata\": { \"fileName\": \"myfile\" }}' http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
```
{{% /codetab %}}
{{% codetab %}}
```bash
curl -d '{ "operation": "delete", "metadata": { "fileName": "myfile" }}' \
http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
```
{{% /codetab %}}
{{< /tabs >}}
#### Response
An HTTP 204 (No Content) and empty body will be returned if successful.
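If you want to confirm the status code from the command line, curl can print it for you; a small sketch:

```bash
# Print only the HTTP status code of the delete operation (expect 204 on success)
curl -s -o /dev/null -w "%{http_code}" \
      -d '{ "operation": "delete", "metadata": { "fileName": "myfile" } }' \
      http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
```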
## Metadata information
By default, the Local Storage output binding auto-generates a UUID as the file name. You can set the file name through the `fileName` metadata property of the message.
```json
{
  "data": "file content",
  "metadata": {
    "fileName": "filename.txt"
  },
  "operation": "create"
}
```
## Related links
- [Basic schema for a Dapr component]({{< ref component-schema >}})
- [Bindings building block]({{< ref bindings >}})
- [How-To: Trigger application with input binding]({{< ref howto-triggers.md >}})
- [How-To: Use bindings to interface with external resources]({{< ref howto-bindings.md >}})
- [Bindings API reference]({{< ref bindings_api.md >}})


@ -44,6 +44,10 @@ spec:
value: 10
- name: autoDeleteOnIdleInSec # Optional
value: 10
- name: maxReconnectionAttempts # Optional
value: 30
- name: connectionRecoveryInSec # Optional
value: 2
```
> __NOTE:__ The above settings are shared across all topics that use this component.
@ -69,6 +73,8 @@ The above example uses secrets as plain strings. It is recommended to use a secr
| prefetchCount | N |Defines the number of prefetched messages (use for high throughput / low latency scenarios)| `5`
| defaultMessageTimeToLiveInSec | N |Default message time to live. | `10`
| autoDeleteOnIdleInSec | N |Time in seconds to wait before auto deleting messages. | `10`
| maxReconnectionAttempts | N |Defines the maximum number of reconnect attempts. Default: `30` | `30`
| connectionRecoveryInSec | N |Time in seconds to wait between connection recovery attempts. Default: `2` | `2`
## Create an Azure Service Bus


@ -31,6 +31,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr
| Field | Required | Details | Example |
|--------------------|:--------:|---------|---------|
| connectionString | Y | A comma delimited string of servers. Example: "hazelcast:3000,hazelcast2:3000" | `"hazelcast:3000,hazelcast2:3000"`
| backOffMaxRetries | N | The maximum number of retries to process the message before returning an error. Defaults to `"0"`, which means the component will not retry processing the message. `"-1"` retries indefinitely until the message is processed or the application is shut down. Any positive number is treated as the maximum retry count. The component waits 5 seconds between retries. | `"3"` |
## Create a Hazelcast instance


@ -39,6 +39,7 @@ spec:
| caCert | Required for using TLS | Certificate authority certificate. Can be `secretKeyRef` to use a secret reference | `0123456789-0123456789`
| clientCert | Required for using TLS | Client certificate. Can be `secretKeyRef` to use a secret reference | `0123456789-0123456789`
| clientKey | Required for using TLS | Client key. Can be `secretKeyRef` to use a secret reference | `012345`
| backOffMaxRetries | N | The maximum number of retries to process the message before returning an error. Defaults to `"0"`, which means the component will not retry processing the message. `"-1"` retries indefinitely until the message is processed or the application is shut down. Any positive number is treated as the maximum retry count. The component waits 5 seconds between retries. | `"3"`
### Communication using TLS


@ -39,8 +39,12 @@ The above example uses secrets as plain strings. It is recommended to use a secr
|--------------------|:--------:|---------|---------|
| redisHost | Y | Connection-string for the redis host | `localhost:6379`, `redis-master.default.svc.cluster.local:6379`
| redisPassword | Y | Password for Redis host. No Default. Can be `secretKeyRef` to use a secret reference | `""`, `"KeFg23!"`
| consumerID | N | The consumer group ID | `"myGroup"`
| enableTLS | N | If the Redis instance supports TLS with public certificates, can be configured to be enabled or disabled. Defaults to `"false"` | `"true"`, `"false"`
| consumerID | N | The consumer group ID | `"myGroup"`
| enableTLS | N | If the Redis instance supports TLS with public certificates, can be configured to be enabled or disabled. Defaults to `"false"` | `"true"`, `"false"`
| redeliverInterval | N | The interval between checks for pending messages to redeliver. Defaults to `"60s"`. `"0"` disables redelivery. | `"30s"`
| processingTimeout | N | The amount of time a message must be pending before attempting to redeliver it. Defaults to `"15s"`. `"0"` disables redelivery. | `"30s"`
| queueDepth | N | The size of the message queue for processing. Defaults to `"100"`. | `"1000"`
| concurrency | N | The number of concurrent workers that are processing messages. Defaults to `"10"`. | `"15"`
## Create a Redis instance


@ -31,6 +31,7 @@ Table captions:
| Name | Status | Component version | Since |
|----------------------------------------------------------|--------| -------------------| ---- |
| [AWS Secrets Manager]({{< ref aws-secret-manager.md >}}) | Alpha | v1 | 1.0 |
| [AWS SSM Parameter Store]({{< ref aws-parameter-store.md >}}) | Alpha | v1 | 1.1 |
### Google Cloud Platform (GCP)


@ -0,0 +1,54 @@
---
type: docs
title: "AWS SSM Parameter Store"
linkTitle: "AWS SSM Parameter Store"
description: Detailed information on the AWS SSM Parameter Store - secret store component
---
## Component format
To set up the AWS SSM Parameter Store secret store, create a component of type `secretstores.aws.parameterstore`. See [this guide]({{< ref "secret-stores-overview.md#apply-the-configuration" >}}) on how to create and apply a secret store configuration. See this guide on [referencing secrets]({{< ref component-secrets.md >}}) to retrieve and use the secret with Dapr components.
See [Authenticating to AWS]({{< ref authenticating-aws.md >}}) for information about authentication-related attributes.
```yaml
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: awsparameterstore
  namespace: default
spec:
  type: secretstores.aws.parameterstore
  version: v1
  metadata:
  - name: region
    value: "[aws_region]"
  - name: accessKey
    value: "[aws_access_key]"
  - name: secretKey
    value: "[aws_secret_key]"
  - name: sessionToken
    value: "[aws_session_token]"
```
{{% alert title="Warning" color="warning" %}}
The above example uses secrets as plain strings. It is recommended to use a local secret store such as [Kubernetes secret store]({{< ref kubernetes-secret-store.md >}}) or a [local file]({{< ref file-secret-store.md >}}) to bootstrap secure key storage.
{{% /alert %}}
## Spec metadata fields
| Field | Required | Details | Example |
|--------------------|:--------:|-------------------------------------------------------------------------|---------------------|
| region | Y | The specific AWS region the AWS SSM Parameter Store instance is deployed in | `"us-east-1"` |
| accessKey | Y | The AWS Access Key to access this resource | `"key"` |
| secretKey | Y | The AWS Secret Access Key to access this resource | `"secretAccessKey"` |
| sessionToken | N | The AWS session token to use | `"sessionToken"` |
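Once the component is applied, parameters can be read through the standard Dapr secrets API. A minimal sketch, assuming a parameter named `my-parameter` exists in the store and the component is named `awsparameterstore` as above:

```bash
# Retrieve the parameter through the Dapr sidecar's secrets endpoint
curl http://localhost:<dapr-port>/v1.0/secrets/awsparameterstore/my-parameter
```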
## Create an AWS SSM Parameter Store instance
Set up AWS SSM Parameter Store by following the AWS documentation: https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-parameter-store.html.
## Related links
- [Secrets building block]({{< ref secrets >}})
- [How-To: Retrieve a secret]({{< ref "howto-secrets.md" >}})
- [How-To: Reference secrets in Dapr components]({{< ref component-secrets.md >}})
- [Secrets API reference]({{< ref secrets_api.md >}})
- [Authenticating to AWS]({{< ref authenticating-aws.md >}})


@ -2,7 +2,7 @@
type: docs
title: "Kubernetes cluster setup"
linkTitle: "How-to: Setup clusters"
weight: 70000
weight: 80000
description: >
How to create a Kubernetes cluster
---


@ -35,3 +35,4 @@ The following table shows all the supported pod Spec annotations supported by Da
| `dapr.io/sidecar-readiness-probe-period-seconds` | How often (in seconds) to perform the sidecar readiness probe. Read more [here](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes). Default is `6`
| `dapr.io/sidecar-readiness-probe-threshold` | When the sidecar readiness probe fails, Kubernetes will try N times before giving up. In this case, the Pod will be marked Unready. Read more about `failureThreshold` [here](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes). Default is `3`
| `dapr.io/http-max-request-size` | Increases the maximum size of the request body for the HTTP and gRPC servers, in MB, to handle uploading of big files. Default is `4` MB
| `dapr.io/env` | List of environment variables to be injected into the sidecar. A string of key=value pairs separated by commas.


@ -65,7 +65,6 @@ The default namespace when initializing Dapr is `dapr-system`. You can override
dapr init -k -n mynamespace
```
### Install in highly available mode
You can run Dapr with 3 replicas of each control plane pod in the dapr-system namespace for [production scenarios]({{< ref kubernetes-production.md >}}).
@ -82,6 +81,16 @@ Dapr is initialized by default with [mTLS]({{< ref "security-concept.md#sidecar-
dapr init -k --enable-mtls=false
```
### Wait for the installation to complete
You can wait for the installation to complete its deployment with the `--wait` flag.
The default timeout is 300s (5 min), but can be customized with the `--timeout` flag.
```bash
dapr init -k --wait --timeout 600
```
### Uninstall Dapr on Kubernetes with CLI
Run the following command on your local machine to uninstall Dapr on your cluster:
@ -113,7 +122,7 @@ The latest Dapr helm chart no longer supports Helm v2. Please migrate from Helm
```bash
helm upgrade --install dapr dapr/dapr \
--version=1.0.1 \
--version=1.1.0 \
--namespace dapr-system \
--create-namespace \
--wait
@ -123,7 +132,7 @@ The latest Dapr helm chart no longer supports Helm v2. Please migrate from Helm
```bash
helm upgrade --install dapr dapr/dapr \
--version=1.0.1 \
--version=1.1.0 \
--namespace dapr-system \
--create-namespace \
--set global.ha.enabled=true \


@ -0,0 +1,60 @@
---
type: docs
title: "Running Dapr with a Kubernetes Job"
linkTitle: "Kubernetes Jobs"
weight: 70000
description: "Use Dapr API in a Kubernetes Job context"
---
# Kubernetes Job
The Dapr sidecar is designed to be a long-running process; in the context of a [Kubernetes Job](https://kubernetes.io/docs/concepts/workloads/controllers/job/) this behaviour can prevent your job from completing.
To address this, the Dapr sidecar exposes a `Shutdown` endpoint.
When running a basic [Kubernetes Job](https://kubernetes.io/docs/concepts/workloads/controllers/job/) you need to call the `/shutdown` endpoint so that the sidecar stops gracefully and the job is considered `Completed`.
When a job finishes without calling `Shutdown`, it remains in a `NotReady` state with only the `daprd` container running endlessly.
```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: job-with-shutdown
spec:
  template:
    metadata:
      annotations:
        dapr.io/enabled: "true"
        dapr.io/app-id: "with-shutdown"
    spec:
      containers:
      - name: job
        image: busybox
        command: ["/bin/sh", "-c", "sleep 20 && wget localhost:3500/v1.0/shutdown"]
      restartPolicy: Never
```
You can also call `Shutdown` from any of the Dapr SDKs, for example from the Go SDK:
```go
package main

import (
  "context"
  "log"
  "os"

  dapr "github.com/dapr/go-sdk/client"
)

func main() {
  client, err := dapr.NewClient()
  if err != nil {
    log.Panic(err)
  }
  defer client.Close()
  defer client.Shutdown()
  // Job
}
```
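Once the job's container exits after calling the shutdown endpoint, the job should be reported as complete. A quick way to verify this, using the job name from the manifest above:

```bash
# Check that the job reports completions instead of hanging in a NotReady state
kubectl get job job-with-shutdown
```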


@ -11,12 +11,12 @@ description: "Follow these steps to upgrade Dapr on Kubernetes and ensure a smoo
- [Dapr CLI]({{< ref install-dapr-cli.md >}})
- [Helm 3](https://github.com/helm/helm/releases) (if using Helm)
## Upgrade existing cluster to 1.0.1
## Upgrade existing cluster to 1.1.0
There are two ways to upgrade the Dapr control plane on a Kubernetes cluster using either the Dapr CLI or Helm.
### Dapr CLI
The example below shows how to upgrade to version 1.0.1:
The example below shows how to upgrade to version 1.1.0:
```bash
dapr upgrade -k --runtime-version=1.0.1
@ -25,6 +25,26 @@ The example below shows how to upgrade to version 1.0.1:
You can provide all the available Helm chart configurations using the Dapr CLI.
See [here](https://github.com/dapr/cli#supplying-helm-values) for more info.
#### Troubleshooting upgrade using the CLI
There is a known issue when running upgrades on clusters that may have previously had a version prior to 1.0.0-rc.2 installed.
Most users should not encounter this issue, but there are a few upgrade path edge cases that may leave an incompatible CustomResourceDefinition installed on your cluster. The error message for this case looks like this:
```
❌ Failed to upgrade Dapr: Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply
The CustomResourceDefinition "configurations.dapr.io" is invalid: spec.preserveUnknownFields: Invalid value: true: must be false in order to use defaults in the schema
```
To resolve this issue, run the following command to upgrade the CustomResourceDefinition to a compatible version:
```
kubectl replace -f https://raw.githubusercontent.com/dapr/dapr/5a15b3e0f093d2d0938b12f144c7047474a290fe/charts/dapr/crds/configuration.yaml
```
Then proceed with the `dapr upgrade --runtime-version 1.1.0 -k` command as above.
### Helm
From version 1.0.0 onwards, upgrading Dapr using Helm is no longer a disruptive action since existing certificate values will automatically be re-used.
@ -64,4 +84,4 @@ From version 1.0.0 onwards, upgrading Dapr using Helm is no longer a disruptive
## Next steps
- [Dapr on Kubernetes]({{< ref kubernetes-overview.md >}})
- [Dapr production guidelines]({{< ref kubernetes-production.md >}})
- [Dapr production guidelines]({{< ref kubernetes-production.md >}})


@ -25,11 +25,11 @@ description: "Follow these steps to upgrade Dapr in self-hosted mode and ensure
dapr init
```
1. Ensure you are using the latest version of Dapr (1.0.1) with:
1. Ensure you are using the latest version of Dapr (v1.1.0) with:
```bash
$ dapr --version
CLI version: 1.0.0
Runtime version: 1.0.1
CLI version: 1.1.0
Runtime version: 1.1.0
```


@ -86,7 +86,7 @@ spec:
## Log collectors
If you run Dapr in a Kubernetes cluster, [Fluentd](https://www.fluentd.org/) is a popular container log collector. You can use Fluentd with a [json parser plugin](https://docs.fluentd.org/parser/json) to parse Dapr JSON formatted logs. This [how-to]({{< ref fluentd.md >}}) shows how to configure the Fleuntd in your cluster.
If you run Dapr in a Kubernetes cluster, [Fluentd](https://www.fluentd.org/) is a popular container log collector. You can use Fluentd with a [json parser plugin](https://docs.fluentd.org/parser/json) to parse Dapr JSON formatted logs. This [how-to]({{< ref fluentd.md >}}) shows how to configure Fluentd in your cluster.
If you are using the Azure Kubernetes Service, you can use the default OMS Agent to collect logs with Azure Monitor without needing to install Fluentd.


@ -0,0 +1,78 @@
---
type: docs
title: "How-To: Set-up New Relic for Dapr logging"
linkTitle: "New Relic"
weight: 2000
description: "Set-up New Relic for Dapr logging"
---
## Prerequisites
- Perpetually [free New Relic account](https://newrelic.com/signup?ref=dapr), 100 GB/month of free data ingest, 1 free full access user, unlimited free basic users
## Background
New Relic offers a [Fluent Bit](https://fluentbit.io/) output [plugin](https://github.com/newrelic/newrelic-fluent-bit-output) to easily forward your logs to [New Relic Logs](https://github.com/newrelic/newrelic-fluent-bit-output). This plugin is also provided in a standalone Docker image that can be installed in a Kubernetes cluster in the form of a DaemonSet, which we refer to as the Kubernetes plugin.
This document explains how to install it in your cluster, either using a Helm chart (recommended), or manually by applying Kubernetes manifests.
## Installation
### Install using the Helm chart (recommended)
1. Install Helm following the official instructions.
2. Add the New Relic official Helm chart repository following [these instructions](https://github.com/newrelic/helm-charts/blob/master/README.md#installing-charts)
3. Run the following command to install the New Relic Logging Kubernetes plugin via Helm, replacing the placeholder value YOUR_LICENSE_KEY with your [New Relic license key](https://docs.newrelic.com/docs/accounts/install-new-relic/account-setup/license-key):
- Helm 3
```bash
helm install newrelic-logging newrelic/newrelic-logging --set licenseKey=YOUR_LICENSE_KEY
```
- Helm 2
```bash
helm install newrelic/newrelic-logging --name newrelic-logging --set licenseKey=YOUR_LICENSE_KEY
```
For EU users, add `--set endpoint=https://log-api.eu.newrelic.com/log/v1` to any of the helm install commands above.
By default, tailing is set to `/var/log/containers/*.log`. To change this setting, provide your preferred path by adding `--set fluentBit.path=DESIRED_PATH` to any of the helm install commands above.
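Putting those options together, an EU install with an explicit tail path might look like the following sketch (the license key is a placeholder):

```bash
helm install newrelic-logging newrelic/newrelic-logging \
  --set licenseKey=YOUR_LICENSE_KEY \
  --set endpoint=https://log-api.eu.newrelic.com/log/v1 \
  --set fluentBit.path=/var/log/containers/*.log
```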
### Install the Kubernetes manifest
1. Download the following 3 manifest files into your current working directory:
```bash
curl https://raw.githubusercontent.com/newrelic/helm-charts/master/charts/newrelic-logging/k8s/fluent-conf.yml > fluent-conf.yml
curl https://raw.githubusercontent.com/newrelic/helm-charts/master/charts/newrelic-logging/k8s/new-relic-fluent-plugin.yml > new-relic-fluent-plugin.yml
curl https://raw.githubusercontent.com/newrelic/helm-charts/master/charts/newrelic-logging/k8s/rbac.yml > rbac.yml
```
2. In the downloaded `new-relic-fluent-plugin.yml` file, replace the placeholder value `LICENSE_KEY` with your New Relic license key.
For EU users, set the `ENDPOINT` environment variable to `https://log-api.eu.newrelic.com/log/v1`.
3. Once the license key has been added, run the following command in your terminal or command-line interface:
```bash
kubectl apply -f .
```
4. [OPTIONAL] You can configure how the plugin parses the data by editing the `parsers.conf` section in the `fluent-conf.yml` file. For more information, see Fluent Bit's documentation on Parsers configuration.
By default, tailing is set to `/var/log/containers/*.log`. To change this setting, replace the default path with your preferred path in the `new-relic-fluent-plugin.yml` file.
## View Logs
![Dapr Annotations](/images/nr-logging-1.png)
![Search](/images/nr-logging-2.png)
## Related Links/References
* [New Relic Account Signup](https://newrelic.com/signup)
* [Telemetry Data Platform](https://newrelic.com/platform/telemetry-data-platform)
* [New Relic Logging](https://github.com/newrelic/helm-charts/tree/master/charts/newrelic-logging)
* [Types of New Relic API keys](https://docs.newrelic.com/docs/apis/get-started/intro-apis/types-new-relic-api-keys)
* [Alerts and Applied Intelligence](https://docs.newrelic.com/docs/alerts-applied-intelligence)


@ -3,7 +3,7 @@ type: docs
title: "Metrics"
linkTitle: "Metrics"
weight: 4000
description: "Observing Dapr metrics"
description: "Observing Dapr metrics in Kubernetes"
---
Dapr exposes a [Prometheus](https://prometheus.io/) metrics endpoint that you can scrape to gain a greater understanding of how Dapr is behaving and to setup alerts for specific conditions.
@ -12,9 +12,40 @@ Dapr exposes a [Prometheus](https://prometheus.io/) metrics endpoint that you ca
The metrics endpoint is enabled by default; you can disable it by passing the command line argument `--enable-metrics=false` to Dapr system processes.
The default metrics port is `9090`. This can be overridden by passing the command line argument `--metrics-port` to Daprd.
The default metrics port is `9090`. This can be overridden by passing the command line argument `--metrics-port` to Daprd. Additionally, the metrics exporter can be disabled for a specific application by setting the `dapr.io/enable-metrics: "false"` annotation on your application deployment. With the metrics exporter disabled, `daprd` will not open the metrics listening port.
To disable the metrics in the Dapr side car, you can use the `metric` spec configuration and set `enabled: false` to disable the metrics in the Dapr runtime.
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nodeapp
  labels:
    app: node
spec:
  replicas: 1
  selector:
    matchLabels:
      app: node
  template:
    metadata:
      labels:
        app: node
      annotations:
        dapr.io/enabled: "true"
        dapr.io/app-id: "nodeapp"
        dapr.io/app-port: "3000"
        dapr.io/enable-metrics: "true"
        dapr.io/metrics-port: "9090"
    spec:
      containers:
      - name: node
        image: dapriosamples/hello-k8s-node:latest
        ports:
        - containerPort: 3000
        imagePullPolicy: Always
```
To disable metrics collection in the Dapr sidecars running in a specific namespace, use the `metric` spec configuration and set `enabled: false` to disable metrics in the Dapr runtime.
```yaml
apiVersion: dapr.io/v1alpha1
@ -26,7 +57,7 @@ spec:
tracing:
samplingRate: "1"
metric:
enabled: false
enabled: true
```
## Metrics


@ -0,0 +1,43 @@
---
type: docs
title: "How-To: Set-up New Relic to collect and analyze metrics"
linkTitle: "New Relic"
weight: 6000
description: "Set-up New Relic for Dapr metrics"
---
## Prerequisites
- Perpetually [free New Relic account](https://newrelic.com/signup?ref=dapr), 100 GB/month of free data ingest, 1 free full access user, unlimited free basic users
## Background
New Relic offers a Prometheus OpenMetrics Integration.
This document explains how to install it in your cluster using a Helm chart (recommended).
## Installation
1. Install Helm following the official instructions.
2. Add the New Relic official Helm chart repository following [these instructions](https://github.com/newrelic/helm-charts/blob/master/README.md#installing-charts)
3. Run the following command to install the New Relic Prometheus OpenMetrics integration via Helm, replacing the placeholder value YOUR_LICENSE_KEY with your [New Relic license key](https://docs.newrelic.com/docs/accounts/install-new-relic/account-setup/license-key):
```bash
helm install nri-prometheus newrelic/nri-prometheus --set licenseKey=YOUR_LICENSE_KEY
```
## View Metrics
![Dapr Metrics](/images/nr-metrics-1.png)
![Dashboard](/images/nr-dashboard-dapr-metrics-1.png)
## Related Links/References
* [New Relic Account Signup](https://newrelic.com/signup)
* [Telemetry Data Platform](https://newrelic.com/platform/telemetry-data-platform)
* [New Relic Prometheus OpenMetrics Integration](https://github.com/newrelic/helm-charts/tree/master/charts/nri-prometheus)
* [Types of New Relic API keys](https://docs.newrelic.com/docs/apis/get-started/intro-apis/types-new-relic-api-keys)
* [Alerts and Applied Intelligence](https://docs.newrelic.com/docs/alerts-applied-intelligence)


@ -1,14 +1,14 @@
---
type: docs
title: "How-To: Set-up New Relic for Dapr observability"
title: "How-To: Set-up New Relic for distributed tracing"
linkTitle: "New Relic"
weight: 2000
description: "Set-up New Relic for Dapr observability"
description: "Set-up New Relic for distributed tracing"
---
## Prerequisites
- Perpetually [free New Relic account](https://newrelic.com/signup), 100 GB/month of free data ingest, 1 free full access user, unlimited free basic users
- Perpetually [free New Relic account](https://newrelic.com/signup?ref=dapr), 100 GB/month of free data ingest, 1 free full access user, unlimited free basic users
## Configure Dapr tracing


@ -31,7 +31,9 @@ The table below shows the versions of Dapr releases that have been tested togeth
| Release date | Runtime | CLI | SDKs | Dashboard | Status |
|--------------------|:--------:|:--------|---------|---------|---------|
| Feb 17th 2021 | 1.0.1</br>(Mar 4th 2021) | 1.0.1</br>(Mar 11th 2021) | Java 1.0.0 </br>Go 1.0.0 </br>PHP 1.0.0 </br>Python 1.0.0 </br>.NET 1.0.0 | 0.6.0 | Supported (current) |
| Feb 17th 2021 | 1.0.0</br>| 1.0.0 | Java 1.0.0 </br>Go 1.0.0 </br>PHP 1.0.0 </br>Python 1.0.0 </br>.NET 1.0.0 | 0.6.0 | Supported |
| Mar 4th 2021 | 1.0.1</br>| 1.0.1 | Java 1.0.2 </br>Go 1.0.0 </br>PHP 1.0.0 </br>Python 1.0.0 </br>.NET 1.0.0 | 0.6.0 | Supported |
| Apr 1st 2021 | 1.1.0</br> | 1.1.0 | Java 1.0.2 </br>Go 1.1.0 </br>PHP 1.0.0 </br>Python 1.1.0 </br>.NET 1.1.0 | 0.6.0 | Supported (current) |
## Upgrade paths
After the 1.0 release of the runtime, there may be situations where it is necessary to explicitly upgrade through an additional release to reach the desired target. For example, an upgrade from v1.0 to v1.2 may need to pass through v1.1.
@ -40,8 +42,10 @@ The table below shows the tested upgrade paths for the Dapr runtime. For example
| Current Runtime version | Must upgrade through | Target Runtime version | Notes
|--------------------------|-----------------------|------------------------- |------------------------- |
| 0.11 | N/A | 1.0.0 | Use Dapr CLI to upgrade for both self hosted and Kubernetes
| 1.0-rc1 to 1.0-rc4 | N/A | 1.0.0 | See Dapr 1.0 release notes
| 0.11 | N/A | 1.0.1 | Use Dapr CLI to upgrade for both self hosted and Kubernetes
| | 1.0.1| 1.1.0 |
| 1.0-rc1 to 1.0-rc4 | N/A | 1.0.1 | See Dapr 1.0 release notes
| 1.0.0 or 1.0.1 | N/A | 1.1.0 | See Dapr 1.1 release notes
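For example, per the table above, a cluster already on 1.0.0 or 1.0.1 can move directly to 1.1.0 with a single CLI upgrade (a sketch; see the Kubernetes upgrade guide above for the full procedure):

```bash
dapr upgrade -k --runtime-version 1.1.0
```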
## Feature and deprecations
There is a process for announcing feature deprecations. Deprecations are applied two (2) releases after the release in which they were announced. For example Feature X is announced to be deprecated in the 1.0.0 release notes and will then be removed in 1.2.0.
@ -62,4 +66,4 @@ Here is an example, using a hypothetical 1.1.0 as the deprecation announcement r
Dapr can support multiple hosting platforms for production. With the 1.0 release the two supported platforms are Kubernetes and physical machines. For Kubernetes upgrades see [Production guidelines on Kubernetes]({{< ref kubernetes-production.md >}})
## Related links
* Read the [Versioning policy]({{< ref support-versioning.md >}})
* Read the [Versioning policy]({{< ref support-versioning.md >}})


@ -27,6 +27,8 @@ dapr init [flags]
| `--enable-mtls` | | `true` | Enable mTLS in your cluster |
| `--help`, `-h` | | | Print this help message |
| `--kubernetes`, `-k` | | `false` | Deploy Dapr to a Kubernetes cluster |
| `--wait` | | `false` | Wait for Kubernetes initialization to complete |
| `--timeout` | | `300` | The wait timeout for the Kubernetes installation |
| `--namespace`, `-n` | | `dapr-system` | The Kubernetes namespace to install Dapr in |
| `--network` | `DAPR_NETWORK` | | The Docker network on which to deploy the Dapr runtime |
| `--runtime-version` | | `latest` | The version of the Dapr runtime to install, for example: `1.0.0` |
@ -44,6 +46,15 @@ dapr init
dapr init -k
```
### Initialize Dapr in Kubernetes and wait for the installation to complete
You can wait for the installation to complete its deployment with the `--wait` flag.
The default timeout is 300s (5 min), but can be customized with the `--timeout` flag.
```bash
dapr init -k --wait --timeout 600
```
### Initialize specified version of Dapr runtime in self-hosted mode
```bash
dapr init --runtime-version 0.10.0
@ -57,4 +68,4 @@ dapr init -k --runtime-version 0.10.0
### Initialize Dapr in [slim self-hosted mode]({{< ref self-hosted-no-docker.md >}})
```bash
dapr init -s
```
```


@ -36,12 +36,12 @@ dapr upgrade -k
### Upgrade specified version of Dapr runtime in Kubernetes
```bash
dapr upgrade -k --runtime-version 1.0.0
dapr upgrade -k --runtime-version 1.1.0
```
### Upgrade specified version of Dapr runtime in Kubernetes with value set
```bash
dapr upgrade -k --runtime-version 1.0.0 --set global.logAsJson=true
dapr upgrade -k --runtime-version 1.1.0 --set global.logAsJson=true
```
# Related links

Binary file not shown (new image, 323 KiB)

Binary file not shown (new image, 331 KiB)

Binary file not shown (new image, 245 KiB)