mirror of https://github.com/dapr/docs.git
Merge branch 'v1.1' into patch-2
commit 80c1e020cb
|
@ -0,0 +1,29 @@
|
|||
name: validate-links
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- v*
|
||||
tags:
|
||||
- v*
|
||||
pull_request:
|
||||
branches:
|
||||
- v*
|
||||
jobs:
|
||||
validate:
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
PYTHON_VER: 3.7
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Set up Python ${{ env.PYTHON_VER }}
|
||||
uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: ${{ env.PYTHON_VER }}
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
python3 -m pip install --upgrade pip
|
||||
pip3 install setuptools wheel twine tox mechanical-markdown
|
||||
- name: Check Markdown Files
|
||||
run: |
|
||||
for name in `find . -name "*.md"`; do echo -e "------\n$name" ; mm.py -l $name || exit 1 ;done
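The same link check can be run locally before opening a PR. This is a sketch that assumes `mechanical-markdown` has been installed with pip (it provides the `mm.py` entry point used above):

```bash
# Install the checker and validate every Markdown file in the repo,
# stopping at the first file with broken links.
pip3 install mechanical-markdown
for name in $(find . -name "*.md"); do
  echo -e "------\n$name"
  mm.py -l "$name" || exit 1
done
```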
|
|
@ -10,3 +10,7 @@
|
|||
[submodule "sdkdocs/dotnet"]
|
||||
path = sdkdocs/dotnet
|
||||
url = https://github.com/dapr/dotnet-sdk.git
|
||||
[submodule "translations/docs-zh"]
|
||||
path = translations/docs-zh
|
||||
url = https://github.com/dapr/docs-zh.git
|
||||
branch = v1.0_content
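Because the SDK docs and translations are pulled in as submodules, a local build needs them checked out. A minimal sketch using standard git commands:

```bash
# Clone the docs together with all submodules (SDK docs, translations)
git clone --recurse-submodules https://github.com/dapr/docs.git

# Or, in an existing clone, fetch the submodules listed in .gitmodules
git submodule update --init --recursive
```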
|
||||
|
|
|
@ -47,6 +47,8 @@
|
|||
margin: 0rem 0;
|
||||
padding: 0rem;
|
||||
|
||||
margin-bottom: 2rem;
|
||||
|
||||
max-width: 100%;
|
||||
|
||||
pre {
|
||||
|
|
|
@ -10,6 +10,18 @@ enableGitInfo = true
|
|||
# Language Configuration
|
||||
languageCode = "en-us"
|
||||
|
||||
[languages]
|
||||
[languages.en]
|
||||
title = "Dapr Docs"
|
||||
weight = 1
|
||||
contentDir = "content/en"
|
||||
languageName = "English"
|
||||
[languages.zh-hans]
|
||||
title = "Dapr 文档库"
|
||||
weight = 2
|
||||
contentDir = "content/zh-hans"
|
||||
languageName = "简体中文"
|
||||
|
||||
# Disable categories & tags
|
||||
disableKinds = ["taxonomy", "term"]
|
||||
|
||||
|
@ -22,6 +34,7 @@ id = "UA-149338238-3"
|
|||
[[module.mounts]]
|
||||
source = "content/en"
|
||||
target = "content"
|
||||
lang = "en"
|
||||
[[module.mounts]]
|
||||
source = "static"
|
||||
target = "static"
|
||||
|
@ -37,21 +50,48 @@ id = "UA-149338238-3"
|
|||
[[module.mounts]]
|
||||
source = "archetypes"
|
||||
target = "archetypes"
|
||||
|
||||
[[module.mounts]]
|
||||
source = "../sdkdocs/python/daprdocs/content/en/python-sdk-docs"
|
||||
target = "content/developing-applications/sdks/python"
|
||||
lang = "en"
|
||||
[[module.mounts]]
|
||||
source = "../sdkdocs/python/daprdocs/content/en/python-sdk-contributing"
|
||||
target = "content/contributing/"
|
||||
lang = "en"
|
||||
[[module.mounts]]
|
||||
source = "../sdkdocs/php/daprdocs/content/en/php-sdk-docs"
|
||||
target = "content/developing-applications/sdks/php"
|
||||
lang = "en"
|
||||
[[module.mounts]]
|
||||
source = "../sdkdocs/dotnet/daprdocs/content/en/dotnet-sdk-docs"
|
||||
target = "content/developing-applications/sdks/dotnet"
|
||||
lang = "en"
|
||||
[[module.mounts]]
|
||||
source = "../sdkdocs/dotnet/daprdocs/content/en/dotnet-sdk-contributing"
|
||||
target = "content/contributing/"
|
||||
lang = "en"
|
||||
|
||||
[[module.mounts]]
|
||||
source = "../translations/docs-zh/content/zh-hans"
|
||||
target = "content"
|
||||
lang = "zh-hans"
|
||||
[[module.mounts]]
|
||||
source = "../translations/docs-zh/content/contributing"
|
||||
target = "content/contributing/"
|
||||
lang = "zh-hans"
|
||||
[[module.mounts]]
|
||||
source = "../translations/docs-zh/content/sdks_python"
|
||||
target = "content/developing-applications/sdks/python"
|
||||
lang = "zh-hans"
|
||||
[[module.mounts]]
|
||||
source = "../translations/docs-zh/content/sdks_php"
|
||||
target = "content/developing-applications/sdks/php"
|
||||
lang = "zh-hans"
|
||||
[[module.mounts]]
|
||||
source = "../translations/docs-zh/content/sdks_dotnet"
|
||||
target = "content/developing-applications/sdks/dotnet"
|
||||
lang = "zh-hans"
|
||||
|
||||
# Markdown Engine - Allow inline html
|
||||
[markup]
|
||||
|
|
|
@ -24,6 +24,6 @@ The following are the building blocks provided by Dapr:
|
|||
| [**State management**]({{<ref "state-management-overview.md">}}) | `/v1.0/state` | Application state is anything an application wants to preserve beyond a single session. Dapr provides a key/value-based state API with pluggable state stores for persistence.
|
||||
| [**Publish and subscribe**]({{<ref "pubsub-overview.md">}}) | `/v1.0/publish` `/v1.0/subscribe`| Pub/Sub is a loosely coupled messaging pattern where senders (or publishers) publish messages to a topic, to which subscribers subscribe. Dapr supports the pub/sub pattern between applications; see the example request after this table.
|
||||
| [**Resource bindings**]({{<ref "bindings-overview.md">}}) | `/v1.0/bindings` | A binding provides a bi-directional connection to an external cloud/on-premise service or system. Dapr allows you to invoke the external service through the Dapr binding API, and it allows your application to be triggered by events sent by the connected service.
|
||||
| [**Actors**]({{<ref "actors-overview.md">}}) | `/v1.0/actors` | An actor is an isolated, independent unit of compute and state with single-threaded execution. Dapr provides an actor implementation based on the Virtual Actor pattern which provides a single-threaded programming model and where actors are garbage collected when not in use. See * [Actor Overview](./actors#understanding-actors)
|
||||
| [**Actors**]({{<ref "actors-overview.md">}}) | `/v1.0/actors` | An actor is an isolated, independent unit of compute and state with single-threaded execution. Dapr provides an actor implementation based on the Virtual Actor pattern which provides a single-threaded programming model and where actors are garbage collected when not in use. See [Actor Overview](./actors#understanding-actors)
|
||||
| [**Observability**]({{<ref "observability-concept.md">}}) | `N/A` | Dapr system components and runtime emit metrics, logs, and traces to debug, operate and monitor Dapr system services, components and user applications.
|
||||
| [**Secrets**]({{<ref "secrets-overview.md">}}) | `/v1.0/secrets` | Dapr offers a secrets building block API and integrates with secret stores such as Azure Key Vault and Kubernetes to store the secrets. Service code can call the secrets API to retrieve secrets out of the Dapr supported secret stores.
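As an illustration of the HTTP endpoints listed above, here is a sketch of publishing a message through the sidecar; the port `3500`, the pub/sub component name `pubsub`, and the topic `orders` are placeholder assumptions:

```bash
# Publish a JSON payload to the "orders" topic via the Dapr sidecar
curl -X POST http://localhost:3500/v1.0/publish/pubsub/orders \
  -H "Content-Type: application/json" \
  -d '{"orderId": "42"}'
```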
|
||||
|
|
|
@ -36,7 +36,7 @@ The Dapr project is focused on performance due to the inherent discussion of Dap
|
|||
### What is the relationship between Dapr, Orleans and Service Fabric Reliable Actors?
|
||||
|
||||
The actors in Dapr are based on the same virtual actor concept that [Orleans](https://www.microsoft.com/research/project/orleans-virtual-actors/) started, meaning that they are activated when called and deactivated after a period of time. If you are familiar with Orleans, Dapr C# actors will be familiar. Dapr C# actors are based on [Service Fabric Reliable Actors](https://docs.microsoft.com/azure/service-fabric/service-fabric-reliable-actors-introduction) (which also came from Orleans) and enable you to take Reliable Actors in Service Fabric and migrate them to other hosting platforms such as Kubernetes or other on-premise environments.
|
||||
Also Dapr is about more than just actors. It provides you with a set of best practice building blocks to build into any microservices application. See [Dapr overview](https://github.com/dapr/docs/blob/master/overview/README.md).
|
||||
Also Dapr is about more than just actors. It provides you with a set of best practice building blocks to build into any microservices application. See [Dapr overview]({{< ref overview.md >}}).
|
||||
|
||||
### Differences between Dapr from an actor framework
|
||||
|
||||
|
|
|
@ -11,7 +11,7 @@ This article addresses multiple security considerations when using Dapr in a dis
|
|||
|
||||
Several of the areas above are addressed through encryption of data in transit. One of the security mechanisms that Dapr employs for encrypting data in transit is [mutual authentication TLS](https://en.wikipedia.org/wiki/Mutual_authentication) or mTLS. mTLS offers a few key features for network traffic inside your application:
|
||||
|
||||
- Two way authentication - the client proving its identify to the server, and vice-versa
|
||||
- Two way authentication - the client proving its identity to the server, and vice-versa
|
||||
- An encrypted channel for all in-flight communication, after two-way authentication is established
|
||||
|
||||
Mutual TLS is useful in almost all scenarios, but especially so for systems subject to regulations such as [HIPAA](https://en.wikipedia.org/wiki/Health_Insurance_Portability_and_Accountability_Act) and [PCI](https://en.wikipedia.org/wiki/Payment_Card_Industry_Data_Security_Standard).
|
||||
|
|
|
@ -22,7 +22,7 @@ Follow the instructions in the repository [README.md](https://github.com/dapr/do
|
|||
|
||||
The Dapr docs repo handles branching differently than most code repositories. Instead of having a `master` or `main` branch, every branch is labeled to match the major and minor version of a runtime release. For the full list, visit the [Docs repo](https://github.com/dapr/docs#branch-guidance).
|
||||
|
||||
Overall, all updates should go into the docs branch for the latest release of Dapr. You can find this directly at https://github.com/dapr/docs, as the latest release will be the default branch. For any docs changes that are applicable to a release candidate or a pre-release version of the docs, make your changes into that particular branch.
|
||||
Overall, all updates should go into the docs branch for the latest release of Dapr. You can find this directly at [https://github.com/dapr/docs](https://github.com/dapr/docs), as the latest release will be the default branch. For any docs changes that are applicable to a release candidate or a pre-release version of the docs, make your changes into that particular branch.
|
||||
|
||||
For example, if you are fixing a typo, adding notes, or clarifying a point, make your changes into the default Dapr branch. If you are documenting an upcoming change to a component or the runtime, make your changes to the pre-release branch. Branches can be found in the [Docs repo](https://github.com/dapr/docs#branch-guidance)
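For example, a typical typo-fix workflow against the latest release branch might look like the following sketch; the remote name `upstream` and the branch name `v1.1` are assumptions based on this merge:

```bash
# Branch from the current release branch of the docs repo
git fetch upstream
git checkout -b fix-typo upstream/v1.1

# Commit the change and push it to your fork to open a PR against v1.1
git commit -am "Fix typo in overview"
git push origin fix-typo
```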
|
||||
|
||||
|
@ -232,3 +232,32 @@ The shortcode would be:
|
|||
|
||||
### References
|
||||
- [Docsy authoring guide](https://www.docsy.dev/docs/adding-content/)
|
||||
|
||||
## Translations
|
||||
|
||||
The Dapr Docs supports adding language translations into the docs using git submodules and Hugo's built in language support.
|
||||
|
||||
You can find an example PR of adding Chinese language support in [PR 1286](https://github.com/dapr/docs/pull/1286).
|
||||
|
||||
Steps to add a language:
|
||||
- Open an issue in the Docs repo requesting to create a new language-specific docs repo
|
||||
- Once created, create a git submodule within the docs repo:
|
||||
```sh
|
||||
git submodule add <remote_url> translations/<language_code>
|
||||
```
|
||||
- Add a language entry within `daprdocs/config.toml`:
|
||||
```toml
|
||||
[languages.<language_code>]
|
||||
title = "Dapr Docs"
|
||||
weight = 3
|
||||
contentDir = "content/<language_code>"
|
||||
languageName = "<language_name>"
|
||||
```
|
||||
- Create a mount within `daprdocs/config.toml`:
|
||||
```toml
|
||||
[[module.mounts]]
|
||||
source = "../translations/docs-<language_code>/content/<language_code>"
|
||||
target = "content"
|
||||
lang = "<language_code>"
|
||||
```
|
||||
- Repeat above step as necessary for all other translation directories
|
|
@ -38,7 +38,7 @@ Before you submit an issue, make sure you've checked the following:
|
|||
- 👎 down-vote
|
||||
1. For bugs
|
||||
- Check it's not an environment issue. For example, if running on Kubernetes, make sure prerequisites are in place. (state stores, bindings, etc.)
|
||||
- You have as much data as possible. This usually comes in the form of logs and/or stacktrace. If running on Kubernetes or other environment, look at the logs of the Dapr services (runtime, operator, placement service). More details on how to get logs can be found [here](https://github.com/dapr/docs/tree/master/best-practices/troubleshooting/logs.md).
|
||||
- You have as much data as possible. This usually comes in the form of logs and/or stacktrace. If running on Kubernetes or other environment, look at the logs of the Dapr services (runtime, operator, placement service). More details on how to get logs can be found [here]({{< ref "logs-troubleshooting.md" >}}).
|
||||
1. For proposals
|
||||
- Many changes to the Dapr runtime may require changes to the API. In that case, the best place to discuss the potential feature is the main [Dapr repo](https://github.com/dapr/dapr).
|
||||
- Other examples could include bindings, state stores or entirely new components.
|
||||
|
|
|
@ -221,7 +221,7 @@ $app->post('/dsstatus', function(
|
|||
$app->start();
|
||||
```
|
||||
|
||||
After creating `app1.php`, and with the [SDK installed](https://github.com/dapr/php-sdk/blob/main/docs/getting-started.md),
|
||||
After creating `app1.php`, and with the [SDK installed](https://docs.dapr.io/developing-applications/sdks/php/),
|
||||
go ahead and start the app:
|
||||
|
||||
```bash
|
||||
|
|
|
@ -15,7 +15,7 @@ When using state management your application can leverage features that would ot
|
|||
- Distributed concurrency and data consistency
|
||||
- Bulk [CRUD](https://en.wikipedia.org/wiki/Create,_read,_update_and_delete) operations
|
||||
|
||||
Your application can used Dapr's state management API to save and read key/value pairs using a state store component, as shown in the diagram below. For example, by using HTTP POST you can save key/value pairs and by using HTTP GET you can read a key and have its value returned.
|
||||
Your application can use Dapr's state management API to save and read key/value pairs using a state store component, as shown in the diagram below. For example, by using HTTP POST you can save key/value pairs and by using HTTP GET you can read a key and have its value returned.
|
||||
|
||||
<img src="/images/state-management-overview.png" width=900>
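A minimal sketch of those two calls, assuming the sidecar's HTTP port is `3500` and a state store component named `statestore`:

```bash
# Save a key/value pair through the state API
curl -X POST http://localhost:3500/v1.0/state/statestore \
  -H "Content-Type: application/json" \
  -d '[{ "key": "mykey", "value": "myvalue" }]'

# Read the value back
curl http://localhost:3500/v1.0/state/statestore/mykey
```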
|
||||
|
||||
|
|
|
@ -71,7 +71,7 @@ Add a new `<tool></tool>` entry:
|
|||
<!-- 2. For Linux or MacOS use: /usr/local/bin/dapr -->
|
||||
<option name="COMMAND" value="C:\dapr\dapr.exe" />
|
||||
<!-- 3. Choose app, http and grpc ports that do not conflict with other daprd command entries (placement address should not change). -->
|
||||
<option name="PARAMETERS" value="run -app-id demoservice -app-port 3000 -dapr-http-port 3005 -dapr-grpc-port 52000 />
|
||||
<option name="PARAMETERS" value="run -app-id demoservice -app-port 3000 -dapr-http-port 3005 -dapr-grpc-port 52000" />
|
||||
<!-- 4. Use the folder where the `components` folder is located -->
|
||||
<option name="WORKING_DIRECTORY" value="C:/Code/dapr/java-sdk/examples" />
|
||||
</exec>
|
||||
|
|
|
@ -14,7 +14,7 @@ An SDK for Dapr should provide serialization for two use cases. First, for API o
|
|||
|
||||
```java
|
||||
DaprClient client = (new DaprClientBuilder()).build();
|
||||
client.invokeService(Verb.POST, "myappid", "saySomething", "My Message", null).block();
|
||||
client.invokeService("myappid", "saySomething", "My Message", HttpExtension.POST).block();
|
||||
```
|
||||
|
||||
In the example above, the app will receive a `POST` request for the `saySomething` method with the request payload as `"My Message"` - quoted since the serializer will serialize the input String to JSON.
|
||||
|
@ -139,7 +139,7 @@ redis-cli MGET "ActorStateIT_StatefulActorService||StatefulActorTest||1581130928
|
|||
{"value":"My data value."}
|
||||
```
|
||||
3. Custom serializers must serialize an object to `byte[]`.
|
||||
4. Custom serializers must deserilize `byte[]` to object.
|
||||
4. Custom serializers must deserialize `byte[]` to object.
|
||||
5. When user provides a custom serializer, it should be transferred or persisted as `byte[]`. When persisting, also encode as Base64 string. This is done natively by most JSON libraries.
|
||||
```bash
|
||||
redis-cli MGET "ActorStateIT_StatefulActorService||StatefulActorTest||1581130928192||message
|
||||
|
@ -149,6 +149,5 @@ redis-cli MGET "ActorStateIT_StatefulActorService||StatefulActorTest||1581130928
|
|||
redis-cli MGET "ActorStateIT_StatefulActorService||StatefulActorTest||1581130928192||mydata
|
||||
"eyJ2YWx1ZSI6Ik15IGRhdGEgdmFsdWUuIn0="
|
||||
```
|
||||
6. When serializing an object that is a `byte[]`, the serializer should just pass it through, since `byte[]` should already be handled internally by the SDK. The same happens when deserializing to `byte[]`.
|
||||
|
||||
*As of now, the [Java SDK](https://github.com/dapr/java-sdk/) is the only Dapr SDK that implements this specification. In the near future, other SDKs will also implement the same.*
|
||||
*As of now, the [Java SDK](https://github.com/dapr/java-sdk/) is the only Dapr SDK that implements this specification. In the near future, other SDKs will also implement the same.*
|
||||
|
|
|
@ -53,7 +53,7 @@ dapr --version
|
|||
Output should look like this:
|
||||
```
|
||||
CLI version: 1.0.0
|
||||
Runtime version: 1.0.0
|
||||
Runtime version: 1.0.1
|
||||
```
|
||||
|
||||
### Step 4: Verify containers are running
|
||||
|
|
|
@ -68,47 +68,48 @@ To perform a create blob operation, invoke the Azure Blob Storage binding with a
|
|||
|
||||
#### Examples
|
||||
|
||||
|
||||
##### Save text to a random generated UUID blob
|
||||
|
||||
{{< tabs Windows Linux >}}
|
||||
{{% codetab %}}
|
||||
On Windows, use the command prompt (PowerShell has a different escaping mechanism)
|
||||
```bash
|
||||
curl -d "{ \"operation\": \"create\", \"data\": \"Hello World\" }" http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
|
||||
```
|
||||
{{% /codetab %}}
|
||||
|
||||
**Saving to a random generated UUID file**
|
||||
{{% codetab %}}
|
||||
On Windows, use the command prompt (PowerShell has a different escaping mechanism)
|
||||
```bash
|
||||
curl -d "{ \"operation\": \"create\", \"data\": \"Hello World\" }" http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
|
||||
```
|
||||
{{% /codetab %}}
|
||||
|
||||
{{% codetab %}}
|
||||
```bash
|
||||
curl -d '{ "operation": "create", "data": "Hello World" }' \
|
||||
http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
|
||||
```
|
||||
{{% /codetab %}}
|
||||
{{% codetab %}}
|
||||
```bash
|
||||
curl -d '{ "operation": "create", "data": "Hello World" }' \
|
||||
http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
|
||||
```
|
||||
{{% /codetab %}}
|
||||
|
||||
{{< /tabs >}}
|
||||
|
||||
**Saving to a specific file**
|
||||
##### Save text to a specific blob
|
||||
|
||||
{{< tabs Windows Linux >}}
|
||||
|
||||
{{% codetab %}}
|
||||
```bash
|
||||
curl -d "{ \"operation\": \"create\", \"data\": \"Hello World\", \"metadata\": { \"blobName\": \"my-test-file.txt\" } }" \
|
||||
http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
|
||||
```
|
||||
{{% /codetab %}}
|
||||
{{% codetab %}}
|
||||
```bash
|
||||
curl -d "{ \"operation\": \"create\", \"data\": \"Hello World\", \"metadata\": { \"blobName\": \"my-test-file.txt\" } }" \
|
||||
http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
|
||||
```
|
||||
{{% /codetab %}}
|
||||
|
||||
{{% codetab %}}
|
||||
```bash
|
||||
curl -d '{ "operation": "create", "data": "Hello World", "metadata": { "blobName": "my-test-file.txt" } }' \
|
||||
http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
|
||||
```
|
||||
{{% /codetab %}}
|
||||
{{% codetab %}}
|
||||
```bash
|
||||
curl -d '{ "operation": "create", "data": "Hello World", "metadata": { "blobName": "my-test-file.txt" } }' \
|
||||
http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
|
||||
```
|
||||
{{% /codetab %}}
|
||||
|
||||
{{< /tabs >}}
|
||||
|
||||
|
||||
**Saving a file**
|
||||
##### Save a file to a blob
|
||||
|
||||
To upload a file, encode it as Base64 and let the Binding know to deserialize it:
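One way to produce the Base64 content is sketched below; note that `base64` flags differ between GNU coreutils and the BSD/macOS variant, so adjust as needed:

```bash
# GNU coreutils: -w 0 disables line wrapping in the output
base64 -w 0 my-test-file.jpg

# BSD/macOS: reads the named input file, unwrapped output by default
base64 -i my-test-file.jpg
```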
|
||||
|
||||
|
@ -136,18 +137,18 @@ Then you can upload it as you would normally:
|
|||
|
||||
{{< tabs Windows Linux >}}
|
||||
|
||||
{{% codetab %}}
|
||||
```bash
|
||||
curl -d "{ \"operation\": \"create\", \"data\": \"YOUR_BASE_64_CONTENT\", \"metadata\": { \"blobName\": \"my-test-file.jpg\" } }" http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
|
||||
```
|
||||
{{% /codetab %}}
|
||||
{{% codetab %}}
|
||||
```bash
|
||||
curl -d "{ \"operation\": \"create\", \"data\": \"YOUR_BASE_64_CONTENT\", \"metadata\": { \"blobName\": \"my-test-file.jpg\" } }" http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
|
||||
```
|
||||
{{% /codetab %}}
|
||||
|
||||
{{% codetab %}}
|
||||
```bash
|
||||
curl -d '{ "operation": "create", "data": "YOUR_BASE_64_CONTENT", "metadata": { "blobName": "my-test-file.jpg" } }' \
|
||||
http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
|
||||
```
|
||||
{{% /codetab %}}
|
||||
{{% codetab %}}
|
||||
```bash
|
||||
curl -d '{ "operation": "create", "data": "YOUR_BASE_64_CONTENT", "metadata": { "blobName": "my-test-file.jpg" } }' \
|
||||
http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
|
||||
```
|
||||
{{% /codetab %}}
|
||||
|
||||
{{< /tabs >}}
|
||||
|
||||
|
@ -179,18 +180,20 @@ To perform a get blob operation, invoke the Azure Blob Storage binding with a `P
|
|||
|
||||
{{< tabs Windows Linux >}}
|
||||
|
||||
{{% codetab %}}
|
||||
```bash
|
||||
curl -d '{ \"operation\": \"get\", \"metadata\": { \"blobName\": \"myblob\" }}' http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
|
||||
```
|
||||
{{% /codetab %}}
|
||||
{{% codetab %}}
|
||||
```bash
|
||||
curl -d '{ \"operation\": \"get\", \"metadata\": { \"blobName\": \"myblob\" }}' http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
|
||||
```
|
||||
{{% /codetab %}}
|
||||
|
||||
{{% codetab %}}
|
||||
```bash
|
||||
curl -d '{ "operation": "get", "metadata": { "blobName": "myblob" }}' \
|
||||
http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
|
||||
```
|
||||
{{% /codetab %}}
|
||||
{{% codetab %}}
|
||||
```bash
|
||||
curl -d '{ "operation": "get", "metadata": { "blobName": "myblob" }}' \
|
||||
http://localhost:<dapr-port>/v1.0/bindings/<binding-name>
|
||||
```
|
||||
{{% /codetab %}}
|
||||
|
||||
{{< /tabs >}}
|
||||
|
||||
#### Response
|
||||
|
||||
|
@ -201,6 +204,7 @@ The response body contains the value stored in the blob object.
|
|||
By default, the Azure Blob Storage output binding auto-generates a UUID as the blob filename and does not assign any system or custom metadata to it. This is configurable in the metadata property of the message (all fields are optional).
|
||||
|
||||
Applications publishing to an Azure Blob Storage output binding should send a message with the following format:
|
||||
|
||||
```json
|
||||
{
|
||||
"data": "file content",
|
||||
|
|
|
@ -43,7 +43,7 @@ kubectl create namespace namespace-a
|
|||
kubectl config set-context --current --namespace=namespace-a
|
||||
```
|
||||
|
||||
Install Redis (master and slave) on `namespace-a`, following [these instructions](https://github.com/dapr/docs/blob/master/howto/setup-pub-sub-message-broker/setup-redis.md).
|
||||
Install Redis (master and slave) on `namespace-a`, following [these instructions]({{< ref "configure-state-pubsub.md" >}}).
|
||||
|
||||
Now, configure `deploy/redis.yaml`, paying attention to the hostname containing `namespace-a`.
|
||||
|
||||
|
@ -125,4 +125,4 @@ kubectl delete namespace namespace-b
|
|||
|
||||
- [Scope components to one or more applications]({{< ref "component-scopes.md" >}})
|
||||
- [Use secret scoping]({{< ref "secrets-scopes.md" >}})
|
||||
- [Limit the secrets that can be read from secret stores]({{< ref "secret-scope.md" >}})
|
||||
- [Limit the secrets that can be read from secret stores]({{< ref "secret-scope.md" >}})
|
||||
|
|
|
@ -78,7 +78,7 @@ The above example uses secrets as plain strings. It is recommended to use a loca
|
|||
```bash
|
||||
az aks show -g <AKSResourceGroup> -n <AKSClusterName>
|
||||
```
|
||||
For more detail about the roles to assign to integrate AKS with Azure Services [Role Assignment](https://github.com/Azure/aad-pod-identity/blob/master/docs/readmes/README.role-assignment.md).
|
||||
For more detail about the roles to assign to integrate AKS with Azure Services [Role Assignment](https://azure.github.io/aad-pod-identity/docs/getting-started/role-assignment/).
|
||||
|
||||
4. Retrieve Managed Identity ID
|
||||
|
||||
|
|
|
@ -20,39 +20,42 @@ Table captions:
|
|||
|
||||
The following stores are supported, at various levels, by the Dapr state management building block:
|
||||
|
||||
> State stores can be used for actors if they support both transactional operations and ETag.
|
||||
|
||||
### Generic
|
||||
|
||||
| Name | CRUD | Transactional </br>(Supports Actors) | ETag | Status | Component version | Since |
|
||||
|----------------------------------------------------------------|------|---------------------|------|--------| -------|------|
|
||||
| [Aerospike]({{< ref setup-aerospike.md >}}) | ✅ | ❌ | ✅ | Alpha | v1 | 1.0 |
|
||||
| [Apache Cassandra]({{< ref setup-cassandra.md >}}) | ✅ | ❌ | ❌ | Alpha | v1 | 1.0 |
|
||||
| [Cloudstate]({{< ref setup-cloudstate.md >}}) | ✅ | ❌ | ✅ | Alpha | v1 | 1.0 |
|
||||
| [Couchbase]({{< ref setup-couchbase.md >}}) | ✅ | ❌ | ✅ | Alpha | v1 | 1.0 |
|
||||
| [Hashicorp Consul]({{< ref setup-consul.md >}}) | ✅ | ❌ | ❌ | Alpha | v1 | 1.0 |
|
||||
| [Hazelcast]({{< ref setup-hazelcast.md >}}) | ✅ | ❌ | ❌ | Alpha | v1 | 1.0 |
|
||||
| [Memcached]({{< ref setup-memcached.md >}}) | ✅ | ❌ | ❌ | Alpha | v1 | 1.0 |
|
||||
| [MongoDB]({{< ref setup-mongodb.md >}}) | ✅ | ✅ | ❌ | GA | v1 | 1.0 |
|
||||
| [MySQL]({{< ref setup-mysql.md >}}) | ✅ | ✅ | ✅ | Alpha | v1 | 1.0 |
|
||||
| [PostgreSQL]({{< ref setup-postgresql.md >}}) | ✅ | ✅ | ✅ | Alpha | v1 | 1.0 |
|
||||
| [Redis]({{< ref setup-redis.md >}}) | ✅ | ✅ | ✅ | GA | v1 | 1.0 |
|
||||
| [RethinkDB]({{< ref setup-rethinkdb.md >}}) | ✅ | ✅ | ✅ | Alpha | v1 | 1.0 |
|
||||
| [Zookeeper]({{< ref setup-zookeeper.md >}}) | ✅ | ❌ | ✅ | Alpha | v1 | 1.0 |
|
||||
| Name | CRUD | Transactional | ETag | Actors | Status | Component version | Since |
|
||||
|----------------------------------------------------------------|------|---------------------|------|------|--------| -------|------|
|
||||
| [Aerospike]({{< ref setup-aerospike.md >}}) | ✅ | ❌ | ✅ | ❌ | Alpha | v1 | 1.0 |
|
||||
| [Apache Cassandra]({{< ref setup-cassandra.md >}}) | ✅ | ❌ | ❌ | ❌ | Alpha | v1 | 1.0 |
|
||||
| [Cloudstate]({{< ref setup-cloudstate.md >}}) | ✅ | ❌ | ✅ | ❌ | Alpha | v1 | 1.0 |
|
||||
| [Couchbase]({{< ref setup-couchbase.md >}}) | ✅ | ❌ | ✅ | ❌ | Alpha | v1 | 1.0 |
|
||||
| [Hashicorp Consul]({{< ref setup-consul.md >}}) | ✅ | ❌ | ❌ | ❌ | Alpha | v1 | 1.0 |
|
||||
| [Hazelcast]({{< ref setup-hazelcast.md >}}) | ✅ | ❌ | ❌ | ❌ | Alpha | v1 | 1.0 |
|
||||
| [Memcached]({{< ref setup-memcached.md >}}) | ✅ | ❌ | ❌ | ❌ | Alpha | v1 | 1.0 |
|
||||
| [MongoDB]({{< ref setup-mongodb.md >}}) | ✅ | ✅ | ✅ | ✅ | GA | v1 | 1.0 |
|
||||
| [MySQL]({{< ref setup-mysql.md >}}) | ✅ | ✅ | ✅ | ✅ | Alpha | v1 | 1.0 |
|
||||
| [PostgreSQL]({{< ref setup-postgresql.md >}}) | ✅ | ✅ | ✅ | ✅ | Alpha | v1 | 1.0 |
|
||||
| [Redis]({{< ref setup-redis.md >}}) | ✅ | ✅ | ✅ | ✅ | GA | v1 | 1.0 |
|
||||
| [RethinkDB]({{< ref setup-rethinkdb.md >}}) | ✅ | ✅ | ✅ | ✅ | Alpha | v1 | 1.0 |
|
||||
| [Zookeeper]({{< ref setup-zookeeper.md >}}) | ✅ | ❌ | ✅ | ❌ | Alpha | v1 | 1.0 |
|
||||
|
||||
|
||||
### Amazon Web Services (AWS)
|
||||
| Name | CRUD | Transactional </br>(Supports Actors) | ETag | Status | Component version | Since |
|
||||
|------------------------------------------------------------------|------|---------------------|------|--------|-----|-------|
|
||||
| [AWS DynamoDB]({{< ref setup-dynamodb.md>}}) | ✅ | ❌ | ❌ | Alpha | v1 | 1.0 |
|
||||
| Name | CRUD | Transactional | ETag | Actors | Status | Component version | Since |
|
||||
|------------------------------------------------------------------|------|---------------------|------|--------|-----|-----|-------|
|
||||
| [AWS DynamoDB]({{< ref setup-dynamodb.md>}}) | ✅ | ❌ | ❌ | ❌ | Alpha | v1 | 1.0 |
|
||||
|
||||
### Google Cloud Platform (GCP)
|
||||
| Name | CRUD | Transactional </br>(Supports Actors) | ETag | Status | Component version | Since |
|
||||
|-------------------------------------------------------|------|---------------------|------|--------|-----|------|
|
||||
| [GCP Firestore]({{< ref setup-firestore.md >}}) | ✅ | ❌ | ❌ | Alpha | v1 | 1.0 |
|
||||
| Name | CRUD | Transactional | ETag | Actors | Status | Component version | Since |
|
||||
|------------------------------------------------------------------|------|---------------------|------|--------|-----|-----|-------|
|
||||
| [GCP Firestore]({{< ref setup-firestore.md >}}) | ✅ | ❌ | ❌ | ❌ | Alpha | v1 | 1.0 |
|
||||
|
||||
### Microsoft Azure
|
||||
|
||||
| Name | CRUD | Transactional </br>(Supports Actors) | ETag | Status | Component version | Since |
|
||||
|------------------------------------------------------------------|------|---------------------|------|--------| ------|-----|
|
||||
| [Azure Blob Storage]({{< ref setup-azure-blobstorage.md >}}) | ✅ | ❌ | ✅ | GA | v1 | 1.0 |
|
||||
| [Azure CosmosDB]({{< ref setup-azure-cosmosdb.md >}}) | ✅ | ✅ | ✅ | GA | v1 | 1.0 |
|
||||
| [Azure SQL Server]({{< ref setup-sqlserver.md >}}) | ✅ | ✅ | ✅ | Alpha | v1 | 1.0 |
|
||||
| [Azure Table Storage]({{< ref setup-azure-tablestorage.md >}}) | ✅ | ❌ | ✅ | Alpha | v1 | 1.0 |
|
||||
| Name | CRUD | Transactional | ETag | Actors | Status | Component version | Since |
|
||||
|------------------------------------------------------------------|------|---------------------|------|--------|-----|-----|-------|
|
||||
| [Azure Blob Storage]({{< ref setup-azure-blobstorage.md >}}) | ✅ | ❌ | ✅ | ❌ | GA | v1 | 1.0 |
|
||||
| [Azure CosmosDB]({{< ref setup-azure-cosmosdb.md >}}) | ✅ | ✅ | ✅ | ✅ | GA | v1 | 1.0 |
|
||||
| [Azure SQL Server]({{< ref setup-sqlserver.md >}}) | ✅ | ✅ | ✅ | ✅ | Alpha | v1 | 1.0 |
|
||||
| [Azure Table Storage]({{< ref setup-azure-tablestorage.md >}}) | ✅ | ❌ | ✅ | ❌ | Alpha | v1 | 1.0 |
|
||||
|
|
|
@ -112,7 +112,7 @@ We can use [Helm](https://helm.sh/) to quickly create a Redis instance in our Ku
|
|||
4. Once your instance is created, you'll need to grab the Host name (FQDN) and your access key.
|
||||
- for the Host name, navigate to the resource's "Overview" page and copy the "Host name"
|
||||
- for your access key navigate to "Access Keys" under "Settings" and copy your key.
|
||||
5. Finally, we need to add our key and our host to a `redis.yaml` file that Dapr can apply to our cluster. If you're running a sample, you'll add the host and key to the provided `redis.yaml`. If you're creating a project from the ground up, you'll create a `redis.yaml` file as specified in [Configuration](#configuration). Set the `redisHost` key to `[HOST NAME FROM PREVIOUS STEP]:6379` and the `redisPassword` key to the key you copied in step 4. **Note:** In a production-grade application, follow [secret management](https://github.com/dapr/docs/blob/master/concepts/components/secrets.md) instructions to securely manage your secrets.
|
||||
5. Finally, we need to add our key and our host to a `redis.yaml` file that Dapr can apply to our cluster. If you're running a sample, you'll add the host and key to the provided `redis.yaml`. If you're creating a project from the ground up, you'll create a `redis.yaml` file as specified in [Configuration](#configuration). Set the `redisHost` key to `[HOST NAME FROM PREVIOUS STEP]:6379` and the `redisPassword` key to the key you copied in step 4. **Note:** In a production-grade application, follow [secret management]({{< ref component-secrets.md >}}) instructions to securely manage your secrets.
|
||||
|
||||
> **NOTE:** Dapr pub/sub uses [Redis Streams](https://redis.io/topics/streams-intro) that was introduced by Redis 5.0, which isn't currently available on Azure Managed Redis Cache. Consequently, you can use Azure Managed Redis Cache only for state persistence.
|
||||
{{% /codetab %}}
|
||||
|
|
|
@ -35,3 +35,4 @@ The following table shows all the supported pod Spec annotations supported by Da
|
|||
| `dapr.io/sidecar-readiness-probe-period-seconds` | How often (in seconds) to perform the sidecar readiness probe. Read more [here](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes). Default is `6`
|
||||
| `dapr.io/sidecar-readiness-probe-threshold` | When the sidecar readiness probe fails, Kubernetes will try N times before giving up. In this case, the Pod will be marked Unready. Read more about `failureThreshold` [here](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes). Default is `3`
|
||||
| `dapr.io/http-max-request-size` | Increases the maximum request body size of the http and grpc servers, in MB, to handle uploading of big files. Default is `4` MB
|
||||
| `dapr.io/env` | List of environment variables to be injected into the sidecar, expressed as a comma-separated string of key=value pairs.
|
||||
|
|
|
@ -10,12 +10,7 @@ aliases:
|
|||
|
||||
When setting up Kubernetes you can use either the Dapr CLI or Helm.
|
||||
|
||||
As part of the Dapr initialization the following pods are installed:
|
||||
|
||||
- **dapr-operator:** Manages component updates and Kubernetes services endpoints for Dapr (state stores, pub/subs, etc.)
|
||||
- **dapr-sidecar-injector:** Injects Dapr into annotated deployment pods
|
||||
- **dapr-placement:** Used for actors only. Creates mapping tables that map actor instances to pods
|
||||
- **dapr-sentry:** Manages mTLS between services and acts as a certificate authority
|
||||
For more information on what is deployed to your Kubernetes cluster read the [Kubernetes overview]({{< ref kubernetes-overview.md >}})
|
||||
|
||||
## Prerequisites
|
||||
|
||||
|
@ -45,19 +40,18 @@ You can install Dapr to a Kubernetes cluster using the [Dapr CLI]({{< ref instal
|
|||
|
||||
The `-k` flag initializes Dapr on the Kubernetes cluster in your current context.
|
||||
|
||||
{{% alert title="Target cluster" color="primary" %}}
|
||||
{{% alert title="Ensure correct cluster is set" color="warning" %}}
|
||||
Make sure the correct "target" cluster is set. Check your current context with `kubectl config get-contexts` to verify. You can set a different context using `kubectl config use-context <CONTEXT>`.
|
||||
{{% /alert %}}
|
||||
|
||||
Run on your local machine:
|
||||
Run the following command on your local machine to initialize Dapr on your cluster:
|
||||
|
||||
```bash
|
||||
dapr init -k
|
||||
```
|
||||
|
||||
```
|
||||
```bash
|
||||
⌛ Making the jump to hyperspace...
|
||||
ℹ️ Note: To install Dapr using Helm, see here: https://github.com/dapr/docs/blob/master/getting-started/environment-setup.md#using-helm-advanced
|
||||
|
||||
✅ Deploying the Dapr control plane to your cluster...
|
||||
✅ Success! Dapr has been installed to namespace dapr-system. To verify, run "dapr status -k" in your terminal. To get started, go here: https://aka.ms/dapr-getting-started
|
||||
|
@ -72,7 +66,7 @@ dapr init -k -n mynamespace
|
|||
```
|
||||
|
||||
|
||||
### Install in highly available mode:
|
||||
### Install in highly available mode
|
||||
|
||||
You can run Dapr with 3 replicas of each control plane pod in the dapr-system namespace for [production scenarios]({{< ref kubernetes-production.md >}}).
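A sketch of the CLI form of this, assuming the `--enable-ha` flag available in the 1.0 CLI:

```bash
# Initialize Dapr on the cluster with 3 replicas of each control plane pod
dapr init -k --enable-ha=true
```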
|
||||
|
||||
|
@ -90,26 +84,23 @@ dapr init -k --enable-mtls=false
|
|||
|
||||
### Uninstall Dapr on Kubernetes with CLI
|
||||
|
||||
```bash
|
||||
dapr uninstall --kubernetes
|
||||
```
|
||||
Run the following command on your local machine to uninstall Dapr on your cluster:
|
||||
|
||||
### Upgrade Dapr on a cluster
|
||||
To upgrade Dapr on a Kubernetes cluster you can use the CLI. See [upgrade Dapr on Kubernetes]({{< ref kubernetes-upgrade.md >}}) for more information.
|
||||
```bash
|
||||
dapr uninstall -k
|
||||
```
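For the upgrade path mentioned above, the CLI equivalent is sketched here; the `--runtime-version` value is a placeholder and `dapr upgrade -k` assumes CLI 1.0 or later:

```bash
# Upgrade the Dapr control plane in place, preserving configuration
dapr upgrade -k --runtime-version 1.0.1
```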
|
||||
|
||||
## Install with Helm (advanced)
|
||||
|
||||
You can install Dapr on Kubernetes using a Helm 3 chart.
|
||||
|
||||
|
||||
{{% alert title="Note" color="primary" %}}
|
||||
{{% alert title="Ensure you are on Helm v3" color="primary" %}}
|
||||
The latest Dapr helm chart no longer supports Helm v2. Please migrate from Helm v2 to Helm v3 by following [this guide](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/).
|
||||
{{% /alert %}}
|
||||
|
||||
### Add and install Dapr Helm chart
|
||||
|
||||
1. Make sure [Helm 3](https://github.com/helm/helm/releases) is installed on your machine
|
||||
|
||||
2. Add Helm repo and update
|
||||
|
||||
```bash
|
||||
|
@ -118,47 +109,29 @@ The latest Dapr helm chart no longer supports Helm v2. Please migrate from Helm
|
|||
# See which chart versions are available
|
||||
helm search repo dapr --devel --versions
|
||||
```
|
||||
|
||||
3. Install the Dapr chart on your cluster in the `dapr-system` namespace.
|
||||
|
||||
```bash
|
||||
helm upgrade --install dapr dapr/dapr \
|
||||
--version=1.0.0 \
|
||||
--version=1.0.1 \
|
||||
--namespace dapr-system \
|
||||
--create-namespace \
|
||||
--wait
|
||||
```
|
||||
|
||||
To install in high availability mode:
|
||||
|
||||
To install in high availability mode:
|
||||
|
||||
```bash
|
||||
helm upgrade --install dapr dapr/dapr \
|
||||
--version=1.0.0 \
|
||||
--version=1.0.1 \
|
||||
--namespace dapr-system \
|
||||
--create-namespace \
|
||||
--set global.ha.enabled=true \
|
||||
--wait
|
||||
```
|
||||
|
||||
|
||||
See [Guidelines for production ready deployments on Kubernetes]({{<ref kubernetes-production.md>}}) for more information on installing and upgrading Dapr using Helm.
|
||||
|
||||
### Verify installation
|
||||
|
||||
Once the chart installation is complete, verify that the dapr-operator, dapr-placement, dapr-sidecar-injector and dapr-sentry pods are running in the `dapr-system` namespace:
|
||||
|
||||
```bash
|
||||
kubectl get pods --namespace dapr-system
|
||||
```
|
||||
|
||||
```
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
dapr-dashboard-7bd6cbf5bf-xglsr 1/1 Running 0 40s
|
||||
dapr-operator-7bd6cbf5bf-xglsr 1/1 Running 0 40s
|
||||
dapr-placement-7f8f76778f-6vhl2 1/1 Running 0 40s
|
||||
dapr-sidecar-injector-8555576b6f-29cqm 1/1 Running 0 40s
|
||||
dapr-sentry-9435776c7f-8f7yd 1/1 Running 0 40s
|
||||
```
|
||||
|
||||
|
||||
See [Guidelines for production ready deployments on Kubernetes]({{<ref kubernetes-production.md>}}) for more information on installing and upgrading Dapr using Helm.
|
||||
|
||||
### Uninstall Dapr on Kubernetes
|
||||
|
||||
|
@ -171,6 +144,22 @@ helm uninstall dapr --namespace dapr-system
|
|||
- Read [this guide]({{< ref kubernetes-production.md >}}) for recommended Helm chart values for production setups
|
||||
- See [this page](https://github.com/dapr/dapr/blob/master/charts/dapr/README.md) for details on Dapr Helm charts.
|
||||
|
||||
## Verify installation
|
||||
|
||||
Once the installation is complete, verify that the dapr-operator, dapr-placement, dapr-sidecar-injector and dapr-sentry pods are running in the `dapr-system` namespace:
|
||||
|
||||
```bash
|
||||
kubectl get pods --namespace dapr-system
|
||||
```
|
||||
|
||||
```bash
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
dapr-dashboard-7bd6cbf5bf-xglsr 1/1 Running 0 40s
|
||||
dapr-operator-7bd6cbf5bf-xglsr 1/1 Running 0 40s
|
||||
dapr-placement-7f8f76778f-6vhl2 1/1 Running 0 40s
|
||||
dapr-sidecar-injector-8555576b6f-29cqm 1/1 Running 0 40s
|
||||
dapr-sentry-9435776c7f-8f7yd 1/1 Running 0 40s
|
||||
```
|
||||
|
||||
## Next steps
|
||||
|
||||
|
|
|
@ -22,21 +22,21 @@ You will need a Kubernetes cluster with Windows nodes. Many Kubernetes providers
|
|||
|
||||
2. Once you have set up the cluster, you should see that it has both Windows and Linux nodes available
|
||||
|
||||
```bash
|
||||
kubectl get nodes -o wide
|
||||
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
|
||||
aks-nodepool1-11819434-vmss000000 Ready agent 6d v1.17.9 10.240.0.4 <none> Ubuntu 16.04.6 LTS 4.15.0-1092-azure docker://3.0.10+azure
|
||||
aks-nodepool1-11819434-vmss000001 Ready agent 6d v1.17.9 10.240.0.35 <none> Ubuntu 16.04.6 LTS 4.15.0-1092-azure docker://3.0.10+azure
|
||||
aks-nodepool1-11819434-vmss000002 Ready agent 5d10h v1.17.9 10.240.0.129 <none> Ubuntu 16.04.6 LTS 4.15.0-1092-azure docker://3.0.10+azure
|
||||
akswin000000 Ready agent 6d v1.17.9 10.240.0.66 <none> Windows Server 2019 Datacenter 10.0.17763.1339 docker://19.3.5
|
||||
akswin000001 Ready agent 6d v1.17.9 10.240.0.97 <none> Windows Server 2019 Datacenter 10.0.17763.1339 docker://19.3.5
|
||||
```
|
||||
## Installing the Dapr Control Plane
|
||||
```bash
|
||||
kubectl get nodes -o wide
|
||||
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
|
||||
aks-nodepool1-11819434-vmss000000 Ready agent 6d v1.17.9 10.240.0.4 <none> Ubuntu 16.04.6 LTS 4.15.0-1092-azure docker://3.0.10+azure
|
||||
aks-nodepool1-11819434-vmss000001 Ready agent 6d v1.17.9 10.240.0.35 <none> Ubuntu 16.04.6 LTS 4.15.0-1092-azure docker://3.0.10+azure
|
||||
aks-nodepool1-11819434-vmss000002 Ready agent 5d10h v1.17.9 10.240.0.129 <none> Ubuntu 16.04.6 LTS 4.15.0-1092-azure docker://3.0.10+azure
|
||||
akswin000000 Ready agent 6d v1.17.9 10.240.0.66 <none> Windows Server 2019 Datacenter 10.0.17763.1339 docker://19.3.5
|
||||
akswin000001 Ready agent 6d v1.17.9 10.240.0.97 <none> Windows Server 2019 Datacenter 10.0.17763.1339 docker://19.3.5
|
||||
```
|
||||
## Installing the Dapr control plane
|
||||
|
||||
If you are installing using the Dapr CLI or via a helm chart, simply follow the normal deployment procedures:
|
||||
[Installing Dapr on a Kubernetes cluster]({{< ref "install-dapr-selfhost.md#installing-Dapr-on-a-kubernetes-cluster" >}})
|
||||
|
||||
Affinity will be automatically set for kubernetes.io/os=linux. This will be sufficient for most users, as Kubernetes requires at least one Linux node pool.
|
||||
Affinity will be automatically set for `kubernetes.io/os=linux`. This will be sufficient for most users, as Kubernetes requires at least one Linux node pool.
|
||||
|
||||
> **Note:** Dapr control plane containers are built and tested for both Windows and Linux; however, we generally recommend using the Linux control plane containers. They tend to be smaller and have a much larger user base.
|
||||
|
||||
|
@ -53,109 +53,112 @@ In order to launch a Dapr application on Windows, you'll first need to create a
|
|||
|
||||
1. Create a deployment YAML
|
||||
|
||||
Here is a sample deployment with nodeAffinity set to "windows". Modify as needed for your application.
|
||||
```yaml
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: yourwinapp
|
||||
labels:
|
||||
app: applabel
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: applablel
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: applabel
|
||||
annotations:
|
||||
dapr.io/enabled: "true"
|
||||
dapr.io/id: "addapp"
|
||||
dapr.io/port: "6000"
|
||||
dapr.io/config: "appconfig"
|
||||
spec:
|
||||
containers:
|
||||
- name: add
|
||||
image: yourreponsitory/your-windows-dapr-container:your-tag
|
||||
ports:
|
||||
- containerPort: 6000
|
||||
imagePullPolicy: Always
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: kubernetes.io/os
|
||||
operator: In
|
||||
values:
|
||||
- windows
|
||||
```
|
||||
This deployment yaml will be the same as any other dapr application, with an additional spec.template.spec.affinity section as shown above.
|
||||
Here is a sample deployment with nodeAffinity set to "windows". Modify as needed for your application.
|
||||
```yaml
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: yourwinapp
|
||||
labels:
|
||||
app: applabel
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: applabel
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: applabel
|
||||
annotations:
|
||||
dapr.io/enabled: "true"
|
||||
dapr.io/id: "addapp"
|
||||
dapr.io/port: "6000"
|
||||
dapr.io/config: "appconfig"
|
||||
spec:
|
||||
containers:
|
||||
- name: add
|
||||
image: yourrepository/your-windows-dapr-container:your-tag
|
||||
ports:
|
||||
- containerPort: 6000
|
||||
imagePullPolicy: Always
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: kubernetes.io/os
|
||||
operator: In
|
||||
values:
|
||||
- windows
|
||||
```
|
||||
This deployment yaml will be the same as any other dapr application, with an additional spec.template.spec.affinity section as shown above.
|
||||
|
||||
2. Deploy to your Kubernetes cluster
|
||||
|
||||
```bash
|
||||
kubectl apply -f deploy_windows.yaml
|
||||
```
|
||||
```bash
|
||||
kubectl apply -f deploy_windows.yaml
|
||||
```
|
||||
|
||||
### Linux applications
|
||||
If you already have a Dapr application that runs on Linux, you'll still need to add affinity rules as above, but choose the Linux affinity instead.
|
||||
|
||||
1. Create a deployment YAML
|
||||
|
||||
Here is a sample deployment with nodeAffinity set to "linux". Modify as needed for your application.
|
||||
```yaml
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: yourlinuxapp
|
||||
labels:
|
||||
app: yourlabel
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: yourlabel
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: yourlabel
|
||||
annotations:
|
||||
dapr.io/enabled: "true"
|
||||
dapr.io/id: "addapp"
|
||||
dapr.io/port: "6000"
|
||||
dapr.io/config: "appconfig"
|
||||
spec:
|
||||
containers:
|
||||
- name: add
|
||||
image: yourreponsitory/your-application:your-tag
|
||||
ports:
|
||||
- containerPort: 6000
|
||||
imagePullPolicy: Always
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: kubernetes.io/os
|
||||
operator: In
|
||||
values:
|
||||
- linux
|
||||
```
|
||||
Here is a sample deployment with nodeAffinity set to "linux". Modify as needed for your application.
|
||||
```yaml
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: yourlinuxapp
|
||||
labels:
|
||||
app: yourlabel
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: yourlabel
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: yourlabel
|
||||
annotations:
|
||||
dapr.io/enabled: "true"
|
||||
dapr.io/id: "addapp"
|
||||
dapr.io/port: "6000"
|
||||
dapr.io/config: "appconfig"
|
||||
spec:
|
||||
containers:
|
||||
- name: add
|
||||
image: yourrepository/your-application:your-tag
|
||||
ports:
|
||||
- containerPort: 6000
|
||||
imagePullPolicy: Always
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: kubernetes.io/os
|
||||
operator: In
|
||||
values:
|
||||
- linux
|
||||
```
|
||||
|
||||
2. Deploy to your Kubernetes cluster
|
||||
```bash
|
||||
kubectl apply -f deploy_linux.yaml
|
||||
```
|
||||
|
||||
```bash
|
||||
kubectl apply -f deploy_linux.yaml
|
||||
```
|
||||
|
||||
## Cleanup
|
||||
|
||||
```
|
||||
```bash
|
||||
kubectl delete -f deploy_linux.yaml
|
||||
kubectl delete -f deploy_windows.yaml
|
||||
helm uninstall dapr
|
||||
```
|
||||
|
||||
## Related links
|
||||
|
||||
- See the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) for examples of more advanced configuration via node affinity
|
||||
|
|
|
@ -6,12 +6,22 @@ weight: 10000
|
|||
description: "Overview of how to get Dapr running on your Kubernetes cluster"
|
||||
---
|
||||
|
||||
Dapr can be configured to run on any [Kubernetes cluster](https://github.com/dapr/quickstarts/tree/master/hello-kubernetes). In Kubernetes the `dapr-sidecar-injector` and `dapr-operator` services provide first class integration to launch Dapr as a sidecar container in the same pod as the service container and provide notifications of Dapr component updates provisioned into the cluster. Additionally, the `dapr-sidecar-injector` also injects the environment variables `DAPR_HTTP_PORT` and `DAPR_GRPC_PORT` into **all** the containers in the pod to enable user defined applications to easily communicate with Dapr without hardcoding Dapr port values.
|
||||
## Dapr on Kubernetes
|
||||
|
||||
The `dapr-sentry` service is a certificate authority that enables mutual TLS between Dapr sidecar instances for secure data encryption. For more information on the `Sentry` service read the [security overview]({{< ref "security-concept.md" >}})
|
||||
Dapr can be configured to run on any Kubernetes cluster. To achieve this, Dapr begins by deploying the `dapr-sidecar-injector`, `dapr-operator`, `dapr-placement`, and `dapr-sentry` Kubernetes services. These provide first-class integration to make running applications with Dapr easy.
|
||||
- **dapr-operator:** Manages [component]({{< ref components >}}) updates and Kubernetes services endpoints for Dapr (state stores, pub/subs, etc.)
|
||||
- **dapr-sidecar-injector:** Injects Dapr into [annotated](#adding-dapr-to-a-kubernetes-cluster) deployment pods and adds the environment variables `DAPR_HTTP_PORT` and `DAPR_GRPC_PORT` to enable user-defined applications to easily communicate with Dapr without hard-coding Dapr port values.
|
||||
- **dapr-placement:** Used for [actors]({{< ref actors >}}) only. Creates mapping tables that map actor instances to pods
|
||||
- **dapr-sentry:** Manages mTLS between services and acts as a certificate authority. For more information read the [security overview]({{< ref "security-concept.md" >}}).
|
||||
|
||||
<img src="/images/overview_kubernetes.png" width=800>
|
||||
|
||||
## Deploying Dapr to a Kubernetes cluster
|
||||
|
||||
Read [this guide]({{< ref kubernetes-deploy.md >}}) to learn how to deploy Dapr to your Kubernetes cluster.
|
||||
|
||||
## Adding Dapr to a Kubernetes deployment
|
||||
|
||||
Deploying and running a Dapr enabled application in your Kubernetes cluster is as simple as adding a few annotations to the deployment schemes. To give your service an `id` and `port` known to Dapr, turn on tracing through configuration, and launch the Dapr sidecar container, you annotate your Kubernetes deployment like this.
|
||||
|
||||
```yml
|
||||
|
@ -21,6 +31,14 @@ Deploying and running a Dapr enabled application into your Kubernetes cluster is
|
|||
dapr.io/app-port: "3000"
|
||||
dapr.io/config: "tracing"
|
||||
```
|
||||
You can see some examples [here](https://github.com/dapr/quickstarts/tree/master/hello-kubernetes/deploy) in the Kubernetes getting started sample.
|
||||
|
||||
Explore additional [Kubernetes related topics]({{<ref kubernetes>}}) for more information about working with Dapr on Kubernetes.
|
||||
## Quickstart
|
||||
|
||||
You can see some examples [here](https://github.com/dapr/quickstarts/tree/master/hello-kubernetes) in the Kubernetes getting started quickstart.
|
||||
|
||||
## Related links
|
||||
|
||||
- [Deploy Dapr to a Kubernetes cluster]({{< ref kubernetes-deploy >}})
|
||||
- [Upgrade Dapr on a Kubernetes cluster]({{< ref kubernetes-upgrade >}})
|
||||
- [Production guidelines for Dapr on Kubernetes]({{< ref kubernetes-production.md >}})
|
||||
- [Dapr Kubernetes Quickstart](https://github.com/dapr/quickstarts/tree/master/hello-kubernetes)
|
||||
|
|
|
@ -15,11 +15,13 @@ Use the following resource settings might serve as a starting point. Requirement
|
|||
|
||||
| Deployment | CPU | Memory
|
||||
|-------------|-----|-------
|
||||
| Operator | Limit: 1, Request: 100m | Limit: 200Mi, Request: 100Mi
|
||||
| Sidecar Injector | Limit: 1, Request: 100m | Limit: 200Mi, Request: 30Mi
|
||||
| Sentry | Limit: 1, Request: 100m | Limit: 200Mi, Request: 30Mi
|
||||
| Placement | Limit: 1, Request: 250m | Limit: 150Mi, Request: 75Mi
|
||||
| Dashboard | Limit: 200m, Request: 50m | Limit: 200Mi, Request: 20Mi
|
||||
| **Operator** | Limit: 1, Request: 100m | Limit: 200Mi, Request: 100Mi
|
||||
| **Sidecar Injector** | Limit: 1, Request: 100m | Limit: 200Mi, Request: 30Mi
|
||||
| **Sentry** | Limit: 1, Request: 100m | Limit: 200Mi, Request: 30Mi
|
||||
| **Placement** | Limit: 1, Request: 250m | Limit: 150Mi, Request: 75Mi
|
||||
| **Dashboard** | Limit: 200m, Request: 50m | Limit: 200Mi, Request: 20Mi
|
||||
|
||||
### Helm
|
||||
|
||||
When installing Dapr using Helm, no default limit/request values are set. Each component has a `resources` option (for example, `dapr_dashboard.resources`), which you can use to tune the Dapr control plane to fit your environment. The [Helm chart readme](https://github.com/dapr/dapr/blob/master/charts/dapr/README.md) has detailed information and examples. For local/dev installations, you might simply want to skip configuring the `resources` options.
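As a sketch of tuning one of these options, the standard Kubernetes `requests`/`limits` fields can be set under the component's `resources` key; the exact values below are illustrative only:

```bash
# Example: constrain the dashboard's resources at install time
helm upgrade --install dapr dapr/dapr \
  --namespace dapr-system \
  --set dapr_dashboard.resources.requests.cpu=50m \
  --set dapr_dashboard.resources.requests.memory=20Mi \
  --set dapr_dashboard.resources.limits.cpu=200m \
  --set dapr_dashboard.resources.limits.memory=200Mi
```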
|
||||
|
||||
|
@ -27,23 +29,23 @@ When installing Dapr using Helm, no default limit/request values are set. Each c
|
|||
|
||||
The following Dapr control plane deployments are optional:
|
||||
|
||||
* Placement - Needed for Dapr Actors
|
||||
* Sentry - Needed for mTLS for service to service invocation
|
||||
* Dashboard - Needed for operational view of the cluster
|
||||
- **Placement** - Needed for Dapr Actors
|
||||
- **Sentry** - Needed for mTLS for service to service invocation
|
||||
- **Dashboard** - Needed for operational view of the cluster
|
||||
|
||||
## Sidecar resource settings
|
||||
|
||||
To set the resource assignments for the Dapr sidecar, see the annotations [here]({{< ref "kubernetes-annotations.md" >}}).
|
||||
The specific annotations related to resource constraints are:
|
||||
|
||||
* `dapr.io/sidecar-cpu-limit`
|
||||
* `dapr.io/sidecar-memory-limit`
|
||||
* `dapr.io/sidecar-cpu-request`
|
||||
* `dapr.io/sidecar-memory-request`
|
||||
- `dapr.io/sidecar-cpu-limit`
|
||||
- `dapr.io/sidecar-memory-limit`
|
||||
- `dapr.io/sidecar-cpu-request`
|
||||
- `dapr.io/sidecar-memory-request`
|
||||
|
||||
If not set, the dapr sidecar will run without resource settings, which may lead to issues. For a production-ready setup it is strongly recommended to configure these settings.
|
||||
|
||||
For more details on configuring resource in Kubernetes see [Assign Memory Resources to Containers and Pods](https://kubernetes.io/docs/tasks/configure-pod-container/assign-memory-resource/) and [Assign CPU Resources to Containers and Pods](https://kubernetes.io/docs/tasks/configure-pod-container/assign-cpu-resource/)
|
||||
For more details on configuring resources in Kubernetes, see [Assign Memory Resources to Containers and Pods](https://kubernetes.io/docs/tasks/configure-pod-container/assign-memory-resource/) and [Assign CPU Resources to Containers and Pods](https://kubernetes.io/docs/tasks/configure-pod-container/assign-cpu-resource/).
|
||||
|
||||
Example settings for the dapr sidecar in a production-ready setup:
|
||||
|
||||
|
@ -55,13 +57,16 @@ Example settings for the dapr sidecar in a production-ready setup:
|
|||
|
||||
The CPU and memory limits above account for the fact that Dapr is intended to perform a high number of I/O-bound operations. It is strongly recommended that you use a monitoring tool to baseline the sidecar (and app) containers and tune these settings based on those baselines.
|
||||
|
||||
## Highly-available mode
|
||||
|
||||
When deploying Dapr in a production-ready configuration, it's recommended to deploy with a highly available configuration of the control plane, which creates 3 replicas of each control plane pod in the dapr-system namespace.
|
||||
|
||||
## Deploying Dapr with Helm

When deploying to a production cluster, it's recommended to use Helm. Although the Dapr CLI can install Dapr onto a Kubernetes cluster, it is intended for use in dev/test scenarios.
For a full guide on deploying Dapr with Helm, visit [this guide]({{< ref "kubernetes-deploy.md#install-with-helm-advanced" >}}).

### Parameters file
It is recommended to create a values file instead of specifying parameters on the command-line. This file should be checked in to source control so that you can track changes made to it.

For a full list of all available options you can set in the values file (or by using the `--set` command-line option), see https://github.com/dapr/dapr/blob/master/charts/dapr/README.md.
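
As an illustration, a production values file might collect a few of the global options from that README; the keys below are assumptions to verify against the chart version you deploy:

```yaml
# values.yaml (sketch) - keys assumed from the chart README; adjust to your needs
global:
  ha:
    enabled: true        # highly available control plane (see above)
  mtls:
    enabled: true        # keep mutual TLS on
  logAsJson: true        # JSON logs for easier ingestion by log collectors
```
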
@ -108,18 +113,18 @@ Dapr supports zero downtime upgrades. The upgrade path includes the following st
### Upgrading the CLI

To upgrade the Dapr CLI, [download the latest version](https://github.com/dapr/cli/releases) of the CLI and ensure it's in your path.

### Upgrading the control plane

See [steps to upgrade Dapr on a Kubernetes cluster]({{< ref "kubernetes-upgrade.md#helm" >}}).

### Updating the data plane (sidecars)

The last step is to update pods that are running Dapr to pick up the new version of the Dapr runtime.
To do that, simply issue a rollout restart command for any deployment that has the `dapr.io/enabled` annotation:

```bash
kubectl rollout restart deploy/<Application deployment name>
```
@ -138,27 +143,29 @@ When properly configured, Dapr ensures secure communication. It can also make yo

It is recommended that a production-ready deployment includes the following settings:

1. **Mutual Authentication (mTLS)** should be enabled. Note that Dapr has mTLS on by default. For details on how to bring your own certificates, see [here]({{< ref "mtls.md#bringing-your-own-certificates" >}}).

2. **App to Dapr API authentication** is enabled. This is the communication between your application and the Dapr sidecar. To secure the Dapr API from unauthorized application access, it is recommended to enable Dapr's token-based authentication. See [enable API token authentication in Dapr]({{< ref "api-token.md" >}}) for details.

3. **Dapr to App API authentication** is enabled. This is the communication between Dapr and your application. It ensures that Dapr knows it is communicating with an authorized application. See [Authenticate requests from Dapr using token authentication]({{< ref "app-api-token.md" >}}) for details.

4. All component YAMLs should have **secret data configured in a secret store** and not hard-coded in the YAML file. See [here]({{< ref "component-secrets.md" >}}) on how to use secrets with Dapr components (a sketch follows this list).

5. The Dapr **control plane is installed on a dedicated namespace** such as `dapr-system`.

6. Dapr also supports **scoping components for certain applications**. This is not a required practice and can be enabled according to your security needs; see [here]({{< ref "component-scopes.md" >}}) for more info (the sketch after this list also shows scoping).

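As a sketch of points 4 and 6 above, the component below pulls its Redis password from a Kubernetes secret and is scoped to a single app ID; the component name, host, secret name, and app ID are hypothetical:

```yaml
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: statestore
  namespace: production
spec:
  type: state.redis
  version: v1
  metadata:
  - name: redisHost
    value: redis-master.production.svc.cluster.local:6379
  - name: redisPassword
    secretKeyRef:
      name: redis-secret       # hypothetical Kubernetes secret
      key: redis-password
auth:
  secretStore: kubernetes
scopes:
- nodeapp                      # only this app ID can load the component
```
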
## Tracing and metrics configuration

Dapr has tracing and metrics enabled by default. It is *recommended* that you set up distributed tracing and metrics for your applications and the Dapr control plane in production.
If you already have your own observability set-up, you can disable tracing and metrics for Dapr.

### Tracing
To configure a tracing backend for Dapr, visit [this]({{< ref "setup-tracing.md" >}}) link.

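For example, tracing is configured through a Dapr `Configuration` resource; the sketch below points at a hypothetical Zipkin collector address and samples every request:

```yaml
apiVersion: dapr.io/v1alpha1
kind: Configuration
metadata:
  name: tracing
  namespace: default
spec:
  tracing:
    samplingRate: "1"   # sample every request; lower this in high-traffic systems
    zipkin:
      endpointAddress: "http://zipkin.default.svc.cluster.local:9411/api/v2/spans"
```

Applications then opt in to this configuration with the `dapr.io/config: "tracing"` annotation.
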
### Metrics
For metrics, Dapr exposes a Prometheus endpoint listening on port 9090 which can be scraped by Prometheus.

To set up Prometheus, Grafana and other monitoring tools with Dapr, visit [this]({{< ref "monitoring" >}}) link.
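
As an illustration, a Prometheus scrape job targeting Dapr-enabled pods on that port might look like the sketch below; the job name and relabeling rules are assumptions to adapt to your own monitoring setup:

```yaml
# prometheus.yml fragment (sketch)
scrape_configs:
- job_name: dapr-sidecars
  kubernetes_sd_configs:
  - role: pod
  relabel_configs:
  # Keep only pods that run a Dapr sidecar (dapr.io/enabled annotation)
  - source_labels: [__meta_kubernetes_pod_annotation_dapr_io_enabled]
    action: keep
    regex: "true"
  # Point the scrape target at the default Dapr metrics port (9090)
  - source_labels: [__address__]
    action: replace
    regex: ([^:]+)(?::\d+)?
    replacement: $1:9090
    target_label: __address__
```
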
@ -8,23 +8,25 @@ description: "Follow these steps to upgrade Dapr on Kubernetes and ensure a smoo
## Prerequisites

- [Dapr CLI]({{< ref install-dapr-cli.md >}})
- [Helm 3](https://github.com/helm/helm/releases) (if using Helm)

## Upgrade existing cluster to 1.0.1
There are two ways to upgrade the Dapr control plane on a Kubernetes cluster: using the Dapr CLI or using Helm.

### Dapr CLI
The example below shows how to upgrade to version 1.0.1:

```bash
dapr upgrade -k --runtime-version=1.0.1
```

You can provide all the available Helm chart configurations using the Dapr CLI.
See [here](https://github.com/dapr/cli#supplying-helm-values) for more info.

### Helm

From version 1.0.0 onwards, upgrading Dapr using Helm is no longer a disruptive action since existing certificate values will automatically be re-used.

1. Upgrade Dapr from 1.0.0 (or newer) to any [NEW VERSION] > v1.0.0:
@ -51,9 +51,9 @@ INFO[0001] leader is established. instance=Nicoletaz-L10.
```

From here on, you can follow the samples created for the [java-sdk](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/actors), [python-sdk](https://github.com/dapr/python-sdk/tree/master/examples/demo_actor) or [dotnet-sdk]({{< ref "dotnet-actor-howto.md" >}}) for running an application with Actors enabled.

Update the state store configuration files so that the Redis host and password match your setup. Additionally, to enable it as an actor state store, add the metadata piece, similar to the [sample Java Redis component](https://github.com/dapr/java-sdk/blob/master/examples/components/state/redis.yaml) definition.

```yaml
- name: actorStateStore
@ -25,11 +25,11 @@ description: "Follow these steps to upgrade Dapr in self-hosted mode and ensure
dapr init
```

1. Ensure you are using the latest version of Dapr (1.0.1) with:

```bash
$ dapr --version
CLI version: 1.0.0
Runtime version: 1.0.1
```
@ -31,7 +31,7 @@ The table below shows the versions of Dapr releases that have been tested togeth
| Release date | Runtime | CLI | SDKs | Dashboard | Status |
|--------------------|:--------:|:--------|---------|---------|---------|
| Feb 17th 2021 | 1.0.1<br/>(Mar 4th 2021) | 1.0.0 | Java 1.0.0 <br/>Go 1.0.0 <br/>PHP 1.0.0 <br/>Python 1.0.0 <br/>.NET 1.0.0 | 0.6.0 | Supported (current) |

## Upgrade paths
After the 1.0 release of the runtime, there may be situations where it is necessary to explicitly upgrade through an additional release to reach the desired target. For example, an upgrade from v1.0 to v1.2 may need to pass through v1.1.

@ -0,0 +1,28 @@
{{ if .Path }}
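{{/* Builds GitHub "edit this page" and "create issue" links for the current page, taking the site language and any docs submodule subdirectory into account. */}}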
{{ $pathFormatted := replace .Path "\\" "/" }}
{{ $gh_repo := ($.Param "github_repo") }}
{{ $gh_subdir := ($.Param "github_subdir") }}
{{ $gh_project_repo := ($.Param "github_project_repo") }}
{{ $gh_branch := (default "master" ($.Param "github_branch")) }}
{{ if $gh_repo }}
<div class="td-page-meta ml-2 pb-1 pt-2 mb-0">
{{ $gh_repo_path := printf "%s/content/%s" $gh_branch $pathFormatted }}
{{ if and ($gh_subdir) (.Site.Language.Lang) }}
{{ $gh_repo_path = printf "%s/%s/content/%s/%s" $gh_branch $gh_subdir ($.Site.Language.Lang) $pathFormatted }}
{{ else if .Site.Language.Lang }}
{{ $gh_repo_path = printf "%s/content/%s/%s" $gh_branch ($.Site.Language.Lang) $pathFormatted }}
{{ else if $gh_subdir }}
{{ $gh_repo_path = printf "%s/%s/content/%s" $gh_branch $gh_subdir $pathFormatted }}
{{ end }}
{{ $editURL := printf "%s/edit/%s" $gh_repo $gh_repo_path }}
{{ $createURL := printf "%s/edit/%s" $gh_repo $gh_repo_path }}
{{ $issuesURL := printf "%s/issues/new/choose" $gh_repo }}
{{ $newPageStub := resources.Get "stubs/new-page-template.md" }}
{{ $newPageQS := querify "value" $newPageStub.Content "filename" "change-me.md" | safeURL }}
{{ $newPageURL := printf "%s/new/%s?%s" $gh_repo $gh_repo_path $newPageQS }}

<a href="{{ $editURL }}" target="_blank"><i class="fa fa-edit fa-fw"></i> {{ T "post_edit_this" }}</a>
<a href="{{ $issuesURL }}" target="_blank"><i class="fab fa-github fa-fw"></i> {{ T "post_create_issue" }}</a>
</div>
{{ end }}
{{ end }}
@ -0,0 +1 @@
Subproject commit e26148b866552f94a148c5ad6fe81e066775b804