Merge pull request #209 from lburgazzoli/dapr-1.14

bump dapr to v1.14.1
This commit is contained in:
salaboy 2024-08-19 15:47:25 -05:00 committed by GitHub
commit b6a28f1f86
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
34 changed files with 851 additions and 102 deletions

View File

@ -24,7 +24,7 @@ LOCALBIN := $(PROJECT_PATH)/bin
HELM_CHART_REPO ?= https://dapr.github.io/helm-charts
HELM_CHART ?= dapr
HELM_CHART_VERSION ?= 1.13.3
HELM_CHART_VERSION ?= 1.14.1
HELM_CHART_URL ?= https://raw.githubusercontent.com/dapr/helm-charts/master/dapr-$(HELM_CHART_VERSION).tgz
OPENSHIFT_VERSIONS ?= v4.12

View File

@ -21,7 +21,7 @@ limitations under the License.
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)

View File

@ -83,6 +83,12 @@ rules:
- get
- patch
- update
- apiGroups:
- policy
resources:
- poddisruptionbudgets
verbs:
- '*'
- apiGroups:
- rbac.authorization.k8s.io
resources:

6
go.mod
View File

@ -3,7 +3,7 @@ module github.com/dapr/kubernetes-operator
go 1.22.6
require (
github.com/dapr/go-sdk v1.10.1
github.com/dapr/go-sdk v1.11.0
github.com/go-logr/logr v1.4.2
github.com/gorilla/mux v1.8.1
github.com/hashicorp/go-cleanhttp v0.5.2
@ -45,7 +45,7 @@ require (
github.com/containerd/containerd v1.7.12 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/cyphar/filepath-securejoin v0.2.4 // indirect
github.com/dapr/dapr v1.13.3 // indirect
github.com/dapr/dapr v1.14.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/distribution/reference v0.5.0 // indirect
github.com/docker/cli v25.0.1+incompatible // indirect
@ -142,7 +142,7 @@ require (
go.opentelemetry.io/otel/trace v1.28.0 // indirect
go.starlark.net v0.0.0-20230814145427-12f4cb8177e4 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.26.0 // indirect
go.uber.org/zap v1.27.0 // indirect
golang.org/x/crypto v0.25.0 // indirect
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
golang.org/x/net v0.27.0 // indirect

16
go.sum
View File

@ -67,10 +67,10 @@ github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
github.com/dapr/dapr v1.13.3 h1:LJec44e5eRCugthRCvP8VRUi4B0MHW72lBEWCaVhXaM=
github.com/dapr/dapr v1.13.3/go.mod h1:0Yb0r3YViI4/D5yXV5nSYjRlEOarlXBeO089lW08VXE=
github.com/dapr/go-sdk v1.10.1 h1:g6mM2RXyGkrzsqWFfCy8rw+UAt1edQEgRaQXT+XP4PE=
github.com/dapr/go-sdk v1.10.1/go.mod h1:lPjyF/xubh35fbdNdKkxBbFxFNCmta4zmvsk0JxuUG0=
github.com/dapr/dapr v1.14.0 h1:SIQsNX1kH31JRDIS4k8IZ6eomM/BAcOP844PhQIT+BQ=
github.com/dapr/dapr v1.14.0/go.mod h1:oDNgaPHQIDZ3G4n4g89TElXWgkluYwcar41DI/oF4gw=
github.com/dapr/go-sdk v1.11.0 h1:clANpOQd6MsfvSa6snaX8MVk6eRx26Vsj5GxGdQ6mpE=
github.com/dapr/go-sdk v1.11.0/go.mod h1:btZ/tX8eYnx0fg3HiJUku8J5QBRXHsp3kAB1BUiTxXY=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
@ -383,8 +383,8 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
@ -439,8 +439,8 @@ go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=

View File

@ -1,5 +1,5 @@
apiVersion: v1
appVersion: 1.13.3
appVersion: 1.14.1
description: A Helm chart for Dapr on Kubernetes
name: dapr
version: 1.13.3
version: 1.14.1

View File

@ -11,6 +11,7 @@ This chart installs Dapr via "child-charts":
* Dapr Sidecar injector
* Dapr Sentry
* Dapr Placement
* Dapr Scheduler
## Prerequisites
@ -34,7 +35,7 @@ For more details on initializing Helm, [read the Helm docs](https://helm.sh/docs
2. Install the Dapr chart on your cluster in the dapr-system namespace:
```
helm install dapr dapr/dapr --namespace dapr-system --wait
helm install dapr dapr/dapr --namespace dapr-system --create-namespace --wait
```
## Verify installation
@ -72,50 +73,53 @@ dapr_operator:
The Helm chart has the following configuration options that can be supplied:
### Global options:
| Parameter | Description | Default |
|-------------------------------------------|-------------------------------------------------------------------------|-------------------------|
| `global.registry` | Docker image registry | `docker.io/daprio` |
| `global.tag` | Docker image version tag | latest release |
| `global.logAsJson` | Json log format for control plane services | `false` |
| `global.imagePullPolicy` | Global Control plane service imagePullPolicy | `IfNotPresent` |
| `global.imagePullSecrets` | Control plane service images pull secrets for docker registry. Its value can be: a string with single imagePullSecret, an array of `{name: pullSecret}` maps (Kubernetes-style), or an array of strings | `[]` |
| `global.ha.enabled` | Highly Availability mode enabled for control plane | `false` |
| `global.ha.replicaCount` | Number of replicas of control plane services in Highly Availability mode<br>Note that in HA mode, Dapr Placement has 3 replicas and that cannot be configured. | `3` |
| `global.ha.disruption.minimumAvailable` | Minimum amount of available instances for control plane. This can either be effective count or %. | `` |
| `global.ha.disruption.maximumUnavailable` | Maximum amount of instances that are allowed to be unavailable for control plane. This can either be effective count or %. | `25%` |
| `global.prometheus.enabled` | Prometheus metrics enablement for control plane services | `true` |
| `global.prometheus.port` | Prometheus scrape http endpoint port | `9090` |
| `global.mtls.enabled` | Mutual TLS enablement | `true` |
| `global.mtls.workloadCertTTL` | TTL for workload cert | `24h` |
| `global.mtls.allowedClockSkew` | Allowed clock skew for workload cert rotation | `15m` |
| `global.mtls.controlPlaneTrustDomain ` | Trust domain for control plane | `cluster.local` |
| `global.mtls.sentryAddress` | Sentry address for control plane | `dapr-sentry.{{ .ReleaseNamespace }}.svc:443` |
| `global.mtls.mountSentryToken` | Gates whether the sentry bound service account token volume is mounted to control plane pods | `true` |
| `global.extraVolumes.sentry` | Array of extra volumes to make available to sentry pods | `[]` |
| `global.extraVolumes.placement` | Array of extra volumes to make available to placement pods | `[]` |
| `global.extraVolumes.operator` | Array of extra volumes to make available to operator pods | `[]` |
| `global.extraVolumes.injector` | Array of extra volumes to make available to sidecar injector pods | `[]` |
| `global.extraVolumeMounts.sentry` | Array of extra volume mounts to make available to sentry pod containers | `[]` |
| `global.extraVolumeMounts.placement` | Array of extra volume mounts to make available to placement pod containers | `[]` |
| `global.extraVolumeMounts.operator` | Array of extra volume mounts to make available to operator pod containers | `[]` |
| `global.extraVolumeMounts.injector` | Array of extra volume mounts to make available to sidecar injector pod containers | `[]` |
| `global.dnsSuffix` | Kuberentes DNS suffix | `.cluster.local` |
| `global.daprControlPlaneOs` | Operating System for Dapr control plane | `linux` |
| `global.daprControlPlaneArch` | CPU Architecture for Dapr control plane | `amd64` |
| `global.nodeSelector` | Pods will be scheduled onto a node node whose labels match the nodeSelector | `{}` |
| `global.tolerations` | Pods will be allowed to schedule onto a node whose taints match the tolerations | `[]` |
| `global.labels` | Custom pod labels | `{}` |
| `global.k8sLabels` | Custom metadata labels | `{}` |
| `global.issuerFilenames.ca` | Custom name of the file containing the root CA certificate inside the container | `ca.crt` |
| `global.issuerFilenames.cert` | Custom name of the file containing the leaf certificate inside the container | `issuer.crt` |
| `global.issuerFilenames.key` | Custom name of the file containing the leaf certificate's key inside the container | `issuer.key` |
| `global.actors.enabled` | Enables the Dapr actors building block. When "false", the Dapr Placement service is not installed, and attempting to use Dapr actors will fail. | `true` |
| `global.actors.serviceName` | Name of the service that provides actor placement services. | `placement` |
| `global.reminders.serviceName` | Name of the service that provides reminders functionality. If empty (the default), uses the built-in reminders capabilities in Dapr sidecars. | |
| `global.seccompProfile` | SeccompProfile for Dapr control plane services | `""` |
| `global.rbac.namespaced` | Removes cluster wide permissions where applicable | `false` |
| `global.argoRolloutServiceReconciler.enabled` | Enable the service reconciler for Dapr-enabled Argo Rollouts | `false` |
| `global.priorityClassName` | Adds `priorityClassName` to Dapr pods | `""` |
| Parameter | Description | Default |
|-----------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------|
| `global.registry` | Docker image registry | `docker.io/daprio` |
| `global.tag` | Docker image version tag | latest release |
| `global.logAsJson` | Json log format for control plane services | `false` |
| `global.imagePullPolicy` | Global Control plane service imagePullPolicy | `IfNotPresent` |
| `global.imagePullSecrets` | Control plane service images pull secrets for docker registry. Its value can be: a string with single imagePullSecret, an array of `{name: pullSecret}` maps (Kubernetes-style), or an array of strings | `[]` |
| `global.ha.enabled`                           | High Availability mode enabled for control plane                                                                                                                                                          | `false`                 |
| `global.ha.replicaCount`                      | Number of replicas of control plane services in High Availability mode<br>Note that in HA mode, Dapr Placement has 3 replicas and that cannot be configured.                                              | `3`                     |
| `global.ha.disruption.minimumAvailable` | Minimum amount of available instances for control plane. This can either be effective count or %. | `` |
| `global.ha.disruption.maximumUnavailable` | Maximum amount of instances that are allowed to be unavailable for control plane. This can either be effective count or %. | `25%` |
| `global.prometheus.enabled` | Prometheus metrics enablement for control plane services | `true` |
| `global.prometheus.port` | Prometheus scrape http endpoint port | `9090` |
| `global.mtls.enabled` | Mutual TLS enablement | `true` |
| `global.mtls.workloadCertTTL` | TTL for workload cert | `24h` |
| `global.mtls.allowedClockSkew` | Allowed clock skew for workload cert rotation | `15m` |
| `global.mtls.controlPlaneTrustDomain ` | Trust domain for control plane | `cluster.local` |
| `global.mtls.sentryAddress` | Sentry address for control plane | `dapr-sentry.{{ .ReleaseNamespace }}.svc:443` |
| `global.mtls.mountSentryToken` | Gates whether the sentry bound service account token volume is mounted to control plane pods | `true` |
| `global.extraVolumes.sentry` | Array of extra volumes to make available to sentry pods | `[]` |
| `global.extraVolumes.placement` | Array of extra volumes to make available to placement pods | `[]` |
| `global.extraVolumes.operator` | Array of extra volumes to make available to operator pods | `[]` |
| `global.extraVolumes.injector` | Array of extra volumes to make available to sidecar injector pods | `[]` |
| `global.extraVolumes.scheduler` | Array of extra volumes to make available to scheduler pods | `[]` |
| `global.extraVolumeMounts.sentry` | Array of extra volume mounts to make available to sentry pod containers | `[]` |
| `global.extraVolumeMounts.placement` | Array of extra volume mounts to make available to placement pod containers | `[]` |
| `global.extraVolumeMounts.operator` | Array of extra volume mounts to make available to operator pod containers | `[]` |
| `global.extraVolumeMounts.injector` | Array of extra volume mounts to make available to sidecar injector pod containers | `[]` |
| `global.extraVolumeMounts.scheduler` | Array of extra volume mounts to make available to scheduler pod containers | `[]` |
| `global.dnsSuffix`                            | Kubernetes DNS suffix                                                                                                                                                                                   | `.cluster.local`        |
| `global.daprControlPlaneOs` | Operating System for Dapr control plane | `linux` |
| `global.daprControlPlaneArch` | CPU Architecture for Dapr control plane | `amd64` |
| `global.nodeSelector`                         | Pods will be scheduled onto a node whose labels match the nodeSelector                                                                                                                                    | `{}`                    |
| `global.tolerations` | Pods will be allowed to schedule onto a node whose taints match the tolerations | `[]` |
| `global.labels` | Custom pod labels | `{}` |
| `global.k8sLabels` | Custom metadata labels | `{}` |
| `global.issuerFilenames.ca` | Custom name of the file containing the root CA certificate inside the container | `ca.crt` |
| `global.issuerFilenames.cert` | Custom name of the file containing the leaf certificate inside the container | `issuer.crt` |
| `global.issuerFilenames.key` | Custom name of the file containing the leaf certificate's key inside the container | `issuer.key` |
| `global.actors.enabled` | Enables the Dapr actors building block. When "false", the Dapr Placement service is not installed, and attempting to use Dapr actors will fail. | `true` |
| `global.actors.serviceName` | Name of the service that provides actor placement services. | `placement` |
| `global.reminders.serviceName` | Name of the service that provides reminders functionality. If empty (the default), uses the built-in reminders capabilities in Dapr sidecars. | |
| `global.seccompProfile` | SeccompProfile for Dapr control plane services | `""` |
| `global.rbac.namespaced` | Removes cluster wide permissions where applicable | `false` |
| `global.argoRolloutServiceReconciler.enabled` | Enable the service reconciler for Dapr-enabled Argo Rollouts | `false` |
| `global.priorityClassName` | Adds `priorityClassName` to Dapr pods | `""` |
| `global.scheduler.enabled` | Enables the Dapr Scheduler building block. When "false", the Dapr Scheduler service is not installed, and attempting to schedule jobs in Dapr will fail. | `true` |
### Dapr Operator options:
| Parameter | Description | Default |
@ -152,7 +156,7 @@ The Helm chart has the follow configuration options that can be supplied:
| `dapr_placement.maxActorApiLevel` | Sets the `max-api-level` flag which prevents the Actor API level from going above this value. The Placement service reports to all connected hosts the Actor API level as the minimum value observed in all actor hosts in the cluster. Actor hosts with a lower API level than the current API level in the cluster will not be able to connect to Placement. Setting a cap helps making sure that older versions of Dapr can connect to Placement as actor hosts, but may limit the capabilities of the actor subsystem. The default value of -1 means no cap. | `-1` |
| `dapr_placement.minActorApiLevel` | Sets the `min-api-level` flag, which enforces a minimum value for the Actor API level in the cluster. | `0` |
| `dapr_placement.scaleZero` | If true, the StatefulSet is deployed with a zero scale, regardless of the values of `global.ha.enabled` or `dapr_placement.ha` | `false` |
| `dapr_placement.runAsNonRoot` | Boolean value for `securityContext.runAsNonRoot`. Does not apply unless `forceInMemoryLog` is set to `true`. You may have to set this to `false` when running in Minikube | `false` |
| `dapr_placement.runAsNonRoot` | Boolean value for `securityContext.runAsNonRoot`. Does not apply unless `forceInMemoryLog` is set to `true`. You may have to set this to `false` when running in Minikube | `true` |
| `dapr_placement.resources` | Value of `resources` attribute. Can be used to set memory/cpu resources/limits. See the section "Resource configuration" above. Defaults to empty | `{}` |
| `dapr_placement.debug.enabled` | Boolean value for enabling debug mode | `{}` |
| `dapr_placement.metadataEnabled` | Boolean value for enabling placement tables metadata HTTP API | `false` |
@ -166,6 +170,25 @@ The Helm chart has the follow configuration options that can be supplied:
| `dapr_rbac.secretReader.enabled` | Deploys a default secret reader Role and RoleBinding | `true` |
| `dapr_rbac.secretReader.namespace` | Namespace for the default secret reader | `default` |
### Dapr Scheduler options:
| Parameter | Description | Default |
|-----------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------|
| `dapr_scheduler.logLevel` | Service Log level | `info` |
| `dapr_scheduler.image.name` | Service docker image name (`global.registry/dapr_scheduler.image.name`) | `dapr` |
| `dapr_scheduler.cluster.etcdDataDirPath` | Mount path for persistent volume for log store in unix-like system | `/var/run/data/dapr-scheduler/etcd-data-dir` |
| `dapr_scheduler.cluster.etcdDataDirWinPath` | Mount path for persistent volume for log store in windows | `C:\\etcd-data-dir` |
| `dapr_scheduler.cluster.inMemoryStorage`      | When `dapr_scheduler.cluster.inMemoryStorage` is set to `true`, sets the Scheduler data directory volume to an ephemeral in-memory mount rather than a persistent volume claim. Note that this results in complete **data loss** of job data in Scheduler on restarts. | `false` |
| `dapr_scheduler.cluster.storageClassName` | When set, uses this class to provision the database storage volume. | |
| `dapr_scheduler.cluster.storageSize` | When `dapr_scheduler.cluster.storageClassName` is set, sets the volume size request | `1Gi` |
| `dapr_scheduler.securityContext.runAsNonRoot` | Boolean value for `securityContext.runAsNonRoot`. You may have to set this to `false` when running in Minikube | `true` |
| `dapr_scheduler.securityContext.fsGroup` | Integer value for `securityContext.fsGroup`. Useful for adding the Scheduler process to the file system group that can write to the mounted database volume. | `65532` |
| `dapr_scheduler.resources` | Value of `resources` attribute. Can be used to set memory/cpu resources/limits. See the section "Resource configuration" above. Defaults to empty | `{}` |
| `dapr_scheduler.debug.enabled` | Boolean value for enabling debug mode | `{}` |
| `dapr_scheduler.statefulsetAnnotations` | Custom annotations for Dapr Scheduler Statefulset | `{}` |
| `dapr_scheduler.service.annotations` | Custom annotations for "dapr-scheduler-server" Service resource | `{}` |
| `dapr_scheduler.extraEnvVars` | Dictionary (key: value pairs) to use as extra environment variables in the injected sidecar containers (e.g. `my-env-var: "my-val"`, etc) | `{}` |
### Dapr Sentry options:
| Parameter | Description | Default |
|---|---|---|
@ -216,7 +239,7 @@ The Helm chart has the follow configuration options that can be supplied:
This command creates three replicas of each control plane pod for an HA deployment (with the exception of the Placement pod) in the dapr-system namespace:
```
helm install dapr dapr/dapr --namespace dapr-system --set global.ha.enabled=true --wait
helm install dapr dapr/dapr --namespace dapr-system --create-namespace --set global.ha.enabled=true --wait
```
## Example of installing edge version of Dapr
@ -224,7 +247,7 @@ helm install dapr dapr/dapr --namespace dapr-system --set global.ha.enabled=true
This command deploys the latest `edge` version of Dapr to `dapr-system` namespace. This is useful if you want to deploy the latest version of Dapr to test a feature or some capability in your Kubernetes cluster.
```
helm install dapr dapr/dapr --namespace dapr-system --set-string global.tag=edge --wait
helm install dapr dapr/dapr --namespace dapr-system --create-namespace --set-string global.tag=edge --wait
```
## Example of installing dapr on Minikube
@ -252,7 +275,7 @@ global:
Install dapr:
```bash
helm install dapr dapr/dapr --namespace dapr-system --values values.yml --wait
helm install dapr dapr/dapr --namespace dapr-system --create-namespace --values values.yml --wait
```
## Example of debugging dapr
@ -289,6 +312,6 @@ Port forward the debugging port so that it's visible to your IDE:
kubectl port-forward dapr-operator-5c99475ffc-m9z9f 40000:40000 -n dapr-system
```
## Example of using nodeSelector option
```
helm install dapr dapr/dapr --namespace dapr-system --set global.nodeSelector.myLabel=myValue --wait
```bash
helm install dapr dapr/dapr --namespace dapr-system --create-namespace --set global.nodeSelector.myLabel=myValue --wait
```

View File

@ -2,4 +2,4 @@ apiVersion: v1
appVersion: "1.0"
description: A Helm chart for Dapr configuration
name: dapr_config
version: 1.13.3
version: 1.14.1

View File

@ -2,4 +2,4 @@ apiVersion: v1
appVersion: "1.0"
description: A Helm chart for Dapr Kubernetes Operator
name: dapr_operator
version: 1.13.3
version: 1.14.1

View File

@ -7,9 +7,17 @@ metadata:
{{- range $key, $value := .Values.global.k8sLabels }}
{{ $key }}: {{ tpl $value $ }}
{{- end }}
{{- if .Values.apiService.annotations }}
{{ toYaml .Values.apiService.annotations | indent 4}}
{{- end }}
{{- if or .Values.apiService.annotations .Values.global.prometheus.enabled }}
annotations:
{{- if .Values.global.prometheus.enabled }}
prometheus.io/scrape: "true"
prometheus.io/port: "{{ .Values.global.prometheus.port }}"
prometheus.io/path: "/"
{{- end }}
{{- if .Values.apiService.annotations }}
{{- .Values.apiService.annotations | toYaml | nindent 4 }}
{{- end }}
{{- end }}
spec:
selector:
app: dapr-operator
@ -28,6 +36,12 @@ spec:
targetPort: {{ .Values.ports.targetPort }}
name: legacy
{{ end }}
{{- if eq .Values.global.prometheus.enabled true }}
- name: metrics
port: {{ .Values.global.prometheus.port }}
targetPort: {{ .Values.global.prometheus.port }}
protocol: TCP
{{- end}}
---
apiVersion: v1
kind: Service
@ -39,6 +53,7 @@ metadata:
{{ $key }}: {{ tpl $value $ }}
{{- end }}
{{- if .Values.webhookService.annotations }}
annotations:
{{ toYaml .Values.webhookService.annotations | indent 4}}
{{- end }}
spec:

View File

@ -2,4 +2,4 @@ apiVersion: v1
appVersion: "1.0"
description: A Helm chart for Dapr Kubernetes placement
name: dapr_placement
version: 1.13.3
version: 1.14.1

View File

@ -9,9 +9,17 @@ metadata:
{{- range $key, $value := .Values.global.k8sLabels }}
{{ $key }}: {{ tpl $value $ }}
{{- end }}
{{- if .Values.service.annotations }}
{{ toYaml .Values.service.annotations | indent 4}}
{{- end }}
{{- if or .Values.service.annotations .Values.global.prometheus.enabled }}
annotations:
{{- if .Values.global.prometheus.enabled }}
prometheus.io/scrape: "true"
prometheus.io/port: "{{ .Values.global.prometheus.port }}"
prometheus.io/path: "/"
{{- end }}
{{- if .Values.service.annotations }}
{{- .Values.service.annotations | toYaml | nindent 4 }}
{{- end }}
{{- end }}
spec:
selector:
app: dapr-placement-server
@ -23,5 +31,11 @@ spec:
port: {{ .Values.ports.apiPort }}
- name: raft-node
port: {{ .Values.ports.raftRPCPort }}
{{- if eq .Values.global.prometheus.enabled true }}
- name: metrics
port: {{ .Values.global.prometheus.port }}
targetPort: {{ .Values.global.prometheus.port }}
protocol: TCP
{{- end}}
clusterIP: None
{{- end }}
{{- end }}

View File

@ -2,4 +2,4 @@ apiVersion: v1
appVersion: "1.0"
description: A Helm chart for Dapr Kubernetes RBAC components
name: dapr_rbac
version: 1.13.3
version: 1.14.1

View File

@ -69,6 +69,9 @@ rules:
- apiGroups: ["dapr.io"]
resources: ["configurations"]
verbs: [ "get" ]
- apiGroups: ["apps"]
resources: ["statefulsets"]
verbs: [ "get" ]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1

View File

@ -0,0 +1,20 @@
{{/* Allows to create a ResourceQuota for the priority class if it is set to system-node-critical or system-cluster-critical
this is required in some cases to ensure that the priority class is allowed in the namespace
https://kubernetes.io/docs/concepts/policy/resource-quotas/#limit-priority-class-consumption-by-default */}}
{{ if or (eq .Values.global.priorityClassName "system-node-critical") (eq .Values.global.priorityClassName "system-cluster-critical") }}
apiVersion: v1
kind: ResourceQuota
metadata:
name: system-critical-quota
namespace: {{ .Release.Namespace }}
labels:
{{- range $key, $value := .Values.global.k8sLabels }}
{{ $key }}: {{ tpl $value $ }}
{{- end }}
spec:
scopeSelector:
matchExpressions:
- operator : In
scopeName: PriorityClass
values: [{{.Values.global.priorityClassName}}]
{{ end }}

View File

@ -0,0 +1,50 @@
{{- if (eq .Values.global.scheduler.enabled true) }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: dapr-scheduler
namespace: {{ .Release.Namespace }}
labels:
{{- range $key, $value := .Values.global.k8sLabels }}
{{ $key }}: {{ tpl $value $ }}
{{- end }}
---
{{- if eq .Values.global.rbac.namespaced true }}
kind: Role
{{- else }}
kind: ClusterRole
{{- end }}
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: dapr-scheduler
labels:
{{- range $key, $value := .Values.global.k8sLabels }}
{{ $key }}: {{ tpl $value $ }}
{{- end }}
rules: []
---
{{- if eq .Values.global.rbac.namespaced true }}
kind: RoleBinding
{{- else }}
kind: ClusterRoleBinding
{{- end }}
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: dapr-scheduler
labels:
{{- range $key, $value := .Values.global.k8sLabels }}
{{ $key }}: {{ tpl $value $ }}
{{- end }}
subjects:
- kind: ServiceAccount
name: dapr-scheduler
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
{{- if eq .Values.global.rbac.namespaced true }}
kind: Role
{{- else }}
kind: ClusterRole
{{- end }}
name: dapr-scheduler
{{- end }}

View File

@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@ -0,0 +1,5 @@
apiVersion: v1
appVersion: "1.0"
description: A Helm chart for Dapr Scheduler
name: dapr_scheduler
version: 1.14.1

View File

@ -0,0 +1,96 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "dapr_scheduler.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "dapr_scheduler.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create initial cluster peer list dynamically based on replicaCount.
*/}}
{{- define "dapr_scheduler.initialcluster" -}}
{{- $initialCluster := "" -}}
{{- $namespace := .Release.Namespace -}}
{{- $replicaCount := include "dapr_scheduler.get-replicas" . | int -}}
{{- range $i, $e := until $replicaCount -}}
{{- $instanceName := printf "dapr-scheduler-server-%d" $i -}}
{{- $svcName := printf "%s.dapr-scheduler-server.%s.svc.cluster.local" $instanceName $namespace -}}
{{- $peer := printf "%s=http://%s:%d" $instanceName $svcName (int $.Values.ports.etcdGRPCPeerPort) -}}
{{- $initialCluster = printf "%s%s" $initialCluster $peer -}}
{{- if ne (int $i) (sub $replicaCount 1) -}}
{{- $initialCluster = printf "%s," $initialCluster -}}
{{- end -}}
{{- end -}}
{{- $initialCluster -}}
{{- end -}}
{{/*
Create etcd client ports list dynamically based on replicaCount.
*/}}
{{- define "dapr_scheduler.etcdclientports" -}}
{{- $etcdClientPorts := "" -}}
{{- $namespace := .Release.Namespace -}}
{{- $replicaCount := include "dapr_scheduler.get-replicas" . | int -}}
{{- range $i, $e := until $replicaCount -}}
{{- $instanceName := printf "dapr-scheduler-server-%d" $i -}}
{{- $clientPort := int $.Values.ports.etcdGRPCClientPort -}}
{{- $instancePortPair := printf "%s=%d" $instanceName $clientPort -}}
{{- if gt $i 0 -}}
{{- $etcdClientPorts = printf "%s,%s" $etcdClientPorts $instancePortPair -}}
{{- else -}}
{{- $etcdClientPorts = $instancePortPair -}}
{{- end -}}
{{- end -}}
{{- $etcdClientPorts -}}
{{- end -}}
{{/*
Create etcd client http ports list dynamically based on replicaCount.
*/}}
{{- define "dapr_scheduler.etcdclienthttpports" -}}
{{- $etcdClientHttpPorts := "" -}}
{{- $namespace := .Release.Namespace -}}
{{- $replicaCount := include "dapr_scheduler.get-replicas" . | int -}}
{{- range $i, $e := until $replicaCount -}}
{{- $instanceName := printf "dapr-scheduler-server-%d" $i -}}
{{- $clientPort := int $.Values.ports.etcdHTTPClientPort -}}
{{- $instancePortPair := printf "%s=%d" $instanceName $clientPort -}}
{{- if gt $i 0 -}}
{{- $etcdClientHttpPorts = printf "%s,%s" $etcdClientHttpPorts $instancePortPair -}}
{{- else -}}
{{- $etcdClientHttpPorts = $instancePortPair -}}
{{- end -}}
{{- end -}}
{{- $etcdClientHttpPorts -}}
{{- end -}}
{{/*
Gets the number of replicas. If global.ha.enabled is true, then 3. Otherwise, 1.
*/}}
{{- define "dapr_scheduler.get-replicas" -}}
{{- $replicas := 0 }}
{{- if and (eq true .Values.global.ha.enabled) (eq .Values.global.scheduler.enabled true) }}
{{- $replicas = 3 }}
{{- else if and (eq false .Values.global.ha.enabled) (eq .Values.global.scheduler.enabled true) -}}
{{- $replicas = 1 }}
{{- end }}
{{- $replicas }}
{{- end -}}

View File

@ -0,0 +1,32 @@
{{- /*
PodDisruptionBudget for the dapr-scheduler-server StatefulSet.
Only rendered when the scheduler is enabled; falls back to policy/v1beta1
on clusters that do not serve policy/v1.
NOTE(review): if both global.ha.disruption.minimumAvailable and
global.ha.disruption.maximumUnavailable are set, both fields are rendered,
which the Kubernetes API rejects — presumably the values are expected to be
mutually exclusive; confirm upstream.
*/}}
{{- if (eq .Values.global.scheduler.enabled true) }}
{{- if .Capabilities.APIVersions.Has "policy/v1" }}
apiVersion: policy/v1
{{- else }}
apiVersion: policy/v1beta1
{{- end }}
kind: PodDisruptionBudget
metadata:
  name: dapr-scheduler-server-disruption-budget
  namespace: {{ .Release.Namespace }}
  labels:
    app: dapr-scheduler-server
    {{- range $key, $value := .Values.global.k8sLabels }}
    {{ $key }}: {{ tpl $value $ }}
    {{- end }}
spec:
  {{- if .Values.global.ha.disruption.minimumAvailable }}
  minAvailable: {{ .Values.global.ha.disruption.minimumAvailable }}
  {{- end }}
  {{- if .Values.global.ha.disruption.maximumUnavailable }}
  maxUnavailable: {{ .Values.global.ha.disruption.maximumUnavailable }}
  {{- end }}
  selector:
    matchLabels:
      app: dapr-scheduler-server
      {{- range $key, $value := .Values.global.k8sLabels }}
      {{ $key }}: {{ tpl $value $ }}
      {{- end }}
      {{- with .Values.global.labels }}
      {{- toYaml . | nindent 6 }}
      {{- end }}
{{- end }}

View File

@ -0,0 +1,45 @@
{{- /*
Headless Service for dapr-scheduler-server, exposing the scheduler API and
the embedded etcd client/peer ports. Only rendered when the scheduler is
enabled.
*/}}
{{- if (eq .Values.global.scheduler.enabled true) }}
kind: Service
apiVersion: v1
metadata:
  name: dapr-scheduler-server
  namespace: {{ .Release.Namespace }}
  labels:
    app: dapr-scheduler-server
    {{- range $key, $value := .Values.global.k8sLabels }}
    {{ $key }}: {{ tpl $value $ }}
    {{- end }}
  {{- if or .Values.service.annotations .Values.global.prometheus.enabled }}
  annotations:
    {{- if .Values.global.prometheus.enabled }}
    prometheus.io/scrape: "true"
    prometheus.io/port: "{{ .Values.global.prometheus.port }}"
    prometheus.io/path: "/"
    {{- end }}
    {{- if .Values.service.annotations }}
    {{- .Values.service.annotations | toYaml | nindent 4 }}
    {{- end }}
  {{- end }}
spec:
  selector:
    app: dapr-scheduler-server
  # scheduler must be able to resolve pod address to join initial cluster peers
  # before POD is ready
  publishNotReadyAddresses: true
  ports:
  - name: api
    port: 50006
  - name: etcd-client
    port: {{ .Values.ports.etcdGRPCClientPort }}
  - name: etcd-httpclient
    port: {{ .Values.ports.etcdHTTPClientPort }}
  - name: etcd-peer
    port: {{ .Values.ports.etcdGRPCPeerPort }}
  {{- if eq .Values.global.prometheus.enabled true }}
  - name: metrics
    port: {{ .Values.global.prometheus.port }}
    targetPort: {{ .Values.global.prometheus.port }}
    protocol: TCP
  {{- end}}
  clusterIP: None # make the service headless
{{- end }}

View File

@ -0,0 +1,269 @@
{{- /*
StatefulSet for the dapr-scheduler-server control-plane service.
Each replica runs the scheduler binary with an embedded etcd member; data is
persisted through volumeClaimTemplates unless cluster.inMemoryStorage is set.
*/ -}}
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: dapr-scheduler-server
  namespace: {{ .Release.Namespace }}
  labels:
    app: dapr-scheduler-server
    {{- range $key, $value := .Values.global.k8sLabels }}
    {{ $key }}: {{ tpl $value $ }}
    {{- end }}
spec:
  {{- /* 3 with HA+scheduler, 1 with scheduler only, 0 otherwise (see dapr_scheduler.get-replicas). */}}
  {{- $replicas := include "dapr_scheduler.get-replicas" . }}
  replicas: {{ $replicas }}
  serviceName: dapr-scheduler-server
  podManagementPolicy: Parallel
  selector:
    matchLabels:
      app: dapr-scheduler-server
  {{- if not .Values.cluster.inMemoryStorage }}
  {{- /* One PVC per replica for the etcd data directory. */}}
  volumeClaimTemplates:
  - metadata:
      name: dapr-scheduler-data-dir
    spec:
      accessModes: [ "ReadWriteOnce" ]
      storageClassName: {{ .Values.cluster.storageClassName }}
      resources:
        requests:
          storage: {{ .Values.cluster.storageSize }}
  {{- end }}
  template:
    metadata:
      labels:
        app: dapr-scheduler-server
        {{- range $key, $value := .Values.global.k8sLabels }}
        {{ $key }}: {{ tpl $value $ }}
        {{- end }}
        {{- with .Values.global.labels }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
      annotations:
        dapr.io/control-plane: scheduler
        {{- if eq .Values.global.prometheus.enabled true }}
        prometheus.io/scrape: "{{ .Values.global.prometheus.enabled }}"
        prometheus.io/port: "{{ .Values.global.prometheus.port }}"
        prometheus.io/path: "/"
        {{- end }}
        {{- with .Values.statefulsetAnnotations }}
{{ toYaml . | indent 8 }}
        {{- end }}
    spec:
      securityContext:
        fsGroup: {{ .Values.securityContext.fsGroup }}
      containers:
      - name: dapr-scheduler-server
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8080
          {{- /* In debug mode the (long) debug delay applies so a debugger can attach first. */}}
          {{- if eq .Values.debug.enabled false }}
          initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
          {{- else }}
          initialDelaySeconds: {{ .Values.debug.initialDelaySeconds }}
          {{- end }}
          periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
          failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
        readinessProbe:
          httpGet:
            path: /healthz
            port: 8080
          {{- if eq .Values.debug.enabled false }}
          initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
          {{- else }}
          initialDelaySeconds: {{ .Values.debug.initialDelaySeconds }}
          {{- end }}
          periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
          failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
        {{- /* An image name containing "/" is treated as a fully qualified reference. */}}
        {{- if contains "/" .Values.image.name }}
        image: "{{ .Values.image.name }}"
        {{- else }}
        image: "{{ .Values.global.registry }}/{{ .Values.image.name }}:{{ .Values.global.tag }}"
        {{- end }}
        imagePullPolicy: {{ .Values.global.imagePullPolicy }}
        resources:
{{ toYaml .Values.resources | indent 10 }}
        volumeMounts:
          - name: dapr-scheduler-data-dir
            {{- if eq .Values.global.daprControlPlaneOs "windows" }}
            mountPath: {{ .Values.cluster.etcdDataDirWinPath }}/
            {{- else }}
            mountPath: {{ .Values.cluster.etcdDataDirPath }}/
            {{- end }}
            readOnly: false
          - name: dapr-trust-bundle
            mountPath: /var/run/secrets/dapr.io/tls
            readOnly: true
          {{- if .Values.global.mtls.mountSentryVolume }}
          - name: dapr-identity-token
            mountPath: /var/run/secrets/dapr.io/sentrytoken
          {{- end }}
          {{- with .Values.global.extraVolumeMounts.scheduler }}
            {{- toYaml . | nindent 10 }}
          {{- end }}
        ports:
          - containerPort: 50006
            name: api
          - containerPort: {{ .Values.ports.etcdGRPCClientPort }}
            name: etcd-client
          - containerPort: {{ .Values.ports.etcdHTTPClientPort }}
            name: etcd-httpclient
          - containerPort: {{ .Values.ports.etcdGRPCPeerPort }}
            name: etcd-peer
          {{- if eq .Values.global.prometheus.enabled true }}
          - name: metrics
            containerPort: {{ .Values.global.prometheus.port }}
            protocol: TCP
          {{- end }}
        {{- /* In debug mode the container runs Delve, which execs the scheduler binary. */}}
        command:
        {{- if eq .Values.debug.enabled false }}
          - "/scheduler"
        {{- else }}
          - "/dlv"
        {{- end }}
        args:
        {{- if eq .Values.debug.enabled true }}
          - "--listen=:{{ .Values.debug.port }}"
          - "--accept-multiclient"
          - "--headless=true"
          - "--log"
          - "exec"
          - "/scheduler"
          - "--"
        {{- end }}
          - "--listen-address=0.0.0.0"
          - "--id"
          - "$(SCHEDULER_ID)"
          - "--replica-count"
        {{- if (eq .Values.global.scheduler.enabled true) }}
          - "{{ $replicas }}"
        {{- else }}
          - "0"
        {{- end }}
          {{- /* Cluster topology strings are built by the helpers in _helpers.tpl. */}}
          - "--initial-cluster"
          - {{ include "dapr_scheduler.initialcluster" . | toYaml | trim }}
          - "--etcd-client-ports"
          - {{ include "dapr_scheduler.etcdclientports" . | toYaml | trim }}
          {{- if .Values.ports.etcdHTTPClientPort}}
          - "--etcd-client-http-ports"
          - {{ include "dapr_scheduler.etcdclienthttpports" . | toYaml | trim }}
          {{- end }}
          - "--log-level"
          - {{ .Values.logLevel }}
          {{- if eq .Values.global.logAsJson true }}
          - "--log-as-json"
          {{- end }}
          {{- if eq .Values.global.prometheus.enabled true }}
          - "--enable-metrics"
          - "--metrics-port"
          - "{{ .Values.global.prometheus.port }}"
          {{- else }}
          - "--enable-metrics=false"
          {{- end }}
          {{- /* Each replica keeps its etcd data under <dataDir>/<namespace>/<pod-name>. */}}
          - "--etcd-data-dir={{ if eq .Values.global.daprControlPlaneOs "windows" }}{{ .Values.cluster.etcdDataDirWinPath }}{{- else }}{{ .Values.cluster.etcdDataDirPath }}{{- end }}/{{ .Release.Namespace }}/$(SCHEDULER_ID)"
          - "--etcd-space-quota={{ int .Values.etcdSpaceQuota }}"
          - "--etcd-compaction-mode={{ .Values.etcdCompactionMode }}"
          - "--etcd-compaction-retention={{ .Values.etcdCompactionRetention }}"
          - "--tls-enabled"
          - "--trust-domain={{ .Values.global.mtls.controlPlaneTrustDomain }}"
          - "--trust-anchors-file=/var/run/secrets/dapr.io/tls/ca.crt"
          - "--sentry-address={{ if .Values.global.mtls.sentryAddress }}{{ .Values.global.mtls.sentryAddress }}{{ else }}dapr-sentry.{{ .Release.Namespace }}.svc.cluster.local:443{{ end }}"
          - "--mode=kubernetes"
        {{- if eq .Values.global.daprControlPlaneOs "linux" }}
        securityContext:
          runAsNonRoot: {{ .Values.securityContext.runAsNonRoot }}
          {{- if eq .Values.debug.enabled true }}
          {{- /* Delve needs ptrace to control the scheduler process. */}}
          capabilities:
            add: ["SYS_PTRACE"]
          {{- else }}
          readOnlyRootFilesystem: false
          capabilities:
            drop: ["ALL"]
          {{- end }}
          {{- if .Values.global.seccompProfile }}
          seccompProfile:
            type: {{ .Values.global.seccompProfile }}
          {{- end }}
        {{- end }}
        env:
        - name: SCHEDULER_ID
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: POD_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        {{- range $name, $value := .Values.extraEnvVars }}
        - name: "{{ $name }}"
          value: "{{ $value }}"
        {{- end }}
      serviceAccountName: dapr-scheduler
      volumes:
      {{- if .Values.cluster.inMemoryStorage }}
      - name: dapr-scheduler-data-dir
        emptyDir:
          medium: Memory
      {{- end }}
      - name: dapr-trust-bundle
        configMap:
          name: dapr-trust-bundle
      {{- if .Values.global.mtls.mountSentryVolume }}
      - name: dapr-identity-token
        projected:
          sources:
          - serviceAccountToken:
              path: token
              expirationSeconds: 600
              audience: "spiffe://{{ .Values.global.mtls.controlPlaneTrustDomain }}/ns/{{ .Release.Namespace }}/dapr-sentry"
      {{- end }}
      {{- with .Values.global.extraVolumes.scheduler }}
      {{- toYaml . | nindent 6 }}
      {{- end }}
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - {{ .Values.global.daprControlPlaneOs }}
              {{- if .Values.global.daprControlPlaneArch }}
              - key: kubernetes.io/arch
                operator: In
                values:
                - {{ .Values.global.daprControlPlaneArch }}
              {{- end }}
        {{- if (gt (int $replicas) 1) }}
        {{- /* With more than one replica, prefer spreading replicas across zones. */}}
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: app
                  operator: In
                  values:
                  - dapr-scheduler-server
              topologyKey: topology.kubernetes.io/zone
        {{- end }}
      {{- if .Values.global.imagePullSecrets }}
      imagePullSecrets:
        {{- include "dapr.imagePullSecrets" (dict "imagePullSecrets" .Values.global.imagePullSecrets) | nindent 8 -}}
      {{- end }}
      {{- if .Values.global.nodeSelector }}
      nodeSelector:
{{ toYaml .Values.global.nodeSelector | indent 8 }}
      {{- end }}
      {{- if .Values.global.tolerations }}
      tolerations:
{{ toYaml .Values.global.tolerations | indent 8 }}
      {{- end }}
      {{- if .Values.global.priorityClassName }}
      priorityClassName:
{{ toYaml .Values.global.priorityClassName | indent 8 }}
      {{- end }}

View File

@ -0,0 +1,61 @@
# Default values for dapr_scheduler.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
logLevel: info
component: scheduler
# Override this to use a custom scheduler service image.
# If the image name contains a "/", it is assumed to be a full docker image name, including the registry url and tag.
# Otherwise, the helm chart will use {{ .Values.global.registry }}/{{ .Values.image.name }}:{{ .Values.global.tag }}
image:
  name: "scheduler"
nameOverride: ""
fullnameOverride: ""
# Extra annotations added to the dapr-scheduler-server StatefulSet pod template.
statefulsetAnnotations: {}
service:
  # Extra annotations added to the dapr-scheduler-server Service.
  annotations: {}
ports:
  protocol: TCP
  # gRPC client port of the embedded etcd.
  etcdGRPCClientPort: 2379
  # HTTP client port of the embedded etcd.
  etcdHTTPClientPort: 2330
  # gRPC peer-to-peer port of the embedded etcd.
  etcdGRPCPeerPort: 2380
ha: false
cluster:
  # etcd data directory on Linux nodes.
  etcdDataDirPath: /var/run/data/dapr-scheduler
  # etcd data directory on Windows nodes.
  etcdDataDirWinPath: C:\\dapr-scheduler
  # StorageClass for the per-replica PVC; empty string selects the cluster default.
  storageClassName: ""
  storageSize: 1Gi
  # When true, etcd data lives in a memory-backed emptyDir instead of a PVC.
  inMemoryStorage: false
# etcd backend space quota in bytes (2147483648 = 2 GiB).
etcdSpaceQuota: 2147483648
etcdCompactionMode: periodic
etcdCompactionRetention: 24h
livenessProbe:
  initialDelaySeconds: 10
  periodSeconds: 3
  failureThreshold: 5
readinessProbe:
  initialDelaySeconds: 3
  periodSeconds: 3
  failureThreshold: 5
debug:
  # When true, the container starts under the Delve debugger.
  enabled: false
  port: 40000
  # Long probe delay used in debug mode so a debugger can attach before probes fire.
  initialDelaySeconds: 30000
securityContext:
  runAsNonRoot: true
  fsGroup: 65532
resources: {}
extraEnvVars: {}

View File

@ -2,4 +2,4 @@ apiVersion: v1
appVersion: "1.0"
description: A Helm chart for Dapr Sentry
name: dapr_sentry
version: 1.13.3
version: 1.14.1

View File

@ -7,9 +7,17 @@ metadata:
{{- range $key, $value := .Values.global.k8sLabels }}
{{ $key }}: {{ tpl $value $ }}
{{- end }}
{{- if .Values.service.annotations }}
{{ toYaml .Values.service.annotations | indent 4}}
{{- end }}
{{- if or .Values.service.annotations .Values.global.prometheus.enabled }}
annotations:
{{- if .Values.global.prometheus.enabled }}
prometheus.io/scrape: "true"
prometheus.io/port: "{{ .Values.global.prometheus.port }}"
prometheus.io/path: "/"
{{- end }}
{{- if .Values.service.annotations }}
{{- .Values.service.annotations | toYaml | nindent 4 }}
{{- end }}
{{- end }}
spec:
selector:
app: dapr-sentry
@ -19,11 +27,9 @@ spec:
port: {{ .Values.ports.port }}
targetPort: {{ .Values.ports.targetPort }}
name: grpc
# Added for backwards compatibility where previous clients will attempt to
# connect on port 80.
{{ if (ne (int .Values.ports.port) 80) }}
- protocol: TCP
port: 80
targetPort: {{ .Values.ports.targetPort }}
name: legacy
{{ end }}
{{- if eq .Values.global.prometheus.enabled true }}
- name: metrics
port: {{ .Values.global.prometheus.port }}
targetPort: {{ .Values.global.prometheus.port }}
protocol: TCP
{{- end}}

View File

@ -2,4 +2,4 @@ apiVersion: v1
appVersion: "1.0"
description: A Helm chart for the Dapr sidecar injector
name: dapr_sidecar_injector
version: 1.13.3
version: 1.14.1

View File

@ -8,9 +8,17 @@ metadata:
{{- range $key, $value := .Values.global.k8sLabels }}
{{ $key }}: {{ tpl $value $ }}
{{- end }}
{{- if .Values.service.annotations }}
{{ toYaml .Values.service.annotations | indent 4}}
{{- end }}
{{- if or .Values.service.annotations .Values.global.prometheus.enabled }}
annotations:
{{- if .Values.global.prometheus.enabled }}
prometheus.io/scrape: "true"
prometheus.io/port: "{{ .Values.global.prometheus.port }}"
prometheus.io/path: "/"
{{- end }}
{{- if .Values.service.annotations }}
{{- .Values.service.annotations | toYaml | nindent 4 }}
{{- end }}
{{- end }}
spec:
selector:
app: dapr-sidecar-injector
@ -20,4 +28,10 @@ spec:
targetPort: https
protocol: TCP
name: https
{{- if eq .Values.global.prometheus.enabled true }}
- name: metrics
port: {{ .Values.global.prometheus.port }}
targetPort: {{ .Values.global.prometheus.port }}
protocol: TCP
{{- end}}
{{- end }}

View File

@ -255,7 +255,15 @@ spec:
increasedCardinality:
description: 'If true, metrics for the HTTP server are collected
with increased cardinality. The default is true in Dapr 1.13,
but will be changed to false in 1.14+'
but will be changed to false in 1.15+'
type: boolean
pathMatching:
description: PathMatching defines the path matching configuration for HTTP server metrics.
type: array
items:
type: string
excludeVerbs:
description: If true (default is false) HTTP verbs (e.g., GET, POST) are excluded from the metrics.
type: boolean
type: object
rules:
@ -305,7 +313,22 @@ spec:
with increased cardinality. The default is true in Dapr 1.13,
but will be changed to false in 1.14+'
type: boolean
pathMatching:
description: PathMatching defines the path matching configuration for HTTP server metrics.
type: array
items:
type: string
excludeVerbs:
description: If true (default is false) HTTP verbs (e.g., GET, POST) are excluded from the metrics.
type: boolean
type: object
latencyDistributionBuckets:
description: 'If a list of integers is specified, use
these values for latency distribution buckets instead
of the default values.'
items:
type: integer
type: array
rules:
items:
description: MetricsRule defines configuration options for a

View File

@ -0,0 +1,7 @@
{{/*
Returns the address and port of the scheduler service.
The result is a string in the form "<name>:<port>".
*/}}
{{- define "address.scheduler" -}}
{{- printf "%s:%d" "dapr-scheduler-server" 50006 -}}
{{- end -}}

View File

@ -1,6 +1,6 @@
global:
registry: ghcr.io/dapr
tag: '1.13.3'
tag: '1.14.1'
dnsSuffix: ".cluster.local"
logAsJson: false
imagePullPolicy: IfNotPresent
@ -53,11 +53,13 @@ global:
# placement:
# operator:
# injector:
# scheduler:
extraVolumeMounts: {}
# sentry:
# placement:
# operator:
# injector:
# scheduler:
actors:
# Enables actor functionality in the cluster
@ -70,6 +72,10 @@ global:
# If empty, uses the built-in reminders capabilities in Dapr sidecars
serviceName: ""
scheduler:
# Enables scheduler functionality in the cluster
enabled: true
daprControlPlaneOs: linux
labels: {}
seccompProfile: ""

View File

@ -104,6 +104,7 @@ func NewReconciler(ctx context.Context, manager ctrlRt.Manager, o helm.Options)
// +kubebuilder:rbac:groups="",resources=configmaps,verbs=*
// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=*
// +kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=*
// +kubebuilder:rbac:groups=policy,resources=poddisruptionbudgets,verbs=*
// +kubebuilder:rbac:groups=dapr.io,resources=components,verbs=*
// +kubebuilder:rbac:groups=dapr.io,resources=components/status,verbs=*
// +kubebuilder:rbac:groups=dapr.io,resources=components/finalizers,verbs=*

View File

@ -50,11 +50,11 @@ func (r *Reconciler) reconciliationRequest(res *daprApi.DaprInstance) (Reconcili
chartDir: r.helmOptions.ChartsDir,
ChartValues: make(map[string]interface{}),
chartOverrides: map[string]interface{}{
"dapr_operator": map[string]interface{}{"runAsNonRoot": "true"},
"dapr_placement": map[string]interface{}{"runAsNonRoot": "true"},
"dapr_sentry": map[string]interface{}{"runAsNonRoot": "true"},
"dapr_dashboard": map[string]interface{}{"runAsNonRoot": "true"},
"apr_sidecar_injector": map[string]interface{}{"runAsNonRoot": "true"},
"dapr_operator": map[string]interface{}{"runAsNonRoot": "true"},
"dapr_placement": map[string]interface{}{"runAsNonRoot": "true"},
"dapr_sentry": map[string]interface{}{"runAsNonRoot": "true"},
"dapr_dashboard": map[string]interface{}{"runAsNonRoot": "true"},
"dapr_sidecar_injector": map[string]interface{}{"runAsNonRoot": true},
},
},
}

View File

@ -18,6 +18,36 @@ import (
daprTC "github.com/dapr/kubernetes-operator/test/e2e/common"
)
// TestDaprControlPlaneDeploy deploys a DaprControlPlane resource and then
// verifies that the Dapr CRDs are registered and that the operator, sentry
// and sidecar-injector deployments eventually report the Available condition.
func TestDaprControlPlaneDeploy(t *testing.T) {
	test := With(t)
	instance := dapr.DeployControlPlane(
		test,
		daprAc.DaprControlPlaneSpec().
			WithValues(dapr.Values(test, map[string]interface{}{
				// enable pod watchdog as sometimes the sidecar for some
				// (yet) unknown reason is not injected when the pod is
				// created, hence the dapr app won't properly start up
				"dapr_operator": map[string]interface{}{
					"watchInterval": "1s",
				},
			})),
	)
	// CRDs installed by the chart must show up.
	test.Eventually(CustomResourceDefinition(test, "components.dapr.io"), TestTimeoutLong).Should(Not(BeNil()))
	test.Eventually(CustomResourceDefinition(test, "configurations.dapr.io"), TestTimeoutLong).Should(Not(BeNil()))
	test.Eventually(CustomResourceDefinition(test, "httpendpoints.dapr.io"), TestTimeoutLong).Should(Not(BeNil()))
	test.Eventually(CustomResourceDefinition(test, "resiliencies.dapr.io"), TestTimeoutLong).Should(Not(BeNil()))
	test.Eventually(CustomResourceDefinition(test, "subscriptions.dapr.io"), TestTimeoutLong).Should(Not(BeNil()))
	// Core control-plane deployments must become Available.
	test.Eventually(Deployment(test, "dapr-operator", instance.Namespace), TestTimeoutLong).Should(
		WithTransform(ConditionStatus(appsv1.DeploymentAvailable), Equal(corev1.ConditionTrue)))
	test.Eventually(Deployment(test, "dapr-sentry", instance.Namespace), TestTimeoutLong).Should(
		WithTransform(ConditionStatus(appsv1.DeploymentAvailable), Equal(corev1.ConditionTrue)))
	test.Eventually(Deployment(test, "dapr-sidecar-injector", instance.Namespace), TestTimeoutLong).Should(
		WithTransform(ConditionStatus(appsv1.DeploymentAvailable), Equal(corev1.ConditionTrue)))
}
func TestDaprControlPlaneDeployWithApp(t *testing.T) {
test := With(t)

View File

@ -61,7 +61,7 @@ func TestDaprInstanceDeployWithCustomChart(t *testing.T) {
test,
daprAc.DaprInstanceSpec().
WithChart(daprAc.ChartSpec().
WithVersion("1.13.2")).
WithVersion("1.14.0")).
WithValues(nil),
)
@ -82,7 +82,7 @@ func TestDaprInstanceDeployWithCustomChart(t *testing.T) {
WithTransform(json.Marshal, And(
jq.Match(`.status.chart.name == "dapr"`),
jq.Match(`.status.chart.repo == "https://dapr.github.io/helm-charts"`),
jq.Match(`.status.chart.version == "1.13.2"`),
jq.Match(`.status.chart.version == "1.14.0"`),
)),
)
}