diff --git a/api/go.mod b/api/go.mod
index 1d50000..eb1f69e 100644
--- a/api/go.mod
+++ b/api/go.mod
@@ -4,7 +4,7 @@ go 1.22.7
require (
github.com/fluxcd/pkg/apis/kustomize v1.7.0
- github.com/fluxcd/pkg/apis/meta v1.7.0
+ github.com/fluxcd/pkg/apis/meta v1.8.0
k8s.io/apiextensions-apiserver v0.31.3
k8s.io/apimachinery v0.31.3
sigs.k8s.io/controller-runtime v0.19.3
diff --git a/api/go.sum b/api/go.sum
index bfb1040..e1c1678 100644
--- a/api/go.sum
+++ b/api/go.sum
@@ -4,8 +4,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/fluxcd/pkg/apis/kustomize v1.7.0 h1:4N23LccihQ3Ose/1FYZGwhrFSh63C9uOVFhwmSInOfQ=
github.com/fluxcd/pkg/apis/kustomize v1.7.0/go.mod h1:CqWLBcY2ZPW5f3k2sEypSfjIhz2hFs70PTTYIdKTMaY=
-github.com/fluxcd/pkg/apis/meta v1.7.0 h1:pDbPrBGgsiWV4bx8j+hodwv1Ysbj/pHP+FH46aTZOfs=
-github.com/fluxcd/pkg/apis/meta v1.7.0/go.mod h1:OJGH7I//SNO6zcso80oBRuf5H8oU8etZDeTgCcH7qHo=
+github.com/fluxcd/pkg/apis/meta v1.8.0 h1:wF7MJ3mu5ds9Y/exWU1yU0YyDb8s1VwwQnZYuMWli3c=
+github.com/fluxcd/pkg/apis/meta v1.8.0/go.mod h1:OJGH7I//SNO6zcso80oBRuf5H8oU8etZDeTgCcH7qHo=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
diff --git a/api/v2/helmrelease_types.go b/api/v2/helmrelease_types.go
index bebbafc..c4f8dcd 100644
--- a/api/v2/helmrelease_types.go
+++ b/api/v2/helmrelease_types.go
@@ -189,7 +189,7 @@ type HelmReleaseSpec struct {
// ValuesFrom holds references to resources containing Helm values for this HelmRelease,
// and information about how they should be merged.
- ValuesFrom []ValuesReference `json:"valuesFrom,omitempty"`
+ ValuesFrom []meta.ValuesReference `json:"valuesFrom,omitempty"`
// Values holds the values for this Helm release.
// +optional
diff --git a/api/v2/reference_types.go b/api/v2/reference_types.go
index fe7003e..419913b 100644
--- a/api/v2/reference_types.go
+++ b/api/v2/reference_types.go
@@ -68,48 +68,3 @@ type CrossNamespaceSourceReference struct {
// +optional
Namespace string `json:"namespace,omitempty"`
}
-
-// ValuesReference contains a reference to a resource containing Helm values,
-// and optionally the key they can be found at.
-type ValuesReference struct {
- // Kind of the values referent, valid values are ('Secret', 'ConfigMap').
- // +kubebuilder:validation:Enum=Secret;ConfigMap
- // +required
- Kind string `json:"kind"`
-
- // Name of the values referent. Should reside in the same namespace as the
- // referring resource.
- // +kubebuilder:validation:MinLength=1
- // +kubebuilder:validation:MaxLength=253
- // +required
- Name string `json:"name"`
-
- // ValuesKey is the data key where the values.yaml or a specific value can be
- // found at. Defaults to 'values.yaml'.
- // +kubebuilder:validation:MaxLength=253
- // +kubebuilder:validation:Pattern=`^[\-._a-zA-Z0-9]+$`
- // +optional
- ValuesKey string `json:"valuesKey,omitempty"`
-
- // TargetPath is the YAML dot notation path the value should be merged at. When
- // set, the ValuesKey is expected to be a single flat value. Defaults to 'None',
- // which results in the values getting merged at the root.
- // +kubebuilder:validation:MaxLength=250
- // +kubebuilder:validation:Pattern=`^([a-zA-Z0-9_\-.\\\/]|\[[0-9]{1,5}\])+$`
- // +optional
- TargetPath string `json:"targetPath,omitempty"`
-
- // Optional marks this ValuesReference as optional. When set, a not found error
- // for the values reference is ignored, but any ValuesKey, TargetPath or
- // transient error will still result in a reconciliation failure.
- // +optional
- Optional bool `json:"optional,omitempty"`
-}
-
-// GetValuesKey returns the defined ValuesKey, or the default ('values.yaml').
-func (in ValuesReference) GetValuesKey() string {
- if in.ValuesKey == "" {
- return "values.yaml"
- }
- return in.ValuesKey
-}
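
The removed v2.ValuesReference is superseded by meta.ValuesReference from fluxcd/pkg/apis/meta v1.8.0, and the updated callers further down suggest the field set (Kind, Name, ValuesKey, TargetPath, Optional) carries over unchanged. A minimal sketch, not part of this change, of how a HelmReleaseSpec is now populated with the relocated type; the names and values below are illustrative only:

package example

import (
	"github.com/fluxcd/pkg/apis/meta"

	v2 "github.com/fluxcd/helm-controller/api/v2"
)

// buildSpec wires two values references into a HelmReleaseSpec using the
// relocated meta.ValuesReference type instead of the removed v2 one.
func buildSpec() v2.HelmReleaseSpec {
	return v2.HelmReleaseSpec{
		ValuesFrom: []meta.ValuesReference{
			// Reads the default 'values.yaml' key from a Secret in the
			// HelmRelease namespace.
			{Kind: "Secret", Name: "app-values"},
			// Reads a specific key from a ConfigMap and tolerates the
			// ConfigMap being absent.
			{Kind: "ConfigMap", Name: "app-overrides", ValuesKey: "prod.yaml", Optional: true},
		},
	}
}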
diff --git a/api/v2/zz_generated.deepcopy.go b/api/v2/zz_generated.deepcopy.go
index c9f8e8f..2523a6a 100644
--- a/api/v2/zz_generated.deepcopy.go
+++ b/api/v2/zz_generated.deepcopy.go
@@ -326,7 +326,7 @@ func (in *HelmReleaseSpec) DeepCopyInto(out *HelmReleaseSpec) {
}
if in.ValuesFrom != nil {
in, out := &in.ValuesFrom, &out.ValuesFrom
- *out = make([]ValuesReference, len(*in))
+ *out = make([]meta.ValuesReference, len(*in))
copy(*out, *in)
}
if in.Values != nil {
@@ -717,18 +717,3 @@ func (in *UpgradeRemediation) DeepCopy() *UpgradeRemediation {
in.DeepCopyInto(out)
return out
}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ValuesReference) DeepCopyInto(out *ValuesReference) {
- *out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValuesReference.
-func (in *ValuesReference) DeepCopy() *ValuesReference {
- if in == nil {
- return nil
- }
- out := new(ValuesReference)
- in.DeepCopyInto(out)
- return out
-}
diff --git a/docs/api/v2/helm.md b/docs/api/v2/helm.md
index 1ab62cf..5ade58b 100644
--- a/docs/api/v2/helm.md
+++ b/docs/api/v2/helm.md
@@ -350,8 +350,8 @@ Uninstall
-ValuesReference contains a reference to a resource containing Helm values,
-and optionally the key they can be found at.
-kind (string): Kind of the values referent, valid values are (‘Secret’, ‘ConfigMap’).
-name (string): Name of the values referent. Should reside in the same namespace as the
-referring resource.
-valuesKey (string, optional): ValuesKey is the data key where the values.yaml or a specific value can be
-found at. Defaults to ‘values.yaml’.
-targetPath (string, optional): TargetPath is the YAML dot notation path the value should be merged at. When
-set, the ValuesKey is expected to be a single flat value. Defaults to ‘None’,
-which results in the values getting merged at the root.
-optional (bool, optional): Optional marks this ValuesReference as optional. When set, a not found error
-for the values reference is ignored, but any ValuesKey, TargetPath or
-transient error will still result in a reconciliation failure.
This page was automatically generated with gen-crd-api-reference-docs
diff --git a/go.mod b/go.mod
index 7c2b7f1..37f8507 100644
--- a/go.mod
+++ b/go.mod
@@ -21,8 +21,9 @@ require (
github.com/fluxcd/pkg/apis/acl v0.4.0
github.com/fluxcd/pkg/apis/event v0.11.0
github.com/fluxcd/pkg/apis/kustomize v1.7.0
- github.com/fluxcd/pkg/apis/meta v1.7.0
- github.com/fluxcd/pkg/runtime v0.50.0
+ github.com/fluxcd/pkg/apis/meta v1.8.0
+ github.com/fluxcd/pkg/chartutil v1.0.0
+ github.com/fluxcd/pkg/runtime v0.50.1
github.com/fluxcd/pkg/ssa v0.41.1
github.com/fluxcd/pkg/testserver v0.8.0
github.com/fluxcd/source-controller/api v1.4.1
diff --git a/go.sum b/go.sum
index f726909..95602eb 100644
--- a/go.sum
+++ b/go.sum
@@ -113,10 +113,12 @@ github.com/fluxcd/pkg/apis/event v0.11.0 h1:blvUbgko8EqqjMn1mju2U8aBXUntn3EWbMNc
github.com/fluxcd/pkg/apis/event v0.11.0/go.mod h1:AjoDg8Au7RpZbk8B5t3Q2Kq/6kXgmhtdXz6P1y2teAU=
github.com/fluxcd/pkg/apis/kustomize v1.7.0 h1:4N23LccihQ3Ose/1FYZGwhrFSh63C9uOVFhwmSInOfQ=
github.com/fluxcd/pkg/apis/kustomize v1.7.0/go.mod h1:CqWLBcY2ZPW5f3k2sEypSfjIhz2hFs70PTTYIdKTMaY=
-github.com/fluxcd/pkg/apis/meta v1.7.0 h1:pDbPrBGgsiWV4bx8j+hodwv1Ysbj/pHP+FH46aTZOfs=
-github.com/fluxcd/pkg/apis/meta v1.7.0/go.mod h1:OJGH7I//SNO6zcso80oBRuf5H8oU8etZDeTgCcH7qHo=
-github.com/fluxcd/pkg/runtime v0.50.0 h1:FKJQaOFv8SKp/t7yRE0EkHxA4RIr650SGTLJa1HY3AU=
-github.com/fluxcd/pkg/runtime v0.50.0/go.mod h1:NEjX8/1DL8B/dsjH1/FD9PjCLPhgdvsffSvzuFrgjys=
+github.com/fluxcd/pkg/apis/meta v1.8.0 h1:wF7MJ3mu5ds9Y/exWU1yU0YyDb8s1VwwQnZYuMWli3c=
+github.com/fluxcd/pkg/apis/meta v1.8.0/go.mod h1:OJGH7I//SNO6zcso80oBRuf5H8oU8etZDeTgCcH7qHo=
+github.com/fluxcd/pkg/chartutil v1.0.0 h1:Hj5mPiUp/nanZPVK7Ur0TDN4BCMhuoxKjvAmBbnX7DE=
+github.com/fluxcd/pkg/chartutil v1.0.0/go.mod h1:GBo3G78aiK48BppJ/YoDUv8L1NDLHrMpK3K5uiazQ0A=
+github.com/fluxcd/pkg/runtime v0.50.1 h1:VQeIJ2iq/BjsboGATRTCUQYMU737R0DboKXWVGyBhAI=
+github.com/fluxcd/pkg/runtime v0.50.1/go.mod h1:lBLhK6y/3kppfBsqmBs8wZ97dEmd44WzLp0iCci4DnY=
github.com/fluxcd/pkg/ssa v0.41.1 h1:VW87zsLYAKUvCxJhuEH7VzxVh3SxaU+PyApCT6gKjTk=
github.com/fluxcd/pkg/ssa v0.41.1/go.mod h1:7cbyLHqFd5FpcKvhxbHG3DkMm3cZteW45Mi78B0hg8g=
github.com/fluxcd/pkg/testserver v0.8.0 h1:ndlCjNpIueEmsLbyg97Dbkq/0Mfzxn4Kq4HSPEb71V8=
diff --git a/internal/action/reset.go b/internal/action/reset.go
index 8dbd499..556700a 100644
--- a/internal/action/reset.go
+++ b/internal/action/reset.go
@@ -22,7 +22,7 @@ import (
"helm.sh/helm/v3/pkg/chartutil"
v2 "github.com/fluxcd/helm-controller/api/v2"
- intchartutil "github.com/fluxcd/helm-controller/internal/chartutil"
+ intchartutil "github.com/fluxcd/pkg/chartutil"
)
const (
diff --git a/internal/action/verify.go b/internal/action/verify.go
index e21c63f..92e55e0 100644
--- a/internal/action/verify.go
+++ b/internal/action/verify.go
@@ -28,8 +28,8 @@ import (
helmdriver "helm.sh/helm/v3/pkg/storage/driver"
v2 "github.com/fluxcd/helm-controller/api/v2"
- "github.com/fluxcd/helm-controller/internal/chartutil"
"github.com/fluxcd/helm-controller/internal/release"
+ "github.com/fluxcd/pkg/chartutil"
)
var (
diff --git a/internal/chartutil/digest.go b/internal/chartutil/digest.go
deleted file mode 100644
index 5a5cf83..0000000
--- a/internal/chartutil/digest.go
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
-Copyright 2022 The Flux authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package chartutil
-
-import (
- "github.com/opencontainers/go-digest"
- "helm.sh/helm/v3/pkg/chartutil"
-
- intyaml "github.com/fluxcd/helm-controller/internal/yaml"
-)
-
-// DigestValues calculates the digest of the values using the provided algorithm.
-// The caller is responsible for ensuring that the algorithm is supported.
-func DigestValues(algo digest.Algorithm, values chartutil.Values) digest.Digest {
- digester := algo.Digester()
- if values = valuesOrNil(values); values != nil {
- if err := intyaml.Encode(digester.Hash(), values, intyaml.SortMapSlice); err != nil {
- return ""
- }
- }
- return digester.Digest()
-}
-
-// VerifyValues verifies the digest of the values against the provided digest.
-func VerifyValues(digest digest.Digest, values chartutil.Values) bool {
- if digest.Validate() != nil {
- return false
- }
-
- verifier := digest.Verifier()
- if values = valuesOrNil(values); values != nil {
- if err := intyaml.Encode(verifier, values, intyaml.SortMapSlice); err != nil {
- return false
- }
- }
- return verifier.Verified()
-}
-
-// valuesOrNil returns nil if the values are empty, otherwise the values are
-// returned. This is used to ensure that the digest is calculated against nil
-// opposed to an empty object.
-func valuesOrNil(values chartutil.Values) chartutil.Values {
- if values != nil && len(values) == 0 {
- return nil
- }
- return values
-}
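
These digest helpers now come from github.com/fluxcd/pkg/chartutil, as the import swaps in internal/action below show. A hedged sketch of a call site after the move, assuming DigestValues and VerifyValues keep the signatures of the internal versions deleted here (the otherwise unchanged callers imply they do):

package example

import (
	"github.com/opencontainers/go-digest"
	helmchartutil "helm.sh/helm/v3/pkg/chartutil"

	"github.com/fluxcd/pkg/chartutil"
)

// digestAndVerify computes a digest over the release values with the relocated
// helpers and immediately verifies it, mirroring how reset.go and verify.go
// consume them. The signatures are assumed to match the deleted internal code.
func digestAndVerify(values helmchartutil.Values) (digest.Digest, bool) {
	d := chartutil.DigestValues(digest.SHA256, values)
	return d, chartutil.VerifyValues(d, values)
}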
diff --git a/internal/chartutil/digest_test.go b/internal/chartutil/digest_test.go
deleted file mode 100644
index 54368d4..0000000
--- a/internal/chartutil/digest_test.go
+++ /dev/null
@@ -1,244 +0,0 @@
-/*
-Copyright 2022 The Flux authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package chartutil
-
-import (
- "testing"
-
- "github.com/opencontainers/go-digest"
- "helm.sh/helm/v3/pkg/chartutil"
-)
-
-func TestDigestValues(t *testing.T) {
- tests := []struct {
- name string
- algo digest.Algorithm
- values chartutil.Values
- want digest.Digest
- }{
- {
- name: "empty",
- algo: digest.SHA256,
- values: chartutil.Values{},
- want: "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- },
- {
- name: "nil",
- algo: digest.SHA256,
- values: nil,
- want: "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- },
- {
- name: "value map",
- algo: digest.SHA256,
- values: chartutil.Values{
- "replicas": 3,
- "image": map[string]interface{}{
- "tag": "latest",
- "repository": "nginx",
- },
- "ports": []interface{}{
- map[string]interface{}{
- "protocol": "TCP",
- "port": 8080,
- },
- map[string]interface{}{
- "port": 9090,
- "protocol": "UDP",
- },
- },
- },
- want: "sha256:fcdc2b0de1581a3633ada4afee3f918f6eaa5b5ab38c3fef03d5b48d3f85d9f6",
- },
- {
- name: "value map in different order",
- algo: digest.SHA256,
- values: chartutil.Values{
- "image": map[string]interface{}{
- "repository": "nginx",
- "tag": "latest",
- },
- "ports": []interface{}{
- map[string]interface{}{
- "port": 8080,
- "protocol": "TCP",
- },
- map[string]interface{}{
- "port": 9090,
- "protocol": "UDP",
- },
- },
- "replicas": 3,
- },
- want: "sha256:fcdc2b0de1581a3633ada4afee3f918f6eaa5b5ab38c3fef03d5b48d3f85d9f6",
- },
- {
- // Explicit test for something that does not work with sigs.k8s.io/yaml.
- // See: https://go.dev/play/p/KRyfK9ZobZx
- name: "values map with numeric keys",
- algo: digest.SHA256,
- values: chartutil.Values{
- "replicas": 3,
- "test": map[string]interface{}{
- "632bd80235a05f4192aefade": "value1",
- "632bd80ddf416cf32fd50679": "value2",
- "632bd817c559818a52307da2": "value3",
- "632bd82398e71231a98004b6": "value4",
- },
- },
- want: "sha256:8a980fcbeadd6f05818f07e8aec14070c22250ca3d96af1fcd5f93b3e85b4d70",
- },
- {
- name: "values map with numeric keys in different order",
- algo: digest.SHA256,
- values: chartutil.Values{
- "test": map[string]interface{}{
- "632bd82398e71231a98004b6": "value4",
- "632bd817c559818a52307da2": "value3",
- "632bd80ddf416cf32fd50679": "value2",
- "632bd80235a05f4192aefade": "value1",
- },
- "replicas": 3,
- },
- want: "sha256:8a980fcbeadd6f05818f07e8aec14070c22250ca3d96af1fcd5f93b3e85b4d70",
- },
- {
- name: "using different algorithm",
- algo: digest.SHA512,
- values: chartutil.Values{
- "foo": "bar",
- "baz": map[string]interface{}{
- "cool": "stuff",
- },
- },
- want: "sha512:b5f9cd4855ca3b08afd602557f373069b1732ce2e6d52341481b0d38f1938452e9d7759ab177c66699962b592f20ceded03eea3cd405d8670578c47842e2c550",
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if got := DigestValues(tt.algo, tt.values); got != tt.want {
- t.Errorf("DigestValues() = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
-func TestVerifyValues(t *testing.T) {
- tests := []struct {
- name string
- digest digest.Digest
- values chartutil.Values
- want bool
- }{
- {
- name: "empty values",
- digest: "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- values: chartutil.Values{},
- want: true,
- },
- {
- name: "nil values",
- digest: "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- values: nil,
- want: true,
- },
- {
- name: "empty digest",
- digest: "",
- want: false,
- },
- {
- name: "invalid digest",
- digest: "sha512:invalid",
- values: nil,
- want: false,
- },
- {
- name: "matching values",
- digest: "sha256:fcdc2b0de1581a3633ada4afee3f918f6eaa5b5ab38c3fef03d5b48d3f85d9f6",
- values: chartutil.Values{
- "image": map[string]interface{}{
- "repository": "nginx",
- "tag": "latest",
- },
- "ports": []interface{}{
- map[string]interface{}{
- "port": 8080,
- "protocol": "TCP",
- },
- map[string]interface{}{
- "port": 9090,
- "protocol": "UDP",
- },
- },
- "replicas": 3,
- },
- want: true,
- },
- {
- name: "matching values in different order",
- digest: "sha256:fcdc2b0de1581a3633ada4afee3f918f6eaa5b5ab38c3fef03d5b48d3f85d9f6",
- values: chartutil.Values{
- "replicas": 3,
- "image": map[string]interface{}{
- "tag": "latest",
- "repository": "nginx",
- },
- "ports": []interface{}{
- map[string]interface{}{
- "protocol": "TCP",
- "port": 8080,
- },
- map[string]interface{}{
- "port": 9090,
- "protocol": "UDP",
- },
- },
- },
- want: true,
- },
- {
- name: "matching values with numeric keys",
- digest: "sha256:8a980fcbeadd6f05818f07e8aec14070c22250ca3d96af1fcd5f93b3e85b4d70",
- values: chartutil.Values{
- "replicas": 3,
- "test": map[string]interface{}{
- "632bd80235a05f4192aefade": "value1",
- "632bd80ddf416cf32fd50679": "value2",
- "632bd817c559818a52307da2": "value3",
- "632bd82398e71231a98004b6": "value4",
- },
- },
- want: true,
- },
- {
- name: "mismatching values",
- digest: "sha256:3f3641788a2d4abda3534eaa90c90b54916e4c6e3a5b2e1b24758b7bfa701ecd",
- values: chartutil.Values{
- "foo": "bar",
- },
- want: false,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if got := VerifyValues(tt.digest, tt.values); got != tt.want {
- t.Errorf("VerifyValues() = %v, want %v", got, tt.want)
- }
- })
- }
-}
diff --git a/internal/chartutil/values.go b/internal/chartutil/values.go
deleted file mode 100644
index f925a0a..0000000
--- a/internal/chartutil/values.go
+++ /dev/null
@@ -1,275 +0,0 @@
-/*
-Copyright 2022 The Flux authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package chartutil
-
-import (
- "context"
- "errors"
- "fmt"
- "strings"
-
- "helm.sh/helm/v3/pkg/chartutil"
- "helm.sh/helm/v3/pkg/strvals"
- corev1 "k8s.io/api/core/v1"
- apierrors "k8s.io/apimachinery/pkg/api/errors"
- "k8s.io/apimachinery/pkg/types"
- ctrl "sigs.k8s.io/controller-runtime"
- kubeclient "sigs.k8s.io/controller-runtime/pkg/client"
-
- "github.com/fluxcd/pkg/runtime/transform"
-
- v2 "github.com/fluxcd/helm-controller/api/v2"
-)
-
-// ErrValuesRefReason is the descriptive reason for an ErrValuesReference.
-type ErrValuesRefReason error
-
-var (
- // ErrResourceNotFound signals the referenced values resource could not be
- // found.
- ErrResourceNotFound = errors.New("resource not found")
- // ErrKeyNotFound signals the key could not be found in the referenced
- // values resource.
- ErrKeyNotFound = errors.New("key not found")
- // ErrUnsupportedRefKind signals the values reference kind is not
- // supported.
- ErrUnsupportedRefKind = errors.New("unsupported values reference kind")
- // ErrValuesDataRead signals the referenced resource's values data could
- // not be read.
- ErrValuesDataRead = errors.New("failed to read values data")
- // ErrValueMerge signals a single value could not be merged into the
- // values.
- ErrValueMerge = errors.New("failed to merge value")
- // ErrUnknown signals the reason an error occurred is unknown.
- ErrUnknown = errors.New("unknown error")
-)
-
-// ErrValuesReference is returned by ChartValuesFromReferences
-type ErrValuesReference struct {
- // Reason for the values reference error. Nil equals ErrUnknown.
- // Can be used with Is to reason about a returned error:
- // err := &ErrValuesReference{Reason: ErrResourceNotFound, ...}
- // errors.Is(err, ErrResourceNotFound)
- Reason ErrValuesRefReason
- // Kind of the values reference the error is being reported for.
- Kind string
- // Name of the values reference the error is being reported for.
- Name types.NamespacedName
- // Key of the values reference the error is being reported for.
- Key string
- // Optional indicates if the error is being reported for an optional values
- // reference.
- Optional bool
- // Err contains the further error chain leading to this error, it can be
- // nil.
- Err error
-}
-
-// Error returns an error string constructed out of the state of
-// ErrValuesReference.
-func (e *ErrValuesReference) Error() string {
- b := strings.Builder{}
- b.WriteString("could not resolve")
- if e.Optional {
- b.WriteString(" optional")
- }
- if kind := e.Kind; kind != "" {
- b.WriteString(" " + kind)
- }
- b.WriteString(" chart values reference")
- if name := e.Name.String(); name != "" {
- b.WriteString(fmt.Sprintf(" '%s'", name))
- }
- if key := e.Key; key != "" {
- b.WriteString(fmt.Sprintf(" with key '%s'", key))
- }
- reason := e.Reason.Error()
- if reason == "" && e.Err == nil {
- reason = ErrUnknown.Error()
- }
- if e.Err != nil {
- reason = e.Err.Error()
- }
- b.WriteString(": " + reason)
- return b.String()
-}
-
-// Is returns if target == Reason, or target == Err.
-// Can be used to Reason about a returned error:
-//
-// err := &ErrValuesReference{Reason: ErrResourceNotFound, ...}
-// errors.Is(err, ErrResourceNotFound)
-func (e *ErrValuesReference) Is(target error) bool {
- reason := e.Reason
- if reason == nil {
- reason = ErrUnknown
- }
- if reason == target {
- return true
- }
- return errors.Is(e.Err, target)
-}
-
-// Unwrap returns the wrapped Err.
-func (e *ErrValuesReference) Unwrap() error {
- return e.Err
-}
-
-// NewErrValuesReference returns a new ErrValuesReference constructed from the
-// provided values.
-func NewErrValuesReference(name types.NamespacedName, ref v2.ValuesReference, reason ErrValuesRefReason, err error) *ErrValuesReference {
- return &ErrValuesReference{
- Reason: reason,
- Kind: ref.Kind,
- Name: name,
- Key: ref.GetValuesKey(),
- Optional: ref.Optional,
- Err: err,
- }
-}
-
-const (
- kindConfigMap = "ConfigMap"
- kindSecret = "Secret"
-)
-
-// ChartValuesFromReferences attempts to construct new chart values by resolving
-// the provided references using the client, merging them in the order given.
-// If provided, the values map is merged in last overwriting values from references,
-// unless a reference has a targetPath specified, in which case it will overwrite all.
-// It returns the merged values, or an ErrValuesReference error.
-func ChartValuesFromReferences(ctx context.Context, client kubeclient.Client, namespace string,
- values map[string]interface{}, refs ...v2.ValuesReference) (chartutil.Values, error) {
-
- log := ctrl.LoggerFrom(ctx)
-
- result := chartutil.Values{}
- resources := make(map[string]kubeclient.Object)
-
- for _, ref := range refs {
- namespacedName := types.NamespacedName{Namespace: namespace, Name: ref.Name}
- var valuesData []byte
-
- switch ref.Kind {
- case kindConfigMap, kindSecret:
- index := ref.Kind + namespacedName.String()
-
- resource, ok := resources[index]
- if !ok {
- // The resource may not exist, but we want to act on a single version
- // of the resource in case the values reference is marked as optional.
- resources[index] = nil
-
- switch ref.Kind {
- case kindSecret:
- resource = &corev1.Secret{}
- case kindConfigMap:
- resource = &corev1.ConfigMap{}
- }
-
- if resource != nil {
- if err := client.Get(ctx, namespacedName, resource); err != nil {
- if apierrors.IsNotFound(err) {
- err := NewErrValuesReference(namespacedName, ref, ErrResourceNotFound, err)
- if err.Optional {
- log.Info(err.Error())
- continue
- }
- return nil, err
- }
- return nil, err
- }
- resources[index] = resource
- }
- }
-
- if resource == nil {
- if ref.Optional {
- continue
- }
- return nil, NewErrValuesReference(namespacedName, ref, ErrResourceNotFound, nil)
- }
-
- switch typedRes := resource.(type) {
- case *corev1.Secret:
- data, ok := typedRes.Data[ref.GetValuesKey()]
- if !ok {
- err := NewErrValuesReference(namespacedName, ref, ErrKeyNotFound, nil)
- if ref.Optional {
- log.Info(err.Error())
- continue
- }
- return nil, NewErrValuesReference(namespacedName, ref, ErrKeyNotFound, nil)
- }
- valuesData = data
- case *corev1.ConfigMap:
- data, ok := typedRes.Data[ref.GetValuesKey()]
- if !ok {
- err := NewErrValuesReference(namespacedName, ref, ErrKeyNotFound, nil)
- if ref.Optional {
- log.Info(err.Error())
- continue
- }
- return nil, err
- }
- valuesData = []byte(data)
- default:
- return nil, NewErrValuesReference(namespacedName, ref, ErrUnsupportedRefKind, nil)
- }
- default:
- return nil, NewErrValuesReference(namespacedName, ref, ErrUnsupportedRefKind, nil)
- }
-
- if ref.TargetPath != "" {
- result = transform.MergeMaps(result, values)
-
- // TODO(hidde): this is a bit of hack, as it mimics the way the option string is passed
- // to Helm from a CLI perspective. Given the parser is however not publicly accessible
- // while it contains all logic around parsing the target path, it is a fair trade-off.
- if err := ReplacePathValue(result, ref.TargetPath, string(valuesData)); err != nil {
- return nil, NewErrValuesReference(namespacedName, ref, ErrValueMerge, err)
- }
- continue
- }
-
- values, err := chartutil.ReadValues(valuesData)
- if err != nil {
- return nil, NewErrValuesReference(namespacedName, ref, ErrValuesDataRead, err)
- }
- result = transform.MergeMaps(result, values)
- }
- return transform.MergeMaps(result, values), nil
-}
-
-// ReplacePathValue replaces the value at the dot notation path with the given
-// value using Helm's string value parser using strvals.ParseInto. Single or
-// double-quoted values are merged using strvals.ParseIntoString.
-func ReplacePathValue(values chartutil.Values, path string, value string) error {
- const (
- singleQuote = "'"
- doubleQuote = `"`
- )
- isSingleQuoted := strings.HasPrefix(value, singleQuote) && strings.HasSuffix(value, singleQuote)
- isDoubleQuoted := strings.HasPrefix(value, doubleQuote) && strings.HasSuffix(value, doubleQuote)
- if isSingleQuoted || isDoubleQuoted {
- value = strings.Trim(value, singleQuote+doubleQuote)
- value = path + "=" + value
- return strvals.ParseIntoString(value, values)
- }
- value = path + "=" + value
- return strvals.ParseInto(value, values)
-}
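
ChartValuesFromReferences and its error handling move to github.com/fluxcd/pkg/chartutil as well. A sketch of how a caller can still reason about failure modes, under the assumption that the sentinel errors (ErrResourceNotFound, ErrKeyNotFound, and friends) were relocated together with the function rather than dropped:

package example

import (
	"errors"
	"fmt"

	"github.com/fluxcd/pkg/chartutil"
)

// classifyValuesError maps a values-composition failure to a short,
// human-readable cause using errors.Is against the (assumed relocated)
// sentinel errors.
func classifyValuesError(err error) string {
	switch {
	case errors.Is(err, chartutil.ErrResourceNotFound):
		return "referenced Secret or ConfigMap does not exist"
	case errors.Is(err, chartutil.ErrKeyNotFound):
		return "referenced key is missing from the Secret or ConfigMap"
	default:
		return fmt.Sprintf("values could not be composed: %v", err)
	}
}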
diff --git a/internal/chartutil/values_fuzz_test.go b/internal/chartutil/values_fuzz_test.go
deleted file mode 100644
index fc6a248..0000000
--- a/internal/chartutil/values_fuzz_test.go
+++ /dev/null
@@ -1,186 +0,0 @@
-//go:build gofuzz_libfuzzer
-// +build gofuzz_libfuzzer
-
-/*
-Copyright 2022 The Flux authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package chartutil
-
-import (
- "context"
- "testing"
-
- "github.com/go-logr/logr"
- "helm.sh/helm/v3/pkg/chartutil"
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "sigs.k8s.io/controller-runtime/pkg/client/fake"
-
- v2 "github.com/fluxcd/helm-controller/api/v2"
-)
-
-func FuzzChartValuesFromReferences(f *testing.F) {
- scheme := testScheme()
-
- tests := []struct {
- targetPath string
- valuesKey string
- hrValues string
- createObject bool
- secretData []byte
- configData string
- }{
- {
- targetPath: "flat",
- valuesKey: "custom-values.yaml",
- secretData: []byte(`flat:
- nested: value
-nested: value
-`),
- configData: `flat: value
-nested:
- configuration: value
-`,
- hrValues: `
-other: values
-`,
- createObject: true,
- },
- {
- targetPath: "'flat'",
- valuesKey: "custom-values.yaml",
- secretData: []byte(`flat:
- nested: value
-nested: value
-`),
- configData: `flat: value
-nested:
- configuration: value
-`,
- hrValues: `
-other: values
-`,
- createObject: true,
- },
- {
- targetPath: "flat[0]",
- secretData: []byte(``),
- configData: `flat: value`,
- hrValues: `
-other: values
-`,
- createObject: true,
- },
- {
- secretData: []byte(`flat:
- nested: value
-nested: value
-`),
- configData: `flat: value
-nested:
- configuration: value
-`,
- hrValues: `
-other: values
-`,
- createObject: true,
- },
- {
- targetPath: "some-value",
- hrValues: `
-other: values
-`,
- createObject: false,
- },
- }
-
- for _, tt := range tests {
- f.Add(tt.targetPath, tt.valuesKey, tt.hrValues, tt.createObject, tt.secretData, tt.configData)
- }
-
- f.Fuzz(func(t *testing.T,
- targetPath, valuesKey, hrValues string, createObject bool, secretData []byte, configData string) {
-
- // objectName and objectNamespace represent a name reference to a core
- // Kubernetes object upstream (Secret/ConfigMap) which is validated upstream,
- // and also validated by us in the OpenAPI-based validation set in
- // v2.ValuesReference. Therefore, a static value here suffices, and instead
- // we just play with the objects presence/absence.
- objectName := "values"
- objectNamespace := "default"
- var resources []runtime.Object
-
- if createObject {
- resources = append(resources,
- mockConfigMap(objectName, map[string]string{valuesKey: configData}),
- mockSecret(objectName, map[string][]byte{valuesKey: secretData}),
- )
- }
-
- references := []v2.ValuesReference{
- {
- Kind: kindConfigMap,
- Name: objectName,
- ValuesKey: valuesKey,
- TargetPath: targetPath,
- },
- {
- Kind: kindSecret,
- Name: objectName,
- ValuesKey: valuesKey,
- TargetPath: targetPath,
- },
- }
-
- c := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(resources...)
- var values chartutil.Values
- if hrValues != "" {
- values, _ = chartutil.ReadValues([]byte(hrValues))
- }
-
- _, _ = ChartValuesFromReferences(logr.NewContext(context.TODO(), logr.Discard()), c.Build(), objectNamespace, values, references...)
- })
-}
-
-func mockSecret(name string, data map[string][]byte) *corev1.Secret {
- return &corev1.Secret{
- TypeMeta: metav1.TypeMeta{
- Kind: kindSecret,
- APIVersion: "v1",
- },
- ObjectMeta: metav1.ObjectMeta{Name: name},
- Data: data,
- }
-}
-
-func mockConfigMap(name string, data map[string]string) *corev1.ConfigMap {
- return &corev1.ConfigMap{
- TypeMeta: metav1.TypeMeta{
- Kind: kindConfigMap,
- APIVersion: "v1",
- },
- ObjectMeta: metav1.ObjectMeta{Name: name},
- Data: data,
- }
-}
-
-func testScheme() *runtime.Scheme {
- scheme := runtime.NewScheme()
- _ = corev1.AddToScheme(scheme)
- _ = v2.AddToScheme(scheme)
- return scheme
-}
diff --git a/internal/chartutil/values_test.go b/internal/chartutil/values_test.go
deleted file mode 100644
index fa11ba6..0000000
--- a/internal/chartutil/values_test.go
+++ /dev/null
@@ -1,437 +0,0 @@
-/*
-Copyright 2022 The Flux authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package chartutil
-
-import (
- "context"
- "testing"
-
- "github.com/go-logr/logr"
- . "github.com/onsi/gomega"
- "helm.sh/helm/v3/pkg/chartutil"
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "sigs.k8s.io/controller-runtime/pkg/client/fake"
-
- v2 "github.com/fluxcd/helm-controller/api/v2"
-)
-
-func TestChartValuesFromReferences(t *testing.T) {
- scheme := testScheme()
-
- tests := []struct {
- name string
- resources []runtime.Object
- namespace string
- references []v2.ValuesReference
- values string
- want chartutil.Values
- wantErr bool
- }{
- {
- name: "merges",
- resources: []runtime.Object{
- mockConfigMap("values", map[string]string{
- "values.yaml": `flat: value
-nested:
- configuration: value
-`,
- }),
- mockSecret("values", map[string][]byte{
- "values.yaml": []byte(`flat:
- nested: value
-nested: value
-`),
- }),
- },
- references: []v2.ValuesReference{
- {
- Kind: kindConfigMap,
- Name: "values",
- },
- {
- Kind: kindSecret,
- Name: "values",
- },
- },
- values: `
-other: values
-`,
- want: chartutil.Values{
- "flat": map[string]interface{}{
- "nested": "value",
- },
- "nested": "value",
- "other": "values",
- },
- },
- {
- name: "with target path",
- resources: []runtime.Object{
- mockSecret("values", map[string][]byte{"single": []byte("value")}),
- },
- references: []v2.ValuesReference{
- {
- Kind: kindSecret,
- Name: "values",
- ValuesKey: "single",
- TargetPath: "merge.at.specific.path",
- },
- },
- want: chartutil.Values{
- "merge": map[string]interface{}{
- "at": map[string]interface{}{
- "specific": map[string]interface{}{
- "path": "value",
- },
- },
- },
- },
- },
- {
- name: "target path precedence over all",
- resources: []runtime.Object{
- mockConfigMap("values", map[string]string{
- "values.yaml": `flat: value
-nested:
- configuration:
- - one
- - two
- - three
-`,
- }),
- mockSecret("values", map[string][]byte{"key": []byte("value")}),
- },
- references: []v2.ValuesReference{
- {
- Kind: kindSecret,
- Name: "values",
- ValuesKey: "key",
- TargetPath: "nested.configuration[0]",
- },
- {
- Kind: kindConfigMap,
- Name: "values",
- },
- },
-
- values: `
-nested:
- configuration:
- - list
- - item
- - option
-`,
- want: chartutil.Values{
- "flat": "value",
- "nested": map[string]interface{}{
- "configuration": []interface{}{"value", "item", "option"},
- },
- },
- },
- {
- name: "target path for string type array item",
- resources: []runtime.Object{
- mockConfigMap("values", map[string]string{
- "values.yaml": `flat: value
-nested:
- configuration:
- - list
- - item
- - option
-`,
- }),
- mockSecret("values", map[string][]byte{
- "values.yaml": []byte(`foo`),
- }),
- },
- references: []v2.ValuesReference{
- {
- Kind: kindConfigMap,
- Name: "values",
- },
- {
- Kind: kindSecret,
- Name: "values",
- TargetPath: "nested.configuration[1]",
- },
- },
- values: `
-other: values
-`,
- want: chartutil.Values{
- "flat": "value",
- "nested": map[string]interface{}{
- "configuration": []interface{}{"list", "foo", "option"},
- },
- "other": "values",
- },
- },
- {
- name: "values reference to non existing secret",
- references: []v2.ValuesReference{
- {
- Kind: kindSecret,
- Name: "missing",
- },
- },
- wantErr: true,
- },
- {
- name: "optional values reference to non existing secret",
- references: []v2.ValuesReference{
- {
- Kind: kindSecret,
- Name: "missing",
- Optional: true,
- },
- },
- want: chartutil.Values{},
- wantErr: false,
- },
- {
- name: "values reference to non existing config map",
- references: []v2.ValuesReference{
- {
- Kind: kindConfigMap,
- Name: "missing",
- },
- },
- wantErr: true,
- },
- {
- name: "optional values reference to non existing config map",
- references: []v2.ValuesReference{
- {
- Kind: kindConfigMap,
- Name: "missing",
- Optional: true,
- },
- },
- want: chartutil.Values{},
- wantErr: false,
- },
- {
- name: "missing secret key",
- resources: []runtime.Object{
- mockSecret("values", nil),
- },
- references: []v2.ValuesReference{
- {
- Kind: kindSecret,
- Name: "values",
- ValuesKey: "nonexisting",
- },
- },
- wantErr: true,
- },
- {
- name: "missing config map key",
- resources: []runtime.Object{
- mockConfigMap("values", nil),
- },
- references: []v2.ValuesReference{
- {
- Kind: kindConfigMap,
- Name: "values",
- ValuesKey: "nonexisting",
- },
- },
- wantErr: true,
- },
- {
- name: "unsupported values reference kind",
- references: []v2.ValuesReference{
- {
- Kind: "Unsupported",
- },
- },
- wantErr: true,
- },
- {
- name: "invalid values",
- resources: []runtime.Object{
- mockConfigMap("values", map[string]string{
- "values.yaml": `
-invalid`,
- }),
- },
- references: []v2.ValuesReference{
- {
- Kind: kindConfigMap,
- Name: "values",
- },
- },
- wantErr: true,
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- g := NewWithT(t)
-
- c := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(tt.resources...)
- var values map[string]interface{}
- if tt.values != "" {
- m, err := chartutil.ReadValues([]byte(tt.values))
- g.Expect(err).ToNot(HaveOccurred())
- values = m
- }
- ctx := logr.NewContext(context.TODO(), logr.Discard())
- got, err := ChartValuesFromReferences(ctx, c.Build(), tt.namespace, values, tt.references...)
- if tt.wantErr {
- g.Expect(err).To(HaveOccurred())
- g.Expect(got).To(BeNil())
- return
- }
- g.Expect(err).ToNot(HaveOccurred())
- g.Expect(got).To(Equal(tt.want))
- })
- }
-}
-
-// This tests compatability with the formats described in:
-// https://helm.sh/docs/intro/using_helm/#the-format-and-limitations-of---set
-func TestReplacePathValue(t *testing.T) {
- tests := []struct {
- name string
- value []byte
- path string
- want map[string]interface{}
- wantErr bool
- }{
- {
- name: "outer inner",
- value: []byte("value"),
- path: "outer.inner",
- want: map[string]interface{}{
- "outer": map[string]interface{}{
- "inner": "value",
- },
- },
- },
- {
- name: "inline list",
- value: []byte("{a,b,c}"),
- path: "name",
- want: map[string]interface{}{
- // TODO(hidde): figure out why the cap is off by len+1
- "name": append(make([]interface{}, 0, 4), []interface{}{"a", "b", "c"}...),
- },
- },
- {
- name: "with escape",
- value: []byte(`value1\,value2`),
- path: "name",
- want: map[string]interface{}{
- "name": "value1,value2",
- },
- },
- {
- name: "target path with boolean value",
- value: []byte("true"),
- path: "merge.at.specific.path",
- want: chartutil.Values{
- "merge": map[string]interface{}{
- "at": map[string]interface{}{
- "specific": map[string]interface{}{
- "path": true,
- },
- },
- },
- },
- },
- {
- name: "target path with set-string behavior",
- value: []byte(`"true"`),
- path: "merge.at.specific.path",
- want: chartutil.Values{
- "merge": map[string]interface{}{
- "at": map[string]interface{}{
- "specific": map[string]interface{}{
- "path": "true",
- },
- },
- },
- },
- },
- {
- name: "target path with array item",
- value: []byte("value"),
- path: "merge.at[2]",
- want: chartutil.Values{
- "merge": map[string]interface{}{
- "at": []interface{}{nil, nil, "value"},
- },
- },
- },
- {
- name: "dot sequence escaping path",
- value: []byte("master"),
- path: `nodeSelector.kubernetes\.io/role`,
- want: map[string]interface{}{
- "nodeSelector": map[string]interface{}{
- "kubernetes.io/role": "master",
- },
- },
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- g := NewWithT(t)
- values := map[string]interface{}{}
- err := ReplacePathValue(values, tt.path, string(tt.value))
- if tt.wantErr {
- g.Expect(err).To(HaveOccurred())
- g.Expect(values).To(BeNil())
- return
- }
- g.Expect(err).ToNot(HaveOccurred())
- g.Expect(values).To(Equal(tt.want))
- })
- }
-}
-
-func mockSecret(name string, data map[string][]byte) *corev1.Secret {
- return &corev1.Secret{
- TypeMeta: metav1.TypeMeta{
- Kind: kindSecret,
- APIVersion: "v1",
- },
- ObjectMeta: metav1.ObjectMeta{Name: name},
- Data: data,
- }
-}
-
-func mockConfigMap(name string, data map[string]string) *corev1.ConfigMap {
- return &corev1.ConfigMap{
- TypeMeta: metav1.TypeMeta{
- Kind: kindConfigMap,
- APIVersion: "v1",
- },
- ObjectMeta: metav1.ObjectMeta{Name: name},
- Data: data,
- }
-}
-
-func testScheme() *runtime.Scheme {
- scheme := runtime.NewScheme()
- _ = corev1.AddToScheme(scheme)
- _ = v2.AddToScheme(scheme)
- return scheme
-}
diff --git a/internal/controller/helmrelease_controller.go b/internal/controller/helmrelease_controller.go
index 0f0dc20..1afe42f 100644
--- a/internal/controller/helmrelease_controller.go
+++ b/internal/controller/helmrelease_controller.go
@@ -58,10 +58,11 @@ import (
sourcev1 "github.com/fluxcd/source-controller/api/v1"
sourcev1beta2 "github.com/fluxcd/source-controller/api/v1beta2"
+ "github.com/fluxcd/pkg/chartutil"
+
v2 "github.com/fluxcd/helm-controller/api/v2"
intacl "github.com/fluxcd/helm-controller/internal/acl"
"github.com/fluxcd/helm-controller/internal/action"
- "github.com/fluxcd/helm-controller/internal/chartutil"
"github.com/fluxcd/helm-controller/internal/digest"
interrors "github.com/fluxcd/helm-controller/internal/errors"
"github.com/fluxcd/helm-controller/internal/features"
@@ -307,7 +308,12 @@ func (r *HelmReleaseReconciler) reconcileRelease(ctx context.Context, patchHelpe
}
// Compose values based from the spec and references.
- values, err := chartutil.ChartValuesFromReferences(ctx, r.Client, obj.Namespace, obj.GetValues(), obj.Spec.ValuesFrom...)
+ values, err := chartutil.ChartValuesFromReferences(ctx,
+ log,
+ r.Client,
+ obj.Namespace,
+ obj.GetValues(),
+ obj.Spec.ValuesFrom...)
if err != nil {
conditions.MarkFalse(obj, meta.ReadyCondition, "ValuesError", "%s", err)
r.Eventf(obj, corev1.EventTypeWarning, "ValuesError", err.Error())
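
The relocated ChartValuesFromReferences takes the logger as an explicit argument instead of pulling it from the context. A minimal sketch of the new call shape taken from the hunk above, assuming the logger is a logr.Logger such as the one the controller gets from ctrl.LoggerFrom(ctx), and that the return type stays Helm's chartutil.Values as in the removed internal helper:

package example

import (
	"context"

	"github.com/go-logr/logr"
	helmchartutil "helm.sh/helm/v3/pkg/chartutil"
	"sigs.k8s.io/controller-runtime/pkg/client"

	v2 "github.com/fluxcd/helm-controller/api/v2"
	"github.com/fluxcd/pkg/chartutil"
)

// composeValues mirrors the updated controller call: logger, client and
// namespace are passed explicitly, followed by the inline values and the
// valuesFrom references.
func composeValues(ctx context.Context, log logr.Logger, c client.Client, obj *v2.HelmRelease) (helmchartutil.Values, error) {
	return chartutil.ChartValuesFromReferences(ctx, log, c, obj.Namespace, obj.GetValues(), obj.Spec.ValuesFrom...)
}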
diff --git a/internal/controller/helmrelease_controller_test.go b/internal/controller/helmrelease_controller_test.go
index 8399679..0ff4f73 100644
--- a/internal/controller/helmrelease_controller_test.go
+++ b/internal/controller/helmrelease_controller_test.go
@@ -49,6 +49,7 @@ import (
aclv1 "github.com/fluxcd/pkg/apis/acl"
"github.com/fluxcd/pkg/apis/kustomize"
"github.com/fluxcd/pkg/apis/meta"
+ "github.com/fluxcd/pkg/chartutil"
"github.com/fluxcd/pkg/runtime/conditions"
feathelper "github.com/fluxcd/pkg/runtime/features"
"github.com/fluxcd/pkg/runtime/patch"
@@ -58,7 +59,6 @@ import (
v2 "github.com/fluxcd/helm-controller/api/v2"
intacl "github.com/fluxcd/helm-controller/internal/acl"
"github.com/fluxcd/helm-controller/internal/action"
- "github.com/fluxcd/helm-controller/internal/chartutil"
"github.com/fluxcd/helm-controller/internal/features"
"github.com/fluxcd/helm-controller/internal/kube"
"github.com/fluxcd/helm-controller/internal/postrender"
@@ -331,7 +331,7 @@ func TestHelmReleaseReconciler_reconcileRelease(t *testing.T) {
Namespace: "mock",
},
Spec: v2.HelmReleaseSpec{
- ValuesFrom: []v2.ValuesReference{
+ ValuesFrom: []meta.ValuesReference{
{
Kind: "Secret",
Name: "missing",
@@ -1537,7 +1537,7 @@ func TestHelmReleaseReconciler_reconcileReleaseFromOCIRepositorySource(t *testin
Name: "ocirepo",
Namespace: "mock",
},
- ValuesFrom: []v2.ValuesReference{
+ ValuesFrom: []meta.ValuesReference{
{
Kind: "Secret",
Name: "missing",
@@ -3526,12 +3526,12 @@ func Test_waitForHistoryCacheSync(t *testing.T) {
func TestValuesReferenceValidation(t *testing.T) {
tests := []struct {
name string
- references []v2.ValuesReference
+ references []meta.ValuesReference
wantErr bool
}{
{
name: "valid ValuesKey",
- references: []v2.ValuesReference{
+ references: []meta.ValuesReference{
{
Kind: "Secret",
Name: "values",
@@ -3542,7 +3542,7 @@ func TestValuesReferenceValidation(t *testing.T) {
},
{
name: "valid ValuesKey: empty",
- references: []v2.ValuesReference{
+ references: []meta.ValuesReference{
{
Kind: "Secret",
Name: "values",
@@ -3553,7 +3553,7 @@ func TestValuesReferenceValidation(t *testing.T) {
},
{
name: "valid ValuesKey: long",
- references: []v2.ValuesReference{
+ references: []meta.ValuesReference{
{
Kind: "Secret",
Name: "values",
@@ -3564,7 +3564,7 @@ func TestValuesReferenceValidation(t *testing.T) {
},
{
name: "invalid ValuesKey",
- references: []v2.ValuesReference{
+ references: []meta.ValuesReference{
{
Kind: "Secret",
Name: "values",
@@ -3575,7 +3575,7 @@ func TestValuesReferenceValidation(t *testing.T) {
},
{
name: "invalid ValuesKey: too long",
- references: []v2.ValuesReference{
+ references: []meta.ValuesReference{
{
Kind: "Secret",
Name: "values",
@@ -3586,7 +3586,7 @@ func TestValuesReferenceValidation(t *testing.T) {
},
{
name: "valid target path: empty",
- references: []v2.ValuesReference{
+ references: []meta.ValuesReference{
{
Kind: "Secret",
Name: "values",
@@ -3597,7 +3597,7 @@ func TestValuesReferenceValidation(t *testing.T) {
},
{
name: "valid target path",
- references: []v2.ValuesReference{
+ references: []meta.ValuesReference{
{
Kind: "Secret",
Name: "values",
@@ -3608,7 +3608,7 @@ func TestValuesReferenceValidation(t *testing.T) {
},
{
name: "valid target path: long",
- references: []v2.ValuesReference{
+ references: []meta.ValuesReference{
{
Kind: "Secret",
Name: "values",
@@ -3619,7 +3619,7 @@ func TestValuesReferenceValidation(t *testing.T) {
},
{
name: "invalid target path: too long",
- references: []v2.ValuesReference{
+ references: []meta.ValuesReference{
{
Kind: "Secret",
Name: "values",
@@ -3630,7 +3630,7 @@ func TestValuesReferenceValidation(t *testing.T) {
},
{
name: "invalid target path: opened index",
- references: []v2.ValuesReference{
+ references: []meta.ValuesReference{
{
Kind: "Secret",
Name: "values",
@@ -3642,7 +3642,7 @@ func TestValuesReferenceValidation(t *testing.T) {
},
{
name: "invalid target path: incorrect index syntax",
- references: []v2.ValuesReference{
+ references: []meta.ValuesReference{
{
Kind: "Secret",
Name: "values",
diff --git a/internal/reconcile/install.go b/internal/reconcile/install.go
index 50747d8..d07941b 100644
--- a/internal/reconcile/install.go
+++ b/internal/reconcile/install.go
@@ -30,8 +30,8 @@ import (
v2 "github.com/fluxcd/helm-controller/api/v2"
"github.com/fluxcd/helm-controller/internal/action"
- "github.com/fluxcd/helm-controller/internal/chartutil"
"github.com/fluxcd/helm-controller/internal/digest"
+ "github.com/fluxcd/pkg/chartutil"
)
// Install is an ActionReconciler which attempts to install a Helm release
diff --git a/internal/reconcile/install_test.go b/internal/reconcile/install_test.go
index 463b561..c1c6059 100644
--- a/internal/reconcile/install_test.go
+++ b/internal/reconcile/install_test.go
@@ -40,11 +40,11 @@ import (
v2 "github.com/fluxcd/helm-controller/api/v2"
"github.com/fluxcd/helm-controller/internal/action"
- "github.com/fluxcd/helm-controller/internal/chartutil"
"github.com/fluxcd/helm-controller/internal/digest"
"github.com/fluxcd/helm-controller/internal/release"
"github.com/fluxcd/helm-controller/internal/storage"
"github.com/fluxcd/helm-controller/internal/testutil"
+ "github.com/fluxcd/pkg/chartutil"
)
func TestInstall_Reconcile(t *testing.T) {
diff --git a/internal/reconcile/rollback_remediation.go b/internal/reconcile/rollback_remediation.go
index ecf1a72..d1d492a 100644
--- a/internal/reconcile/rollback_remediation.go
+++ b/internal/reconcile/rollback_remediation.go
@@ -31,10 +31,10 @@ import (
v2 "github.com/fluxcd/helm-controller/api/v2"
"github.com/fluxcd/helm-controller/internal/action"
- "github.com/fluxcd/helm-controller/internal/chartutil"
"github.com/fluxcd/helm-controller/internal/digest"
"github.com/fluxcd/helm-controller/internal/release"
"github.com/fluxcd/helm-controller/internal/storage"
+ "github.com/fluxcd/pkg/chartutil"
)
// RollbackRemediation is an ActionReconciler which attempts to roll back
diff --git a/internal/reconcile/rollback_remediation_test.go b/internal/reconcile/rollback_remediation_test.go
index d455fa3..1752533 100644
--- a/internal/reconcile/rollback_remediation_test.go
+++ b/internal/reconcile/rollback_remediation_test.go
@@ -39,11 +39,11 @@ import (
v2 "github.com/fluxcd/helm-controller/api/v2"
"github.com/fluxcd/helm-controller/internal/action"
- "github.com/fluxcd/helm-controller/internal/chartutil"
"github.com/fluxcd/helm-controller/internal/digest"
"github.com/fluxcd/helm-controller/internal/release"
"github.com/fluxcd/helm-controller/internal/storage"
"github.com/fluxcd/helm-controller/internal/testutil"
+ "github.com/fluxcd/pkg/chartutil"
)
func TestRollbackRemediation_Reconcile(t *testing.T) {
diff --git a/internal/reconcile/test_test.go b/internal/reconcile/test_test.go
index 5037746..da1f7ef 100644
--- a/internal/reconcile/test_test.go
+++ b/internal/reconcile/test_test.go
@@ -38,10 +38,10 @@ import (
v2 "github.com/fluxcd/helm-controller/api/v2"
"github.com/fluxcd/helm-controller/internal/action"
- "github.com/fluxcd/helm-controller/internal/chartutil"
"github.com/fluxcd/helm-controller/internal/digest"
"github.com/fluxcd/helm-controller/internal/release"
"github.com/fluxcd/helm-controller/internal/testutil"
+ "github.com/fluxcd/pkg/chartutil"
)
// testHookFixtures is a list of release.Hook in every possible LastRun state.
diff --git a/internal/reconcile/uninstall_remediation_test.go b/internal/reconcile/uninstall_remediation_test.go
index 5f1dfb3..1b89158 100644
--- a/internal/reconcile/uninstall_remediation_test.go
+++ b/internal/reconcile/uninstall_remediation_test.go
@@ -37,11 +37,11 @@ import (
v2 "github.com/fluxcd/helm-controller/api/v2"
"github.com/fluxcd/helm-controller/internal/action"
- "github.com/fluxcd/helm-controller/internal/chartutil"
"github.com/fluxcd/helm-controller/internal/digest"
"github.com/fluxcd/helm-controller/internal/release"
"github.com/fluxcd/helm-controller/internal/storage"
"github.com/fluxcd/helm-controller/internal/testutil"
+ "github.com/fluxcd/pkg/chartutil"
)
func TestUninstallRemediation_Reconcile(t *testing.T) {
diff --git a/internal/reconcile/uninstall_test.go b/internal/reconcile/uninstall_test.go
index a0e19aa..ba21f6e 100644
--- a/internal/reconcile/uninstall_test.go
+++ b/internal/reconcile/uninstall_test.go
@@ -38,11 +38,11 @@ import (
v2 "github.com/fluxcd/helm-controller/api/v2"
"github.com/fluxcd/helm-controller/internal/action"
- "github.com/fluxcd/helm-controller/internal/chartutil"
"github.com/fluxcd/helm-controller/internal/digest"
"github.com/fluxcd/helm-controller/internal/release"
"github.com/fluxcd/helm-controller/internal/storage"
"github.com/fluxcd/helm-controller/internal/testutil"
+ "github.com/fluxcd/pkg/chartutil"
)
func TestUninstall_Reconcile(t *testing.T) {
diff --git a/internal/reconcile/unlock_test.go b/internal/reconcile/unlock_test.go
index 4a1459e..4b441e9 100644
--- a/internal/reconcile/unlock_test.go
+++ b/internal/reconcile/unlock_test.go
@@ -38,11 +38,11 @@ import (
v2 "github.com/fluxcd/helm-controller/api/v2"
"github.com/fluxcd/helm-controller/internal/action"
- "github.com/fluxcd/helm-controller/internal/chartutil"
"github.com/fluxcd/helm-controller/internal/digest"
"github.com/fluxcd/helm-controller/internal/release"
"github.com/fluxcd/helm-controller/internal/storage"
"github.com/fluxcd/helm-controller/internal/testutil"
+ "github.com/fluxcd/pkg/chartutil"
)
func TestUnlock_Reconcile(t *testing.T) {
diff --git a/internal/reconcile/upgrade.go b/internal/reconcile/upgrade.go
index fba330d..ebb14d4 100644
--- a/internal/reconcile/upgrade.go
+++ b/internal/reconcile/upgrade.go
@@ -30,8 +30,8 @@ import (
v2 "github.com/fluxcd/helm-controller/api/v2"
"github.com/fluxcd/helm-controller/internal/action"
- "github.com/fluxcd/helm-controller/internal/chartutil"
"github.com/fluxcd/helm-controller/internal/digest"
+ "github.com/fluxcd/pkg/chartutil"
)
// Upgrade is an ActionReconciler which attempts to upgrade a Helm release
diff --git a/internal/reconcile/upgrade_test.go b/internal/reconcile/upgrade_test.go
index 65eb6e2..6418624 100644
--- a/internal/reconcile/upgrade_test.go
+++ b/internal/reconcile/upgrade_test.go
@@ -40,11 +40,11 @@ import (
v2 "github.com/fluxcd/helm-controller/api/v2"
"github.com/fluxcd/helm-controller/internal/action"
- "github.com/fluxcd/helm-controller/internal/chartutil"
"github.com/fluxcd/helm-controller/internal/digest"
"github.com/fluxcd/helm-controller/internal/release"
"github.com/fluxcd/helm-controller/internal/storage"
"github.com/fluxcd/helm-controller/internal/testutil"
+ "github.com/fluxcd/pkg/chartutil"
)
func TestUpgrade_Reconcile(t *testing.T) {
diff --git a/internal/release/observation.go b/internal/release/observation.go
index 71ec186..fa80889 100644
--- a/internal/release/observation.go
+++ b/internal/release/observation.go
@@ -26,8 +26,8 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v2 "github.com/fluxcd/helm-controller/api/v2"
- "github.com/fluxcd/helm-controller/internal/chartutil"
"github.com/fluxcd/helm-controller/internal/digest"
+ "github.com/fluxcd/pkg/chartutil"
)
var (
diff --git a/internal/yaml/encode.go b/internal/yaml/encode.go
deleted file mode 100644
index b8eab3a..0000000
--- a/internal/yaml/encode.go
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
-Copyright 2023 The Flux authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package yaml
-
-import (
- "io"
-
- "sigs.k8s.io/yaml"
- goyaml "sigs.k8s.io/yaml/goyaml.v2"
-)
-
-// PreEncoder allows for pre-processing of the YAML data before encoding.
-type PreEncoder func(goyaml.MapSlice)
-
-// Encode encodes the given data to YAML format and writes it to the provided
-// io.Write, without going through a byte representation (unlike
-// sigs.k8s.io/yaml#Unmarshal).
-//
-// It optionally takes one or more PreEncoder functions that allow
-// for pre-processing of the data before encoding, such as sorting the data.
-//
-// It returns an error if the data cannot be encoded.
-func Encode(w io.Writer, data map[string]interface{}, pe ...PreEncoder) error {
- ms := yaml.JSONObjectToYAMLObject(data)
- for _, m := range pe {
- m(ms)
- }
- return goyaml.NewEncoder(w).Encode(ms)
-}
diff --git a/internal/yaml/encode_test.go b/internal/yaml/encode_test.go
deleted file mode 100644
index 048c221..0000000
--- a/internal/yaml/encode_test.go
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
-Copyright 2023 The Flux authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package yaml
-
-import (
- "bytes"
- "os"
- "testing"
-
- "sigs.k8s.io/yaml"
-)
-
-func TestEncode(t *testing.T) {
- tests := []struct {
- name string
- input map[string]interface{}
- preEncoders []PreEncoder
- want []byte
- }{
- {
- name: "empty map",
- input: map[string]interface{}{},
- want: []byte(`{}
-`),
- },
- {
- name: "simple values",
- input: map[string]interface{}{
- "replicaCount": 3,
- },
- want: []byte(`replicaCount: 3
-`),
- },
- {
- name: "with pre-encoder",
- input: map[string]interface{}{
- "replicaCount": 3,
- "image": map[string]interface{}{
- "repository": "nginx",
- "tag": "latest",
- },
- "port": 8080,
- },
- preEncoders: []PreEncoder{SortMapSlice},
- want: []byte(`image:
- repository: nginx
- tag: latest
-port: 8080
-replicaCount: 3
-`),
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- var actual bytes.Buffer
- err := Encode(&actual, tt.input, tt.preEncoders...)
- if err != nil {
- t.Fatalf("error encoding: %v", err)
- }
-
- if !bytes.Equal(actual.Bytes(), tt.want) {
- t.Errorf("Encode() = %v, want: %s", actual.String(), tt.want)
- }
- })
- }
-}
-
-func BenchmarkEncode(b *testing.B) {
- // Test against the values.yaml from the kube-prometheus-stack chart, which
- // is a fairly large file.
- v, err := os.ReadFile("testdata/values.yaml")
- if err != nil {
- b.Fatalf("error reading testdata: %v", err)
- }
-
- var data map[string]interface{}
- if err = yaml.Unmarshal(v, &data); err != nil {
- b.Fatalf("error unmarshalling testdata: %v", err)
- }
-
- b.Run("EncodeWithSort", func(b *testing.B) {
- for i := 0; i < b.N; i++ {
- Encode(bytes.NewBuffer(nil), data, SortMapSlice)
- }
- })
-
- b.Run("SigYAMLMarshal", func(b *testing.B) {
- for i := 0; i < b.N; i++ {
- yaml.Marshal(data)
- }
- })
-}
diff --git a/internal/yaml/sort.go b/internal/yaml/sort.go
deleted file mode 100644
index b00d772..0000000
--- a/internal/yaml/sort.go
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
-Copyright 2023 The Flux authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package yaml
-
-import (
- "sort"
-
- goyaml "sigs.k8s.io/yaml/goyaml.v2"
-)
-
-// SortMapSlice recursively sorts the given goyaml.MapSlice by key.
-// It can be used in combination with Encode to sort YAML by key
-// before encoding it.
-func SortMapSlice(ms goyaml.MapSlice) {
- sort.Slice(ms, func(i, j int) bool {
- return ms[i].Key.(string) < ms[j].Key.(string)
- })
-
- for _, item := range ms {
- if nestedMS, ok := item.Value.(goyaml.MapSlice); ok {
- SortMapSlice(nestedMS)
- } else if nestedSlice, ok := item.Value.([]interface{}); ok {
- for _, vItem := range nestedSlice {
- if nestedMS, ok := vItem.(goyaml.MapSlice); ok {
- SortMapSlice(nestedMS)
- }
- }
- }
- }
-}
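
A hypothetical standalone sketch (again assumed to sit in the removed package, with illustrative keys) showing that the sort happens in place on the goyaml.MapSlice and recurses into nested maps:

package yaml

import (
	"fmt"

	goyaml "sigs.k8s.io/yaml/goyaml.v2"
)

// ExampleSortMapSlice sorts a hand-built MapSlice; the backing array is
// reordered in place, and nested MapSlice values are sorted recursively.
func ExampleSortMapSlice() {
	ms := goyaml.MapSlice{
		{Key: "c", Value: goyaml.MapSlice{
			{Key: "z", Value: 1},
			{Key: "y", Value: 2},
		}},
		{Key: "a", Value: "first"},
	}

	SortMapSlice(ms)

	out, err := goyaml.Marshal(ms)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))

	// Output:
	// a: first
	// c:
	//   y: 2
	//   z: 1
}
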
diff --git a/internal/yaml/sort_test.go b/internal/yaml/sort_test.go
deleted file mode 100644
index 82fdaf3..0000000
--- a/internal/yaml/sort_test.go
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
-Copyright 2023 The Flux authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package yaml
-
-import (
- "bytes"
- "testing"
-
- "sigs.k8s.io/yaml"
- goyaml "sigs.k8s.io/yaml/goyaml.v2"
-)
-
-func TestSortMapSlice(t *testing.T) {
- tests := []struct {
- name string
- input map[string]interface{}
- want map[string]interface{}
- }{
- {
- name: "empty map",
- input: map[string]interface{}{},
- want: map[string]interface{}{},
- },
- {
- name: "flat map",
- input: map[string]interface{}{
- "b": "value-b",
- "a": "value-a",
- "c": "value-c",
- },
- want: map[string]interface{}{
- "a": "value-a",
- "b": "value-b",
- "c": "value-c",
- },
- },
- {
- name: "nested map",
- input: map[string]interface{}{
- "b": "value-b",
- "a": "value-a",
- "c": map[string]interface{}{
- "z": "value-z",
- "y": "value-y",
- },
- },
- want: map[string]interface{}{
- "a": "value-a",
- "b": "value-b",
- "c": map[string]interface{}{
- "y": "value-y",
- "z": "value-z",
- },
- },
- },
- {
- name: "map with slices",
- input: map[string]interface{}{
- "b": []interface{}{"apple", "banana", "cherry"},
- "a": []interface{}{"orange", "grape"},
- "c": []interface{}{"strawberry"},
- },
- want: map[string]interface{}{
- "a": []interface{}{"orange", "grape"},
- "b": []interface{}{"apple", "banana", "cherry"},
- "c": []interface{}{"strawberry"},
- },
- },
- {
- name: "map with mixed data types",
- input: map[string]interface{}{
- "b": 50,
- "a": "value-a",
- "c": []interface{}{"strawberry", "banana"},
- "d": map[string]interface{}{
- "x": true,
- "y": 123,
- },
- },
- want: map[string]interface{}{
- "a": "value-a",
- "b": 50,
- "c": []interface{}{"strawberry", "banana"},
- "d": map[string]interface{}{
- "x": true,
- "y": 123,
- },
- },
- },
- {
- name: "map with complex structure",
- input: map[string]interface{}{
- "a": map[string]interface{}{
- "c": "value-c",
- "b": "value-b",
- "a": "value-a",
- },
- "b": "value-b",
- "c": map[string]interface{}{
- "z": map[string]interface{}{
- "a": "value-a",
- "b": "value-b",
- "c": "value-c",
- },
- "y": "value-y",
- },
- "d": map[string]interface{}{
- "q": "value-q",
- "p": "value-p",
- "r": "value-r",
- },
- "e": []interface{}{"strawberry", "banana"},
- },
- want: map[string]interface{}{
- "a": map[string]interface{}{
- "a": "value-a",
- "b": "value-b",
- "c": "value-c",
- },
- "b": "value-b",
- "c": map[string]interface{}{
- "y": "value-y",
- "z": map[string]interface{}{
- "a": "value-a",
- "b": "value-b",
- "c": "value-c",
- },
- },
- "d": map[string]interface{}{
- "p": "value-p",
- "q": "value-q",
- "r": "value-r",
- },
- "e": []interface{}{"strawberry", "banana"},
- },
- },
- {
- name: "map with empty slices and maps",
- input: map[string]interface{}{
- "b": []interface{}{},
- "a": map[string]interface{}{},
- },
- want: map[string]interface{}{
- "a": map[string]interface{}{},
- "b": []interface{}{},
- },
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- input := yaml.JSONObjectToYAMLObject(tt.input)
- SortMapSlice(input)
-
- expect, err := goyaml.Marshal(input)
- if err != nil {
- t.Fatalf("error marshalling output: %v", err)
- }
- actual, err := goyaml.Marshal(tt.want)
- if err != nil {
- t.Fatalf("error marshalling want: %v", err)
- }
-
- if !bytes.Equal(expect, actual) {
- t.Errorf("SortMapSlice() = %s, want %s", expect, actual)
- }
- })
- }
-}
diff --git a/internal/yaml/testdata/values.yaml b/internal/yaml/testdata/values.yaml
deleted file mode 100644
index 51d7c52..0000000
--- a/internal/yaml/testdata/values.yaml
+++ /dev/null
@@ -1,4043 +0,0 @@
-# Snapshot taken from: https://raw.githubusercontent.com/prometheus-community/helm-charts/kube-prometheus-stack-48.1.1/charts/kube-prometheus-stack/values.yaml
-
-# Default values for kube-prometheus-stack.
-# This is a YAML-formatted file.
-# Declare variables to be passed into your templates.
-
-## Provide a name in place of kube-prometheus-stack for `app:` labels
-##
-nameOverride: ""
-
-## Override the deployment namespace
-##
-namespaceOverride: ""
-
-## Provide a k8s version to auto dashboard import script example: kubeTargetVersionOverride: 1.16.6
-##
-kubeTargetVersionOverride: ""
-
-## Allow kubeVersion to be overridden while creating the ingress
-##
-kubeVersionOverride: ""
-
-## Provide a name to substitute for the full names of resources
-##
-fullnameOverride: ""
-
-## Labels to apply to all resources
-##
-commonLabels: {}
-# scmhash: abc123
-# myLabel: aakkmd
-
-## Install Prometheus Operator CRDs
-##
-crds:
- enabled: true
-
-## Create default rules for monitoring the cluster
-##
-defaultRules:
- create: true
- rules:
- alertmanager: true
- etcd: true
- configReloaders: true
- general: true
- k8s: true
- kubeApiserverAvailability: true
- kubeApiserverBurnrate: true
- kubeApiserverHistogram: true
- kubeApiserverSlos: true
- kubeControllerManager: true
- kubelet: true
- kubeProxy: true
- kubePrometheusGeneral: true
- kubePrometheusNodeRecording: true
- kubernetesApps: true
- kubernetesResources: true
- kubernetesStorage: true
- kubernetesSystem: true
- kubeSchedulerAlerting: true
- kubeSchedulerRecording: true
- kubeStateMetrics: true
- network: true
- node: true
- nodeExporterAlerting: true
- nodeExporterRecording: true
- prometheus: true
- prometheusOperator: true
- windows: true
-
- ## Reduce app namespace alert scope
- appNamespacesTarget: ".*"
-
- ## Labels for default rules
- labels: {}
- ## Annotations for default rules
- annotations: {}
-
- ## Additional labels for PrometheusRule alerts
- additionalRuleLabels: {}
-
- ## Additional annotations for PrometheusRule alerts
- additionalRuleAnnotations: {}
-
- ## Additional labels for specific PrometheusRule alert groups
- additionalRuleGroupLabels:
- alertmanager: {}
- etcd: {}
- configReloaders: {}
- general: {}
- k8s: {}
- kubeApiserverAvailability: {}
- kubeApiserverBurnrate: {}
- kubeApiserverHistogram: {}
- kubeApiserverSlos: {}
- kubeControllerManager: {}
- kubelet: {}
- kubeProxy: {}
- kubePrometheusGeneral: {}
- kubePrometheusNodeRecording: {}
- kubernetesApps: {}
- kubernetesResources: {}
- kubernetesStorage: {}
- kubernetesSystem: {}
- kubeSchedulerAlerting: {}
- kubeSchedulerRecording: {}
- kubeStateMetrics: {}
- network: {}
- node: {}
- nodeExporterAlerting: {}
- nodeExporterRecording: {}
- prometheus: {}
- prometheusOperator: {}
-
- ## Additional annotations for specific PrometheusRule alerts groups
- additionalRuleGroupAnnotations:
- alertmanager: {}
- etcd: {}
- configReloaders: {}
- general: {}
- k8s: {}
- kubeApiserverAvailability: {}
- kubeApiserverBurnrate: {}
- kubeApiserverHistogram: {}
- kubeApiserverSlos: {}
- kubeControllerManager: {}
- kubelet: {}
- kubeProxy: {}
- kubePrometheusGeneral: {}
- kubePrometheusNodeRecording: {}
- kubernetesApps: {}
- kubernetesResources: {}
- kubernetesStorage: {}
- kubernetesSystem: {}
- kubeSchedulerAlerting: {}
- kubeSchedulerRecording: {}
- kubeStateMetrics: {}
- network: {}
- node: {}
- nodeExporterAlerting: {}
- nodeExporterRecording: {}
- prometheus: {}
- prometheusOperator: {}
-
- ## Prefix for runbook URLs. Use this to override the first part of the runbookURLs that is common to all rules.
- runbookUrl: "https://runbooks.prometheus-operator.dev/runbooks"
-
- ## Disabled PrometheusRule alerts
- disabled: {}
- # KubeAPIDown: true
- # NodeRAIDDegraded: true
-
-## Deprecated way to provide custom recording or alerting rules to be deployed into the cluster.
-##
-# additionalPrometheusRules: []
-# - name: my-rule-file
-# groups:
-# - name: my_group
-# rules:
-# - record: my_record
-# expr: 100 * my_record
-
-## Provide custom recording or alerting rules to be deployed into the cluster.
-##
-additionalPrometheusRulesMap: {}
-# rule-name:
-# groups:
-# - name: my_group
-# rules:
-# - record: my_record
-# expr: 100 * my_record
-
-##
-global:
- rbac:
- create: true
-
- ## Create ClusterRoles that extend the existing view, edit and admin ClusterRoles to interact with prometheus-operator CRDs
- ## Ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#aggregated-clusterroles
- createAggregateClusterRoles: false
- pspEnabled: false
- pspAnnotations: {}
- ## Specify pod annotations
- ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor
- ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
- ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl
- ##
- # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
- # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
- # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
-
- ## Global image registry to use if it needs to be overriden for some specific use cases (e.g local registries, custom images, ...)
- ##
- imageRegistry: ""
-
- ## Reference to one or more secrets to be used when pulling images
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
- ##
- imagePullSecrets: []
- # - name: "image-pull-secret"
- # or
- # - "image-pull-secret"
-
-windowsMonitoring:
- ## Deploys the windows-exporter and Windows-specific dashboards and rules
- enabled: false
- ## Job must match jobLabel in the PodMonitor/ServiceMonitor and is used for the rules
- job: prometheus-windows-exporter
-
-## Configuration for alertmanager
-## ref: https://prometheus.io/docs/alerting/alertmanager/
-##
-alertmanager:
-
- ## Deploy alertmanager
- ##
- enabled: true
-
- ## Annotations for Alertmanager
- ##
- annotations: {}
-
- ## Api that prometheus will use to communicate with alertmanager. Possible values are v1, v2
- ##
- apiVersion: v2
-
- ## Service account for Alertmanager to use.
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
- ##
- serviceAccount:
- create: true
- name: ""
- annotations: {}
- automountServiceAccountToken: true
-
- ## Configure pod disruption budgets for Alertmanager
- ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget
- ## This configuration is immutable once created and will require the PDB to be deleted to be changed
- ## https://github.com/kubernetes/kubernetes/issues/45398
- ##
- podDisruptionBudget:
- enabled: false
- minAvailable: 1
- maxUnavailable: ""
-
- ## Alertmanager configuration directives
- ## ref: https://prometheus.io/docs/alerting/configuration/#configuration-file
- ## https://prometheus.io/webtools/alerting/routing-tree-editor/
- ##
- config:
- global:
- resolve_timeout: 5m
- inhibit_rules:
- - source_matchers:
- - 'severity = critical'
- target_matchers:
- - 'severity =~ warning|info'
- equal:
- - 'namespace'
- - 'alertname'
- - source_matchers:
- - 'severity = warning'
- target_matchers:
- - 'severity = info'
- equal:
- - 'namespace'
- - 'alertname'
- - source_matchers:
- - 'alertname = InfoInhibitor'
- target_matchers:
- - 'severity = info'
- equal:
- - 'namespace'
- route:
- group_by: ['namespace']
- group_wait: 30s
- group_interval: 5m
- repeat_interval: 12h
- receiver: 'null'
- routes:
- - receiver: 'null'
- matchers:
- - alertname =~ "InfoInhibitor|Watchdog"
- receivers:
- - name: 'null'
- templates:
- - '/etc/alertmanager/config/*.tmpl'
-
- ## Alertmanager configuration directives (as string type, preferred over the config hash map)
- ## stringConfig will be used only, if tplConfig is true
- ## ref: https://prometheus.io/docs/alerting/configuration/#configuration-file
- ## https://prometheus.io/webtools/alerting/routing-tree-editor/
- ##
- stringConfig: ""
-
- ## Pass the Alertmanager configuration directives through Helm's templating
- ## engine. If the Alertmanager configuration contains Alertmanager templates,
- ## they'll need to be properly escaped so that they are not interpreted by
- ## Helm
- ## ref: https://helm.sh/docs/developing_charts/#using-the-tpl-function
- ## https://prometheus.io/docs/alerting/configuration/#tmpl_string
- ## https://prometheus.io/docs/alerting/notifications/
- ## https://prometheus.io/docs/alerting/notification_examples/
- tplConfig: false
-
- ## Alertmanager template files to format alerts
- ## By default, templateFiles are placed in /etc/alertmanager/config/ and if
- ## they have a .tmpl file suffix will be loaded. See config.templates above
- ## to change, add other suffixes. If adding other suffixes, be sure to update
- ## config.templates above to include those suffixes.
- ## ref: https://prometheus.io/docs/alerting/notifications/
- ## https://prometheus.io/docs/alerting/notification_examples/
- ##
- templateFiles: {}
- #
- ## An example template:
- # template_1.tmpl: |-
- # {{ define "cluster" }}{{ .ExternalURL | reReplaceAll ".*alertmanager\\.(.*)" "$1" }}{{ end }}
- #
- # {{ define "slack.myorg.text" }}
- # {{- $root := . -}}
- # {{ range .Alerts }}
- # *Alert:* {{ .Annotations.summary }} - `{{ .Labels.severity }}`
- # *Cluster:* {{ template "cluster" $root }}
- # *Description:* {{ .Annotations.description }}
- # *Graph:* <{{ .GeneratorURL }}|:chart_with_upwards_trend:>
- # *Runbook:* <{{ .Annotations.runbook }}|:spiral_note_pad:>
- # *Details:*
- # {{ range .Labels.SortedPairs }} - *{{ .Name }}:* `{{ .Value }}`
- # {{ end }}
- # {{ end }}
- # {{ end }}
-
- ingress:
- enabled: false
-
- # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
- # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
- # ingressClassName: nginx
-
- annotations: {}
-
- labels: {}
-
- ## Override ingress to a different defined port on the service
- # servicePort: 8081
- ## Override ingress to a different service then the default, this is useful if you need to
- ## point to a specific instance of the alertmanager (eg kube-prometheus-stack-alertmanager-0)
- # serviceName: kube-prometheus-stack-alertmanager-0
-
- ## Hosts must be provided if Ingress is enabled.
- ##
- hosts: []
- # - alertmanager.domain.com
-
- ## Paths to use for ingress rules - one path should match the alertmanagerSpec.routePrefix
- ##
- paths: []
- # - /
-
- ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
- ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
- # pathType: ImplementationSpecific
-
- ## TLS configuration for Alertmanager Ingress
- ## Secret must be manually created in the namespace
- ##
- tls: []
- # - secretName: alertmanager-general-tls
- # hosts:
- # - alertmanager.example.com
-
- ## Configuration for Alertmanager secret
- ##
- secret:
- annotations: {}
-
- ## Configuration for creating an Ingress that will map to each Alertmanager replica service
- ## alertmanager.servicePerReplica must be enabled
- ##
- ingressPerReplica:
- enabled: false
-
- # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
- # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
- # ingressClassName: nginx
-
- annotations: {}
- labels: {}
-
- ## Final form of the hostname for each per replica ingress is
- ## {{ ingressPerReplica.hostPrefix }}-{{ $replicaNumber }}.{{ ingressPerReplica.hostDomain }}
- ##
- ## Prefix for the per replica ingress that will have `-$replicaNumber`
- ## appended to the end
- hostPrefix: ""
- ## Domain that will be used for the per replica ingress
- hostDomain: ""
-
- ## Paths to use for ingress rules
- ##
- paths: []
- # - /
-
- ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
- ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
- # pathType: ImplementationSpecific
-
- ## Secret name containing the TLS certificate for alertmanager per replica ingress
- ## Secret must be manually created in the namespace
- tlsSecretName: ""
-
- ## Separated secret for each per replica Ingress. Can be used together with cert-manager
- ##
- tlsSecretPerReplica:
- enabled: false
- ## Final form of the secret for each per replica ingress is
- ## {{ tlsSecretPerReplica.prefix }}-{{ $replicaNumber }}
- ##
- prefix: "alertmanager"
-
- ## Configuration for Alertmanager service
- ##
- service:
- annotations: {}
- labels: {}
- clusterIP: ""
-
- ## Port for Alertmanager Service to listen on
- ##
- port: 9093
- ## To be used with a proxy extraContainer port
- ##
- targetPort: 9093
- ## Port to expose on each node
- ## Only used if service.type is 'NodePort'
- ##
- nodePort: 30903
- ## List of IP addresses at which the Prometheus server service is available
- ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
- ##
-
- ## Additional ports to open for Alertmanager service
- additionalPorts: []
- # additionalPorts:
- # - name: authenticated
- # port: 8081
- # targetPort: 8081
-
- externalIPs: []
- loadBalancerIP: ""
- loadBalancerSourceRanges: []
-
- ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
- ##
- externalTrafficPolicy: Cluster
-
- ## If you want to make sure that connections from a particular client are passed to the same Pod each time
- ## Accepts 'ClientIP' or ''
- ##
- sessionAffinity: ""
-
- ## Service type
- ##
- type: ClusterIP
-
- ## Configuration for creating a separate Service for each statefulset Alertmanager replica
- ##
- servicePerReplica:
- enabled: false
- annotations: {}
-
- ## Port for Alertmanager Service per replica to listen on
- ##
- port: 9093
-
- ## To be used with a proxy extraContainer port
- targetPort: 9093
-
- ## Port to expose on each node
- ## Only used if servicePerReplica.type is 'NodePort'
- ##
- nodePort: 30904
-
- ## Loadbalancer source IP ranges
- ## Only used if servicePerReplica.type is "LoadBalancer"
- loadBalancerSourceRanges: []
-
- ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
- ##
- externalTrafficPolicy: Cluster
-
- ## Service type
- ##
- type: ClusterIP
-
- ## If true, create a serviceMonitor for alertmanager
- ##
- serviceMonitor:
- ## Scrape interval. If not set, the Prometheus default scrape interval is used.
- ##
- interval: ""
- selfMonitor: true
-
- ## Additional labels
- ##
- additionalLabels: {}
-
- ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
- ##
- sampleLimit: 0
-
- ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
- ##
- targetLimit: 0
-
- ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
- ##
- labelLimit: 0
-
- ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
- ##
- labelNameLengthLimit: 0
-
- ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
- ##
- labelValueLengthLimit: 0
-
- ## proxyUrl: URL of a proxy that should be used for scraping.
- ##
- proxyUrl: ""
-
- ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
- scheme: ""
-
- ## enableHttp2: Whether to enable HTTP2.
- ## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#endpoint
- enableHttp2: true
-
- ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
- ## Of type: https://github.com/coreos/prometheus-operator/blob/main/Documentation/api.md#tlsconfig
- tlsConfig: {}
-
- bearerTokenFile:
-
- ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
- ##
- metricRelabelings: []
- # - action: keep
- # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
- # sourceLabels: [__name__]
-
- ## RelabelConfigs to apply to samples before scraping
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
- ##
- relabelings: []
- # - sourceLabels: [__meta_kubernetes_pod_node_name]
- # separator: ;
- # regex: ^(.*)$
- # targetLabel: nodename
- # replacement: $1
- # action: replace
-
- ## Settings affecting alertmanagerSpec
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#alertmanagerspec
- ##
- alertmanagerSpec:
- ## Standard object's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
- ## Metadata Labels and Annotations gets propagated to the Alertmanager pods.
- ##
- podMetadata: {}
-
- ## Image of Alertmanager
- ##
- image:
- registry: quay.io
- repository: prometheus/alertmanager
- tag: v0.25.0
- sha: ""
-
- ## If true then the user will be responsible to provide a secret with alertmanager configuration
- ## So when true the config part will be ignored (including templateFiles) and the one in the secret will be used
- ##
- useExistingSecret: false
-
- ## Secrets is a list of Secrets in the same namespace as the Alertmanager object, which shall be mounted into the
- ## Alertmanager Pods. The Secrets are mounted into /etc/alertmanager/secrets/.
- ##
- secrets: []
-
- ## ConfigMaps is a list of ConfigMaps in the same namespace as the Alertmanager object, which shall be mounted into the Alertmanager Pods.
- ## The ConfigMaps are mounted into /etc/alertmanager/configmaps/.
- ##
- configMaps: []
-
- ## ConfigSecret is the name of a Kubernetes Secret in the same namespace as the Alertmanager object, which contains configuration for
- ## this Alertmanager instance. Defaults to 'alertmanager-' The secret is mounted into /etc/alertmanager/config.
- ##
- # configSecret:
-
- ## WebTLSConfig defines the TLS parameters for HTTPS
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#alertmanagerwebspec
- web: {}
-
- ## AlertmanagerConfigs to be selected to merge and configure Alertmanager with.
- ##
- alertmanagerConfigSelector: {}
- ## Example which selects all alertmanagerConfig resources
- ## with label "alertconfig" with values any of "example-config" or "example-config-2"
- # alertmanagerConfigSelector:
- # matchExpressions:
- # - key: alertconfig
- # operator: In
- # values:
- # - example-config
- # - example-config-2
- #
- ## Example which selects all alertmanagerConfig resources with label "role" set to "example-config"
- # alertmanagerConfigSelector:
- # matchLabels:
- # role: example-config
-
- ## Namespaces to be selected for AlertmanagerConfig discovery. If nil, only check own namespace.
- ##
- alertmanagerConfigNamespaceSelector: {}
- ## Example which selects all namespaces
- ## with label "alertmanagerconfig" with values any of "example-namespace" or "example-namespace-2"
- # alertmanagerConfigNamespaceSelector:
- # matchExpressions:
- # - key: alertmanagerconfig
- # operator: In
- # values:
- # - example-namespace
- # - example-namespace-2
-
- ## Example which selects all namespaces with label "alertmanagerconfig" set to "enabled"
- # alertmanagerConfigNamespaceSelector:
- # matchLabels:
- # alertmanagerconfig: enabled
-
- ## AlermanagerConfig to be used as top level configuration
- ##
- alertmanagerConfiguration: {}
- ## Example with select a global alertmanagerconfig
- # alertmanagerConfiguration:
- # name: global-alertmanager-Configuration
-
- ## Defines the strategy used by AlertmanagerConfig objects to match alerts. eg:
- ##
- alertmanagerConfigMatcherStrategy: {}
- ## Example with use OnNamespace strategy
- # alertmanagerConfigMatcherStrategy:
- # type: OnNamespace
-
- ## Define Log Format
- # Use logfmt (default) or json logging
- logFormat: logfmt
-
- ## Log level for Alertmanager to be configured with.
- ##
- logLevel: info
-
- ## Size is the expected size of the alertmanager cluster. The controller will eventually make the size of the
- ## running cluster equal to the expected size.
- replicas: 1
-
- ## Time duration Alertmanager shall retain data for. Default is '120h', and must match the regular expression
- ## [0-9]+(ms|s|m|h) (milliseconds seconds minutes hours).
- ##
- retention: 120h
-
- ## Storage is the definition of how storage will be used by the Alertmanager instances.
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/storage.md
- ##
- storage: {}
- # volumeClaimTemplate:
- # spec:
- # storageClassName: gluster
- # accessModes: ["ReadWriteOnce"]
- # resources:
- # requests:
- # storage: 50Gi
- # selector: {}
-
-
- ## The external URL the Alertmanager instances will be available under. This is necessary to generate correct URLs. This is necessary if Alertmanager is not served from root of a DNS name. string false
- ##
- externalUrl:
-
- ## The route prefix Alertmanager registers HTTP handlers for. This is useful, if using ExternalURL and a proxy is rewriting HTTP routes of a request, and the actual ExternalURL is still true,
- ## but the server serves requests under a different route prefix. For example for use with kubectl proxy.
- ##
- routePrefix: /
-
- ## scheme: HTTP scheme to use. Can be used with `tlsConfig` for example if using istio mTLS.
- scheme: ""
-
- ## tlsConfig: TLS configuration to use when connect to the endpoint. For example if using istio mTLS.
- ## Of type: https://github.com/coreos/prometheus-operator/blob/main/Documentation/api.md#tlsconfig
- tlsConfig: {}
-
- ## If set to true all actions on the underlying managed objects are not going to be performed, except for delete actions.
- ##
- paused: false
-
- ## Define which Nodes the Pods are scheduled on.
- ## ref: https://kubernetes.io/docs/user-guide/node-selection/
- ##
- nodeSelector: {}
-
- ## Define resources requests and limits for single Pods.
- ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
- ##
- resources: {}
- # requests:
- # memory: 400Mi
-
- ## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node.
- ## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided.
- ## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node.
- ## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured.
- ##
- podAntiAffinity: ""
-
- ## If anti-affinity is enabled sets the topologyKey to use for anti-affinity.
- ## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone
- ##
- podAntiAffinityTopologyKey: kubernetes.io/hostname
-
- ## Assign custom affinity rules to the alertmanager instance
- ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
- ##
- affinity: {}
- # nodeAffinity:
- # requiredDuringSchedulingIgnoredDuringExecution:
- # nodeSelectorTerms:
- # - matchExpressions:
- # - key: kubernetes.io/e2e-az-name
- # operator: In
- # values:
- # - e2e-az1
- # - e2e-az2
-
- ## If specified, the pod's tolerations.
- ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
- ##
- tolerations: []
- # - key: "key"
- # operator: "Equal"
- # value: "value"
- # effect: "NoSchedule"
-
- ## If specified, the pod's topology spread constraints.
- ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
- ##
- topologySpreadConstraints: []
- # - maxSkew: 1
- # topologyKey: topology.kubernetes.io/zone
- # whenUnsatisfiable: DoNotSchedule
- # labelSelector:
- # matchLabels:
- # app: alertmanager
-
- ## SecurityContext holds pod-level security attributes and common container settings.
- ## This defaults to non root user with uid 1000 and gid 2000. *v1.PodSecurityContext false
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
- ##
- securityContext:
- runAsGroup: 2000
- runAsNonRoot: true
- runAsUser: 1000
- fsGroup: 2000
- seccompProfile:
- type: RuntimeDefault
-
- ## ListenLocal makes the Alertmanager server listen on loopback, so that it does not bind against the Pod IP.
- ## Note this is only for the Alertmanager UI, not the gossip communication.
- ##
- listenLocal: false
-
- ## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to an Alertmanager pod.
- ##
- containers: []
- # containers:
- # - name: oauth-proxy
- # image: quay.io/oauth2-proxy/oauth2-proxy:v7.3.0
- # args:
- # - --upstream=http://127.0.0.1:9093
- # - --http-address=0.0.0.0:8081
- # - ...
- # ports:
- # - containerPort: 8081
- # name: oauth-proxy
- # protocol: TCP
- # resources: {}
-
- # Additional volumes on the output StatefulSet definition.
- volumes: []
-
- # Additional VolumeMounts on the output StatefulSet definition.
- volumeMounts: []
-
- ## InitContainers allows injecting additional initContainers. This is meant to allow doing some changes
- ## (permissions, dir tree) on mounted volumes before starting prometheus
- initContainers: []
-
- ## Priority class assigned to the Pods
- ##
- priorityClassName: ""
-
- ## AdditionalPeers allows injecting a set of additional Alertmanagers to peer with to form a highly available cluster.
- ##
- additionalPeers: []
-
- ## PortName to use for Alert Manager.
- ##
- portName: "http-web"
-
- ## ClusterAdvertiseAddress is the explicit address to advertise in cluster. Needs to be provided for non RFC1918 [1] (public) addresses. [1] RFC1918: https://tools.ietf.org/html/rfc1918
- ##
- clusterAdvertiseAddress: false
-
- ## clusterGossipInterval determines interval between gossip attempts.
- ## Needs to be specified as GoDuration, a time duration that can be parsed by Go’s time.ParseDuration() (e.g. 45ms, 30s, 1m, 1h20m15s)
- clusterGossipInterval: ""
-
- ## clusterPeerTimeout determines timeout for cluster peering.
- ## Needs to be specified as GoDuration, a time duration that can be parsed by Go’s time.ParseDuration() (e.g. 45ms, 30s, 1m, 1h20m15s)
- clusterPeerTimeout: ""
-
- ## clusterPushpullInterval determines interval between pushpull attempts.
- ## Needs to be specified as GoDuration, a time duration that can be parsed by Go’s time.ParseDuration() (e.g. 45ms, 30s, 1m, 1h20m15s)
- clusterPushpullInterval: ""
-
- ## ForceEnableClusterMode ensures Alertmanager does not deactivate the cluster mode when running with a single replica.
- ## Use case is e.g. spanning an Alertmanager cluster across Kubernetes clusters with a single replica in each.
- forceEnableClusterMode: false
-
- ## Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to
- ## be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).
- minReadySeconds: 0
-
- ## ExtraSecret can be used to store various data in an extra secret
- ## (use it for example to store hashed basic auth credentials)
- extraSecret:
- ## if not set, name will be auto generated
- # name: ""
- annotations: {}
- data: {}
- # auth: |
- # foo:$apr1$OFG3Xybp$ckL0FHDAkoXYIlH9.cysT0
- # someoneelse:$apr1$DMZX2Z4q$6SbQIfyuLQd.xmo/P0m2c.
-
-## Using default values from https://github.com/grafana/helm-charts/blob/main/charts/grafana/values.yaml
-##
-grafana:
- enabled: true
- namespaceOverride: ""
-
- ## ForceDeployDatasources Create datasource configmap even if grafana deployment has been disabled
- ##
- forceDeployDatasources: false
-
- ## ForceDeployDashboard Create dashboard configmap even if grafana deployment has been disabled
- ##
- forceDeployDashboards: false
-
- ## Deploy default dashboards
- ##
- defaultDashboardsEnabled: true
-
- ## Timezone for the default dashboards
- ## Other options are: browser or a specific timezone, i.e. Europe/Luxembourg
- ##
- defaultDashboardsTimezone: utc
-
- adminPassword: prom-operator
-
- rbac:
- ## If true, Grafana PSPs will be created
- ##
- pspEnabled: false
-
- ingress:
- ## If true, Grafana Ingress will be created
- ##
- enabled: false
-
- ## IngressClassName for Grafana Ingress.
- ## Should be provided if Ingress is enable.
- ##
- # ingressClassName: nginx
-
- ## Annotations for Grafana Ingress
- ##
- annotations: {}
- # kubernetes.io/ingress.class: nginx
- # kubernetes.io/tls-acme: "true"
-
- ## Labels to be added to the Ingress
- ##
- labels: {}
-
- ## Hostnames.
- ## Must be provided if Ingress is enable.
- ##
- # hosts:
- # - grafana.domain.com
- hosts: []
-
- ## Path for grafana ingress
- path: /
-
- ## TLS configuration for grafana Ingress
- ## Secret must be manually created in the namespace
- ##
- tls: []
- # - secretName: grafana-general-tls
- # hosts:
- # - grafana.example.com
-
- sidecar:
- dashboards:
- enabled: true
- label: grafana_dashboard
- labelValue: "1"
- # Allow discovery in all namespaces for dashboards
- searchNamespace: ALL
-
- ## Annotations for Grafana dashboard configmaps
- ##
- annotations: {}
- multicluster:
- global:
- enabled: false
- etcd:
- enabled: false
- provider:
- allowUiUpdates: false
- datasources:
- enabled: true
- defaultDatasourceEnabled: true
- isDefaultDatasource: true
-
- uid: prometheus
-
- ## URL of prometheus datasource
- ##
- # url: http://prometheus-stack-prometheus:9090/
-
- ## Prometheus request timeout in seconds
- # timeout: 30
-
- # If not defined, will use prometheus.prometheusSpec.scrapeInterval or its default
- # defaultDatasourceScrapeInterval: 15s
-
- ## Annotations for Grafana datasource configmaps
- ##
- annotations: {}
-
- ## Set method for HTTP to send query to datasource
- httpMethod: POST
-
- ## Create datasource for each Pod of Prometheus StatefulSet;
- ## this uses headless service `prometheus-operated` which is
- ## created by Prometheus Operator
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/0fee93e12dc7c2ea1218f19ae25ec6b893460590/pkg/prometheus/statefulset.go#L255-L286
- createPrometheusReplicasDatasources: false
- label: grafana_datasource
- labelValue: "1"
-
- ## Field with internal link pointing to existing data source in Grafana.
- ## Can be provisioned via additionalDataSources
- exemplarTraceIdDestinations: {}
- # datasourceUid: Jaeger
- # traceIdLabelName: trace_id
- alertmanager:
- enabled: true
- uid: alertmanager
- handleGrafanaManagedAlerts: false
- implementation: prometheus
-
- extraConfigmapMounts: []
- # - name: certs-configmap
- # mountPath: /etc/grafana/ssl/
- # configMap: certs-configmap
- # readOnly: true
-
- deleteDatasources: []
- # - name: example-datasource
- # orgId: 1
-
- ## Configure additional grafana datasources (passed through tpl)
- ## ref: http://docs.grafana.org/administration/provisioning/#datasources
- additionalDataSources: []
- # - name: prometheus-sample
- # access: proxy
- # basicAuth: true
- # basicAuthPassword: pass
- # basicAuthUser: daco
- # editable: false
- # jsonData:
- # tlsSkipVerify: true
- # orgId: 1
- # type: prometheus
- # url: https://{{ printf "%s-prometheus.svc" .Release.Name }}:9090
- # version: 1
-
- ## Passed to grafana subchart and used by servicemonitor below
- ##
- service:
- portName: http-web
-
- serviceMonitor:
- # If true, a ServiceMonitor CRD is created for a prometheus operator
- # https://github.com/coreos/prometheus-operator
- #
- enabled: true
-
- # Path to use for scraping metrics. Might be different if server.root_url is set
- # in grafana.ini
- path: "/metrics"
-
- # namespace: monitoring (defaults to use the namespace this chart is deployed to)
-
- # labels for the ServiceMonitor
- labels: {}
-
- # Scrape interval. If not set, the Prometheus default scrape interval is used.
- #
- interval: ""
- scheme: http
- tlsConfig: {}
- scrapeTimeout: 30s
-
- ## RelabelConfigs to apply to samples before scraping
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
- ##
- relabelings: []
- # - sourceLabels: [__meta_kubernetes_pod_node_name]
- # separator: ;
- # regex: ^(.*)$
- # targetLabel: nodename
- # replacement: $1
- # action: replace
-
-## Flag to disable all the kubernetes component scrapers
-##
-kubernetesServiceMonitors:
- enabled: true
-
-## Component scraping the kube api server
-##
-kubeApiServer:
- enabled: true
- tlsConfig:
- serverName: kubernetes
- insecureSkipVerify: false
- serviceMonitor:
- ## Scrape interval. If not set, the Prometheus default scrape interval is used.
- ##
- interval: ""
-
- ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
- ##
- sampleLimit: 0
-
- ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
- ##
- targetLimit: 0
-
- ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
- ##
- labelLimit: 0
-
- ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
- ##
- labelNameLengthLimit: 0
-
- ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
- ##
- labelValueLengthLimit: 0
-
- ## proxyUrl: URL of a proxy that should be used for scraping.
- ##
- proxyUrl: ""
-
- jobLabel: component
- selector:
- matchLabels:
- component: apiserver
- provider: kubernetes
-
- ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
- ##
- metricRelabelings:
- # Drop excessively noisy apiserver buckets.
- - action: drop
- regex: apiserver_request_duration_seconds_bucket;(0.15|0.2|0.3|0.35|0.4|0.45|0.6|0.7|0.8|0.9|1.25|1.5|1.75|2|3|3.5|4|4.5|6|7|8|9|15|25|40|50)
- sourceLabels:
- - __name__
- - le
- # - action: keep
- # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
- # sourceLabels: [__name__]
-
- ## RelabelConfigs to apply to samples before scraping
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
- ##
- relabelings: []
- # - sourceLabels:
- # - __meta_kubernetes_namespace
- # - __meta_kubernetes_service_name
- # - __meta_kubernetes_endpoint_port_name
- # action: keep
- # regex: default;kubernetes;https
- # - targetLabel: __address__
- # replacement: kubernetes.default.svc:443
-
- ## Additional labels
- ##
- additionalLabels: {}
- # foo: bar
-
-## Component scraping the kubelet and kubelet-hosted cAdvisor
-##
-kubelet:
- enabled: true
- namespace: kube-system
-
- serviceMonitor:
- ## Scrape interval. If not set, the Prometheus default scrape interval is used.
- ##
- interval: ""
-
- ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
- ##
- sampleLimit: 0
-
- ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
- ##
- targetLimit: 0
-
- ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
- ##
- labelLimit: 0
-
- ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
- ##
- labelNameLengthLimit: 0
-
- ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
- ##
- labelValueLengthLimit: 0
-
- ## proxyUrl: URL of a proxy that should be used for scraping.
- ##
- proxyUrl: ""
-
- ## Enable scraping the kubelet over https. For requirements to enable this see
- ## https://github.com/prometheus-operator/prometheus-operator/issues/926
- ##
- https: true
-
- ## Enable scraping /metrics/cadvisor from kubelet's service
- ##
- cAdvisor: true
-
- ## Enable scraping /metrics/probes from kubelet's service
- ##
- probes: true
-
- ## Enable scraping /metrics/resource from kubelet's service
- ## This is disabled by default because container metrics are already exposed by cAdvisor
- ##
- resource: false
- # From kubernetes 1.18, /metrics/resource/v1alpha1 renamed to /metrics/resource
- resourcePath: "/metrics/resource/v1alpha1"
-
- ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
- ##
- cAdvisorMetricRelabelings:
- # Drop less useful container CPU metrics.
- - sourceLabels: [__name__]
- action: drop
- regex: 'container_cpu_(cfs_throttled_seconds_total|load_average_10s|system_seconds_total|user_seconds_total)'
- # Drop less useful container / always zero filesystem metrics.
- - sourceLabels: [__name__]
- action: drop
- regex: 'container_fs_(io_current|io_time_seconds_total|io_time_weighted_seconds_total|reads_merged_total|sector_reads_total|sector_writes_total|writes_merged_total)'
- # Drop less useful / always zero container memory metrics.
- - sourceLabels: [__name__]
- action: drop
- regex: 'container_memory_(mapped_file|swap)'
- # Drop less useful container process metrics.
- - sourceLabels: [__name__]
- action: drop
- regex: 'container_(file_descriptors|tasks_state|threads_max)'
- # Drop container spec metrics that overlap with kube-state-metrics.
- - sourceLabels: [__name__]
- action: drop
- regex: 'container_spec.*'
- # Drop cgroup metrics with no pod.
- - sourceLabels: [id, pod]
- action: drop
- regex: '.+;'
- # - sourceLabels: [__name__, image]
- # separator: ;
- # regex: container_([a-z_]+);
- # replacement: $1
- # action: drop
- # - sourceLabels: [__name__]
- # separator: ;
- # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s)
- # replacement: $1
- # action: drop
-
- ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
- ##
- probesMetricRelabelings: []
- # - sourceLabels: [__name__, image]
- # separator: ;
- # regex: container_([a-z_]+);
- # replacement: $1
- # action: drop
- # - sourceLabels: [__name__]
- # separator: ;
- # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s)
- # replacement: $1
- # action: drop
-
- ## RelabelConfigs to apply to samples before scraping
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
- ##
- ## metrics_path is required to match upstream rules and charts
- cAdvisorRelabelings:
- - action: replace
- sourceLabels: [__metrics_path__]
- targetLabel: metrics_path
- # - sourceLabels: [__meta_kubernetes_pod_node_name]
- # separator: ;
- # regex: ^(.*)$
- # targetLabel: nodename
- # replacement: $1
- # action: replace
-
- ## RelabelConfigs to apply to samples before scraping
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
- ##
- probesRelabelings:
- - action: replace
- sourceLabels: [__metrics_path__]
- targetLabel: metrics_path
- # - sourceLabels: [__meta_kubernetes_pod_node_name]
- # separator: ;
- # regex: ^(.*)$
- # targetLabel: nodename
- # replacement: $1
- # action: replace
-
- ## RelabelConfigs to apply to samples before scraping
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
- ##
- resourceRelabelings:
- - action: replace
- sourceLabels: [__metrics_path__]
- targetLabel: metrics_path
- # - sourceLabels: [__meta_kubernetes_pod_node_name]
- # separator: ;
- # regex: ^(.*)$
- # targetLabel: nodename
- # replacement: $1
- # action: replace
-
- ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
- ##
- metricRelabelings: []
- # - sourceLabels: [__name__, image]
- # separator: ;
- # regex: container_([a-z_]+);
- # replacement: $1
- # action: drop
- # - sourceLabels: [__name__]
- # separator: ;
- # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s)
- # replacement: $1
- # action: drop
-
- ## RelabelConfigs to apply to samples before scraping
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
- ##
- ## metrics_path is required to match upstream rules and charts
- relabelings:
- - action: replace
- sourceLabels: [__metrics_path__]
- targetLabel: metrics_path
- # - sourceLabels: [__meta_kubernetes_pod_node_name]
- # separator: ;
- # regex: ^(.*)$
- # targetLabel: nodename
- # replacement: $1
- # action: replace
-
- ## Additional labels
- ##
- additionalLabels: {}
- # foo: bar
-
-## Component scraping the kube controller manager
-##
-kubeControllerManager:
- enabled: true
-
- ## If your kube controller manager is not deployed as a pod, specify IPs it can be found on
- ##
- endpoints: []
- # - 10.141.4.22
- # - 10.141.4.23
- # - 10.141.4.24
-
- ## If using kubeControllerManager.endpoints only the port and targetPort are used
- ##
- service:
- enabled: true
- ## If null or unset, the value is determined dynamically based on target Kubernetes version due to change
- ## of default port in Kubernetes 1.22.
- ##
- port: null
- targetPort: null
- # selector:
- # component: kube-controller-manager
-
- serviceMonitor:
- enabled: true
- ## Scrape interval. If not set, the Prometheus default scrape interval is used.
- ##
- interval: ""
-
- ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
- ##
- sampleLimit: 0
-
- ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
- ##
- targetLimit: 0
-
- ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
- ##
- labelLimit: 0
-
- ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
- ##
- labelNameLengthLimit: 0
-
- ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
- ##
- labelValueLengthLimit: 0
-
- ## proxyUrl: URL of a proxy that should be used for scraping.
- ##
- proxyUrl: ""
-
- ## Enable scraping kube-controller-manager over https.
- ## Requires proper certs (not self-signed) and delegated authentication/authorization checks.
- ## If null or unset, the value is determined dynamically based on target Kubernetes version.
- ##
- https: null
-
- # Skip TLS certificate validation when scraping
- insecureSkipVerify: null
-
- # Name of the server to use when validating TLS certificate
- serverName: null
-
- ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
- ##
- metricRelabelings: []
- # - action: keep
- # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
- # sourceLabels: [__name__]
-
- ## RelabelConfigs to apply to samples before scraping
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
- ##
- relabelings: []
- # - sourceLabels: [__meta_kubernetes_pod_node_name]
- # separator: ;
- # regex: ^(.*)$
- # targetLabel: nodename
- # replacement: $1
- # action: replace
-
- ## Additional labels
- ##
- additionalLabels: {}
- # foo: bar
-
-## Component scraping coreDns. Use either this or kubeDns
-##
-coreDns:
- enabled: true
- service:
- port: 9153
- targetPort: 9153
- # selector:
- # k8s-app: kube-dns
- serviceMonitor:
- ## Scrape interval. If not set, the Prometheus default scrape interval is used.
- ##
- interval: ""
-
- ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
- ##
- sampleLimit: 0
-
- ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
- ##
- targetLimit: 0
-
- ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
- ##
- labelLimit: 0
-
- ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
- ##
- labelNameLengthLimit: 0
-
- ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
- ##
- labelValueLengthLimit: 0
-
- ## proxyUrl: URL of a proxy that should be used for scraping.
- ##
- proxyUrl: ""
-
- ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
- ##
- metricRelabelings: []
- # - action: keep
- # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
- # sourceLabels: [__name__]
-
- ## RelabelConfigs to apply to samples before scraping
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
- ##
- relabelings: []
- # - sourceLabels: [__meta_kubernetes_pod_node_name]
- # separator: ;
- # regex: ^(.*)$
- # targetLabel: nodename
- # replacement: $1
- # action: replace
-
- ## Additional labels
- ##
- additionalLabels: {}
- # foo: bar
-
-## Component scraping kubeDns. Use either this or coreDns
-##
-kubeDns:
- enabled: false
- service:
- dnsmasq:
- port: 10054
- targetPort: 10054
- skydns:
- port: 10055
- targetPort: 10055
- # selector:
- # k8s-app: kube-dns
- serviceMonitor:
- ## Scrape interval. If not set, the Prometheus default scrape interval is used.
- ##
- interval: ""
-
- ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
- ##
- sampleLimit: 0
-
- ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
- ##
- targetLimit: 0
-
- ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
- ##
- labelLimit: 0
-
- ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
- ##
- labelNameLengthLimit: 0
-
- ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
- ##
- labelValueLengthLimit: 0
-
- ## proxyUrl: URL of a proxy that should be used for scraping.
- ##
- proxyUrl: ""
-
- ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
- ##
- metricRelabelings: []
- # - action: keep
- # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
- # sourceLabels: [__name__]
-
- ## RelabelConfigs to apply to samples before scraping
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
- ##
- relabelings: []
- # - sourceLabels: [__meta_kubernetes_pod_node_name]
- # separator: ;
- # regex: ^(.*)$
- # targetLabel: nodename
- # replacement: $1
- # action: replace
-
- ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
- ##
- dnsmasqMetricRelabelings: []
- # - action: keep
- # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
- # sourceLabels: [__name__]
-
- ## RelabelConfigs to apply to samples before scraping
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
- ##
- dnsmasqRelabelings: []
- # - sourceLabels: [__meta_kubernetes_pod_node_name]
- # separator: ;
- # regex: ^(.*)$
- # targetLabel: nodename
- # replacement: $1
- # action: replace
-
- ## Additional labels
- ##
- additionalLabels: {}
- # foo: bar
-
-## Component scraping etcd
-##
-kubeEtcd:
- enabled: true
-
- ## If your etcd is not deployed as a pod, specify IPs it can be found on
- ##
- endpoints: []
- # - 10.141.4.22
- # - 10.141.4.23
- # - 10.141.4.24
-
- ## Etcd service. If using kubeEtcd.endpoints only the port and targetPort are used
- ##
- service:
- enabled: true
- port: 2381
- targetPort: 2381
- # selector:
- # component: etcd
-
- ## Configure secure access to the etcd cluster by loading a secret into prometheus and
- ## specifying security configuration below. For example, with a secret named etcd-client-cert
- ##
- ## serviceMonitor:
- ## scheme: https
- ## insecureSkipVerify: false
- ## serverName: localhost
- ## caFile: /etc/prometheus/secrets/etcd-client-cert/etcd-ca
- ## certFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client
- ## keyFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client-key
- ##
- serviceMonitor:
- enabled: true
- ## Scrape interval. If not set, the Prometheus default scrape interval is used.
- ##
- interval: ""
-
- ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
- ##
- sampleLimit: 0
-
- ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
- ##
- targetLimit: 0
-
- ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
- ##
- labelLimit: 0
-
- ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
- ##
- labelNameLengthLimit: 0
-
- ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
- ##
- labelValueLengthLimit: 0
-
- ## proxyUrl: URL of a proxy that should be used for scraping.
- ##
- proxyUrl: ""
- scheme: http
- insecureSkipVerify: false
- serverName: ""
- caFile: ""
- certFile: ""
- keyFile: ""
-
- ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
- ##
- metricRelabelings: []
- # - action: keep
- # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
- # sourceLabels: [__name__]
-
- ## RelabelConfigs to apply to samples before scraping
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
- ##
- relabelings: []
- # - sourceLabels: [__meta_kubernetes_pod_node_name]
- # separator: ;
- # regex: ^(.*)$
- # targetLabel: nodename
- # replacement: $1
- # action: replace
-
- ## Additional labels
- ##
- additionalLabels: {}
- # foo: bar
-
-## Component scraping kube scheduler
-##
-kubeScheduler:
- enabled: true
-
- ## If your kube scheduler is not deployed as a pod, specify IPs it can be found on
- ##
- endpoints: []
- # - 10.141.4.22
- # - 10.141.4.23
- # - 10.141.4.24
-
- ## If using kubeScheduler.endpoints only the port and targetPort are used
- ##
- service:
- enabled: true
- ## If null or unset, the value is determined dynamically based on target Kubernetes version due to change
- ## of default port in Kubernetes 1.23.
- ##
- port: null
- targetPort: null
- # selector:
- # component: kube-scheduler
-
- serviceMonitor:
- enabled: true
- ## Scrape interval. If not set, the Prometheus default scrape interval is used.
- ##
- interval: ""
-
- ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
- ##
- sampleLimit: 0
-
- ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
- ##
- targetLimit: 0
-
- ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
- ##
- labelLimit: 0
-
- ## Per-scrape limit on length of label names that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
- ##
- labelNameLengthLimit: 0
-
- ## Per-scrape limit on length of label values that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
- ##
- labelValueLengthLimit: 0
-
- ## proxyUrl: URL of a proxy that should be used for scraping.
- ##
- proxyUrl: ""
- ## Enable scraping kube-scheduler over https.
- ## Requires proper certs (not self-signed) and delegated authentication/authorization checks.
- ## If null or unset, the value is determined dynamically based on target Kubernetes version.
- ##
- https: null
-
- ## Skip TLS certificate validation when scraping
- insecureSkipVerify: null
-
- ## Name of the server to use when validating TLS certificate
- serverName: null
-
- ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
- ##
- metricRelabelings: []
- # - action: keep
- # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
- # sourceLabels: [__name__]
-
- ## RelabelConfigs to apply to samples before scraping
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
- ##
- relabelings: []
- # - sourceLabels: [__meta_kubernetes_pod_node_name]
- # separator: ;
- # regex: ^(.*)$
- # targetLabel: nodename
- # replacement: $1
- # action: replace
-
- ## Additional labels
- ##
- additionalLabels: {}
- # foo: bar
-
-## Component scraping kube proxy
-##
-kubeProxy:
- enabled: true
-
- ## If your kube proxy is not deployed as a pod, specify IPs it can be found on
- ##
- endpoints: []
- # - 10.141.4.22
- # - 10.141.4.23
- # - 10.141.4.24
-
- service:
- enabled: true
- port: 10249
- targetPort: 10249
- # selector:
- # k8s-app: kube-proxy
-
- serviceMonitor:
- enabled: true
- ## Scrape interval. If not set, the Prometheus default scrape interval is used.
- ##
- interval: ""
-
- ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
- ##
- sampleLimit: 0
-
- ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
- ##
- targetLimit: 0
-
- ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
- ##
- labelLimit: 0
-
- ## Per-scrape limit on length of label names that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
- ##
- labelNameLengthLimit: 0
-
- ## Per-scrape limit on length of label values that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
- ##
- labelValueLengthLimit: 0
-
- ## proxyUrl: URL of a proxy that should be used for scraping.
- ##
- proxyUrl: ""
-
- ## Enable scraping kube-proxy over https.
- ## Requires proper certs (not self-signed) and delegated authentication/authorization checks
- ##
- https: false
-
- ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
- ##
- metricRelabelings: []
- # - action: keep
- # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
- # sourceLabels: [__name__]
-
- ## RelabelConfigs to apply to samples before scraping
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
- ##
- relabelings: []
- # - action: keep
- # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
- # sourceLabels: [__name__]
-
- ## Additional labels
- ##
- additionalLabels: {}
- # foo: bar
-
-## Component scraping kube state metrics
-##
-kubeStateMetrics:
- enabled: true
-
-## Configuration for kube-state-metrics subchart
-##
-kube-state-metrics:
- namespaceOverride: ""
- rbac:
- create: true
- releaseLabel: true
- prometheus:
- monitor:
- enabled: true
-
- ## Scrape interval. If not set, the Prometheus default scrape interval is used.
- ##
- interval: ""
-
- ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
- ##
- sampleLimit: 0
-
- ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
- ##
- targetLimit: 0
-
- ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
- ##
- labelLimit: 0
-
- ## Per-scrape limit on length of label names that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
- ##
- labelNameLengthLimit: 0
-
- ## Per-scrape limit on length of label values that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
- ##
- labelValueLengthLimit: 0
-
- ## Scrape Timeout. If not set, the Prometheus default scrape timeout is used.
- ##
- scrapeTimeout: ""
-
- ## proxyUrl: URL of a proxy that should be used for scraping.
- ##
- proxyUrl: ""
-
- ## Keep labels from scraped data, overriding server-side labels
- ##
- honorLabels: true
-
- ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
- ##
- metricRelabelings: []
- # - action: keep
- # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
- # sourceLabels: [__name__]
-
- ## RelabelConfigs to apply to samples before scraping
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
- ##
- relabelings: []
- # - sourceLabels: [__meta_kubernetes_pod_node_name]
- # separator: ;
- # regex: ^(.*)$
- # targetLabel: nodename
- # replacement: $1
- # action: replace
-
- selfMonitor:
- enabled: false
-
-## Deploy node exporter as a daemonset to all nodes
-##
-nodeExporter:
- enabled: true
-
-## Configuration for prometheus-node-exporter subchart
-##
-prometheus-node-exporter:
- namespaceOverride: ""
- podLabels:
- ## Add the 'node-exporter' label to be used by serviceMonitor to match standard common usage in rules and grafana dashboards
- ##
- jobLabel: node-exporter
- releaseLabel: true
- extraArgs:
- - --collector.filesystem.mount-points-exclude=^/(dev|proc|sys|var/lib/docker/.+|var/lib/kubelet/.+)($|/)
- - --collector.filesystem.fs-types-exclude=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$
- service:
- portName: http-metrics
- prometheus:
- monitor:
- enabled: true
-
- jobLabel: jobLabel
-
- ## Scrape interval. If not set, the Prometheus default scrape interval is used.
- ##
- interval: ""
-
- ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
- ##
- sampleLimit: 0
-
- ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
- ##
- targetLimit: 0
-
- ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
- ##
- labelLimit: 0
-
- ## Per-scrape limit on length of label names that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
- ##
- labelNameLengthLimit: 0
-
- ## Per-scrape limit on length of label values that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
- ##
- labelValueLengthLimit: 0
-
- ## How long until a scrape request times out. If not set, the Prometheus default scrape timeout is used.
- ##
- scrapeTimeout: ""
-
- ## proxyUrl: URL of a proxy that should be used for scraping.
- ##
- proxyUrl: ""
-
- ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
- ##
- metricRelabelings: []
- # - sourceLabels: [__name__]
- # separator: ;
- # regex: ^node_mountstats_nfs_(event|operations|transport)_.+
- # replacement: $1
- # action: drop
-
- ## RelabelConfigs to apply to samples before scraping
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
- ##
- relabelings: []
- # - sourceLabels: [__meta_kubernetes_pod_node_name]
- # separator: ;
- # regex: ^(.*)$
- # targetLabel: nodename
- # replacement: $1
- # action: replace
- rbac:
- ## If true, create PSPs for node-exporter
- ##
- pspEnabled: false
-
-## Manages Prometheus and Alertmanager components
-##
-prometheusOperator:
- enabled: true
-
- ## Prometheus-Operator v0.39.0 and later support TLS natively.
- ##
- tls:
- enabled: true
- # Value must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants
- tlsMinVersion: VersionTLS13
- # The default webhook port is 10250 in order to work out-of-the-box in GKE private clusters and avoid adding firewall rules.
- internalPort: 10250
-
- ## Admission webhook support for PrometheusRules resources, added in Prometheus Operator 0.30, can be enabled to prevent incorrectly formatted
- ## rules from making their way into Prometheus and potentially preventing the container from starting
- admissionWebhooks:
- ## Valid values: Fail, Ignore, IgnoreOnInstallOnly
- ## IgnoreOnInstallOnly - If Release.IsInstall returns "true", set "Ignore" otherwise "Fail"
- failurePolicy: ""
- ## The default timeoutSeconds is 10 and the maximum value is 30.
- timeoutSeconds: 10
- enabled: true
- ## A PEM encoded CA bundle which will be used to validate the webhook's server certificate.
- ## If unspecified, system trust roots on the apiserver are used.
- caBundle: ""
- ## If enabled, generate a self-signed certificate, then patch the webhook configurations with the generated data.
- ## On chart upgrades (or if the secret exists) the cert will not be re-generated. You can use this to provide your own
- ## certs ahead of time if you wish.
- ##
- annotations: {}
- # argocd.argoproj.io/hook: PreSync
- # argocd.argoproj.io/hook-delete-policy: HookSucceeded
- patch:
- enabled: true
- image:
- registry: registry.k8s.io
- repository: ingress-nginx/kube-webhook-certgen
- tag: v20221220-controller-v1.5.1-58-g787ea74b6
- sha: ""
- pullPolicy: IfNotPresent
- resources: {}
- ## Provide a priority class name to the webhook patching job
- ##
- priorityClassName: ""
- annotations: {}
- # argocd.argoproj.io/hook: PreSync
- # argocd.argoproj.io/hook-delete-policy: HookSucceeded
- podAnnotations: {}
- nodeSelector: {}
- affinity: {}
- tolerations: []
-
- ## SecurityContext holds pod-level security attributes and common container settings.
- ## This defaults to non root user with uid 2000 and gid 2000. *v1.PodSecurityContext false
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
- ##
- securityContext:
- runAsGroup: 2000
- runAsNonRoot: true
- runAsUser: 2000
- seccompProfile:
- type: RuntimeDefault
-
- # Security context for create job container
- createSecretJob:
- securityContext:
- allowPrivilegeEscalation: false
- readOnlyRootFilesystem: true
- capabilities:
- drop:
- - ALL
-
- # Security context for patch job container
- patchWebhookJob:
- securityContext:
- allowPrivilegeEscalation: false
- readOnlyRootFilesystem: true
- capabilities:
- drop:
- - ALL
-
- # Use certmanager to generate webhook certs
- certManager:
- enabled: false
- # self-signed root certificate
- rootCert:
- duration: "" # default to be 5y
- admissionCert:
- duration: "" # default to be 1y
- # issuerRef:
- # name: "issuer"
- # kind: "ClusterIssuer"
-
- ## Namespaces to scope the interaction of the Prometheus Operator and the apiserver (allow list).
- ## This is mutually exclusive with denyNamespaces. Setting this to an empty object will disable the configuration
- ##
- namespaces: {}
- # releaseNamespace: true
- # additional:
- # - kube-system
-
- ## Namespaces not to scope the interaction of the Prometheus Operator (deny list).
- ##
- denyNamespaces: []
-
- ## Filter namespaces to look for prometheus-operator custom resources
- ##
- alertmanagerInstanceNamespaces: []
- alertmanagerConfigNamespaces: []
- prometheusInstanceNamespaces: []
- thanosRulerInstanceNamespaces: []
-
- ## The clusterDomain value will be added to the cluster.peer option of the alertmanager.
- ## Without this specified option cluster.peer will have value alertmanager-monitoring-alertmanager-0.alertmanager-operated:9094 (default value)
- ## With this specified option cluster.peer will have value alertmanager-monitoring-alertmanager-0.alertmanager-operated.namespace.svc.cluster-domain:9094
- ##
- # clusterDomain: "cluster.local"
-
- networkPolicy:
- ## Enable creation of NetworkPolicy resources.
- ##
- enabled: false
-
- ## Flavor of the network policy to use.
- # Can be:
- # * kubernetes for networking.k8s.io/v1/NetworkPolicy
- # * cilium for cilium.io/v2/CiliumNetworkPolicy
- flavor: kubernetes
-
- # cilium:
- # egress:
-
- ## Service account for Alertmanager to use.
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
- ##
- serviceAccount:
- create: true
- name: ""
-
- ## Configuration for Prometheus operator service
- ##
- service:
- annotations: {}
- labels: {}
- clusterIP: ""
-
- ## Port to expose on each node
- ## Only used if service.type is 'NodePort'
- ##
- nodePort: 30080
-
- nodePortTls: 30443
-
- ## Additional ports to open for Prometheus service
- ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services
- ##
- additionalPorts: []
-
- ## Loadbalancer IP
- ## Only use if service.type is "LoadBalancer"
- ##
- loadBalancerIP: ""
- loadBalancerSourceRanges: []
-
- ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
- ##
- externalTrafficPolicy: Cluster
-
- ## Service type
- ## NodePort, ClusterIP, LoadBalancer
- ##
- type: ClusterIP
-
- ## List of IP addresses at which the Prometheus server service is available
- ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
- ##
- externalIPs: []
-
- ## Labels to add to the operator deployment
- ##
- labels: {}
-
- ## Annotations to add to the operator deployment
- ##
- annotations: {}
-
- ## Labels to add to the operator pod
- ##
- podLabels: {}
-
- ## Annotations to add to the operator pod
- ##
- podAnnotations: {}
-
- ## Assign a PriorityClassName to pods if set
- # priorityClassName: ""
-
- ## Define Log Format
- # Use logfmt (default) or json logging
- # logFormat: logfmt
-
- ## Decrease log verbosity to errors only
- # logLevel: error
-
- ## If true, the operator will create and maintain a service for scraping kubelets
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/helm/prometheus-operator/README.md
- ##
- kubeletService:
- enabled: true
- namespace: kube-system
- ## Use '{{ template "kube-prometheus-stack.fullname" . }}-kubelet' by default
- name: ""
-
- ## Create a servicemonitor for the operator
- ##
- serviceMonitor:
- ## Labels for ServiceMonitor
- additionalLabels: {}
-
- ## Scrape interval. If not set, the Prometheus default scrape interval is used.
- ##
- interval: ""
-
- ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
- ##
- sampleLimit: 0
-
- ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
- ##
- targetLimit: 0
-
- ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
- ##
- labelLimit: 0
-
- ## Per-scrape limit on length of label names that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
- ##
- labelNameLengthLimit: 0
-
- ## Per-scrape limit on length of label values that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
- ##
- labelValueLengthLimit: 0
-
- ## Scrape timeout. If not set, the Prometheus default scrape timeout is used.
- scrapeTimeout: ""
- selfMonitor: true
-
- ## Metric relabel configs to apply to samples before ingestion.
- ##
- metricRelabelings: []
- # - action: keep
- # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
- # sourceLabels: [__name__]
-
- ## RelabelConfigs to apply to samples before scraping
- ##
- relabelings: []
- # - sourceLabels: [__meta_kubernetes_pod_node_name]
- # separator: ;
- # regex: ^(.*)$
- # targetLabel: nodename
- # replacement: $1
- # action: replace
-
- ## Resource limits & requests
- ##
- resources: {}
- # limits:
- # cpu: 200m
- # memory: 200Mi
- # requests:
- # cpu: 100m
- # memory: 100Mi
-
- ## Required for use in managed Kubernetes clusters (such as AWS EKS) with a custom CNI (such as Calico),
- ## because the AWS-managed control plane cannot reach the pods' IP CIDR, so admission webhooks will not work
- ##
- hostNetwork: false
-
- ## Define which Nodes the Pods are scheduled on.
- ## ref: https://kubernetes.io/docs/user-guide/node-selection/
- ##
- nodeSelector: {}
-
- ## Tolerations for use with node taints
- ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
- ##
- tolerations: []
- # - key: "key"
- # operator: "Equal"
- # value: "value"
- # effect: "NoSchedule"
-
- ## Assign custom affinity rules to the prometheus operator
- ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
- ##
- affinity: {}
- # nodeAffinity:
- # requiredDuringSchedulingIgnoredDuringExecution:
- # nodeSelectorTerms:
- # - matchExpressions:
- # - key: kubernetes.io/e2e-az-name
- # operator: In
- # values:
- # - e2e-az1
- # - e2e-az2
- dnsConfig: {}
- # nameservers:
- # - 1.2.3.4
- # searches:
- # - ns1.svc.cluster-domain.example
- # - my.dns.search.suffix
- # options:
- # - name: ndots
- # value: "2"
- # - name: edns0
- securityContext:
- fsGroup: 65534
- runAsGroup: 65534
- runAsNonRoot: true
- runAsUser: 65534
- seccompProfile:
- type: RuntimeDefault
-
- ## Container-specific security context configuration
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
- ##
- containerSecurityContext:
- allowPrivilegeEscalation: false
- readOnlyRootFilesystem: true
- capabilities:
- drop:
- - ALL
-
- # Enable vertical pod autoscaler support for prometheus-operator
- verticalPodAutoscaler:
- enabled: false
-
- # Recommender responsible for generating recommendation for the object.
- # List should be empty (then the default recommender will generate the recommendation)
- # or contain exactly one recommender.
- # recommenders:
- # - name: custom-recommender-performance
-
- # List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory
- controlledResources: []
- # Specifies which resource values should be controlled: RequestsOnly or RequestsAndLimits.
- # controlledValues: RequestsAndLimits
-
- # Define the max allowed resources for the pod
- maxAllowed: {}
- # cpu: 200m
- # memory: 100Mi
- # Define the min allowed resources for the pod
- minAllowed: {}
- # cpu: 200m
- # memory: 100Mi
-
- updatePolicy:
- # Specifies minimal number of replicas which need to be alive for VPA Updater to attempt pod eviction
- # minReplicas: 1
- # Specifies whether recommended updates are applied when a Pod is started and whether recommended updates
- # are applied during the life of a Pod. Possible values are "Off", "Initial", "Recreate", and "Auto".
- updateMode: Auto
-
- ## Prometheus-operator image
- ##
- image:
- registry: quay.io
- repository: prometheus-operator/prometheus-operator
- # If not set, the appVersion field from Chart.yaml is used
- tag: ""
- sha: ""
- pullPolicy: IfNotPresent
-
- ## Prometheus image to use for prometheuses managed by the operator
- ##
- # prometheusDefaultBaseImage: prometheus/prometheus
-
- ## Prometheus image registry to use for prometheuses managed by the operator
- ##
- # prometheusDefaultBaseImageRegistry: quay.io
-
- ## Alertmanager image to use for alertmanagers managed by the operator
- ##
- # alertmanagerDefaultBaseImage: prometheus/alertmanager
-
- ## Alertmanager image registry to use for alertmanagers managed by the operator
- ##
- # alertmanagerDefaultBaseImageRegistry: quay.io
-
- ## Prometheus-config-reloader
- ##
- prometheusConfigReloader:
- image:
- registry: quay.io
- repository: prometheus-operator/prometheus-config-reloader
- # If not set, the appVersion field from Chart.yaml is used
- tag: ""
- sha: ""
-
- # add prometheus config reloader liveness and readiness probe. Default: false
- enableProbe: false
-
- # resource config for prometheusConfigReloader
- resources:
- requests:
- cpu: 200m
- memory: 50Mi
- limits:
- cpu: 200m
- memory: 50Mi
-
- ## Thanos side-car image when configured
- ##
- thanosImage:
- registry: quay.io
- repository: thanos/thanos
- tag: v0.31.0
- sha: ""
-
- ## Set a Label Selector to filter watched prometheus and prometheusAgent
- ##
- prometheusInstanceSelector: ""
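- ## For illustration only (an assumption, not an upstream default): the value is a
- ## label-selector string, and the label key/value below are hypothetical.
- # prometheusInstanceSelector: "app.kubernetes.io/managed-by=my-team"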
-
- ## Set a Label Selector to filter watched alertmanager
- ##
- alertmanagerInstanceSelector: ""
-
- ## Set a Label Selector to filter watched thanosRuler
- thanosRulerInstanceSelector: ""
-
- ## Set a Field Selector to filter watched secrets
- ##
- secretFieldSelector: "type!=kubernetes.io/dockercfg,type!=kubernetes.io/service-account-token,type!=helm.sh/release.v1"
-
-## Deploy a Prometheus instance
-##
-prometheus:
- enabled: true
-
- ## Toggle prometheus into agent mode
- ## Note that many of the features described below (e.g. rules, query, alerting, remote read, thanos) will not work in agent mode.
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/designs/prometheus-agent.md
- ##
- agentMode: false
-
- ## Annotations for Prometheus
- ##
- annotations: {}
-
- ## Configure network policy for the prometheus
- networkPolicy:
- enabled: false
-
- ## Flavor of the network policy to use.
- # Can be:
- # * kubernetes for networking.k8s.io/v1/NetworkPolicy
- # * cilium for cilium.io/v2/CiliumNetworkPolicy
- flavor: kubernetes
-
- # cilium:
- # endpointSelector:
- # egress:
- # ingress:
-
- # egress:
- # - {}
- # ingress:
- # - {}
- # podSelector:
- # matchLabels:
- # app: prometheus
-
- ## Service account for Prometheuses to use.
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
- ##
- serviceAccount:
- create: true
- name: ""
- annotations: {}
-
- # Service for Thanos service discovery on the sidecar.
- # Enabling this allows Thanos Query to use
- # `--store=dnssrv+_grpc._tcp.${kube-prometheus-stack.fullname}-thanos-discovery.${namespace}.svc.cluster.local`
- # to discover the Thanos sidecar on Prometheus nodes.
- # (Remember to substitute ${kube-prometheus-stack.fullname} and ${namespace}; do not just copy and paste!)
- thanosService:
- enabled: false
- annotations: {}
- labels: {}
-
- ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
- ##
- externalTrafficPolicy: Cluster
-
- ## Service type
- ##
- type: ClusterIP
-
- ## gRPC port config
- portName: grpc
- port: 10901
- targetPort: "grpc"
-
- ## HTTP port config (for metrics)
- httpPortName: http
- httpPort: 10902
- targetHttpPort: "http"
-
- ## ClusterIP to assign
- # Default is to make this a headless service ("None")
- clusterIP: "None"
-
- ## Port to expose on each node, if service type is NodePort
- ##
- nodePort: 30901
- httpNodePort: 30902
-
- # ServiceMonitor to scrape Sidecar metrics
- # Needs thanosService to be enabled as well
- thanosServiceMonitor:
- enabled: false
- interval: ""
-
- ## Additional labels
- ##
- additionalLabels: {}
-
- ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
- scheme: ""
-
- ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
- ## Of type: https://github.com/coreos/prometheus-operator/blob/main/Documentation/api.md#tlsconfig
- tlsConfig: {}
-
- bearerTokenFile:
-
- ## Metric relabel configs to apply to samples before ingestion.
- metricRelabelings: []
-
- ## relabel configs to apply to samples before ingestion.
- relabelings: []
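- ## Illustrative sketch only, mirroring the relabeling examples used elsewhere in this file;
- ## the nodename target label is an arbitrary example.
- # - sourceLabels: [__meta_kubernetes_pod_node_name]
- #   separator: ;
- #   regex: ^(.*)$
- #   targetLabel: nodename
- #   replacement: $1
- #   action: replace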
-
- # Service for external access to sidecar
- # Enabling this creates a service to expose thanos-sidecar outside the cluster.
- thanosServiceExternal:
- enabled: false
- annotations: {}
- labels: {}
- loadBalancerIP: ""
- loadBalancerSourceRanges: []
-
- ## gRPC port config
- portName: grpc
- port: 10901
- targetPort: "grpc"
-
- ## HTTP port config (for metrics)
- httpPortName: http
- httpPort: 10902
- targetHttpPort: "http"
-
- ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
- ##
- externalTrafficPolicy: Cluster
-
- ## Service type
- ##
- type: LoadBalancer
-
- ## Port to expose on each node
- ##
- nodePort: 30901
- httpNodePort: 30902
-
- ## Configuration for Prometheus service
- ##
- service:
- annotations: {}
- labels: {}
- clusterIP: ""
-
- ## Port for Prometheus Service to listen on
- ##
- port: 9090
-
- ## To be used with a proxy extraContainer port
- targetPort: 9090
-
- ## List of IP addresses at which the Prometheus server service is available
- ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
- ##
- externalIPs: []
-
- ## Port to expose on each node
- ## Only used if service.type is 'NodePort'
- ##
- nodePort: 30090
-
- ## Loadbalancer IP
- ## Only use if service.type is "LoadBalancer"
- loadBalancerIP: ""
- loadBalancerSourceRanges: []
-
- ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
- ##
- externalTrafficPolicy: Cluster
-
- ## Service type
- ##
- type: ClusterIP
-
- ## Additional port to define in the Service
- additionalPorts: []
- # additionalPorts:
- # - name: authenticated
- # port: 8081
- # targetPort: 8081
-
- ## If true, all endpoints are considered "ready" even if the Pods themselves are not
- ## Ref: https://kubernetes.io/docs/reference/kubernetes-api/service-resources/service-v1/#ServiceSpec
- publishNotReadyAddresses: false
-
- sessionAffinity: ""
-
- ## Configuration for creating a separate Service for each statefulset Prometheus replica
- ##
- servicePerReplica:
- enabled: false
- annotations: {}
-
- ## Port for Prometheus Service per replica to listen on
- ##
- port: 9090
-
- ## To be used with a proxy extraContainer port
- targetPort: 9090
-
- ## Port to expose on each node
- ## Only used if servicePerReplica.type is 'NodePort'
- ##
- nodePort: 30091
-
- ## Loadbalancer source IP ranges
- ## Only used if servicePerReplica.type is "LoadBalancer"
- loadBalancerSourceRanges: []
-
- ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
- ##
- externalTrafficPolicy: Cluster
-
- ## Service type
- ##
- type: ClusterIP
-
- ## Configure pod disruption budgets for Prometheus
- ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget
- ## This configuration is immutable once created and will require the PDB to be deleted to be changed
- ## https://github.com/kubernetes/kubernetes/issues/45398
- ##
- podDisruptionBudget:
- enabled: false
- minAvailable: 1
- maxUnavailable: ""
-
- # Ingress exposes thanos sidecar outside the cluster
- thanosIngress:
- enabled: false
-
- # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
- # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
- # ingressClassName: nginx
-
- annotations: {}
- labels: {}
- servicePort: 10901
-
- ## Port to expose on each node
- ## Only used if service.type is 'NodePort'
- ##
- nodePort: 30901
-
- ## Hosts must be provided if Ingress is enabled.
- ##
- hosts: []
- # - thanos-gateway.domain.com
-
- ## Paths to use for ingress rules
- ##
- paths: []
- # - /
-
- ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
- ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
- # pathType: ImplementationSpecific
-
- ## TLS configuration for Thanos Ingress
- ## Secret must be manually created in the namespace
- ##
- tls: []
- # - secretName: thanos-gateway-tls
- # hosts:
- # - thanos-gateway.domain.com
- #
-
- ## ExtraSecret can be used to store various data in an extra secret
- ## (use it for example to store hashed basic auth credentials)
- extraSecret:
- ## if not set, name will be auto generated
- # name: ""
- annotations: {}
- data: {}
- # auth: |
- # foo:$apr1$OFG3Xybp$ckL0FHDAkoXYIlH9.cysT0
- # someoneelse:$apr1$DMZX2Z4q$6SbQIfyuLQd.xmo/P0m2c.
-
- ingress:
- enabled: false
-
- # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
- # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
- # ingressClassName: nginx
-
- annotations: {}
- labels: {}
-
- ## Redirect ingress to an additional defined port on the service
- # servicePort: 8081
-
- ## Hostnames.
- ## Must be provided if Ingress is enabled.
- ##
- # hosts:
- # - prometheus.domain.com
- hosts: []
-
- ## Paths to use for ingress rules - one path should match the prometheusSpec.routePrefix
- ##
- paths: []
- # - /
-
- ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
- ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
- # pathType: ImplementationSpecific
-
- ## TLS configuration for Prometheus Ingress
- ## Secret must be manually created in the namespace
- ##
- tls: []
- # - secretName: prometheus-general-tls
- # hosts:
- # - prometheus.example.com
-
- ## Configuration for creating an Ingress that will map to each Prometheus replica service
- ## prometheus.servicePerReplica must be enabled
- ##
- ingressPerReplica:
- enabled: false
-
- # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
- # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
- # ingressClassName: nginx
-
- annotations: {}
- labels: {}
-
- ## Final form of the hostname for each per replica ingress is
- ## {{ ingressPerReplica.hostPrefix }}-{{ $replicaNumber }}.{{ ingressPerReplica.hostDomain }}
- ##
- ## Prefix for the per replica ingress that will have `-$replicaNumber`
- ## appended to the end
- hostPrefix: ""
- ## Domain that will be used for the per replica ingress
- hostDomain: ""
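- ## Illustrative values only: with the hypothetical settings below, replica 0 would be served
- ## at prometheus-0.example.com.
- # hostPrefix: "prometheus"
- # hostDomain: "example.com"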
-
- ## Paths to use for ingress rules
- ##
- paths: []
- # - /
-
- ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
- ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
- # pathType: ImplementationSpecific
-
- ## Secret name containing the TLS certificate for Prometheus per replica ingress
- ## Secret must be manually created in the namespace
- tlsSecretName: ""
-
- ## Separated secret for each per replica Ingress. Can be used together with cert-manager
- ##
- tlsSecretPerReplica:
- enabled: false
- ## Final form of the secret for each per replica ingress is
- ## {{ tlsSecretPerReplica.prefix }}-{{ $replicaNumber }}
- ##
- prefix: "prometheus"
-
- ## Configure additional options for default pod security policy for Prometheus
- ## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
- podSecurityPolicy:
- allowedCapabilities: []
- allowedHostPaths: []
- volumes: []
-
- serviceMonitor:
- ## Scrape interval. If not set, the Prometheus default scrape interval is used.
- ##
- interval: ""
- selfMonitor: true
-
- ## Additional labels
- ##
- additionalLabels: {}
-
- ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
- ##
- sampleLimit: 0
-
- ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
- ##
- targetLimit: 0
-
- ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
- ##
- labelLimit: 0
-
- ## Per-scrape limit on length of label names that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
- ##
- labelNameLengthLimit: 0
-
- ## Per-scrape limit on length of label values that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
- ##
- labelValueLengthLimit: 0
-
- ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
- scheme: ""
-
- ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
- ## Of type: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#tlsconfig
- tlsConfig: {}
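- ## A minimal sketch only (the secret name and keys below are hypothetical): TLS material can
- ## also be referenced from a Secret instead of files on disk.
- # tlsConfig:
- #   insecureSkipVerify: false
- #   serverName: prometheus.example.com
- #   ca:
- #     secret:
- #       name: prometheus-scrape-tls
- #       key: ca.crt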
-
- bearerTokenFile:
-
- ## Metric relabel configs to apply to samples before ingestion.
- ##
- metricRelabelings: []
- # - action: keep
- # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
- # sourceLabels: [__name__]
-
- ## RelabelConfigs to apply to samples before scraping
- ##
- relabelings: []
- # - sourceLabels: [__meta_kubernetes_pod_node_name]
- # separator: ;
- # regex: ^(.*)$
- # targetLabel: nodename
- # replacement: $1
- # action: replace
-
- ## Settings affecting prometheusSpec
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#prometheusspec
- ##
- prometheusSpec:
- ## If true, pass --storage.tsdb.max-block-duration=2h to prometheus. This is already done if using Thanos
- ##
- disableCompaction: false
- ## APIServerConfig
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#apiserverconfig
- ##
- apiserverConfig: {}
-
- ## Allows setting additional arguments for the Prometheus container
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#monitoring.coreos.com/v1.Prometheus
- additionalArgs: []
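- ## Entries are name/value pairs matching Prometheus CLI flags (without the leading dashes).
- ## Illustrative only; web.page-title is just an example flag and the value is arbitrary.
- # - name: web.page-title
- #   value: "My Prometheus"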
-
- ## Interval between consecutive scrapes.
- ## Defaults to 30s.
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/release-0.44/pkg/prometheus/promcfg.go#L180-L183
- ##
- scrapeInterval: ""
-
- ## Number of seconds to wait for target to respond before erroring
- ##
- scrapeTimeout: ""
-
- ## Interval between consecutive evaluations.
- ##
- evaluationInterval: ""
-
- ## ListenLocal makes the Prometheus server listen on loopback, so that it does not bind against the Pod IP.
- ##
- listenLocal: false
-
- ## EnableAdminAPI enables the Prometheus administrative HTTP API, which includes functionality such as deleting time series.
- ## This is disabled by default.
- ## ref: https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-admin-apis
- ##
- enableAdminAPI: false
-
- ## Sets version of Prometheus overriding the Prometheus version as derived
- ## from the image tag. Useful in cases where the tag does not follow semver v2.
- version: ""
-
- ## WebTLSConfig defines the TLS parameters for HTTPS
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#webtlsconfig
- web: {}
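- ## A minimal sketch, assuming the TLS key and certificate live in a Secret named web-tls
- ## (a hypothetical name):
- # web:
- #   tlsConfig:
- #     keySecret:
- #       name: web-tls
- #       key: tls.key
- #     cert:
- #       secret:
- #         name: web-tls
- #         key: tls.crt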
-
- ## Exemplars-related settings that are runtime reloadable.
- ## Requires the exemplar storage feature to be enabled in order to take effect.
- exemplars: ""
- ## Maximum number of exemplars stored in memory for all series.
- ## If not set, Prometheus uses its default value.
- ## A value of zero or less than zero disables the storage.
- # maxSize: 100000
-
- # EnableFeatures enables access to disabled Prometheus features.
- # ref: https://prometheus.io/docs/prometheus/latest/disabled_features/
- enableFeatures: []
- # - exemplar-storage
-
- ## Image of Prometheus.
- ##
- image:
- registry: quay.io
- repository: prometheus/prometheus
- tag: v2.45.0
- sha: ""
-
- ## Tolerations for use with node taints
- ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
- ##
- tolerations: []
- # - key: "key"
- # operator: "Equal"
- # value: "value"
- # effect: "NoSchedule"
-
- ## If specified, the pod's topology spread constraints.
- ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
- ##
- topologySpreadConstraints: []
- # - maxSkew: 1
- # topologyKey: topology.kubernetes.io/zone
- # whenUnsatisfiable: DoNotSchedule
- # labelSelector:
- # matchLabels:
- # app: prometheus
-
- ## Alertmanagers to which alerts will be sent
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#alertmanagerendpoints
- ##
- ## Default configuration will connect to the alertmanager deployed as part of this release
- ##
- alertingEndpoints: []
- # - name: ""
- # namespace: ""
- # port: http
- # scheme: http
- # pathPrefix: ""
- # tlsConfig: {}
- # bearerTokenFile: ""
- # apiVersion: v2
-
- ## External labels to add to any time series or alerts when communicating with external systems
- ##
- externalLabels: {}
-
- ## enable --web.enable-remote-write-receiver flag on prometheus-server
- ##
- enableRemoteWriteReceiver: false
-
- ## Name of the external label used to denote replica name
- ##
- replicaExternalLabelName: ""
-
- ## If true, the Operator won't add the external label used to denote replica name
- ##
- replicaExternalLabelNameClear: false
-
- ## Name of the external label used to denote Prometheus instance name
- ##
- prometheusExternalLabelName: ""
-
- ## If true, the Operator won't add the external label used to denote Prometheus instance name
- ##
- prometheusExternalLabelNameClear: false
-
- ## External URL at which Prometheus will be reachable.
- ##
- externalUrl: ""
-
- ## Define which Nodes the Pods are scheduled on.
- ## ref: https://kubernetes.io/docs/user-guide/node-selection/
- ##
- nodeSelector: {}
-
- ## Secrets is a list of Secrets in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods.
- ## The Secrets are mounted into /etc/prometheus/secrets/. Secrets changes after initial creation of a Prometheus object are not
- ## reflected in the running Pods. To change the secrets mounted into the Prometheus Pods, the object must be deleted and recreated
- ## with the new list of secrets.
- ##
- secrets: []
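- ## Example entry (the secret name is hypothetical): this would be mounted at
- ## /etc/prometheus/secrets/etcd-client-cert/, matching the kubeEtcd.serviceMonitor example above.
- # - etcd-client-cert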
-
- ## ConfigMaps is a list of ConfigMaps in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods.
- ## The ConfigMaps are mounted into /etc/prometheus/configmaps/.
- ##
- configMaps: []
-
- ## QuerySpec defines the query command line flags when starting Prometheus.
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#queryspec
- ##
- query: {}
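- ## Illustrative sketch of a couple of QuerySpec fields (the values are arbitrary examples):
- # query:
- #   maxConcurrency: 20
- #   timeout: 2m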
-
- ## If nil, select own namespace. Namespaces to be selected for PrometheusRules discovery.
- ruleNamespaceSelector: {}
- ## Example which selects PrometheusRules in namespaces with label "prometheus" set to "somelabel"
- # ruleNamespaceSelector:
- # matchLabels:
- # prometheus: somelabel
-
- ## If true, a nil or {} value for prometheus.prometheusSpec.ruleSelector will cause the
- ## prometheus resource to be created with selectors based on values in the helm deployment,
- ## which will also match the PrometheusRule resources created
- ##
- ruleSelectorNilUsesHelmValues: true
-
- ## PrometheusRules to be selected for target discovery.
- ## If {}, select all PrometheusRules
- ##
- ruleSelector: {}
- ## Example which select all PrometheusRules resources
- ## with label "prometheus" with values any of "example-rules" or "example-rules-2"
- # ruleSelector:
- # matchExpressions:
- # - key: prometheus
- # operator: In
- # values:
- # - example-rules
- # - example-rules-2
- #
- ## Example which select all PrometheusRules resources with label "role" set to "example-rules"
- # ruleSelector:
- # matchLabels:
- # role: example-rules
-
- ## If true, a nil or {} value for prometheus.prometheusSpec.serviceMonitorSelector will cause the
- ## prometheus resource to be created with selectors based on values in the helm deployment,
- ## which will also match the servicemonitors created
- ##
- serviceMonitorSelectorNilUsesHelmValues: true
-
- ## ServiceMonitors to be selected for target discovery.
- ## If {}, select all ServiceMonitors
- ##
- serviceMonitorSelector: {}
- ## Example which selects ServiceMonitors with label "prometheus" set to "somelabel"
- # serviceMonitorSelector:
- # matchLabels:
- # prometheus: somelabel
-
- ## Namespaces to be selected for ServiceMonitor discovery.
- ##
- serviceMonitorNamespaceSelector: {}
- ## Example which selects ServiceMonitors in namespaces with label "prometheus" set to "somelabel"
- # serviceMonitorNamespaceSelector:
- # matchLabels:
- # prometheus: somelabel
-
- ## If true, a nil or {} value for prometheus.prometheusSpec.podMonitorSelector will cause the
- ## prometheus resource to be created with selectors based on values in the helm deployment,
- ## which will also match the podmonitors created
- ##
- podMonitorSelectorNilUsesHelmValues: true
-
- ## PodMonitors to be selected for target discovery.
- ## If {}, select all PodMonitors
- ##
- podMonitorSelector: {}
- ## Example which selects PodMonitors with label "prometheus" set to "somelabel"
- # podMonitorSelector:
- # matchLabels:
- # prometheus: somelabel
-
- ## If nil, select own namespace. Namespaces to be selected for PodMonitor discovery.
- podMonitorNamespaceSelector: {}
- ## Example which selects PodMonitor in namespaces with label "prometheus" set to "somelabel"
- # podMonitorNamespaceSelector:
- # matchLabels:
- # prometheus: somelabel
-
- ## If true, a nil or {} value for prometheus.prometheusSpec.probeSelector will cause the
- ## prometheus resource to be created with selectors based on values in the helm deployment,
- ## which will also match the probes created
- ##
- probeSelectorNilUsesHelmValues: true
-
- ## Probes to be selected for target discovery.
- ## If {}, select all Probes
- ##
- probeSelector: {}
- ## Example which selects Probes with label "prometheus" set to "somelabel"
- # probeSelector:
- # matchLabels:
- # prometheus: somelabel
-
- ## If nil, select own namespace. Namespaces to be selected for Probe discovery.
- probeNamespaceSelector: {}
- ## Example which selects Probe in namespaces with label "prometheus" set to "somelabel"
- # probeNamespaceSelector:
- # matchLabels:
- # prometheus: somelabel
-
- ## If true, a nil or {} value for prometheus.prometheusSpec.scrapeConfigSelector will cause the
- ## prometheus resource to be created with selectors based on values in the helm deployment,
- ## which will also match the scrapeConfigs created
- ##
- scrapeConfigSelectorNilUsesHelmValues: true
-
- ## scrapeConfigs to be selected for target discovery.
- ## If {}, select all scrapeConfigs
- ##
- scrapeConfigSelector: {}
- ## Example which selects scrapeConfigs with label "prometheus" set to "somelabel"
- # scrapeConfigSelector:
- # matchLabels:
- # prometheus: somelabel
-
- ## If nil, select own namespace. Namespaces to be selected for scrapeConfig discovery.
- scrapeConfigNamespaceSelector: {}
- ## Example which selects scrapeConfig in namespaces with label "prometheus" set to "somelabel"
- # scrapeConfigNamespaceSelector:
- # matchLabels:
- # prometheus: somelabel
-
- ## How long to retain metrics
- ##
- retention: 10d
-
- ## Maximum size of metrics
- ##
- retentionSize: ""
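- ## Example (illustrative value): caps on-disk TSDB size using a size suffix such as MB, GB or GiB.
- # retentionSize: "50GiB"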
-
- ## Allow out-of-order/out-of-bounds samples ingested into Prometheus for a specified duration
- ## See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#tsdb
- tsdb:
- outOfOrderTimeWindow: 0s
-
- ## Enable compression of the write-ahead log using Snappy.
- ##
- walCompression: true
-
- ## If true, the Operator won't process any Prometheus configuration changes
- ##
- paused: false
-
- ## Number of replicas of each shard to deploy for a Prometheus deployment.
- ## Number of replicas multiplied by shards is the total number of Pods created.
- ##
- replicas: 1
-
- ## EXPERIMENTAL: Number of shards to distribute targets onto.
- ## Number of replicas multiplied by shards is the total number of Pods created.
- ## Note that scaling down shards will not reshard data onto remaining instances, it must be manually moved.
- ## Increasing shards will not reshard data either but it will continue to be available from the same instances.
- ## To query globally use Thanos sidecar and Thanos querier or remote write data to a central location.
- ## Sharding is done on the content of the `__address__` target meta-label.
- ##
- shards: 1
-
- ## Log level for Prometheus.
- ##
- logLevel: info
-
- ## Log format for Prometheus.
- ##
- logFormat: logfmt
-
- ## Prefix used to register routes, overriding externalUrl route.
- ## Useful for proxies that rewrite URLs.
- ##
- routePrefix: /
-
- ## Standard object's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
- ## Metadata labels and annotations get propagated to the Prometheus pods.
- ##
- podMetadata: {}
- # labels:
- # app: prometheus
- # k8s-app: prometheus
-
- ## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node.
- ## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided.
- ## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node.
- ## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured.
- podAntiAffinity: ""
-
- ## If anti-affinity is enabled, sets the topologyKey to use for anti-affinity.
- ## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone
- ##
- podAntiAffinityTopologyKey: kubernetes.io/hostname
-
- ## Assign custom affinity rules to the prometheus instance
- ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
- ##
- affinity: {}
- # nodeAffinity:
- # requiredDuringSchedulingIgnoredDuringExecution:
- # nodeSelectorTerms:
- # - matchExpressions:
- # - key: kubernetes.io/e2e-az-name
- # operator: In
- # values:
- # - e2e-az1
- # - e2e-az2
-
- ## The remote_read spec configuration for Prometheus.
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#remotereadspec
- remoteRead: []
- # - url: http://remote1/read
- ## additionalRemoteRead is appended to remoteRead
- additionalRemoteRead: []
-
- ## The remote_write spec configuration for Prometheus.
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#remotewritespec
- remoteWrite: []
- # - url: http://remote1/push
- ## additionalRemoteWrite is appended to remoteWrite
- additionalRemoteWrite: []
-
- ## Enable/Disable Grafana dashboards provisioning for prometheus remote write feature
- remoteWriteDashboards: false
-
- ## Resource limits & requests
- ##
- resources: {}
- # requests:
- # memory: 400Mi
-
- ## Prometheus StorageSpec for persistent data
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/storage.md
- ##
- storageSpec: {}
- ## Using PersistentVolumeClaim
- ##
- # volumeClaimTemplate:
- # spec:
- # storageClassName: gluster
- # accessModes: ["ReadWriteOnce"]
- # resources:
- # requests:
- # storage: 50Gi
- # selector: {}
-
- ## Using tmpfs volume
- ##
- # emptyDir:
- # medium: Memory
-
- # Additional volumes on the output StatefulSet definition.
- volumes: []
-
- # Additional VolumeMounts on the output StatefulSet definition.
- volumeMounts: []
-
- ## AdditionalScrapeConfigs allows specifying additional Prometheus scrape configurations. Scrape configurations
- ## are appended to the configurations generated by the Prometheus Operator. Job configurations must have the form
- ## as specified in the official Prometheus documentation:
- ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config. As scrape configs are
- ## appended, the user is responsible for making sure it is valid. Note that using this feature may expose the possibility
- ## to break upgrades of Prometheus. It is advised to review Prometheus release notes to ensure that no incompatible
- ## scrape configs are going to break Prometheus after the upgrade.
- ## AdditionalScrapeConfigs can be defined as a list or as a templated string.
- ##
- ## The scrape configuration example below will find master nodes, provided they have the name .*mst.*, relabel the
- ## port to 2379 and allow etcd scraping provided it is running on all Kubernetes master nodes
- ##
- additionalScrapeConfigs: []
- # - job_name: kube-etcd
- # kubernetes_sd_configs:
- # - role: node
- # scheme: https
- # tls_config:
- # ca_file: /etc/prometheus/secrets/etcd-client-cert/etcd-ca
- # cert_file: /etc/prometheus/secrets/etcd-client-cert/etcd-client
- # key_file: /etc/prometheus/secrets/etcd-client-cert/etcd-client-key
- # relabel_configs:
- # - action: labelmap
- # regex: __meta_kubernetes_node_label_(.+)
- # - source_labels: [__address__]
- # action: replace
- # target_label: __address__
- # regex: ([^:;]+):(\d+)
- # replacement: ${1}:2379
- # - source_labels: [__meta_kubernetes_node_name]
- # action: keep
- # regex: .*mst.*
- # - source_labels: [__meta_kubernetes_node_name]
- # action: replace
- # target_label: node
- # regex: (.*)
- # replacement: ${1}
- # metric_relabel_configs:
- # - regex: (kubernetes_io_hostname|failure_domain_beta_kubernetes_io_region|beta_kubernetes_io_os|beta_kubernetes_io_arch|beta_kubernetes_io_instance_type|failure_domain_beta_kubernetes_io_zone)
- # action: labeldrop
- #
- ## If scrape config contains a repetitive section, you may want to use a template.
- ## In the following example, you can see how to define `gce_sd_configs` for multiple zones
- # additionalScrapeConfigs: |
- # - job_name: "node-exporter"
- # gce_sd_configs:
- # {{range $zone := .Values.gcp_zones}}
- # - project: "project1"
- # zone: "{{$zone}}"
- # port: 9100
- # {{end}}
- # relabel_configs:
- # ...
-
-
- ## If additional scrape configurations are already deployed in a single secret file you can use this section.
- ## Expected values are the secret name and key
- ## Cannot be used with additionalScrapeConfigs
- additionalScrapeConfigsSecret: {}
- # enabled: false
- # name:
- # key:
-
- ## additionalPrometheusSecretsAnnotations allows adding annotations to the kubernetes secret. This can be useful
- ## when deploying via Spinnaker, to disable versioning on the secret (strategy.spinnaker.io/versioned: 'false')
- additionalPrometheusSecretsAnnotations: {}
-
- ## AdditionalAlertManagerConfigs allows for manual configuration of alertmanager jobs in the form as specified
- ## in the official Prometheus documentation https://prometheus.io/docs/prometheus/latest/configuration/configuration/#.
- ## AlertManager configurations specified are appended to the configurations generated by the Prometheus Operator.
- ## As AlertManager configs are appended, the user is responsible for making sure it is valid. Note that using this
- ## feature may expose the possibility to break upgrades of Prometheus. It is advised to review Prometheus release
- ## notes to ensure that no incompatible AlertManager configs are going to break Prometheus after the upgrade.
- ##
- additionalAlertManagerConfigs: []
- # - consul_sd_configs:
- # - server: consul.dev.test:8500
- # scheme: http
- # datacenter: dev
- # tag_separator: ','
- # services:
- # - metrics-prometheus-alertmanager
-
- ## If additional alertmanager configurations are already deployed in a single secret, or you want to manage
- ## them separately from the helm deployment, you can use this section.
- ## Expected values are the secret name and key
- ## Cannot be used with additionalAlertManagerConfigs
- additionalAlertManagerConfigsSecret: {}
- # name:
- # key:
- # optional: false
-
- ## AdditionalAlertRelabelConfigs allows specifying Prometheus alert relabel configurations. Alert relabel configurations specified are appended
- ## to the configurations generated by the Prometheus Operator. Alert relabel configurations specified must have the form as specified in the
- ## official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs.
- ## As alert relabel configs are appended, the user is responsible for making sure it is valid. Note that using this feature may expose the
- ## possibility to break upgrades of Prometheus. It is advised to review Prometheus release notes to ensure that no incompatible alert relabel
- ## configs are going to break Prometheus after the upgrade.
- ##
- additionalAlertRelabelConfigs: []
- # - separator: ;
- # regex: prometheus_replica
- # replacement: $1
- # action: labeldrop
-
- ## If additional alert relabel configurations are already deployed in a single secret, or you want to manage
- ## them separately from the helm deployment, you can use this section.
- ## Expected values are the secret name and key
- ## Cannot be used with additionalAlertRelabelConfigs
- additionalAlertRelabelConfigsSecret: {}
- # name:
- # key:
-
- ## SecurityContext holds pod-level security attributes and common container settings.
- ## This defaults to non root user with uid 1000 and gid 2000.
- ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md
- ##
- securityContext:
- runAsGroup: 2000
- runAsNonRoot: true
- runAsUser: 1000
- fsGroup: 2000
- seccompProfile:
- type: RuntimeDefault
-
- ## Priority class assigned to the Pods
- ##
- priorityClassName: ""
-
- ## Thanos configuration allows configuring various aspects of a Prometheus server in a Thanos environment.
- ## This section is experimental; it may change significantly, without deprecation notice or backward compatibility, in any release.
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#thanosspec
- ##
- thanos: {}
- # secretProviderClass:
- # provider: gcp
- # parameters:
- # secrets: |
- # - resourceName: "projects/$PROJECT_ID/secrets/testsecret/versions/latest"
- # fileName: "objstore.yaml"
- # objectStorageConfigFile: /var/secrets/object-store.yaml
-
- ## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to a Prometheus pod.
- ## If using a proxy extraContainer, update targetPort with the proxy container port
- containers: []
- # containers:
- # - name: oauth-proxy
- # image: quay.io/oauth2-proxy/oauth2-proxy:v7.3.0
- # args:
- # - --upstream=http://127.0.0.1:9093
- # - --http-address=0.0.0.0:8081
- # - ...
- # ports:
- # - containerPort: 8081
- # name: oauth-proxy
- # protocol: TCP
- # resources: {}
-
- ## InitContainers allows injecting additional initContainers. This is meant to allow making changes
- ## (permissions, directory tree) on mounted volumes before starting Prometheus
- initContainers: []
-
- ## PortName to use for Prometheus.
- ##
- portName: "http-web"
-
- ## ArbitraryFSAccessThroughSMs configures whether configuration based on a service monitor can access arbitrary files
- ## on the file system of the Prometheus container e.g. bearer token files.
- arbitraryFSAccessThroughSMs: false
-
- ## OverrideHonorLabels if set to true overrides all user configured honor_labels. If HonorLabels is set in ServiceMonitor
- ## or PodMonitor to true, this overrides honor_labels to false.
- overrideHonorLabels: false
-
- ## OverrideHonorTimestamps allows globally enforcing the honoring of timestamps in all scrape configs.
- overrideHonorTimestamps: false
-
- ## IgnoreNamespaceSelectors if set to true will ignore NamespaceSelector settings from the podmonitor and servicemonitor
- ## configs, and they will only discover endpoints within their current namespace. Defaults to false.
- ignoreNamespaceSelectors: false
-
- ## EnforcedNamespaceLabel enforces adding a namespace label of origin for each alert and metric that is user created.
- ## The label value will always be the namespace of the object that is being created.
- ## Disabled by default
- enforcedNamespaceLabel: ""
-
- ## PrometheusRulesExcludedFromEnforce - list of PrometheusRule objects to be excluded from having the namespace label of origin enforced.
- ## Works only if enforcedNamespaceLabel is set. Make sure both ruleNamespace and ruleName are set for each pair.
- ## Deprecated: use `excludedFromEnforcement` instead.
- prometheusRulesExcludedFromEnforce: []
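- ## Illustrative only: the namespace and rule name below are hypothetical; the entries follow the
- ## ruleNamespace/ruleName pairs described above. Prefer excludedFromEnforcement for new setups.
- # prometheusRulesExcludedFromEnforce:
- #   - ruleNamespace: team-a
- #     ruleName: team-a-rules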
-
- ## ExcludedFromEnforcement - list of object references to PodMonitor, ServiceMonitor, Probe and PrometheusRule objects
- ## to be excluded from enforcing a namespace label of origin.
- ## Works only if enforcedNamespaceLabel is set.
- ## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#objectreference
- excludedFromEnforcement: []
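- ## A minimal illustrative sketch only: the namespace and name below are hypothetical; the fields are assumed
- ## to follow the ObjectReference API linked above.
- # excludedFromEnforcement:
- #   - group: monitoring.coreos.com
- #     resource: servicemonitors
- #     namespace: team-a
- #     name: team-a-servicemonitor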
-
- ## QueryLogFile specifies the file to which PromQL queries are logged. Note that this location must be writable,
- ## and can be persisted using an attached volume. Alternatively, the location can be set to a stdout location such
- ## as /dev/stdout to log query information to the default Prometheus log stream. This is only available in versions
- ## of Prometheus >= 2.16.0. For more details, see the Prometheus docs (https://prometheus.io/docs/guides/query-log/).
- queryLogFile: false
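- ## For example (illustrative only), /dev/stdout streams query logs to the default Prometheus log output:
- # queryLogFile: /dev/stdout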
-
- ## EnforcedSampleLimit defines a global limit on the number of scraped samples that will be accepted. This overrides any SampleLimit
- ## set per ServiceMonitor and/or PodMonitor. It is meant to be used by admins to enforce the SampleLimit to keep the overall
- ## number of samples/series under the desired limit. Note that if SampleLimit is lower, that value will be taken instead.
- enforcedSampleLimit: false
-
- ## EnforcedTargetLimit defines a global limit on the number of scraped targets. This overrides any TargetLimit set
- ## per ServiceMonitor and/or PodMonitor. It is meant to be used by admins to enforce the TargetLimit to keep the overall
- ## number of targets under the desired limit. Note that if TargetLimit is lower, that value will be taken instead, except
- ## if either value is zero, in which case the non-zero value will be used. If both values are zero, no limit is enforced.
- enforcedTargetLimit: false
-
- ## Per-scrape limit on number of labels that will be accepted for a sample. If more than this number of labels are present
- ## post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only valid in Prometheus versions
- ## 2.27.0 and newer.
- enforcedLabelLimit: false
-
- ## Per-scrape limit on the length of label names that will be accepted for a sample. If a label name is longer than this number
- ## post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only valid in Prometheus versions
- ## 2.27.0 and newer.
- enforcedLabelNameLengthLimit: false
-
- ## Per-scrape limit on the length of label values that will be accepted for a sample. If a label value is longer than this
- ## number post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only valid in Prometheus
- ## versions 2.27.0 and newer.
- enforcedLabelValueLengthLimit: false
-
- ## AllowOverlappingBlocks enables vertical compaction and vertical query merge in Prometheus. This is still experimental
- ## in Prometheus so it may change in any upcoming release.
- allowOverlappingBlocks: false
-
- ## Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to
- ## be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).
- minReadySeconds: 0
-
- # Required for use in managed Kubernetes clusters (such as AWS EKS) with a custom CNI (such as Calico), because the
- # control plane managed by AWS cannot communicate with the pods' IP CIDR, so admission webhooks do not work.
- # Use the host's network namespace if true. Make sure to understand the security implications if you want to enable it.
- # When hostNetwork is enabled, this will set dnsPolicy to ClusterFirstWithHostNet automatically.
- hostNetwork: false
-
- # HostAlias holds the mapping between IP and hostnames that will be injected
- # as an entry in the pod's hosts file.
- hostAliases: []
- # - ip: 10.10.0.100
- # hostnames:
- # - a1.app.local
- # - b1.app.local
-
- ## TracingConfig configures tracing in Prometheus.
- ## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#prometheustracingconfig
- tracingConfig: {}
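- ## A minimal illustrative sketch only: the endpoint below is hypothetical, and the field names are assumed
- ## to follow the PrometheusTracingConfig API linked above (an OTLP gRPC collector receiving the spans).
- # tracingConfig:
- #   clientType: grpc
- #   endpoint: otel-collector.monitoring.svc:4317
- #   insecure: true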
-
- additionalRulesForClusterRole: []
- # - apiGroups: [ "" ]
- # resources:
- # - nodes/proxy
- # verbs: [ "get", "list", "watch" ]
-
- additionalServiceMonitors: []
- ## Name of the ServiceMonitor to create
- ##
- # - name: ""
-
- ## Additional labels to set, used for the ServiceMonitorSelector together with the standard labels from
- ## the chart.
- ##
- # additionalLabels: {}
-
- ## Service label for use in assembling a job name of the form