diff --git a/pkg/model/components/cilium.go b/pkg/model/components/cilium.go index 0ce1cc0374..85bd39b3db 100644 --- a/pkg/model/components/cilium.go +++ b/pkg/model/components/cilium.go @@ -40,7 +40,7 @@ func (b *CiliumOptionsBuilder) BuildOptions(o interface{}) error { } if c.Version == "" { - c.Version = "v1.11.4" + c.Version = "v1.11.5" } if c.EnableEndpointHealthChecking == nil { diff --git a/tests/integration/update_cluster/minimal-ipv6-cilium/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/minimal-ipv6-cilium/data/aws_s3_object_cluster-completed.spec_content index 2635fffe37..b6068c1745 100644 --- a/tests/integration/update_cluster/minimal-ipv6-cilium/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/minimal-ipv6-cilium/data/aws_s3_object_cluster-completed.spec_content @@ -223,7 +223,7 @@ spec: sidecarIstioProxyImage: cilium/istio_proxy toFqdnsDnsRejectResponseCode: refused tunnel: disabled - version: v1.11.4 + version: v1.11.5 nonMasqueradeCIDR: ::/0 secretStore: memfs://clusters.example.com/minimal-ipv6.example.com/secrets serviceClusterIPRange: fd00:5e4f:ce::/108 diff --git a/tests/integration/update_cluster/minimal-ipv6-cilium/data/aws_s3_object_minimal-ipv6.example.com-addons-bootstrap_content b/tests/integration/update_cluster/minimal-ipv6-cilium/data/aws_s3_object_minimal-ipv6.example.com-addons-bootstrap_content index 96fc30eeab..7eaf818f6b 100644 --- a/tests/integration/update_cluster/minimal-ipv6-cilium/data/aws_s3_object_minimal-ipv6.example.com-addons-bootstrap_content +++ b/tests/integration/update_cluster/minimal-ipv6-cilium/data/aws_s3_object_minimal-ipv6.example.com-addons-bootstrap_content @@ -53,8 +53,8 @@ spec: k8s-addon: storage-aws.addons.k8s.io version: 9.99.0 - id: k8s-1.16 - manifest: networking.cilium.io/k8s-1.16-v1.10.yaml - manifestHash: 6969c542728bd5bd6dce4120a6f911d9a70462646014a1e21e13985ef9e56610 + manifest: networking.cilium.io/k8s-1.16-v1.11.yaml + manifestHash: 72833833b7b7bc871347d491216e2f8496df762e8bfb5775c71424c878dc48c5 name: networking.cilium.io needsRollingUpdate: all selector: diff --git a/tests/integration/update_cluster/minimal-ipv6-cilium/data/aws_s3_object_minimal-ipv6.example.com-addons-networking.cilium.io-k8s-1.16_content b/tests/integration/update_cluster/minimal-ipv6-cilium/data/aws_s3_object_minimal-ipv6.example.com-addons-networking.cilium.io-k8s-1.16_content index 2d8805e717..c30ef05fe3 100644 --- a/tests/integration/update_cluster/minimal-ipv6-cilium/data/aws_s3_object_minimal-ipv6.example.com-addons-networking.cilium.io-k8s-1.16_content +++ b/tests/integration/update_cluster/minimal-ipv6-cilium/data/aws_s3_object_minimal-ipv6.example.com-addons-networking.cilium.io-k8s-1.16_content @@ -57,6 +57,7 @@ data: kube-proxy-replacement: partial masquerade: "false" monitor-aggregation: medium + nodes-gc-interval: 5m0s preallocate-bpf-maps: "false" sidecar-istio-proxy-image: cilium/istio_proxy tofqdns-dns-reject-response-code: refused @@ -105,36 +106,16 @@ rules: resources: - namespaces - services - - nodes - - endpoints - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - pods - - pods/finalizers - verbs: - - get - - list - - watch - - update - - delete -- apiGroups: - - "" - resources: + - endpoints - nodes verbs: - get - list - watch - - update - apiGroups: - "" resources: - - nodes - nodes/status verbs: - patch @@ -153,21 +134,15 @@ rules: resources: - ciliumnetworkpolicies - ciliumnetworkpolicies/status - - 
ciliumnetworkpolicies/finalizers - ciliumclusterwidenetworkpolicies - ciliumclusterwidenetworkpolicies/status - - ciliumclusterwidenetworkpolicies/finalizers - ciliumendpoints - ciliumendpoints/status - - ciliumendpoints/finalizers - ciliumnodes - ciliumnodes/status - - ciliumnodes/finalizers - ciliumidentities - - ciliumidentities/finalizers - ciliumlocalredirectpolicies - ciliumlocalredirectpolicies/status - - ciliumlocalredirectpolicies/finalizers - ciliumegressnatpolicies verbs: - '*' @@ -193,6 +168,20 @@ rules: - list - watch - delete +- apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - nodes + - nodes/status + verbs: + - patch - apiGroups: - discovery.k8s.io resources: @@ -379,7 +368,7 @@ spec: value: api.internal.minimal-ipv6.example.com - name: KUBERNETES_SERVICE_PORT value: "443" - image: quay.io/cilium/cilium:v1.11.4 + image: quay.io/cilium/cilium:v1.11.5 imagePullPolicy: IfNotPresent lifecycle: postStart: @@ -475,7 +464,7 @@ spec: key: clean-cilium-bpf-state name: cilium-config optional: true - image: quay.io/cilium/cilium:v1.11.4 + image: quay.io/cilium/cilium:v1.11.5 imagePullPolicy: IfNotPresent name: clean-cilium-state resources: @@ -611,7 +600,7 @@ spec: value: api.internal.minimal-ipv6.example.com - name: KUBERNETES_SERVICE_PORT value: "443" - image: quay.io/cilium/operator:v1.11.4 + image: quay.io/cilium/operator:v1.11.5 imagePullPolicy: IfNotPresent livenessProbe: httpGet: diff --git a/tests/integration/update_cluster/minimal-ipv6-cilium/kubernetes.tf b/tests/integration/update_cluster/minimal-ipv6-cilium/kubernetes.tf index 82055576e6..016d077cd5 100644 --- a/tests/integration/update_cluster/minimal-ipv6-cilium/kubernetes.tf +++ b/tests/integration/update_cluster/minimal-ipv6-cilium/kubernetes.tf @@ -735,7 +735,7 @@ resource "aws_s3_object" "minimal-ipv6-example-com-addons-limit-range-addons-k8s resource "aws_s3_object" "minimal-ipv6-example-com-addons-networking-cilium-io-k8s-1-16" { bucket = "testingBucket" content = file("${path.module}/data/aws_s3_object_minimal-ipv6.example.com-addons-networking.cilium.io-k8s-1.16_content") - key = "clusters.example.com/minimal-ipv6.example.com/addons/networking.cilium.io/k8s-1.16-v1.10.yaml" + key = "clusters.example.com/minimal-ipv6.example.com/addons/networking.cilium.io/k8s-1.16-v1.11.yaml" provider = aws.files server_side_encryption = "AES256" } diff --git a/tests/integration/update_cluster/minimal-warmpool/data/aws_launch_template_nodes.minimal-warmpool.example.com_user_data b/tests/integration/update_cluster/minimal-warmpool/data/aws_launch_template_nodes.minimal-warmpool.example.com_user_data index 15a4599617..f4a0a54dea 100644 --- a/tests/integration/update_cluster/minimal-warmpool/data/aws_launch_template_nodes.minimal-warmpool.example.com_user_data +++ b/tests/integration/update_cluster/minimal-warmpool/data/aws_launch_template_nodes.minimal-warmpool.example.com_user_data @@ -166,7 +166,7 @@ CloudProvider: aws ConfigBase: memfs://clusters.example.com/minimal-warmpool.example.com InstanceGroupName: nodes InstanceGroupRole: Node -NodeupConfigHash: VB+wVXktis1pq5AJsf340nKkPeJX5/1mlbANYgA3jQ0= +NodeupConfigHash: dG1Qo5mvZr8F1z5gSortWbpvKxDlufN7K+OddwzKcUg= __EOF_KUBE_ENV diff --git a/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_cluster-completed.spec_content index a0a97fa61a..14fc487cd5 100644 --- 
a/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_cluster-completed.spec_content @@ -206,7 +206,7 @@ spec: sidecarIstioProxyImage: cilium/istio_proxy toFqdnsDnsRejectResponseCode: refused tunnel: vxlan - version: v1.11.4 + version: v1.11.5 nonMasqueradeCIDR: 100.64.0.0/10 podCIDR: 100.96.0.0/11 secretStore: memfs://clusters.example.com/minimal-warmpool.example.com/secrets diff --git a/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_minimal-warmpool.example.com-addons-bootstrap_content b/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_minimal-warmpool.example.com-addons-bootstrap_content index 65183ff40b..3d1b33c3f8 100644 --- a/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_minimal-warmpool.example.com-addons-bootstrap_content +++ b/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_minimal-warmpool.example.com-addons-bootstrap_content @@ -53,8 +53,8 @@ spec: k8s-addon: storage-aws.addons.k8s.io version: 9.99.0 - id: k8s-1.16 - manifest: networking.cilium.io/k8s-1.16-v1.10.yaml - manifestHash: 5c6574fadfcf3b3870b94a648c42724d0d1444f0af039acba69a83ae2f44e56b + manifest: networking.cilium.io/k8s-1.16-v1.11.yaml + manifestHash: 375707f2c54fb09676a0ce6e971a2de8213860aeee1ed7f3abd179313dc0490d name: networking.cilium.io needsRollingUpdate: all selector: diff --git a/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_minimal-warmpool.example.com-addons-networking.cilium.io-k8s-1.16_content b/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_minimal-warmpool.example.com-addons-networking.cilium.io-k8s-1.16_content index 89e3b06825..159a779d61 100644 --- a/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_minimal-warmpool.example.com-addons-networking.cilium.io-k8s-1.16_content +++ b/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_minimal-warmpool.example.com-addons-networking.cilium.io-k8s-1.16_content @@ -57,6 +57,7 @@ data: kube-proxy-replacement: partial masquerade: "true" monitor-aggregation: medium + nodes-gc-interval: 5m0s preallocate-bpf-maps: "false" sidecar-istio-proxy-image: cilium/istio_proxy tofqdns-dns-reject-response-code: refused @@ -105,36 +106,16 @@ rules: resources: - namespaces - services - - nodes - - endpoints - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - pods - - pods/finalizers - verbs: - - get - - list - - watch - - update - - delete -- apiGroups: - - "" - resources: + - endpoints - nodes verbs: - get - list - watch - - update - apiGroups: - "" resources: - - nodes - nodes/status verbs: - patch @@ -153,21 +134,15 @@ rules: resources: - ciliumnetworkpolicies - ciliumnetworkpolicies/status - - ciliumnetworkpolicies/finalizers - ciliumclusterwidenetworkpolicies - ciliumclusterwidenetworkpolicies/status - - ciliumclusterwidenetworkpolicies/finalizers - ciliumendpoints - ciliumendpoints/status - - ciliumendpoints/finalizers - ciliumnodes - ciliumnodes/status - - ciliumnodes/finalizers - ciliumidentities - - ciliumidentities/finalizers - ciliumlocalredirectpolicies - ciliumlocalredirectpolicies/status - - ciliumlocalredirectpolicies/finalizers - ciliumegressnatpolicies verbs: - '*' @@ -193,6 +168,20 @@ rules: - list - watch - delete +- apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - nodes + - nodes/status + verbs: 
+ - patch - apiGroups: - discovery.k8s.io resources: @@ -379,7 +368,7 @@ spec: value: api.internal.minimal-warmpool.example.com - name: KUBERNETES_SERVICE_PORT value: "443" - image: quay.io/cilium/cilium:v1.11.4 + image: quay.io/cilium/cilium:v1.11.5 imagePullPolicy: IfNotPresent lifecycle: postStart: @@ -475,7 +464,7 @@ spec: key: clean-cilium-bpf-state name: cilium-config optional: true - image: quay.io/cilium/cilium:v1.11.4 + image: quay.io/cilium/cilium:v1.11.5 imagePullPolicy: IfNotPresent name: clean-cilium-state resources: @@ -611,7 +600,7 @@ spec: value: api.internal.minimal-warmpool.example.com - name: KUBERNETES_SERVICE_PORT value: "443" - image: quay.io/cilium/operator:v1.11.4 + image: quay.io/cilium/operator:v1.11.5 imagePullPolicy: IfNotPresent livenessProbe: httpGet: diff --git a/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_nodeupconfig-nodes_content b/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_nodeupconfig-nodes_content index ed887ccaa0..5bf1667f76 100644 --- a/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_nodeupconfig-nodes_content +++ b/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_nodeupconfig-nodes_content @@ -67,8 +67,8 @@ containerdConfig: logLevel: info version: 1.4.12 warmPoolImages: -- quay.io/cilium/cilium:v1.11.4 -- quay.io/cilium/operator:v1.11.4 +- quay.io/cilium/cilium:v1.11.5 +- quay.io/cilium/operator:v1.11.5 - registry.k8s.io/kube-proxy:v1.21.0 - registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.1.0 - registry.k8s.io/sig-storage/livenessprobe:v2.2.0 diff --git a/tests/integration/update_cluster/minimal-warmpool/kubernetes.tf b/tests/integration/update_cluster/minimal-warmpool/kubernetes.tf index daeb61d184..a9184e9237 100644 --- a/tests/integration/update_cluster/minimal-warmpool/kubernetes.tf +++ b/tests/integration/update_cluster/minimal-warmpool/kubernetes.tf @@ -618,7 +618,7 @@ resource "aws_s3_object" "minimal-warmpool-example-com-addons-limit-range-addons resource "aws_s3_object" "minimal-warmpool-example-com-addons-networking-cilium-io-k8s-1-16" { bucket = "testingBucket" content = file("${path.module}/data/aws_s3_object_minimal-warmpool.example.com-addons-networking.cilium.io-k8s-1.16_content") - key = "clusters.example.com/minimal-warmpool.example.com/addons/networking.cilium.io/k8s-1.16-v1.10.yaml" + key = "clusters.example.com/minimal-warmpool.example.com/addons/networking.cilium.io/k8s-1.16-v1.11.yaml" provider = aws.files server_side_encryption = "AES256" } diff --git a/tests/integration/update_cluster/privatecilium/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/privatecilium/data/aws_s3_object_cluster-completed.spec_content index e77f7a784d..290bc26352 100644 --- a/tests/integration/update_cluster/privatecilium/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/privatecilium/data/aws_s3_object_cluster-completed.spec_content @@ -192,7 +192,7 @@ spec: sidecarIstioProxyImage: cilium/istio_proxy toFqdnsDnsRejectResponseCode: refused tunnel: vxlan - version: v1.11.4 + version: v1.11.5 nonMasqueradeCIDR: 100.64.0.0/10 podCIDR: 100.96.0.0/11 secretStore: memfs://clusters.example.com/privatecilium.example.com/secrets diff --git a/tests/integration/update_cluster/privatecilium/data/aws_s3_object_privatecilium.example.com-addons-bootstrap_content b/tests/integration/update_cluster/privatecilium/data/aws_s3_object_privatecilium.example.com-addons-bootstrap_content index 
c95f0430f9..9f165c08a3 100644 --- a/tests/integration/update_cluster/privatecilium/data/aws_s3_object_privatecilium.example.com-addons-bootstrap_content +++ b/tests/integration/update_cluster/privatecilium/data/aws_s3_object_privatecilium.example.com-addons-bootstrap_content @@ -53,8 +53,8 @@ spec: k8s-addon: storage-aws.addons.k8s.io version: 9.99.0 - id: k8s-1.16 - manifest: networking.cilium.io/k8s-1.16-v1.10.yaml - manifestHash: 8e556db94dd0040ecf256f2af58dba3ba330d443a289f9950a9a695faad258ea + manifest: networking.cilium.io/k8s-1.16-v1.11.yaml + manifestHash: 377cd477c63b62dbf7a458500824a0d9c2e227cebb334b6f5c70a9ceaa3a6b98 name: networking.cilium.io needsRollingUpdate: all selector: diff --git a/tests/integration/update_cluster/privatecilium/data/aws_s3_object_privatecilium.example.com-addons-networking.cilium.io-k8s-1.16_content b/tests/integration/update_cluster/privatecilium/data/aws_s3_object_privatecilium.example.com-addons-networking.cilium.io-k8s-1.16_content index d84e6d6ec6..41d2a5d91d 100644 --- a/tests/integration/update_cluster/privatecilium/data/aws_s3_object_privatecilium.example.com-addons-networking.cilium.io-k8s-1.16_content +++ b/tests/integration/update_cluster/privatecilium/data/aws_s3_object_privatecilium.example.com-addons-networking.cilium.io-k8s-1.16_content @@ -57,6 +57,7 @@ data: kube-proxy-replacement: partial masquerade: "true" monitor-aggregation: medium + nodes-gc-interval: 5m0s preallocate-bpf-maps: "false" sidecar-istio-proxy-image: cilium/istio_proxy tofqdns-dns-reject-response-code: refused @@ -105,36 +106,16 @@ rules: resources: - namespaces - services - - nodes - - endpoints - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - pods - - pods/finalizers - verbs: - - get - - list - - watch - - update - - delete -- apiGroups: - - "" - resources: + - endpoints - nodes verbs: - get - list - watch - - update - apiGroups: - "" resources: - - nodes - nodes/status verbs: - patch @@ -153,21 +134,15 @@ rules: resources: - ciliumnetworkpolicies - ciliumnetworkpolicies/status - - ciliumnetworkpolicies/finalizers - ciliumclusterwidenetworkpolicies - ciliumclusterwidenetworkpolicies/status - - ciliumclusterwidenetworkpolicies/finalizers - ciliumendpoints - ciliumendpoints/status - - ciliumendpoints/finalizers - ciliumnodes - ciliumnodes/status - - ciliumnodes/finalizers - ciliumidentities - - ciliumidentities/finalizers - ciliumlocalredirectpolicies - ciliumlocalredirectpolicies/status - - ciliumlocalredirectpolicies/finalizers - ciliumegressnatpolicies verbs: - '*' @@ -193,6 +168,20 @@ rules: - list - watch - delete +- apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - nodes + - nodes/status + verbs: + - patch - apiGroups: - discovery.k8s.io resources: @@ -379,7 +368,7 @@ spec: value: api.internal.privatecilium.example.com - name: KUBERNETES_SERVICE_PORT value: "443" - image: quay.io/cilium/cilium:v1.11.4 + image: quay.io/cilium/cilium:v1.11.5 imagePullPolicy: IfNotPresent lifecycle: postStart: @@ -475,7 +464,7 @@ spec: key: clean-cilium-bpf-state name: cilium-config optional: true - image: quay.io/cilium/cilium:v1.11.4 + image: quay.io/cilium/cilium:v1.11.5 imagePullPolicy: IfNotPresent name: clean-cilium-state resources: @@ -611,7 +600,7 @@ spec: value: api.internal.privatecilium.example.com - name: KUBERNETES_SERVICE_PORT value: "443" - image: quay.io/cilium/operator:v1.11.4 + image: quay.io/cilium/operator:v1.11.5 imagePullPolicy: IfNotPresent livenessProbe: httpGet: diff --git 
a/tests/integration/update_cluster/privatecilium/kubernetes.tf b/tests/integration/update_cluster/privatecilium/kubernetes.tf index 4596cef684..b0bdfeba7a 100644 --- a/tests/integration/update_cluster/privatecilium/kubernetes.tf +++ b/tests/integration/update_cluster/privatecilium/kubernetes.tf @@ -901,7 +901,7 @@ resource "aws_s3_object" "privatecilium-example-com-addons-limit-range-addons-k8 resource "aws_s3_object" "privatecilium-example-com-addons-networking-cilium-io-k8s-1-16" { bucket = "testingBucket" content = file("${path.module}/data/aws_s3_object_privatecilium.example.com-addons-networking.cilium.io-k8s-1.16_content") - key = "clusters.example.com/privatecilium.example.com/addons/networking.cilium.io/k8s-1.16-v1.10.yaml" + key = "clusters.example.com/privatecilium.example.com/addons/networking.cilium.io/k8s-1.16-v1.11.yaml" provider = aws.files server_side_encryption = "AES256" } diff --git a/tests/integration/update_cluster/privateciliumadvanced/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/privateciliumadvanced/data/aws_s3_object_cluster-completed.spec_content index 2f4130d307..e4a98181de 100644 --- a/tests/integration/update_cluster/privateciliumadvanced/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/privateciliumadvanced/data/aws_s3_object_cluster-completed.spec_content @@ -202,7 +202,7 @@ spec: sidecarIstioProxyImage: cilium/istio_proxy toFqdnsDnsRejectResponseCode: refused tunnel: disabled - version: v1.11.4 + version: v1.11.5 nonMasqueradeCIDR: 100.64.0.0/10 podCIDR: 100.96.0.0/11 secretStore: memfs://clusters.example.com/privateciliumadvanced.example.com/secrets diff --git a/tests/integration/update_cluster/privateciliumadvanced/data/aws_s3_object_privateciliumadvanced.example.com-addons-bootstrap_content b/tests/integration/update_cluster/privateciliumadvanced/data/aws_s3_object_privateciliumadvanced.example.com-addons-bootstrap_content index 585c4b3f8e..dc6a6cca58 100644 --- a/tests/integration/update_cluster/privateciliumadvanced/data/aws_s3_object_privateciliumadvanced.example.com-addons-bootstrap_content +++ b/tests/integration/update_cluster/privateciliumadvanced/data/aws_s3_object_privateciliumadvanced.example.com-addons-bootstrap_content @@ -53,8 +53,8 @@ spec: k8s-addon: storage-aws.addons.k8s.io version: 9.99.0 - id: k8s-1.16 - manifest: networking.cilium.io/k8s-1.16-v1.10.yaml - manifestHash: 58f1a38f57a73a5d32f7e6c97b3c7ea2435f1afeea87056bef2e880cfa091c96 + manifest: networking.cilium.io/k8s-1.16-v1.11.yaml + manifestHash: 791bef8c3da2a69f8954d83393b97b0964db6e1d3056b49140850cb46563416b name: networking.cilium.io needsRollingUpdate: all selector: diff --git a/tests/integration/update_cluster/privateciliumadvanced/data/aws_s3_object_privateciliumadvanced.example.com-addons-networking.cilium.io-k8s-1.16_content b/tests/integration/update_cluster/privateciliumadvanced/data/aws_s3_object_privateciliumadvanced.example.com-addons-networking.cilium.io-k8s-1.16_content index 23ba770492..0d2499c0c7 100644 --- a/tests/integration/update_cluster/privateciliumadvanced/data/aws_s3_object_privateciliumadvanced.example.com-addons-networking.cilium.io-k8s-1.16_content +++ b/tests/integration/update_cluster/privateciliumadvanced/data/aws_s3_object_privateciliumadvanced.example.com-addons-networking.cilium.io-k8s-1.16_content @@ -71,6 +71,7 @@ data: kvstore-opt: '{"etcd.config": "/var/lib/etcd-config/etcd.config"}' masquerade: "false" monitor-aggregation: medium + nodes-gc-interval: 5m0s 
preallocate-bpf-maps: "false" sidecar-istio-proxy-image: cilium/istio_proxy tofqdns-dns-reject-response-code: refused @@ -119,36 +120,16 @@ rules: resources: - namespaces - services - - nodes - - endpoints - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - pods - - pods/finalizers - verbs: - - get - - list - - watch - - update - - delete -- apiGroups: - - "" - resources: + - endpoints - nodes verbs: - get - list - watch - - update - apiGroups: - "" resources: - - nodes - nodes/status verbs: - patch @@ -167,21 +148,15 @@ rules: resources: - ciliumnetworkpolicies - ciliumnetworkpolicies/status - - ciliumnetworkpolicies/finalizers - ciliumclusterwidenetworkpolicies - ciliumclusterwidenetworkpolicies/status - - ciliumclusterwidenetworkpolicies/finalizers - ciliumendpoints - ciliumendpoints/status - - ciliumendpoints/finalizers - ciliumnodes - ciliumnodes/status - - ciliumnodes/finalizers - ciliumidentities - - ciliumidentities/finalizers - ciliumlocalredirectpolicies - ciliumlocalredirectpolicies/status - - ciliumlocalredirectpolicies/finalizers - ciliumegressnatpolicies verbs: - '*' @@ -207,6 +182,20 @@ rules: - list - watch - delete +- apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - nodes + - nodes/status + verbs: + - patch - apiGroups: - discovery.k8s.io resources: @@ -393,7 +382,7 @@ spec: value: api.internal.privateciliumadvanced.example.com - name: KUBERNETES_SERVICE_PORT value: "443" - image: quay.io/cilium/cilium:v1.11.4 + image: quay.io/cilium/cilium:v1.11.5 imagePullPolicy: IfNotPresent lifecycle: postStart: @@ -495,7 +484,7 @@ spec: key: clean-cilium-bpf-state name: cilium-config optional: true - image: quay.io/cilium/cilium:v1.11.4 + image: quay.io/cilium/cilium:v1.11.5 imagePullPolicy: IfNotPresent name: clean-cilium-state resources: @@ -642,7 +631,7 @@ spec: value: api.internal.privateciliumadvanced.example.com - name: KUBERNETES_SERVICE_PORT value: "443" - image: quay.io/cilium/operator:v1.11.4 + image: quay.io/cilium/operator:v1.11.5 imagePullPolicy: IfNotPresent livenessProbe: httpGet: diff --git a/tests/integration/update_cluster/privateciliumadvanced/kubernetes.tf b/tests/integration/update_cluster/privateciliumadvanced/kubernetes.tf index 348fdf5a3f..36c43c81e0 100644 --- a/tests/integration/update_cluster/privateciliumadvanced/kubernetes.tf +++ b/tests/integration/update_cluster/privateciliumadvanced/kubernetes.tf @@ -933,7 +933,7 @@ resource "aws_s3_object" "privateciliumadvanced-example-com-addons-limit-range-a resource "aws_s3_object" "privateciliumadvanced-example-com-addons-networking-cilium-io-k8s-1-16" { bucket = "testingBucket" content = file("${path.module}/data/aws_s3_object_privateciliumadvanced.example.com-addons-networking.cilium.io-k8s-1.16_content") - key = "clusters.example.com/privateciliumadvanced.example.com/addons/networking.cilium.io/k8s-1.16-v1.10.yaml" + key = "clusters.example.com/privateciliumadvanced.example.com/addons/networking.cilium.io/k8s-1.16-v1.11.yaml" provider = aws.files server_side_encryption = "AES256" } diff --git a/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.11.yaml.template b/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.11.yaml.template new file mode 100644 index 0000000000..bf68d9fe51 --- /dev/null +++ b/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.11.yaml.template @@ -0,0 +1,1170 @@ +{{ with .Networking.Cilium }} +{{ $semver := (trimPrefix "v" .Version) }} +{{- if CiliumSecret 
}}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: cilium-ipsec-keys
+  namespace: kube-system
+stringData:
+  {{ CiliumSecret }}
+---
+{{- end }}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: cilium
+  namespace: kube-system
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: cilium-operator
+  namespace: kube-system
+{{ if WithDefaultBool .Hubble.Enabled false }}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: hubble-relay
+  namespace: kube-system
+{{ end }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: cilium-config
+  namespace: kube-system
+data:
+
+{{- if .EtcdManaged }}
+  kvstore: etcd
+  kvstore-opt: '{"etcd.config": "/var/lib/etcd-config/etcd.config"}'
+
+  etcd-config: |-
+    ---
+    endpoints:
+      - https://{{ $.MasterInternalName }}:4003
+
+    trusted-ca-file: '/var/lib/etcd-secrets/etcd-ca.crt'
+    key-file: '/var/lib/etcd-secrets/etcd-client-cilium.key'
+    cert-file: '/var/lib/etcd-secrets/etcd-client-cilium.crt'
+
+  enable-k8s-event-handover: "true"
+{{ end }}
+
+  # Identity allocation mode selects how identities are shared between cilium
+  # nodes by setting how they are stored. The options are "crd" or "kvstore".
+  # - "crd" stores identities in kubernetes as CRDs (custom resource definition).
+  #   These can be queried with:
+  #     kubectl get ciliumid
+  # - "kvstore" stores identities in a kvstore, etcd or consul, that is
+  #   configured below. Cilium versions before 1.6 supported only the kvstore
+  #   backend. Upgrades from these older cilium versions should continue using
+  #   the kvstore by commenting out the identity-allocation-mode below, or
+  #   setting it to "kvstore".
+  # (default crd)
+  identity-allocation-mode: "{{ .IdentityAllocationMode }}"
+
+  # Time to wait before using new identity on endpoint identity change (default 5s)
+  identity-change-grace-period: "{{ .IdentityChangeGracePeriod }}"
+
+  # If you want to run cilium in debug mode change this value to true
+  debug: "{{ .Debug }}"
+
+  {{ if .EnablePrometheusMetrics }}
+  # If you want metrics enabled in all of your Cilium agents, set the port for
+  # which the Cilium agents will have their metrics exposed.
+  # This option deprecates the "prometheus-serve-addr" in the
+  # "cilium-metrics-config" ConfigMap
+  # NOTE that this will open the port on ALL nodes where Cilium pods are
+  # scheduled.
+  prometheus-serve-addr: ":{{ .AgentPrometheusPort }}"
+  operator-prometheus-serve-addr: ":6942"
+  enable-metrics: "true"
+  {{ end }}
+
+  {{ if .EnableEncryption }}
+  {{ if eq .EncryptionType "ipsec" }}
+  enable-ipsec: "true"
+  ipsec-key-file: /etc/ipsec/keys
+  {{ else if eq .EncryptionType "wireguard" }}
+  enable-wireguard: "true"
+  {{ end }}
+  {{ end }}
+
+  # Enable IPv4 addressing. If enabled, all endpoints are allocated an IPv4
+  # address.
+  enable-ipv4: "{{ not IsIPv6Only }}"
+  # Enable IPv6 addressing. If enabled, all endpoints are allocated an IPv6
+  # address.
+  enable-ipv6: "{{ IsIPv6Only }}"
+  # If you want cilium monitor to aggregate tracing for packets, set this level
+  # to "low", "medium", or "maximum". The higher the level, the fewer packets
+  # that will be seen in monitor output.
+  monitor-aggregation: "{{ .MonitorAggregation }}"
+  # ct-global-max-entries-* specifies the maximum number of connections
+  # supported across all endpoints, split by protocol: tcp or other. One pair
+  # of maps uses these values for IPv4 connections, and another pair of maps
+  # uses these values for IPv6 connections.
+  #
+  # If these values are modified, then during the next Cilium startup the
+  # tracking of ongoing connections may be disrupted. This may lead to brief
+  # policy drops or a change in loadbalancing decisions for a connection.
+  #
+  # For users upgrading from Cilium 1.2 or earlier, to minimize disruption
+  # during the upgrade process, comment out these options.
+  bpf-ct-global-tcp-max: "{{ .BPFCTGlobalTCPMax }}"
+  bpf-ct-global-any-max: "{{ .BPFCTGlobalAnyMax }}"
+
+  # BPF load balancing algorithm ("random", "maglev") (default "random")
+  bpf-lb-algorithm: "{{ .BPFLBAlgorithm }}"
+
+  # Maglev per service backend table size (parameter M) (default 16381)
+  bpf-lb-maglev-table-size: "{{ .BPFLBMaglevTableSize }}"
+
+  # bpf-nat-global-max specifies the maximum number of entries in the
+  # BPF NAT table. (default 524288)
+  bpf-nat-global-max: "{{ .BPFNATGlobalMax }}"
+
+  # bpf-neigh-global-max specifies the maximum number of entries in the
+  # BPF neighbor table. (default 524288)
+  bpf-neigh-global-max: "{{ .BPFNeighGlobalMax }}"
+
+  # bpf-policy-map-max specifies the maximum number of entries in endpoint
+  # policy map (per endpoint) (default 16384)
+  bpf-policy-map-max: "{{ .BPFPolicyMapMax }}"
+
+  # bpf-lb-map-max specifies the maximum number of entries in bpf lb service,
+  # backend and affinity maps. (default 65536)
+  bpf-lb-map-max: "{{ .BPFLBMapMax }}"
+
+  # bpf-lb-sock-hostns-only enables skipping socket LB for services when inside a pod namespace,
+  # in favor of service LB at the pod interface. Socket LB is still used when in the host namespace.
+  # Required by service mesh (e.g., Istio, Linkerd). (default false)
+  bpf-lb-sock-hostns-only: "{{ .BPFLBSockHostNSOnly }}"
+
+  {{ if .ChainingMode }}
+  cni-chaining-mode: "{{ .ChainingMode }}"
+  {{ end }}
+
+  # enable-bpf-masquerade enables masquerading packets from endpoints leaving
+  # the host with BPF instead of iptables. (default false)
+  enable-bpf-masquerade: "{{ and (WithDefaultBool .EnableBPFMasquerade false) (not IsIPv6Only) }}"
+
+  # Pre-allocation of map entries allows per-packet latency to be reduced, at
+  # the expense of up-front memory allocation for the entries in the maps. The
+  # default value below will minimize memory usage in the default installation;
+  # users who are sensitive to latency may consider setting this to "true".
+  #
+  # This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore
+  # this option and behave as though it is set to "true".
+  #
+  # If this value is modified, then during the next Cilium startup the restore
+  # of existing endpoints and tracking of ongoing connections may be disrupted.
+  # This may lead to policy drops or a change in loadbalancing decisions for a
+  # connection for some time. Endpoints may need to be recreated to restore
+  # connectivity.
+  #
+  # If this option is set to "false" during an upgrade from 1.3 or earlier to
+  # 1.4 or later, then it may cause one-time disruptions during the upgrade.
+  preallocate-bpf-maps: "{{- if .PreallocateBPFMaps -}}true{{- else -}}false{{- end -}}"
+  # Regular expression matching compatible Istio sidecar istio-proxy
+  # container image names
+  sidecar-istio-proxy-image: "{{ .SidecarIstioProxyImage }}"
+  # Encapsulation mode for communication between nodes
+  # Possible values:
+  #   - disabled
+  #   - vxlan (default)
+  #   - geneve
+  tunnel: "{{ .Tunnel }}"
+
+  # Name of the cluster. Only relevant when building a mesh of clusters.
+ cluster-name: "{{ .ClusterName }}" + + # DNS response code for rejecting DNS requests, + # available options are "nameError" and "refused" + tofqdns-dns-reject-response-code: "{{ .ToFQDNsDNSRejectResponseCode }}" + # This option is disabled by default starting from version 1.4.x in favor + # of a more powerful DNS proxy-based implementation, see [0] for details. + # Enable this option if you want to use FQDN policies but do not want to use + # the DNS proxy. + # + # To ease upgrade, users may opt to set this option to "true". + # Otherwise please refer to the Upgrade Guide [1] which explains how to + # prepare policy rules for upgrade. + # + # [0] http://docs.cilium.io/en/stable/policy/language/#dns-based + # [1] http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action + tofqdns-enable-poller: "{{- if .ToFQDNsEnablePoller -}}true{{- else -}}false{{- end -}}" + {{- if not (semverCompare ">=1.10.4 || ~1.9.10" $semver) }} + # wait-bpf-mount makes init container wait until bpf filesystem is mounted + wait-bpf-mount: "false" + {{- end }} + # Enable fetching of container-runtime specific metadata + # + # By default, the Kubernetes pod and namespace labels are retrieved and + # associated with endpoints for identification purposes. By integrating + # with the container runtime, container runtime specific labels can be + # retrieved, such labels will be prefixed with container: + # + # CAUTION: The container runtime labels can include information such as pod + # annotations which may result in each pod being associated a unique set of + # labels which can result in excessive security identities being allocated. + # Please review the labels filter when enabling container runtime labels. + # + # Supported values: + # - containerd + # - crio + # - docker + # - none + # - auto (automatically detect the container runtime) + # + masquerade: "{{ .Masquerade }}" + enable-ipv6-masquerade: "false" + install-iptables-rules: "{{ WithDefaultBool .InstallIptablesRules true }}" + auto-direct-node-routes: "{{ .AutoDirectNodeRoutes }}" + {{ if .EnableHostReachableServices }} + enable-host-reachable-services: "{{ .EnableHostReachableServices }}" + {{ end }} + enable-node-port: "{{ .EnableNodePort }}" + kube-proxy-replacement: "{{- if .EnableNodePort -}}strict{{- else -}}partial{{- end -}}" + + {{ with .IPAM }} + ipam: {{ . }} + {{ if eq . "eni" }} + enable-endpoint-routes: "true" + auto-create-cilium-node-resource: "true" + blacklist-conflicting-routes: "false" + {{ end }} + {{ end }} + + # Disables usage of CiliumEndpoint CRD + disable-endpoint-crd: "{{ .DisableEndpointCRD }}" + + # Enable connectivity health checking between virtual endpoints (default true) + enable-endpoint-health-checking: "{{ .EnableEndpointHealthChecking }}" + + # Enable use of remote node identity (default false) + enable-remote-node-identity: "{{ .EnableRemoteNodeIdentity }}" + + # enable-l7-proxy enables L7 proxy for L7 policy enforcement. (default true) + enable-l7-proxy: "{{ .EnableL7Proxy }}" + + cgroup-root: /run/cilium/cgroupv2 + + disable-cnp-status-updates: "{{ .DisableCNPStatusUpdates }}" + nodes-gc-interval: "5m0s" + + enable-service-topology: "{{ .EnableServiceTopology }}" + + {{ if WithDefaultBool .Hubble.Enabled false }} + # Enable Hubble gRPC service. + enable-hubble: "true" + # UNIX domain socket for Hubble server to listen to. + hubble-socket-path: "/var/run/cilium/hubble.sock" + # An additional address for Hubble server to listen to (e.g. ":4244"). 
+  hubble-listen-address: ":4244"
+  hubble-disable-tls: "false"
+  hubble-tls-cert-file: /var/lib/cilium/tls/hubble/tls.crt
+  hubble-tls-key-file: /var/lib/cilium/tls/hubble/tls.key
+  hubble-tls-client-ca-files: /var/lib/cilium/tls/hubble/ca.crt
+  {{ if .Hubble.Metrics }}
+  hubble-metrics-server: ":9091"
+  hubble-metrics:
+  {{- range .Hubble.Metrics }}
+    {{ . }}
+  {{- end }}
+  {{ end }}
+  {{ end }}
+
+{{ if WithDefaultBool .Hubble.Enabled false }}
+---
+# Source: cilium/templates/hubble-relay-configmap.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: hubble-relay-config
+  namespace: kube-system
+data:
+  config.yaml: |
+    peer-service: unix:///var/run/cilium/hubble.sock
+    listen-address: :4245
+
+    disable-server-tls: true
+
+    tls-client-cert-file: /var/lib/hubble-relay/tls/client.crt
+    tls-client-key-file: /var/lib/hubble-relay/tls/client.key
+    tls-hubble-server-ca-files: /var/lib/hubble-relay/tls/hubble-server-ca.crt
+
+{{ end }}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: cilium
+rules:
+- apiGroups:
+  - networking.k8s.io
+  resources:
+  - networkpolicies
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - discovery.k8s.io
+  resources:
+  - endpointslices
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - namespaces
+  - services
+  - pods
+  - endpoints
+  - nodes
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - nodes/status
+  verbs:
+  - patch
+- apiGroups:
+  - apiextensions.k8s.io
+  resources:
+  - customresourcedefinitions
+  verbs:
+  - create
+  - list
+  - watch
+  - update
+  - get
+- apiGroups:
+  - cilium.io
+  resources:
+  - ciliumnetworkpolicies
+  - ciliumnetworkpolicies/status
+  - ciliumclusterwidenetworkpolicies
+  - ciliumclusterwidenetworkpolicies/status
+  - ciliumendpoints
+  - ciliumendpoints/status
+  - ciliumnodes
+  - ciliumnodes/status
+  - ciliumidentities
+  - ciliumlocalredirectpolicies
+  - ciliumlocalredirectpolicies/status
+  - ciliumegressnatpolicies
+  verbs:
+  - '*'
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: cilium-operator
+rules:
+- apiGroups:
+  - ""
+  resources:
+  # to automatically delete [core|kube]dns pods so that they start being
+  # managed by Cilium
+  - pods
+  verbs:
+  - get
+  - list
+  - watch
+  - delete
+- apiGroups:
+  - ""
+  resources:
+  - nodes
+  verbs:
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  # To remove node taints
+  - nodes
+  # To set NetworkUnavailable false on startup
+  - nodes/status
+  verbs:
+  - patch
+- apiGroups:
+  - discovery.k8s.io
+  resources:
+  - endpointslices
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - services
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  # to perform LB IP allocation for BGP
+  - services/status
+  verbs:
+  - update
+- apiGroups:
+  - ""
+  resources:
+  # to perform the translation of a CNP that contains `ToGroup` to its endpoints
+  - services
+  - endpoints
+  # to check apiserver connectivity
+  - namespaces
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - cilium.io
+  resources:
+  - ciliumnetworkpolicies
+  - ciliumnetworkpolicies/status
+  - ciliumnetworkpolicies/finalizers
+  - ciliumclusterwidenetworkpolicies
+  - ciliumclusterwidenetworkpolicies/status
+  - ciliumclusterwidenetworkpolicies/finalizers
+  - ciliumendpoints
+  - ciliumendpoints/status
+  - ciliumendpoints/finalizers
+  - ciliumnodes
+  - ciliumnodes/status
+  - ciliumnodes/finalizers
+  - ciliumidentities
+  - ciliumidentities/status
+  - ciliumidentities/finalizers
+  - ciliumlocalredirectpolicies
+  - ciliumlocalredirectpolicies/status
+  - ciliumlocalredirectpolicies/finalizers
+  verbs:
+  - '*'
+- apiGroups:
+  - apiextensions.k8s.io
+  resources:
+  - customresourcedefinitions
+  verbs:
+  - create
+  - get
+  - list
+  - update
+  - watch
+- apiGroups:
+  - coordination.k8s.io
+  resources:
+  - leases
+  verbs:
+  - create
+  - get
+  - update
+{{ if WithDefaultBool .Hubble.Enabled false }}
+---
+# Source: cilium/templates/hubble-relay-clusterrole.yaml
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: hubble-relay
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - componentstatuses
+      - endpoints
+      - namespaces
+      - nodes
+      - pods
+      - services
+    verbs:
+      - get
+      - list
+      - watch
+{{ end }}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: cilium
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: cilium
+subjects:
+- kind: ServiceAccount
+  name: cilium
+  namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: cilium-operator
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: cilium-operator
+subjects:
+- kind: ServiceAccount
+  name: cilium-operator
+  namespace: kube-system
+{{ if WithDefaultBool .Hubble.Enabled false }}
+---
+# Source: cilium/templates/hubble-relay-clusterrolebinding.yaml
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: hubble-relay
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: hubble-relay
+subjects:
+- kind: ServiceAccount
+  namespace: kube-system
+  name: hubble-relay
+---
+# Source: cilium/templates/hubble-relay-service.yaml
+kind: Service
+apiVersion: v1
+metadata:
+  name: hubble-relay
+  namespace: kube-system
+  labels:
+    k8s-app: hubble-relay
+spec:
+  type: ClusterIP
+  selector:
+    k8s-app: hubble-relay
+  ports:
+  - protocol: TCP
+    port: 80
+    targetPort: 4245
+{{ end }}
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  labels:
+    k8s-app: cilium
+    kubernetes.io/cluster-service: "true"
+  name: cilium
+  namespace: kube-system
+spec:
+  selector:
+    matchLabels:
+      k8s-app: cilium
+      kubernetes.io/cluster-service: "true"
+  updateStrategy:
+    type: OnDelete
+  template:
+    metadata:
+      annotations:
+        # This annotation plus the CriticalAddonsOnly toleration marks
+        # cilium as a critical pod in the cluster, which ensures cilium
+        # gets priority scheduling.
+        # https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/
+        scheduler.alpha.kubernetes.io/critical-pod: ""
+        {{ if .EnablePrometheusMetrics }}
+        # Annotation required for prometheus auto-discovery scraping
+        # https://docs.cilium.io/en/v1.9/operations/metrics/#installation
+        prometheus.io/scrape: "true"
+        prometheus.io/port: "{{ .AgentPrometheusPort }}"
+        {{ end }}
+{{- with .AgentPodAnnotations }}
+        {{- . | nindent 8 }}
+{{- end }}
+      labels:
+        k8s-app: cilium
+        kubernetes.io/cluster-service: "true"
+    spec:
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+            - matchExpressions:
+              - key: kubernetes.io/os
+                operator: In
+                values:
+                - linux
+      containers:
+      - args:
+        - --config-dir=/tmp/cilium/config-map
+        command:
+        - cilium-agent
+        startupProbe:
+          httpGet:
+            host: '{{- if IsIPv6Only -}}::1{{- else -}}127.0.0.1{{- end -}}'
+            path: /healthz
+            port: 9876
+            scheme: HTTP
+            httpHeaders:
+            - name: "brief"
+              value: "true"
+          failureThreshold: 105
+          periodSeconds: 2
+          successThreshold: 1
+        livenessProbe:
+          httpGet:
+            host: '{{- if IsIPv6Only -}}::1{{- else -}}127.0.0.1{{- end -}}'
+            path: /healthz
+            port: 9876
+            scheme: HTTP
+            httpHeaders:
+            - name: "brief"
+              value: "true"
+          failureThreshold: 10
+          periodSeconds: 30
+          successThreshold: 1
+          timeoutSeconds: 5
+        resources:
+          requests:
+            cpu: {{ or .CPURequest "25m" }}
+            memory: {{ or .MemoryRequest "128Mi" }}
+        readinessProbe:
+          httpGet:
+            host: '{{- if IsIPv6Only -}}::1{{- else -}}127.0.0.1{{- end -}}'
+            path: /healthz
+            port: 9876
+            scheme: HTTP
+            httpHeaders:
+            - name: "brief"
+              value: "true"
+          failureThreshold: 3
+          initialDelaySeconds: 5
+          periodSeconds: 30
+          successThreshold: 1
+          timeoutSeconds: 5
+        env:
+        - name: K8S_NODE_NAME
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: spec.nodeName
+        - name: CILIUM_K8S_NAMESPACE
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: metadata.namespace
+        - name: CILIUM_CLUSTERMESH_CONFIG
+          value: /var/lib/cilium/clustermesh/
+        - name: CILIUM_CNI_CHAINING_MODE
+          valueFrom:
+            configMapKeyRef:
+              key: cni-chaining-mode
+              name: cilium-config
+              optional: true
+        - name: CILIUM_CUSTOM_CNI_CONF
+          valueFrom:
+            configMapKeyRef:
+              key: custom-cni-conf
+              name: cilium-config
+              optional: true
+        - name: KUBERNETES_SERVICE_HOST
+          value: "{{ $.MasterInternalName }}"
+        - name: KUBERNETES_SERVICE_PORT
+          value: "443"
+        {{ with .EnablePolicy }}
+        - name: CILIUM_ENABLE_POLICY
+          value: {{ .
}} + {{ end }} + image: "quay.io/cilium/cilium:{{ .Version }}" + imagePullPolicy: IfNotPresent + lifecycle: + postStart: + exec: + command: + - /cni-install.sh + - --cni-exclusive=true + preStop: + exec: + command: + - /cni-uninstall.sh + name: cilium-agent + {{ if or .EnablePrometheusMetrics .Hubble.Metrics }} + ports: + {{ if .EnablePrometheusMetrics }} + - containerPort: {{ .AgentPrometheusPort }} + name: prometheus + protocol: TCP + {{ end }} + {{- if .Hubble.Metrics }} + - containerPort: 9091 + hostPort: 9091 + name: hubble-metrics + protocol: TCP + {{- end }} + {{ end }} + + securityContext: + privileged: true + volumeMounts: + - mountPath: /sys/fs/bpf + name: bpf-maps + {{- if semverCompare ">=1.10.4 || ~1.9.10" $semver }} + mountPropagation: Bidirectional + {{- end }} + - mountPath: /var/run/cilium + name: cilium-run + - mountPath: /host/opt/cni/bin + name: cni-path + - mountPath: /host/etc/cni/net.d + name: etc-cni-netd +{{ if .EtcdManaged }} + - mountPath: /var/lib/etcd-config + name: etcd-config-path + readOnly: true + - mountPath: /var/lib/etcd-secrets + name: etcd-secrets + readOnly: true +{{ end }} + - mountPath: /var/lib/cilium/clustermesh + name: clustermesh-secrets + readOnly: true + - mountPath: /tmp/cilium/config-map + name: cilium-config-path + readOnly: true + # Needed to be able to load kernel modules + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock +{{ if WithDefaultBool .Hubble.Enabled false }} + - mountPath: /var/lib/cilium/tls/hubble + name: hubble-tls + readOnly: true +{{ end }} +{{ if CiliumSecret }} + - mountPath: /etc/ipsec + name: cilium-ipsec-secrets +{{ end }} + hostNetwork: true + initContainers: + - command: + - /init-container.sh + env: + - name: CILIUM_ALL_STATE + valueFrom: + configMapKeyRef: + key: clean-cilium-state + name: cilium-config + optional: true + - name: CILIUM_BPF_STATE + valueFrom: + configMapKeyRef: + key: clean-cilium-bpf-state + name: cilium-config + optional: true + {{- if not (semverCompare ">=1.10.4 || ~1.9.10" $semver) }} + - name: CILIUM_WAIT_BPF_MOUNT + valueFrom: + configMapKeyRef: + key: wait-bpf-mount + name: cilium-config + optional: true + {{- end }} + image: "quay.io/cilium/cilium:{{ .Version }}" + imagePullPolicy: IfNotPresent + name: clean-cilium-state + securityContext: + privileged: true + volumeMounts: + - mountPath: /sys/fs/bpf + name: bpf-maps + {{- if not (semverCompare ">=1.10.4 || ~1.9.10" $semver) }} + mountPropagation: HostToContainer + {{- end }} + # Required to mount cgroup filesystem from the host to cilium agent pod + - mountPath: /run/cilium/cgroupv2 + name: cilium-cgroup + mountPropagation: HostToContainer + - mountPath: /var/run/cilium + name: cilium-run + resources: + requests: + cpu: 100m + memory: 100Mi + limits: + memory: 100Mi + restartPolicy: Always + priorityClassName: system-node-critical + serviceAccount: cilium + serviceAccountName: cilium + terminationGracePeriodSeconds: 1 + tolerations: + - operator: Exists + volumes: + # To keep state between restarts / upgrades + - hostPath: + path: /var/run/cilium + type: DirectoryOrCreate + name: cilium-run + # To keep state between restarts / upgrades for bpf maps + - hostPath: + path: /sys/fs/bpf + type: DirectoryOrCreate + name: bpf-maps + # To install cilium cni plugin in the host + - hostPath: + path: /opt/cni/bin + type: DirectoryOrCreate + name: cni-path + # To keep state between restarts / upgrades for cgroup2 filesystem + - hostPath: + path: /run/cilium/cgroupv2 + type: 
Directory + name: cilium-cgroup + # To install cilium cni configuration in the host + - hostPath: + path: /etc/cni/net.d + type: DirectoryOrCreate + name: etc-cni-netd + # To be able to load kernel modules + - hostPath: + path: /lib/modules + name: lib-modules + # To access iptables concurrently with other processes (e.g. kube-proxy) + - hostPath: + path: /run/xtables.lock + type: FileOrCreate + name: xtables-lock + # To read the clustermesh configuration +{{- if .EtcdManaged }} + # To read the etcd config stored in config maps + - configMap: + defaultMode: 420 + items: + - key: etcd-config + path: etcd.config + name: cilium-config + name: etcd-config-path + # To read the Cilium etcd secrets in case the user might want to use TLS + - name: etcd-secrets + hostPath: + path: /etc/kubernetes/pki/cilium + type: Directory +{{- end }} + - name: clustermesh-secrets + secret: + defaultMode: 420 + optional: true + secretName: cilium-clustermesh + # To read the configuration from the config map + - configMap: + name: cilium-config + name: cilium-config-path +{{ if CiliumSecret }} + - name: cilium-ipsec-secrets + secret: + secretName: cilium-ipsec-keys +{{ end }} +{{ if WithDefaultBool .Hubble.Enabled false }} + - name: hubble-tls + secret: + secretName: hubble-server-certs + optional: true +{{ end }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + io.cilium/app: operator + name: cilium-operator + name: cilium-operator + namespace: kube-system +spec: + replicas: {{ ControlPlaneControllerReplicas false }} + selector: + matchLabels: + io.cilium/app: operator + name: cilium-operator + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + io.cilium/app: operator + name: cilium-operator + spec: + nodeSelector: null + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + containers: + - args: + - "--config-dir=/tmp/cilium/config-map" + - "--debug=$(CILIUM_DEBUG)" + - "--eni-tags={{ CloudLabels }}" + command: + - cilium-operator + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CILIUM_DEBUG + valueFrom: + configMapKeyRef: + key: debug + name: cilium-config + optional: true + - name: KUBERNETES_SERVICE_HOST + value: "{{ $.MasterInternalName }}" + - name: KUBERNETES_SERVICE_PORT + value: "443" + image: "quay.io/cilium/operator:{{ .Version }}" + imagePullPolicy: IfNotPresent + name: cilium-operator + {{ if .EnablePrometheusMetrics }} + ports: + - containerPort: 6942 + hostPort: 6942 + name: prometheus + protocol: TCP + {{ end }} + resources: + requests: + cpu: {{ or .CPURequest "25m" }} + memory: {{ or .MemoryRequest "128Mi" }} + livenessProbe: + httpGet: + host: '127.0.0.1' + path: /healthz + port: 9234 + scheme: HTTP + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 3 + volumeMounts: + - mountPath: /tmp/cilium/config-map + name: cilium-config-path + readOnly: true +{{- if .EtcdManaged }} + - mountPath: /var/lib/etcd-config + name: etcd-config-path + readOnly: true + - mountPath: /var/lib/etcd-secrets + name: etcd-secrets + readOnly: true +{{- end }} + hostNetwork: true + restartPolicy: Always + priorityClassName: system-cluster-critical + 
serviceAccount: cilium-operator + serviceAccountName: cilium-operator + tolerations: + - operator: Exists + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: "topology.kubernetes.io/zone" + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + io.cilium/app: operator + name: cilium-operator + - maxSkew: 1 + topologyKey: "kubernetes.io/hostname" + whenUnsatisfiable: DoNotSchedule + labelSelector: + matchLabels: + io.cilium/app: operator + name: cilium-operator + volumes: + # To read the configuration from the config map + - configMap: + name: cilium-config + name: cilium-config-path +{{- if .EtcdManaged }} + # To read the etcd config stored in config maps + - configMap: + defaultMode: 420 + items: + - key: etcd-config + path: etcd.config + name: cilium-config + name: etcd-config-path + # To read the k8s etcd secrets in case the user might want to use TLS + - name: etcd-secrets + hostPath: + path: /etc/kubernetes/pki/cilium + type: Directory +{{- end }} +{{ if WithDefaultBool .Hubble.Enabled false }} +--- +# Source: cilium/charts/hubble-relay/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: hubble-relay + labels: + k8s-app: hubble-relay + namespace: kube-system +spec: + replicas: 2 + selector: + matchLabels: + k8s-app: hubble-relay + strategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + k8s-app: hubble-relay + spec: + containers: + - name: hubble-relay + image: "quay.io/cilium/hubble-relay:{{ .Version }}" + imagePullPolicy: IfNotPresent + command: + - hubble-relay + args: + - "serve" + - "--peer-service=unix:///var/run/cilium/hubble.sock" + - "--listen-address=:4245" + env: + # unfortunately, the addon CAs use only CN + - name: GODEBUG + value: x509ignoreCN=0 + ports: + - name: grpc + containerPort: 4245 + readinessProbe: + tcpSocket: + port: grpc + livenessProbe: + tcpSocket: + port: grpc + volumeMounts: + - mountPath: /var/run/cilium + name: hubble-sock-dir + readOnly: true + - mountPath: /etc/hubble-relay + name: config + readOnly: true + - mountPath: /var/lib/hubble-relay/tls + name: tls + readOnly: true + restartPolicy: Always + serviceAccount: hubble-relay + serviceAccountName: hubble-relay + terminationGracePeriodSeconds: 0 + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: "topology.kubernetes.io/zone" + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + k8s-app: hubble-relay + - maxSkew: 1 + topologyKey: "kubernetes.io/hostname" + whenUnsatisfiable: DoNotSchedule + labelSelector: + matchLabels: + k8s-app: hubble-relay + volumes: + - hostPath: + path: /var/run/cilium + type: Directory + name: hubble-sock-dir + - configMap: + name: hubble-relay-config + items: + - key: config.yaml + path: config.yaml + name: config + - projected: + sources: + - secret: + name: hubble-relay-client-certs + items: + - key: tls.crt + path: client.crt + - key: tls.key + path: client.key + - key: ca.crt + path: hubble-server-ca.crt + name: tls +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + labels: + k8s-app: cilium + name: hubble-server-certs + namespace: kube-system +spec: + dnsNames: + - "*.default.hubble-grpc.cilium.io" + issuerRef: + kind: Issuer + name: networking.cilium.io + secretName: hubble-server-certs +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + labels: + k8s-app: cilium + name: hubble-relay-client-certs + namespace: kube-system +spec: + dnsNames: + - "hubble-relay-client" + issuerRef: + kind: Issuer + name: 
networking.cilium.io + usages: + - client auth + secretName: hubble-relay-client-certs +{{ end }} +{{ end }} +--- +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: cilium-operator + namespace: kube-system + labels: + io.cilium/app: operator + name: cilium-operator +spec: + selector: + matchLabels: + io.cilium/app: operator + name: cilium-operator + maxUnavailable: 1 diff --git a/upup/pkg/fi/cloudup/bootstrapchannelbuilder/cilium.go b/upup/pkg/fi/cloudup/bootstrapchannelbuilder/cilium.go index ba4828bb2d..f36e9fc2f5 100644 --- a/upup/pkg/fi/cloudup/bootstrapchannelbuilder/cilium.go +++ b/upup/pkg/fi/cloudup/bootstrapchannelbuilder/cilium.go @@ -64,11 +64,28 @@ func addCiliumAddon(b *BootstrapChannelBuilder, addons *AddonList) error { } addons.Add(addon) } - } else if ver.Minor == 10 || ver.Minor == 11 { + } else if ver.Minor == 10 || (ver.Minor == 11 && ver.Patch < 5) { { id := "k8s-1.16" location := key + "/" + id + "-v1.10.yaml" + addon := &api.AddonSpec{ + Name: fi.String(key), + Selector: networkingSelector(), + Manifest: fi.String(location), + Id: id, + NeedsRollingUpdate: "all", + } + if cilium.Hubble != nil && fi.BoolValue(cilium.Hubble.Enabled) { + addon.NeedsPKI = true + } + addons.Add(addon) + } + } else if ver.Minor == 11 { + { + id := "k8s-1.16" + location := key + "/" + id + "-v1.11.yaml" + addon := &api.AddonSpec{ Name: fi.String(key), Selector: networkingSelector(), diff --git a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/cilium/manifest.yaml b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/cilium/manifest.yaml index 1537a1c04e..d8a5f0026f 100644 --- a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/cilium/manifest.yaml +++ b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/cilium/manifest.yaml @@ -53,8 +53,8 @@ spec: k8s-addon: storage-aws.addons.k8s.io version: 9.99.0 - id: k8s-1.16 - manifest: networking.cilium.io/k8s-1.16-v1.10.yaml - manifestHash: 86abb46767e969d69039cc0b1205f37259d3c02969c655d3c0acb300d9deb5ea + manifest: networking.cilium.io/k8s-1.16-v1.11.yaml + manifestHash: 1a6377642426fac2aee248a206b8bde8fd58dda23d1d2cc138ed05a0ea2469ea name: networking.cilium.io needsRollingUpdate: all selector: diff --git a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/metrics-server/insecure-1.19/manifest.yaml b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/metrics-server/insecure-1.19/manifest.yaml index fffb96621b..3ec9045d60 100644 --- a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/metrics-server/insecure-1.19/manifest.yaml +++ b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/metrics-server/insecure-1.19/manifest.yaml @@ -60,8 +60,8 @@ spec: k8s-addon: storage-aws.addons.k8s.io version: 9.99.0 - id: k8s-1.16 - manifest: networking.cilium.io/k8s-1.16-v1.10.yaml - manifestHash: 86abb46767e969d69039cc0b1205f37259d3c02969c655d3c0acb300d9deb5ea + manifest: networking.cilium.io/k8s-1.16-v1.11.yaml + manifestHash: 1a6377642426fac2aee248a206b8bde8fd58dda23d1d2cc138ed05a0ea2469ea name: networking.cilium.io needsRollingUpdate: all selector: diff --git a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/metrics-server/secure-1.19/manifest.yaml b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/metrics-server/secure-1.19/manifest.yaml index 040ed11972..a496d2f23c 100644 --- a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/metrics-server/secure-1.19/manifest.yaml +++ b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/metrics-server/secure-1.19/manifest.yaml @@ -67,8 +67,8 @@ spec: k8s-addon: storage-aws.addons.k8s.io version: 9.99.0 
- id: k8s-1.16 - manifest: networking.cilium.io/k8s-1.16-v1.10.yaml - manifestHash: 86abb46767e969d69039cc0b1205f37259d3c02969c655d3c0acb300d9deb5ea + manifest: networking.cilium.io/k8s-1.16-v1.11.yaml + manifestHash: 1a6377642426fac2aee248a206b8bde8fd58dda23d1d2cc138ed05a0ea2469ea name: networking.cilium.io needsRollingUpdate: all selector:
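
Reviewer note: the net effect of the bootstrapchannelbuilder change above is a version gate. Cilium v1.10.x and v1.11.0 through v1.11.4 keep shipping the existing k8s-1.16-v1.10.yaml addon manifest, while v1.11.5 and later v1.11 patch releases switch to the new k8s-1.16-v1.11.yaml (which adds nodes-gc-interval and trims the finalizers/pods RBAC). Below is a minimal standalone sketch of that gate, assuming blang/semver-style version fields as kops uses elsewhere; the manifestFor helper and the main function are illustrative only, not kops API.

package main

import (
	"fmt"

	"github.com/blang/semver/v4"
)

// manifestFor mirrors the gate in addCiliumAddon: v1.10.x and v1.11.0 through
// v1.11.4 stay on the v1.10 manifest; later v1.11 releases get the v1.11
// manifest. (Hypothetical helper for illustration; the real code builds an
// api.AddonSpec rather than returning a string.)
func manifestFor(ver semver.Version) string {
	switch {
	case ver.Minor == 10 || (ver.Minor == 11 && ver.Patch < 5):
		return "networking.cilium.io/k8s-1.16-v1.10.yaml"
	case ver.Minor == 11:
		return "networking.cilium.io/k8s-1.16-v1.11.yaml"
	default:
		// Other minors are handled by separate branches in the real builder.
		return ""
	}
}

func main() {
	for _, s := range []string{"v1.10.8", "v1.11.4", "v1.11.5"} {
		ver, err := semver.ParseTolerant(s)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s -> %s\n", s, manifestFor(ver))
	}
}

Running this prints the v1.10 manifest for v1.11.4 and the v1.11 manifest for v1.11.5, matching the bootstrap-channel fixture updates in this patch; clusters pinned to older 1.11 patch releases keep their current manifest until they adopt v1.11.5.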