From 4dfaba524237f1add33f8c28d834da5d17089a35 Mon Sep 17 00:00:00 2001
From: zadjadr
Date: Tue, 8 Aug 2023 12:42:27 +0200
Subject: [PATCH 1/9] Bump Cilium to v1.14.2

---
 pkg/apis/kops/validation/validation.go        |  13 +-
 pkg/apis/kops/validation/validation_test.go   |   6 +-
 pkg/model/components/cilium.go                |   2 +-
 ....template => k8s-1.16-v1.14.yaml.template} | 797 ++++++++++++++----
 .../cloudup/bootstrapchannelbuilder/cilium.go |   2 +-
 5 files changed, 625 insertions(+), 195 deletions(-)
 rename upup/models/cloudup/resources/addons/networking.cilium.io/{k8s-1.16-v1.13.yaml.template => k8s-1.16-v1.14.yaml.template} (64%)

diff --git a/pkg/apis/kops/validation/validation.go b/pkg/apis/kops/validation/validation.go
index c1b1613345..01f77c7dd9 100644
--- a/pkg/apis/kops/validation/validation.go
+++ b/pkg/apis/kops/validation/validation.go
@@ -1244,8 +1244,8 @@ func validateNetworkingCilium(cluster *kops.Cluster, v *kops.CiliumNetworkingSpe
 		allErrs = append(allErrs, field.Invalid(versionFld, v.Version, "Could not parse as semantic version"))
 	}
 
-	if version.Minor != 13 {
-		allErrs = append(allErrs, field.Invalid(versionFld, v.Version, "Only version 1.13 is supported"))
+	if version.Minor != 14 {
+		allErrs = append(allErrs, field.Invalid(versionFld, v.Version, "Only version 1.14 is supported"))
 	}
 
 	if v.Hubble != nil && fi.ValueOf(v.Hubble.Enabled) {
@@ -1293,15 +1293,6 @@ func validateNetworkingCilium(cluster *kops.Cluster, v *kops.CiliumNetworkingSpe
 	}
 
 	allErrs = append(allErrs, IsValidValue(fldPath.Child("encryptionType"), &v.EncryptionType, []kops.CiliumEncryptionType{kops.CiliumEncryptionTypeIPSec, kops.CiliumEncryptionTypeWireguard})...)
-
-	if v.EncryptionType == "wireguard" {
-		// Cilium with Wireguard integration follow-up --> https://github.com/cilium/cilium/issues/15462.
-		// The following rule of validation should be deleted as this combination
-		// will be supported on future releases of Cilium (>= v1.11.0).
- if fi.ValueOf(v.EnableL7Proxy) { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("enableL7Proxy"), "L7 proxy cannot be enabled if wireguard is enabled.")) - } - } } if fi.ValueOf(v.EnableL7Proxy) && v.InstallIptablesRules != nil && !*v.InstallIptablesRules { diff --git a/pkg/apis/kops/validation/validation_test.go b/pkg/apis/kops/validation/validation_test.go index 279c86f7f9..537994f9e8 100644 --- a/pkg/apis/kops/validation/validation_test.go +++ b/pkg/apis/kops/validation/validation_test.go @@ -963,7 +963,7 @@ func Test_Validate_Cilium(t *testing.T) { }, { Cilium: kops.CiliumNetworkingSpec{ - Version: "v1.13.5", + Version: "v1.14.2", Ingress: &kops.CiliumIngressSpec{ Enabled: fi.PtrTo(true), DefaultLoadBalancerMode: "bad-value", @@ -973,7 +973,7 @@ func Test_Validate_Cilium(t *testing.T) { }, { Cilium: kops.CiliumNetworkingSpec{ - Version: "v1.13.5", + Version: "v1.14.2", Ingress: &kops.CiliumIngressSpec{ Enabled: fi.PtrTo(true), DefaultLoadBalancerMode: "dedicated", @@ -982,7 +982,7 @@ func Test_Validate_Cilium(t *testing.T) { }, { Cilium: kops.CiliumNetworkingSpec{ - Version: "v1.13.5", + Version: "v1.14.2", Hubble: &kops.HubbleSpec{ Enabled: fi.PtrTo(true), }, diff --git a/pkg/model/components/cilium.go b/pkg/model/components/cilium.go index f7b8274e56..851e0b719b 100644 --- a/pkg/model/components/cilium.go +++ b/pkg/model/components/cilium.go @@ -40,7 +40,7 @@ func (b *CiliumOptionsBuilder) BuildOptions(o interface{}) error { } if c.Version == "" { - c.Version = "v1.13.5" + c.Version = "v1.14.2" } if c.EnableEndpointHealthChecking == nil { diff --git a/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.13.yaml.template b/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.14.yaml.template similarity index 64% rename from upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.13.yaml.template rename to upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.14.yaml.template index a6df0ca642..0f1a163613 100644 --- a/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.13.yaml.template +++ b/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.14.yaml.template @@ -1,6 +1,7 @@ {{ with .Networking.Cilium }} {{ $semver := (trimPrefix "v" .Version) }} {{ $healthPort := (ternary 9879 9876 (semverCompare ">=1.11.6" $semver)) }} +{{ $operatorHealthPort := 9234 }} {{- if CiliumSecret }} apiVersion: v1 kind: Secret @@ -190,7 +191,13 @@ data: # - disabled # - vxlan (default) # - geneve - tunnel: "{{ .Tunnel }}" + {{ if eq .Tunnel "disabled" }} + # This option enables native-routing mode, in place of tunnel=disabled, now deprecated. + routing-mode: "native" + {{ else }} + routing-mode: "tunnel" + tunnel-protocol: "{{ .Tunnel }}" + {{ end }} # Name of the cluster. Only relevant when building a mesh of clusters. 
cluster-name: "{{ .ClusterName }}" @@ -200,6 +207,10 @@ data: cluster-id: "{{ .ClusterID }}" {{ end }} + remove-cilium-node-taints: "true" + set-cilium-node-taints: "true" + set-cilium-is-up-condition: "true" + # DNS response code for rejecting DNS requests, # available options are "nameError" and "refused" tofqdns-dns-reject-response-code: "{{ .ToFQDNsDNSRejectResponseCode }}" @@ -246,7 +257,7 @@ data: enable-host-reachable-services: "{{ .EnableHostReachableServices }}" {{ end }} enable-node-port: "{{ .EnableNodePort }}" - kube-proxy-replacement: "{{- if .EnableNodePort -}}strict{{- else -}}partial{{- end -}}" + kube-proxy-replacement: "{{- if .EnableNodePort -}}true{{- else -}}false{{- end -}}" {{ with .IPAM }} ipam: {{ . }} @@ -305,6 +316,11 @@ data: ingress-lb-annotation-prefixes: "{{ .Ingress.LoadBalancerAnnotationPrefixes }}" {{ end }} {{ end }} + + # Tell the agent to generate and write a CNI configuration file + write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist + cni-exclusive: "true" + cni-log-file: "/var/run/cilium/cilium-cni.log" {{ if WithDefaultBool .Hubble.Enabled false }} # Enable Hubble gRPC service. @@ -336,21 +352,45 @@ metadata: namespace: kube-system data: config.yaml: | - peer-service: unix:///var/run/cilium/hubble.sock + cluster-name: "{{ .ClusterName }}" + peer-service: "hubble-peer.kube-system.svc.cluster.local:443" listen-address: :4245 + gops: true + gops-port: "9893" disable-server-tls: true tls-client-cert-file: /var/lib/hubble-relay/tls/client.crt tls-client-key-file: /var/lib/hubble-relay/tls/client.key tls-hubble-server-ca-files: /var/lib/hubble-relay/tls/hubble-server-ca.crt - +--- +# Source: cilium/templates/hubble/peer-service.yaml +apiVersion: v1 +kind: Service +metadata: + name: hubble-peer + namespace: kube-system + labels: + k8s-app: cilium + app.kubernetes.io/part-of: cilium + app.kubernetes.io/name: hubble-peer +spec: + selector: + k8s-app: cilium + ports: + - name: peer-service + port: 443 + protocol: TCP + targetPort: 4244 + internalTrafficPolicy: Local {{ end }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: cilium + labels: + app.kubernetes.io/part-of: cilium rules: - apiGroups: - networking.k8s.io @@ -387,6 +427,9 @@ rules: verbs: - list - watch + # This is used when validating policies in preflight. This will need to stay + # until we figure out how to avoid "get" inside the preflight, and then + # should be removed ideally. 
- get - apiGroups: - cilium.io @@ -396,7 +439,6 @@ rules: - ciliumclusterwideenvoyconfigs - ciliumclusterwidenetworkpolicies - ciliumegressgatewaypolicies - - ciliumegressnatpolicies - ciliumendpoints - ciliumendpointslices - ciliumenvoyconfigs @@ -404,6 +446,10 @@ rules: - ciliumlocalredirectpolicies - ciliumnetworkpolicies - ciliumnodes + - ciliumnodeconfigs + - ciliumcidrgroups + - ciliuml2announcementpolicies + - ciliumpodippools verbs: - list - watch @@ -444,6 +490,7 @@ rules: - ciliumclusterwidenetworkpolicies/status - ciliumendpoints/status - ciliumendpoints + - ciliuml2announcementpolicies/status verbs: - patch --- @@ -451,6 +498,8 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: cilium-operator + labels: + app.kubernetes.io/part-of: cilium rules: - apiGroups: - "" @@ -460,6 +509,25 @@ rules: - get - list - watch + # to automatically delete [core|kube]dns pods so that are starting to being + # managed by Cilium + - delete +- apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch +- apiGroups: + - "" + resources: + # To remove node taints + - nodes + # To set NetworkUnavailable false on startup + - nodes/status + verbs: + - patch - apiGroups: - discovery.k8s.io resources: @@ -471,8 +539,18 @@ rules: - apiGroups: - "" resources: - - nodes + # to perform LB IP allocation for BGP + - services/status verbs: + - update + - patch +- apiGroups: + - "" + resources: + # to check apiserver connectivity + - namespaces + verbs: + - get - list - watch - apiGroups: @@ -481,8 +559,6 @@ rules: # to perform the translation of a CNP that contains `ToGroup` to its endpoints - services - endpoints - # to check apiserver connectivity - - namespaces verbs: - get - list @@ -580,7 +656,6 @@ rules: - ciliumclusterwideenvoyconfigs.cilium.io - ciliumclusterwidenetworkpolicies.cilium.io - ciliumegressgatewaypolicies.cilium.io - - ciliumegressnatpolicies.cilium.io - ciliumendpoints.cilium.io - ciliumendpointslices.cilium.io - ciliumenvoyconfigs.cilium.io @@ -589,20 +664,37 @@ rules: - ciliumlocalredirectpolicies.cilium.io - ciliumnetworkpolicies.cilium.io - ciliumnodes.cilium.io + - ciliumnodeconfigs.cilium.io + - ciliumcidrgroups.cilium.io + - ciliuml2announcementpolicies.cilium.io + - ciliumpodippools.cilium.io - apiGroups: - cilium.io resources: - ciliumloadbalancerippools + - ciliumpodippools verbs: - get - list - watch +- apiGroups: + - cilium.io + resources: + - ciliumpodippools + verbs: + - create - apiGroups: - cilium.io resources: - ciliumloadbalancerippools/status verbs: - patch +# For cilium-operator running in HA mode. +# +# Cilium operator running in HA mode requires the use of ResourceLock for Leader Election +# between multiple running instances. +# The preferred way of doing this is to use LeasesResourceLock as edits to Leases are less +# common and fewer objects in the cluster watch "all Leases". 
- apiGroups: - coordination.k8s.io resources: @@ -633,27 +725,65 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: cilium + labels: + app.kubernetes.io/part-of: cilium roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: cilium subjects: - kind: ServiceAccount - name: cilium + name: "cilium" namespace: kube-system --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: cilium-operator + labels: + app.kubernetes.io/part-of: cilium roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: cilium-operator subjects: - kind: ServiceAccount - name: cilium-operator + name: "cilium-operator" namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: cilium-config-agent + namespace: kube-system + labels: + app.kubernetes.io/part-of: cilium +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch +--- +# Source: cilium/templates/cilium-agent/rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cilium-config-agent + namespace: kube-system + labels: + app.kubernetes.io/part-of: cilium +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cilium-config-agent +subjects: + - kind: ServiceAccount + name: "cilium" + namespace: kube-system {{ if WithDefaultBool .Ingress.Enabled false }} --- # Source: cilium/templates/cilium-agent/role.yaml @@ -674,7 +804,6 @@ rules: - list - watch --- -# Source: cilium/templates/cilium-agent/rolebinding.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: @@ -779,6 +908,7 @@ metadata: namespace: kube-system labels: k8s-app: hubble-relay + app.kubernetes.io/part-of: cilium spec: type: ClusterIP selector: @@ -792,21 +922,32 @@ spec: apiVersion: apps/v1 kind: DaemonSet metadata: + name: cilium + namespace: kube-system labels: k8s-app: cilium kubernetes.io/cluster-service: "true" - name: cilium - namespace: kube-system + app.kubernetes.io/name: cilium-agent + app.kubernetes.io/part-of: cilium spec: selector: matchLabels: k8s-app: cilium kubernetes.io/cluster-service: "true" updateStrategy: - type: OnDelete + rollingUpdate: + maxUnavailable: 2 + type: RollingUpdate template: metadata: annotations: + # Set app AppArmor's profile to "unconfined". The value of this annotation + # can be modified as long users know which profiles they have available + # in AppArmor. 
+ container.apparmor.security.beta.kubernetes.io/cilium-agent: "unconfined" + container.apparmor.security.beta.kubernetes.io/clean-cilium-state: "unconfined" + container.apparmor.security.beta.kubernetes.io/mount-cgroup: "unconfined" + container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites: "unconfined" {{ if .EnablePrometheusMetrics }} # Annotation required for prometheus auto-discovery scraping # https://docs.cilium.io/en/v1.9/operations/metrics/#installation @@ -819,21 +960,17 @@ spec: labels: k8s-app: cilium kubernetes.io/cluster-service: "true" + app.kubernetes.io/name: cilium-agent + app.kubernetes.io/part-of: cilium spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/os - operator: In - values: - - linux containers: - - args: - - --config-dir=/tmp/cilium/config-map + - name: cilium-agent + image: "{{ or .Registry "quay.io" }}/cilium/cilium:{{ .Version }}" + imagePullPolicy: IfNotPresent command: - cilium-agent + args: + - --config-dir=/tmp/cilium/config-map startupProbe: httpGet: host: '{{- if IsIPv6Only -}}::1{{- else -}}127.0.0.1{{- end -}}' @@ -845,7 +982,7 @@ spec: value: "true" failureThreshold: 105 periodSeconds: 2 - successThreshold: + successThreshold: 1 livenessProbe: httpGet: host: '{{- if IsIPv6Only -}}::1{{- else -}}127.0.0.1{{- end -}}' @@ -855,14 +992,10 @@ spec: httpHeaders: - name: "brief" value: "true" - failureThreshold: 10 periodSeconds: 30 successThreshold: 1 + failureThreshold: 10 timeoutSeconds: 5 - resources: - requests: - cpu: {{ or .CPURequest "25m" }} - memory: {{ or .MemoryRequest "128Mi" }} readinessProbe: httpGet: host: '{{- if IsIPv6Only -}}::1{{- else -}}127.0.0.1{{- end -}}' @@ -872,10 +1005,9 @@ spec: httpHeaders: - name: "brief" value: "true" - failureThreshold: 3 - initialDelaySeconds: 5 periodSeconds: 30 successThreshold: 1 + failureThreshold: 3 timeoutSeconds: 5 env: - name: K8S_NODE_NAME @@ -910,21 +1042,47 @@ spec: - name: CILIUM_ENABLE_POLICY value: {{ . }} {{ end }} - image: "{{ or .Registry "quay.io" }}/cilium/cilium:{{ .Version }}" - imagePullPolicy: IfNotPresent lifecycle: + {{ if eq .IPAM "eni" }} postStart: exec: command: - - /cni-install.sh - - --cni-exclusive=true + - "bash" + - "-c" + - | + set -o errexit + set -o pipefail + set -o nounset + + # When running in AWS ENI mode, it's likely that 'aws-node' has + # had a chance to install SNAT iptables rules. These can result + # in dropped traffic, so we should attempt to remove them. + # We do it using a 'postStart' hook since this may need to run + # for nodes which might have already been init'ed but may still + # have dangling rules. This is safe because there are no + # dependencies on anything that is part of the startup script + # itself, and can be safely run multiple times per node (e.g. in + # case of a restart). + if [[ "$(iptables-save | grep -c AWS-SNAT-CHAIN)" != "0" ]]; + then + echo 'Deleting iptables rules created by the AWS CNI VPC plugin' + iptables-save | grep -v AWS-SNAT-CHAIN | iptables-restore + fi + echo 'Done!' 
+ {{- end }} preStop: exec: command: - /cni-uninstall.sh - name: cilium-agent - {{ if or .EnablePrometheusMetrics .Hubble.Metrics }} + resources: + requests: + cpu: {{ or .CPURequest "25m" }} + memory: {{ or .MemoryRequest "128Mi" }} ports: + - name: peer-service + containerPort: 4244 + hostPort: 4244 + protocol: TCP {{ if .EnablePrometheusMetrics }} - containerPort: {{ .AgentPrometheusPort }} name: prometheus @@ -936,90 +1094,245 @@ spec: name: hubble-metrics protocol: TCP {{- end }} - {{ end }} terminationMessagePolicy: FallbackToLogsOnError securityContext: + {{- if ContainerdSELinuxEnabled }} + seLinuxOptions: + type: spc_t + level: s0 + {{- end }} + # Writing to /host/proc/sys/net does not work without a privileged container privileged: true + terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - - mountPath: /sys/fs/bpf - name: bpf-maps - {{- if semverCompare ">=1.10.4 || ~1.9.10" $semver }} - mountPropagation: Bidirectional - {{- end }} - - mountPath: /var/run/cilium - name: cilium-run + # Unprivileged containers need to mount /proc/sys/net from the host + # to have write access + - mountPath: /host/proc/sys/net + name: host-proc-sys-net + # Unprivileged containers need to mount /proc/sys/kernel from the host + # to have write access + - mountPath: /host/proc/sys/kernel + name: host-proc-sys-kernel + - name: bpf-maps + mountPath: /sys/fs/bpf + # Unprivileged containers can't set mount propagation to bidirectional + # in this case we will mount the bpf fs from an init container that + # is privileged and set the mount propagation from host to container + # in Cilium. + mountPropagation: HostToContainer + - name: cilium-cgroup + mountPath: /run/cilium/cgroupv2 + - name: cilium-run + mountPath: /var/run/cilium {{- if not (semverCompare "~1.11.15 || ~1.12.8 || >=1.13.1" $semver) }} - mountPath: /host/opt/cni/bin name: cni-path {{- end }} - - mountPath: /host/etc/cni/net.d - name: etc-cni-netd + - name: etc-cni-netd + mountPath: /host/etc/cni/net.d {{ if .EtcdManaged }} - - mountPath: /var/lib/etcd-config - name: etcd-config-path + - name: etcd-config-path + mountPath: /var/lib/etcd-config readOnly: true - - mountPath: /var/lib/etcd-secrets - name: etcd-secrets + - name: etcd-secrets + mountPath: /var/lib/etcd-secrets readOnly: true {{ end }} - - mountPath: /var/lib/cilium/clustermesh - name: clustermesh-secrets + - name: clustermesh-secrets + mountPath: /var/lib/cilium/clustermesh readOnly: true - mountPath: /tmp/cilium/config-map name: cilium-config-path readOnly: true # Needed to be able to load kernel modules - - mountPath: /lib/modules - name: lib-modules + - name: lib-modules + mountPath: /lib/modules readOnly: true - - mountPath: /run/xtables.lock - name: xtables-lock -{{ if WithDefaultBool .Hubble.Enabled false }} - - mountPath: /var/lib/cilium/tls/hubble - name: hubble-tls - readOnly: true -{{ end }} + - name: xtables-lock + mountPath: /run/xtables.lock {{ if CiliumSecret }} - - mountPath: /etc/ipsec - name: cilium-ipsec-secrets + - name: cilium-ipsec-secrets + mountPath: /etc/ipsec {{ end }} - hostNetwork: true - initContainers: - {{- if semverCompare "~1.11.15 || ~1.12.8 || >=1.13.1" $semver }} - - command: - - /install-plugin.sh +{{ if WithDefaultBool .Hubble.Enabled false }} + - name: hubble-tls + mountPath: /var/lib/cilium/tls/hubble + readOnly: true +{{ end }} + - name: tmp + mountPath: /tmp +{{ if .Debug }} + - name: cilium-monitor image: "{{ or .Registry "quay.io" }}/cilium/cilium:{{ .Version }}" imagePullPolicy: IfNotPresent - name: install-cni-binaries + 
command: + - /bin/bash + - -c + - -- + args: + - |- + for i in {1..5}; do \ + [ -S /var/run/cilium/monitor1_2.sock ] && break || sleep 10;\ + done; \ + cilium monitor --type=agent + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - name: cilium-run + mountPath: /var/run/cilium +{{ end }} + initContainers: + - name: config + image: "{{ or .Registry "quay.io" }}/cilium/cilium:{{ .Version }}" + imagePullPolicy: IfNotPresent + command: + - cilium + - build-config + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: KUBERNETES_SERVICE_HOST + value: "{{ APIInternalName }}" + - name: KUBERNETES_SERVICE_PORT + value: "443" + volumeMounts: + - name: tmp + mountPath: /tmp + terminationMessagePolicy: FallbackToLogsOnError + # Required to mount cgroup2 filesystem on the underlying Kubernetes node. + # We use nsenter command with host's cgroup and mount namespaces enabled. + - name: mount-cgroup + image: "{{ or .Registry "quay.io" }}/cilium/cilium:{{ .Version }}" + imagePullPolicy: IfNotPresent + env: + - name: CGROUP_ROOT + value: /run/cilium/cgroupv2 + - name: BIN_PATH + value: /opt/cni/bin resources: requests: cpu: 100m - memory: 10Mi - securityContext: - capabilities: - drop: - - ALL - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: FallbackToLogsOnError + memory: 128Mi + command: + - sh + - -ec + # The statically linked Go program binary is invoked to avoid any + # dependency on utilities like sh and mount that can be missing on certain + # distros installed on the underlying host. Copy the binary to the + # same directory where we install cilium cni plugin so that exec permissions + # are available. + - | + cp /usr/bin/cilium-mount /hostbin/cilium-mount; + nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT; + rm /hostbin/cilium-mount volumeMounts: - - mountPath: /host/opt/cni/bin - name: cni-path - {{- end }} - - command: + - name: hostproc + mountPath: /hostproc + - name: cni-path + mountPath: /hostbin + terminationMessagePolicy: FallbackToLogsOnError + securityContext: + {{- if ContainerdSELinuxEnabled }} + seLinuxOptions: + level: s0 + type: spc_t + {{- end }} + capabilities: + add: + # Only used for 'mount' cgroup + - SYS_ADMIN + # Used for nsenter + - SYS_CHROOT + - SYS_PTRACE + drop: + - ALL + - name: apply-sysctl-overwrites + image: "{{ or .Registry "quay.io" }}/cilium/cilium:{{ .Version }}" + imagePullPolicy: IfNotPresent + env: + - name: BIN_PATH + value: /opt/cni/bin + command: + - sh + - -ec + # The statically linked Go program binary is invoked to avoid any + # dependency on utilities like sh that can be missing on certain + # distros installed on the underlying host. Copy the binary to the + # same directory where we install cilium cni plugin so that exec permissions + # are available. 
+ - | + cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix; + nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix"; + rm /hostbin/cilium-sysctlfix + volumeMounts: + - name: hostproc + mountPath: /hostproc + - name: cni-path + mountPath: /hostbin + terminationMessagePolicy: FallbackToLogsOnError + securityContext: + {{- if ContainerdSELinuxEnabled }} + seLinuxOptions: + level: s0 + type: spc_t + {{- end }} + capabilities: + add: + # Required in order to access host's /etc/sysctl.d dir + - SYS_ADMIN + # Used for nsenter + - SYS_CHROOT + - SYS_PTRACE + drop: + - ALL + # Mount the bpf fs if it is not mounted. We will perform this task + # from a privileged container because the mount propagation bidirectional + # only works from privileged containers. + - name: mount-bpf-fs + image: "{{ or .Registry "quay.io" }}/cilium/cilium:{{ .Version }}" + imagePullPolicy: IfNotPresent + args: + - 'mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf' + command: + - /bin/bash + - -c + - -- + terminationMessagePolicy: FallbackToLogsOnError + securityContext: + privileged: true + volumeMounts: + - name: bpf-maps + mountPath: /sys/fs/bpf + mountPropagation: Bidirectional + - name: clean-cilium-state + image: "{{ or .Registry "quay.io" }}/cilium/cilium:{{ .Version }}" + imagePullPolicy: IfNotPresent + command: - /init-container.sh env: - name: CILIUM_ALL_STATE valueFrom: configMapKeyRef: - key: clean-cilium-state name: cilium-config + key: clean-cilium-state optional: true - name: CILIUM_BPF_STATE valueFrom: configMapKeyRef: - key: clean-cilium-bpf-state name: cilium-config + key: clean-cilium-bpf-state optional: true + - name: KUBERNETES_SERVICE_HOST + value: "{{ APIInternalName }}" + - name: KUBERNETES_SERVICE_PORT + value: "443" {{- if not (semverCompare ">=1.10.4 || ~1.9.10" $semver) }} - name: CILIUM_WAIT_BPF_MOUNT valueFrom: @@ -1028,88 +1341,142 @@ spec: name: cilium-config optional: true {{- end }} - image: "{{ or .Registry "quay.io" }}/cilium/cilium:{{ .Version }}" - imagePullPolicy: IfNotPresent - name: clean-cilium-state terminationMessagePolicy: FallbackToLogsOnError securityContext: - privileged: true + {{- if ContainerdSELinuxEnabled }} + seLinuxOptions: + level: s0 + type: spc_t + {{- end }} + capabilities: + add: + # Most of the capabilities here are the same ones used in the + # cilium-agent's container because this container can be used to + # uninstall all Cilium resources, and therefore it is likely that + # will need the same capabilities. + # Used since cilium modifies routing tables, etc... + - NET_ADMIN + # Used in iptables. Consider removing once we are iptables-free + - SYS_MODULE + # We need it for now but might not need it for >= 5.11 specially + # for the 'SYS_RESOURCE'. + # In >= 5.8 there's already BPF and PERMON capabilities + - SYS_ADMIN + # Could be an alternative for the SYS_ADMIN for the RLIMIT_NPROC + - SYS_RESOURCE + # Both PERFMON and BPF requires kernel 5.8, container runtime + # cri-o >= v1.22.0 or containerd >= v1.5.0. + # If available, SYS_ADMIN can be removed. 
+ #- PERFMON + #- BPF + drop: + - ALL volumeMounts: - - mountPath: /sys/fs/bpf - name: bpf-maps + - name: bpf-maps + mountPath: /sys/fs/bpf {{- if not (semverCompare ">=1.10.4 || ~1.9.10" $semver) }} mountPropagation: HostToContainer {{- end }} - # Required to mount cgroup filesystem from the host to cilium agent pod - - mountPath: /run/cilium/cgroupv2 - name: cilium-cgroup + # Required to mount cgroup filesystem from the host to cilium agent pod + - name: cilium-cgroup + mountPath: /run/cilium/cgroupv2 mountPropagation: HostToContainer - - mountPath: /var/run/cilium - name: cilium-run + - name: cilium-run + mountPath: /var/run/cilium + {{- if semverCompare "~1.11.15 || ~1.12.8 || >=1.13.1" $semver }} + # Install the CNI binaries in an InitContainer so we don't have a writable host mount in the agent + - name: install-cni-binaries + image: "{{ or .Registry "quay.io" }}/cilium/cilium:{{ .Version }}" + imagePullPolicy: IfNotPresent + command: + - "/install-plugin.sh" resources: requests: cpu: 100m - memory: 100Mi - limits: - memory: 100Mi + memory: 10Mi + securityContext: + privileged: true + {{- if ContainerdSELinuxEnabled }} + seLinuxOptions: + level: s0 + type: spc_t + {{- end }} + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - name: cni-path + mountPath: /host/opt/cni/bin + {{- end }} restartPolicy: Always priorityClassName: system-node-critical -{{ if ContainerdSELinuxEnabled }} - securityContext: - seLinuxOptions: - type: spc_t - level: s0 -{{ end }} - serviceAccount: cilium - serviceAccountName: cilium + serviceAccount: "cilium" + serviceAccountName: "cilium" + automountServiceAccountToken: true terminationGracePeriodSeconds: 1 + hostNetwork: true + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + k8s-app: cilium + topologyKey: kubernetes.io/hostname + nodeSelector: + kubernetes.io/os: linux tolerations: - - operator: Exists + - operator: Exists volumes: + # For sharing configuration between the "config" initContainer and the agent + - name: tmp + emptyDir: {} # To keep state between restarts / upgrades - - hostPath: + - name: cilium-run + hostPath: path: /var/run/cilium type: DirectoryOrCreate - name: cilium-run - # To keep state between restarts / upgrades for bpf maps - - hostPath: + # To keep state between restarts / upgrades for bpf maps + - name: bpf-maps + hostPath: path: /sys/fs/bpf type: DirectoryOrCreate - name: bpf-maps + # To mount cgroup2 filesystem on the host + - name: hostproc + hostPath: + path: /proc + type: Directory + # To keep state between restarts / upgrades for cgroup2 filesystem + - name: cilium-cgroup + hostPath: + path: /run/cilium/cgroupv2 + type: DirectoryOrCreate # To install cilium cni plugin in the host - - hostPath: + - name: cni-path + hostPath: path: /opt/cni/bin type: DirectoryOrCreate - name: cni-path - # To keep state between restarts / upgrades for cgroup2 filesystem - - hostPath: - path: /run/cilium/cgroupv2 - type: Directory - name: cilium-cgroup # To install cilium cni configuration in the host - - hostPath: + - name: etc-cni-netd + hostPath: path: /etc/cni/net.d type: DirectoryOrCreate - name: etc-cni-netd # To be able to load kernel modules - - hostPath: + - name: lib-modules + hostPath: path: /lib/modules - name: lib-modules # To access iptables concurrently with other processes (e.g. 
kube-proxy) - - hostPath: + - name: xtables-lock + hostPath: path: /run/xtables.lock type: FileOrCreate - name: xtables-lock - # To read the clustermesh configuration {{- if .EtcdManaged }} # To read the etcd config stored in config maps - - configMap: - defaultMode: 420 + - name: etcd-config-path + configMap: + name: cilium-config + # note: the leading zero means this number is in octal representation: do not remove it + defaultMode: 0400 items: - key: etcd-config path: etcd.config - name: cilium-config - name: etcd-config-path # To read the Cilium etcd secrets in case the user might want to use TLS - name: etcd-secrets hostPath: @@ -1117,24 +1484,52 @@ spec: type: Directory {{- end }} - name: clustermesh-secrets - secret: - defaultMode: 420 - optional: true - secretName: cilium-clustermesh - # To read the configuration from the config map + projected: + # note: the leading zero means this number is in octal representation: do not remove it + defaultMode: 0400 + sources: + - secret: + name: cilium-clustermesh + optional: true + # note: items are not explicitly listed here, since the entries of this secret + # depend on the peers configured, and that would cause a restart of all agents + # at every addition/removal. Leaving the field empty makes each secret entry + # to be automatically projected into the volume as a file whose name is the key. + - secret: + name: clustermesh-apiserver-remote-cert + optional: true + items: + - key: tls.key + path: common-etcd-client.key + - key: tls.crt + path: common-etcd-client.crt + - key: ca.crt + path: common-etcd-client-ca.crt - configMap: name: cilium-config name: cilium-config-path -{{ if CiliumSecret }} + {{- if CiliumSecret }} - name: cilium-ipsec-secrets secret: secretName: cilium-ipsec-keys -{{ end }} + {{- end }} + - name: host-proc-sys-net + hostPath: + path: /proc/sys/net + type: Directory + - name: host-proc-sys-kernel + hostPath: + path: /proc/sys/kernel + type: Directory {{ if WithDefaultBool .Hubble.Enabled false }} - name: hubble-tls - secret: - secretName: hubble-server-certs - optional: true + projected: + # note: the leading zero means this number is in octal representation: do not remove it + defaultMode: 0400 + sources: + - secret: + name: hubble-server-certs + optional: true {{ end }} --- apiVersion: apps/v1 @@ -1143,6 +1538,8 @@ metadata: labels: io.cilium/app: operator name: cilium-operator + app.kubernetes.io/name: cilium-operator + app.kubernetes.io/part-of: cilium name: cilium-operator namespace: kube-system spec: @@ -1165,6 +1562,8 @@ spec: labels: io.cilium/app: operator name: cilium-operator + app.kubernetes.io/part-of: cilium + app.kubernetes.io/name: cilium-operator spec: nodeSelector: null affinity: @@ -1177,13 +1576,26 @@ spec: - matchExpressions: - key: node-role.kubernetes.io/master operator: Exists + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + io.cilium/app: operator + topologyKey: kubernetes.io/hostname + nodeSelector: + kubernetes.io/os: linux + tolerations: + - operator: Exists containers: - - args: + - name: cilium-operator + image: "{{ or .Registry "quay.io" }}/cilium/operator:{{ .Version }}" + imagePullPolicy: IfNotPresent + command: + - cilium-operator + args: - "--config-dir=/tmp/cilium/config-map" - "--debug=$(CILIUM_DEBUG)" - "--eni-tags={{ CloudLabels }}" - command: - - cilium-operator env: - name: K8S_NODE_NAME valueFrom: @@ -1205,9 +1617,6 @@ spec: value: "{{ APIInternalName }}" - name: KUBERNETES_SERVICE_PORT value: "443" - image: "{{ or 
.Registry "quay.io" }}/cilium/operator:{{ .Version }}" - imagePullPolicy: IfNotPresent - name: cilium-operator {{ if .EnablePrometheusMetrics }} ports: - containerPort: 6942 @@ -1221,13 +1630,23 @@ spec: memory: {{ or .MemoryRequest "128Mi" }} livenessProbe: httpGet: - host: '127.0.0.1' + host: '{{- if IsIPv6Only -}}::1{{- else -}}127.0.0.1{{- end -}}' path: /healthz - port: 9234 + port: {{ $operatorHealthPort }} scheme: HTTP initialDelaySeconds: 60 periodSeconds: 10 timeoutSeconds: 3 + readinessProbe: + httpGet: + host: '{{- if IsIPv6Only -}}::1{{- else -}}127.0.0.1{{- end -}}' + path: /healthz + port: {{ $operatorHealthPort }} + scheme: HTTP + initialDelaySeconds: 0 + periodSeconds: 5 + timeoutSeconds: 3 + failureThreshold: 5 terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - mountPath: /tmp/cilium/config-map @@ -1296,9 +1715,11 @@ apiVersion: apps/v1 kind: Deployment metadata: name: hubble-relay + namespace: kube-system labels: k8s-app: hubble-relay - namespace: kube-system + app.kubernetes.io/name: hubble-relay + app.kubernetes.io/part-of: cilium spec: replicas: 2 selector: @@ -1312,21 +1733,29 @@ spec: metadata: labels: k8s-app: hubble-relay + app.kubernetes.io/name: hubble-relay + app.kubernetes.io/part-of: cilium spec: + securityContext: + fsGroup: 65532 containers: - name: hubble-relay image: "{{ or .Registry "quay.io" }}/cilium/hubble-relay:{{ .Version }}" imagePullPolicy: IfNotPresent + securityContext: + capabilities: + drop: + - ALL + runAsGroup: 65532 + runAsNonRoot: true + runAsUser: 65532 command: - hubble-relay args: - - "serve" - - "--peer-service=unix:///var/run/cilium/hubble.sock" - - "--listen-address=:4245" - env: - # unfortunately, the addon CAs use only CN - - name: GODEBUG - value: x509ignoreCN=0 + - serve + {{- if .Debug }} + - '--debug' + {{- end }} ports: - name: grpc containerPort: 4245 @@ -1336,46 +1765,51 @@ spec: livenessProbe: tcpSocket: port: grpc - terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - - mountPath: /var/run/cilium - name: hubble-sock-dir + - name: config + mountPath: /etc/hubble-relay readOnly: true - - mountPath: /etc/hubble-relay - name: config - readOnly: true - - mountPath: /var/lib/hubble-relay/tls - name: tls + - name: tls + mountPath: /var/lib/hubble-relay/tls readOnly: true + terminationMessagePolicy: FallbackToLogsOnError restartPolicy: Always - serviceAccount: hubble-relay - serviceAccountName: hubble-relay - terminationGracePeriodSeconds: 0 + serviceAccount: "hubble-relay" + serviceAccountName: "hubble-relay" + terminationGracePeriodSeconds: 1 + affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + k8s-app: cilium + topologyKey: kubernetes.io/hostname + nodeSelector: + kubernetes.io/os: linux topologySpreadConstraints: - maxSkew: 1 - topologyKey: "topology.kubernetes.io/zone" + topologyKey: topology.kubernetes.io/zone whenUnsatisfiable: ScheduleAnyway labelSelector: matchLabels: k8s-app: hubble-relay - maxSkew: 1 - topologyKey: "kubernetes.io/hostname" + topologyKey: kubernetes.io/hostname whenUnsatisfiable: DoNotSchedule labelSelector: matchLabels: k8s-app: hubble-relay volumes: - - hostPath: - path: /var/run/cilium - type: Directory - name: hubble-sock-dir - - configMap: + - name: config + configMap: name: hubble-relay-config items: - key: config.yaml path: config.yaml - name: config - - projected: + - name: tls + projected: + # note: the leading zero means this number is in octal representation: do not remove it + defaultMode: 0400 sources: - 
secret: name: hubble-relay-client-certs @@ -1386,13 +1820,14 @@ spec: path: client.key - key: ca.crt path: hubble-server-ca.crt - name: tls --- apiVersion: cert-manager.io/v1 kind: Certificate metadata: labels: k8s-app: cilium + app.kubernetes.io/name: cilium-agent + app.kubernetes.io/part-of: cilium name: hubble-server-certs namespace: kube-system spec: @@ -1408,6 +1843,8 @@ kind: Certificate metadata: labels: k8s-app: cilium + app.kubernetes.io/name: cilium-agent + app.kubernetes.io/part-of: cilium name: hubble-relay-client-certs namespace: kube-system spec: @@ -1430,6 +1867,8 @@ metadata: labels: io.cilium/app: operator name: cilium-operator + app.kubernetes.io/name: cilium-operator + app.kubernetes.io/part-of: cilium spec: selector: matchLabels: diff --git a/upup/pkg/fi/cloudup/bootstrapchannelbuilder/cilium.go b/upup/pkg/fi/cloudup/bootstrapchannelbuilder/cilium.go index 4bc5fdb8a1..7ba66ee6c9 100644 --- a/upup/pkg/fi/cloudup/bootstrapchannelbuilder/cilium.go +++ b/upup/pkg/fi/cloudup/bootstrapchannelbuilder/cilium.go @@ -35,7 +35,7 @@ func addCiliumAddon(b *BootstrapChannelBuilder, addons *AddonList) error { klog.Infof("found cilium (%q) in addons; won't use builtin", key) } else { id := "k8s-1.16" - location := key + "/" + id + "-v1.13.yaml" + location := key + "/" + id + "-v1.14.yaml" addon := &api.AddonSpec{ Name: fi.PtrTo(key), From ef8a1f3d7e601448fe95dd69915d0635914b3cbd Mon Sep 17 00:00:00 2001 From: zadjadr Date: Fri, 1 Sep 2023 22:05:27 +0200 Subject: [PATCH 2/9] Use privileged approach As done before updating to 1.14 This allows us to have a simpler update. We can add unprivileged mode later on (it was not working all the time for me) --- .../k8s-1.16-v1.14.yaml.template | 317 ++++-------------- 1 file changed, 70 insertions(+), 247 deletions(-) diff --git a/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.14.yaml.template b/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.14.yaml.template index 0f1a163613..8cd6a37433 100644 --- a/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.14.yaml.template +++ b/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.14.yaml.template @@ -352,45 +352,21 @@ metadata: namespace: kube-system data: config.yaml: | - cluster-name: "{{ .ClusterName }}" - peer-service: "hubble-peer.kube-system.svc.cluster.local:443" + peer-service: unix:///var/run/cilium/hubble.sock listen-address: :4245 - gops: true - gops-port: "9893" disable-server-tls: true tls-client-cert-file: /var/lib/hubble-relay/tls/client.crt tls-client-key-file: /var/lib/hubble-relay/tls/client.key tls-hubble-server-ca-files: /var/lib/hubble-relay/tls/hubble-server-ca.crt ---- -# Source: cilium/templates/hubble/peer-service.yaml -apiVersion: v1 -kind: Service -metadata: - name: hubble-peer - namespace: kube-system - labels: - k8s-app: cilium - app.kubernetes.io/part-of: cilium - app.kubernetes.io/name: hubble-peer -spec: - selector: - k8s-app: cilium - ports: - - name: peer-service - port: 443 - protocol: TCP - targetPort: 4244 - internalTrafficPolicy: Local + {{ end }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: cilium - labels: - app.kubernetes.io/part-of: cilium rules: - apiGroups: - networking.k8s.io @@ -427,9 +403,6 @@ rules: verbs: - list - watch - # This is used when validating policies in preflight. This will need to stay - # until we figure out how to avoid "get" inside the preflight, and then - # should be removed ideally. 
- get - apiGroups: - cilium.io @@ -498,8 +471,6 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: cilium-operator - labels: - app.kubernetes.io/part-of: cilium rules: - apiGroups: - "" @@ -689,12 +660,6 @@ rules: - ciliumloadbalancerippools/status verbs: - patch -# For cilium-operator running in HA mode. -# -# Cilium operator running in HA mode requires the use of ResourceLock for Leader Election -# between multiple running instances. -# The preferred way of doing this is to use LeasesResourceLock as edits to Leases are less -# common and fewer objects in the cluster watch "all Leases". - apiGroups: - coordination.k8s.io resources: @@ -725,30 +690,26 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: cilium - labels: - app.kubernetes.io/part-of: cilium roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: cilium subjects: - kind: ServiceAccount - name: "cilium" + name: cilium namespace: kube-system --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: cilium-operator - labels: - app.kubernetes.io/part-of: cilium roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: cilium-operator subjects: - kind: ServiceAccount - name: "cilium-operator" + name: cilium-operator namespace: kube-system --- apiVersion: rbac.authorization.k8s.io/v1 @@ -908,7 +869,6 @@ metadata: namespace: kube-system labels: k8s-app: hubble-relay - app.kubernetes.io/part-of: cilium spec: type: ClusterIP selector: @@ -927,8 +887,6 @@ metadata: labels: k8s-app: cilium kubernetes.io/cluster-service: "true" - app.kubernetes.io/name: cilium-agent - app.kubernetes.io/part-of: cilium spec: selector: matchLabels: @@ -941,13 +899,6 @@ spec: template: metadata: annotations: - # Set app AppArmor's profile to "unconfined". The value of this annotation - # can be modified as long users know which profiles they have available - # in AppArmor. 
- container.apparmor.security.beta.kubernetes.io/cilium-agent: "unconfined" - container.apparmor.security.beta.kubernetes.io/clean-cilium-state: "unconfined" - container.apparmor.security.beta.kubernetes.io/mount-cgroup: "unconfined" - container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites: "unconfined" {{ if .EnablePrometheusMetrics }} # Annotation required for prometheus auto-discovery scraping # https://docs.cilium.io/en/v1.9/operations/metrics/#installation @@ -960,8 +911,6 @@ spec: labels: k8s-app: cilium kubernetes.io/cluster-service: "true" - app.kubernetes.io/name: cilium-agent - app.kubernetes.io/part-of: cilium spec: containers: - name: cilium-agent @@ -996,6 +945,10 @@ spec: successThreshold: 1 failureThreshold: 10 timeoutSeconds: 5 + resources: + requests: + cpu: {{ or .CPURequest "25m" }} + memory: {{ or .MemoryRequest "128Mi" }} readinessProbe: httpGet: host: '{{- if IsIPv6Only -}}::1{{- else -}}127.0.0.1{{- end -}}' @@ -1074,15 +1027,7 @@ spec: exec: command: - /cni-uninstall.sh - resources: - requests: - cpu: {{ or .CPURequest "25m" }} - memory: {{ or .MemoryRequest "128Mi" }} ports: - - name: peer-service - containerPort: 4244 - hostPort: 4244 - protocol: TCP {{ if .EnablePrometheusMetrics }} - containerPort: {{ .AgentPrometheusPort }} name: prometheus @@ -1096,37 +1041,21 @@ spec: {{- end }} terminationMessagePolicy: FallbackToLogsOnError securityContext: - {{- if ContainerdSELinuxEnabled }} - seLinuxOptions: - type: spc_t - level: s0 - {{- end }} - # Writing to /host/proc/sys/net does not work without a privileged container privileged: true terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - # Unprivileged containers need to mount /proc/sys/net from the host - # to have write access - - mountPath: /host/proc/sys/net - name: host-proc-sys-net - # Unprivileged containers need to mount /proc/sys/kernel from the host - # to have write access - - mountPath: /host/proc/sys/kernel - name: host-proc-sys-kernel - name: bpf-maps mountPath: /sys/fs/bpf - # Unprivileged containers can't set mount propagation to bidirectional - # in this case we will mount the bpf fs from an init container that - # is privileged and set the mount propagation from host to container - # in Cilium. 
- mountPropagation: HostToContainer + {{- if semverCompare ">=1.10.4 || ~1.9.10" $semver }} + mountPropagation: Bidirectional + {{- end }} - name: cilium-cgroup mountPath: /run/cilium/cgroupv2 - name: cilium-run mountPath: /var/run/cilium {{- if not (semverCompare "~1.11.15 || ~1.12.8 || >=1.13.1" $semver) }} - - mountPath: /host/opt/cni/bin - name: cni-path + - name: cni-path + mountPath: /host/opt/cni/bin {{- end }} - name: etc-cni-netd mountPath: /host/etc/cni/net.d @@ -1141,8 +1070,8 @@ spec: - name: clustermesh-secrets mountPath: /var/lib/cilium/clustermesh readOnly: true - - mountPath: /tmp/cilium/config-map - name: cilium-config-path + - name: cilium-config-path + mountPath: /tmp/cilium/config-map readOnly: true # Needed to be able to load kernel modules - name: lib-modules @@ -1150,17 +1079,17 @@ spec: readOnly: true - name: xtables-lock mountPath: /run/xtables.lock -{{ if CiliumSecret }} - - name: cilium-ipsec-secrets - mountPath: /etc/ipsec -{{ end }} + - name: tmp + mountPath: /tmp {{ if WithDefaultBool .Hubble.Enabled false }} - name: hubble-tls mountPath: /var/lib/cilium/tls/hubble readOnly: true {{ end }} - - name: tmp - mountPath: /tmp +{{ if CiliumSecret }} + - mountPath: /etc/ipsec + name: cilium-ipsec-secrets +{{ end }} {{ if .Debug }} - name: cilium-monitor image: "{{ or .Registry "quay.io" }}/cilium/cilium:{{ .Version }}" @@ -1216,10 +1145,6 @@ spec: value: /run/cilium/cgroupv2 - name: BIN_PATH value: /opt/cni/bin - resources: - requests: - cpu: 100m - memory: 128Mi command: - sh - -ec @@ -1239,20 +1164,7 @@ spec: mountPath: /hostbin terminationMessagePolicy: FallbackToLogsOnError securityContext: - {{- if ContainerdSELinuxEnabled }} - seLinuxOptions: - level: s0 - type: spc_t - {{- end }} - capabilities: - add: - # Only used for 'mount' cgroup - - SYS_ADMIN - # Used for nsenter - - SYS_CHROOT - - SYS_PTRACE - drop: - - ALL + privileged: true - name: apply-sysctl-overwrites image: "{{ or .Registry "quay.io" }}/cilium/cilium:{{ .Version }}" imagePullPolicy: IfNotPresent @@ -1277,40 +1189,8 @@ spec: - name: cni-path mountPath: /hostbin terminationMessagePolicy: FallbackToLogsOnError - securityContext: - {{- if ContainerdSELinuxEnabled }} - seLinuxOptions: - level: s0 - type: spc_t - {{- end }} - capabilities: - add: - # Required in order to access host's /etc/sysctl.d dir - - SYS_ADMIN - # Used for nsenter - - SYS_CHROOT - - SYS_PTRACE - drop: - - ALL - # Mount the bpf fs if it is not mounted. We will perform this task - # from a privileged container because the mount propagation bidirectional - # only works from privileged containers. 
- - name: mount-bpf-fs - image: "{{ or .Registry "quay.io" }}/cilium/cilium:{{ .Version }}" - imagePullPolicy: IfNotPresent - args: - - 'mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf' - command: - - /bin/bash - - -c - - -- - terminationMessagePolicy: FallbackToLogsOnError securityContext: privileged: true - volumeMounts: - - name: bpf-maps - mountPath: /sys/fs/bpf - mountPropagation: Bidirectional - name: clean-cilium-state image: "{{ or .Registry "quay.io" }}/cilium/cilium:{{ .Version }}" imagePullPolicy: IfNotPresent @@ -1343,38 +1223,11 @@ spec: {{- end }} terminationMessagePolicy: FallbackToLogsOnError securityContext: - {{- if ContainerdSELinuxEnabled }} - seLinuxOptions: - level: s0 - type: spc_t - {{- end }} - capabilities: - add: - # Most of the capabilities here are the same ones used in the - # cilium-agent's container because this container can be used to - # uninstall all Cilium resources, and therefore it is likely that - # will need the same capabilities. - # Used since cilium modifies routing tables, etc... - - NET_ADMIN - # Used in iptables. Consider removing once we are iptables-free - - SYS_MODULE - # We need it for now but might not need it for >= 5.11 specially - # for the 'SYS_RESOURCE'. - # In >= 5.8 there's already BPF and PERMON capabilities - - SYS_ADMIN - # Could be an alternative for the SYS_ADMIN for the RLIMIT_NPROC - - SYS_RESOURCE - # Both PERFMON and BPF requires kernel 5.8, container runtime - # cri-o >= v1.22.0 or containerd >= v1.5.0. - # If available, SYS_ADMIN can be removed. - #- PERFMON - #- BPF - drop: - - ALL + privileged: true volumeMounts: - name: bpf-maps mountPath: /sys/fs/bpf - {{- if not (semverCompare ">=1.10.4 || ~1.9.10" $semver) }} + {{- if semverCompare ">=1.10.4 || ~1.9.10" $semver }} mountPropagation: HostToContainer {{- end }} # Required to mount cgroup filesystem from the host to cilium agent pod @@ -1389,18 +1242,16 @@ spec: image: "{{ or .Registry "quay.io" }}/cilium/cilium:{{ .Version }}" imagePullPolicy: IfNotPresent command: - - "/install-plugin.sh" + - /install-plugin.sh resources: requests: cpu: 100m memory: 10Mi securityContext: - privileged: true - {{- if ContainerdSELinuxEnabled }} - seLinuxOptions: - level: s0 - type: spc_t - {{- end }} + capabilities: + drop: + - ALL + terminationMessagePath: /dev/termination-log terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - name: cni-path @@ -1408,9 +1259,14 @@ spec: {{- end }} restartPolicy: Always priorityClassName: system-node-critical - serviceAccount: "cilium" - serviceAccountName: "cilium" - automountServiceAccountToken: true +{{ if ContainerdSELinuxEnabled }} + securityContext: + seLinuxOptions: + type: spc_t + level: s0 +{{ end }} + serviceAccount: cilium + serviceAccountName: cilium terminationGracePeriodSeconds: 1 hostNetwork: true affinity: @@ -1423,7 +1279,7 @@ spec: nodeSelector: kubernetes.io/os: linux tolerations: - - operator: Exists + - operator: Exists volumes: # For sharing configuration between the "config" initContainer and the agent - name: tmp @@ -1508,19 +1364,11 @@ spec: - configMap: name: cilium-config name: cilium-config-path - {{- if CiliumSecret }} +{{ if CiliumSecret }} - name: cilium-ipsec-secrets secret: secretName: cilium-ipsec-keys - {{- end }} - - name: host-proc-sys-net - hostPath: - path: /proc/sys/net - type: Directory - - name: host-proc-sys-kernel - hostPath: - path: /proc/sys/kernel - type: Directory +{{ end }} {{ if WithDefaultBool .Hubble.Enabled false }} - name: hubble-tls projected: @@ -1538,8 +1386,6 @@ 
metadata: labels: io.cilium/app: operator name: cilium-operator - app.kubernetes.io/name: cilium-operator - app.kubernetes.io/part-of: cilium name: cilium-operator namespace: kube-system spec: @@ -1562,8 +1408,6 @@ spec: labels: io.cilium/app: operator name: cilium-operator - app.kubernetes.io/part-of: cilium - app.kubernetes.io/name: cilium-operator spec: nodeSelector: null affinity: @@ -1576,16 +1420,6 @@ spec: - matchExpressions: - key: node-role.kubernetes.io/master operator: Exists - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - io.cilium/app: operator - topologyKey: kubernetes.io/hostname - nodeSelector: - kubernetes.io/os: linux - tolerations: - - operator: Exists containers: - name: cilium-operator image: "{{ or .Registry "quay.io" }}/cilium/operator:{{ .Version }}" @@ -1718,8 +1552,6 @@ metadata: namespace: kube-system labels: k8s-app: hubble-relay - app.kubernetes.io/name: hubble-relay - app.kubernetes.io/part-of: cilium spec: replicas: 2 selector: @@ -1733,8 +1565,6 @@ spec: metadata: labels: k8s-app: hubble-relay - app.kubernetes.io/name: hubble-relay - app.kubernetes.io/part-of: cilium spec: securityContext: fsGroup: 65532 @@ -1752,10 +1582,13 @@ spec: command: - hubble-relay args: - - serve - {{- if .Debug }} - - '--debug' - {{- end }} + - "serve" + - "--peer-service=unix:///var/run/cilium/hubble.sock" + - "--listen-address=:4245" + env: + # unfortunately, the addon CAs use only CN + - name: GODEBUG + value: x509ignoreCN=0 ports: - name: grpc containerPort: 4245 @@ -1765,51 +1598,46 @@ spec: livenessProbe: tcpSocket: port: grpc - volumeMounts: - - name: config - mountPath: /etc/hubble-relay - readOnly: true - - name: tls - mountPath: /var/lib/hubble-relay/tls - readOnly: true terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/cilium + name: hubble-sock-dir + readOnly: true + - mountPath: /etc/hubble-relay + name: config + readOnly: true + - mountPath: /var/lib/hubble-relay/tls + name: tls + readOnly: true restartPolicy: Always - serviceAccount: "hubble-relay" - serviceAccountName: "hubble-relay" - terminationGracePeriodSeconds: 1 - affinity: - podAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - k8s-app: cilium - topologyKey: kubernetes.io/hostname - nodeSelector: - kubernetes.io/os: linux + serviceAccount: hubble-relay + serviceAccountName: hubble-relay + terminationGracePeriodSeconds: 0 topologySpreadConstraints: - maxSkew: 1 - topologyKey: topology.kubernetes.io/zone + topologyKey: "topology.kubernetes.io/zone" whenUnsatisfiable: ScheduleAnyway labelSelector: matchLabels: k8s-app: hubble-relay - maxSkew: 1 - topologyKey: kubernetes.io/hostname + topologyKey: "kubernetes.io/hostname" whenUnsatisfiable: DoNotSchedule labelSelector: matchLabels: k8s-app: hubble-relay volumes: - - name: config - configMap: + - hostPath: + path: /var/run/cilium + type: Directory + name: hubble-sock-dir + - configMap: name: hubble-relay-config items: - key: config.yaml path: config.yaml - - name: tls - projected: - # note: the leading zero means this number is in octal representation: do not remove it - defaultMode: 0400 + name: config + - projected: sources: - secret: name: hubble-relay-client-certs @@ -1820,14 +1648,13 @@ spec: path: client.key - key: ca.crt path: hubble-server-ca.crt + name: tls --- apiVersion: cert-manager.io/v1 kind: Certificate metadata: labels: k8s-app: cilium - app.kubernetes.io/name: cilium-agent - app.kubernetes.io/part-of: cilium 
name: hubble-server-certs namespace: kube-system spec: @@ -1843,8 +1670,6 @@ kind: Certificate metadata: labels: k8s-app: cilium - app.kubernetes.io/name: cilium-agent - app.kubernetes.io/part-of: cilium name: hubble-relay-client-certs namespace: kube-system spec: @@ -1867,8 +1692,6 @@ metadata: labels: io.cilium/app: operator name: cilium-operator - app.kubernetes.io/name: cilium-operator - app.kubernetes.io/part-of: cilium spec: selector: matchLabels: From 981f23964a0b5cb39b7541d7b9a5b8631b7ffbcd Mon Sep 17 00:00:00 2001 From: zadjadr Date: Fri, 1 Sep 2023 22:05:42 +0200 Subject: [PATCH 3/9] Use hubble-relay peer service instead of socket mount --- .../k8s-1.16-v1.14.yaml.template | 83 ++++++++++++------- 1 file changed, 55 insertions(+), 28 deletions(-) diff --git a/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.14.yaml.template b/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.14.yaml.template index 8cd6a37433..82146dd2d2 100644 --- a/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.14.yaml.template +++ b/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.14.yaml.template @@ -352,7 +352,8 @@ metadata: namespace: kube-system data: config.yaml: | - peer-service: unix:///var/run/cilium/hubble.sock + cluster-name: "{{ .ClusterName }}" + peer-service: "hubble-peer.kube-system.svc.cluster.local:443" listen-address: :4245 disable-server-tls: true @@ -360,7 +361,26 @@ data: tls-client-cert-file: /var/lib/hubble-relay/tls/client.crt tls-client-key-file: /var/lib/hubble-relay/tls/client.key tls-hubble-server-ca-files: /var/lib/hubble-relay/tls/hubble-server-ca.crt - +--- +# Source: cilium/templates/hubble/peer-service.yaml +apiVersion: v1 +kind: Service +metadata: + name: hubble-peer + namespace: kube-system + labels: + k8s-app: cilium + app.kubernetes.io/part-of: cilium + app.kubernetes.io/name: hubble-peer +spec: + selector: + k8s-app: cilium + ports: + - name: peer-service + port: 443 + protocol: TCP + targetPort: 4244 + internalTrafficPolicy: Local {{ end }} --- apiVersion: rbac.authorization.k8s.io/v1 @@ -1028,17 +1048,23 @@ spec: command: - /cni-uninstall.sh ports: - {{ if .EnablePrometheusMetrics }} - - containerPort: {{ .AgentPrometheusPort }} - name: prometheus + {{- if WithDefaultBool .Hubble.Enabled false }} + - name: peer-service + containerPort: 4244 + hostPort: 4244 protocol: TCP - {{ end }} {{- if .Hubble.Metrics }} - containerPort: 9091 hostPort: 9091 name: hubble-metrics protocol: TCP {{- end }} + {{- end }} + {{ if .EnablePrometheusMetrics }} + - containerPort: {{ .AgentPrometheusPort }} + name: prometheus + protocol: TCP + {{- end }} terminationMessagePolicy: FallbackToLogsOnError securityContext: privileged: true @@ -1582,13 +1608,10 @@ spec: command: - hubble-relay args: - - "serve" - - "--peer-service=unix:///var/run/cilium/hubble.sock" - - "--listen-address=:4245" - env: - # unfortunately, the addon CAs use only CN - - name: GODEBUG - value: x509ignoreCN=0 + - serve + {{- if .Debug }} + - '--debug' + {{- end }} ports: - name: grpc containerPort: 4245 @@ -1600,15 +1623,21 @@ spec: port: grpc terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - - mountPath: /var/run/cilium - name: hubble-sock-dir + - name: config + mountPath: /etc/hubble-relay readOnly: true - - mountPath: /etc/hubble-relay - name: config - readOnly: true - - mountPath: /var/lib/hubble-relay/tls - name: tls + - name: tls + mountPath: /var/lib/hubble-relay/tls readOnly: true + affinity: + podAffinity: + 
requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + k8s-app: cilium + topologyKey: kubernetes.io/hostname + nodeSelector: + kubernetes.io/os: linux restartPolicy: Always serviceAccount: hubble-relay serviceAccountName: hubble-relay @@ -1627,17 +1656,16 @@ spec: matchLabels: k8s-app: hubble-relay volumes: - - hostPath: - path: /var/run/cilium - type: Directory - name: hubble-sock-dir - - configMap: + - name: config + configMap: name: hubble-relay-config items: - key: config.yaml path: config.yaml - name: config - - projected: + - name: tls + projected: + # note: the leading zero means this number is in octal representation: do not remove it + defaultMode: 0400 sources: - secret: name: hubble-relay-client-certs @@ -1648,7 +1676,6 @@ spec: path: client.key - key: ca.crt path: hubble-server-ca.crt - name: tls --- apiVersion: cert-manager.io/v1 kind: Certificate From 98dbfdc11e0a583f2a2b871070a0dec090f82574 Mon Sep 17 00:00:00 2001 From: zadjadr Date: Mon, 4 Sep 2023 11:44:23 +0200 Subject: [PATCH 4/9] Add labels --- .../k8s-1.16-v1.14.yaml.template | 25 ++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.14.yaml.template b/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.14.yaml.template index 82146dd2d2..44ab3a5256 100644 --- a/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.14.yaml.template +++ b/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.14.yaml.template @@ -265,6 +265,7 @@ data: enable-endpoint-routes: "true" auto-create-cilium-node-resource: "true" blacklist-conflicting-routes: "false" + eni-tags: "{{ CloudLabels }}" {{ end }} {{ end }} @@ -387,6 +388,8 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: cilium + labels: + app.kubernetes.io/part-of: cilium rules: - apiGroups: - networking.k8s.io @@ -491,6 +494,8 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: cilium-operator + labels: + app.kubernetes.io/part-of: cilium rules: - apiGroups: - "" @@ -710,6 +715,8 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: cilium + labels: + app.kubernetes.io/part-of: cilium roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole @@ -723,6 +730,8 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: cilium-operator + labels: + app.kubernetes.io/part-of: cilium roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole @@ -889,6 +898,8 @@ metadata: namespace: kube-system labels: k8s-app: hubble-relay + app.kubernetes.io/name: hubble-relay + app.kubernetes.io/part-of: cilium spec: type: ClusterIP selector: @@ -906,6 +917,8 @@ metadata: namespace: kube-system labels: k8s-app: cilium + app.kubernetes.io/part-of: cilium + app.kubernetes.io/name: cilium-agent kubernetes.io/cluster-service: "true" spec: selector: @@ -930,6 +943,8 @@ spec: {{- end }} labels: k8s-app: cilium + app.kubernetes.io/name: cilium-agent + app.kubernetes.io/part-of: cilium kubernetes.io/cluster-service: "true" spec: containers: @@ -1068,7 +1083,6 @@ spec: terminationMessagePolicy: FallbackToLogsOnError securityContext: privileged: true - terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - name: bpf-maps mountPath: /sys/fs/bpf @@ -1412,6 +1426,8 @@ metadata: labels: io.cilium/app: operator name: cilium-operator + app.kubernetes.io/part-of: cilium + app.kubernetes.io/name: cilium-operator name: 
cilium-operator namespace: kube-system spec: @@ -1434,6 +1450,8 @@ spec: labels: io.cilium/app: operator name: cilium-operator + app.kubernetes.io/part-of: cilium + app.kubernetes.io/name: cilium-operator spec: nodeSelector: null affinity: @@ -1578,6 +1596,8 @@ metadata: namespace: kube-system labels: k8s-app: hubble-relay + app.kubernetes.io/name: hubble-relay + app.kubernetes.io/part-of: cilium spec: replicas: 2 selector: @@ -1591,6 +1611,8 @@ spec: metadata: labels: k8s-app: hubble-relay + app.kubernetes.io/name: hubble-relay + app.kubernetes.io/part-of: cilium spec: securityContext: fsGroup: 65532 @@ -1697,6 +1719,7 @@ kind: Certificate metadata: labels: k8s-app: cilium + app.kubernetes.io/part-of: cilium name: hubble-relay-client-certs namespace: kube-system spec: From 2d95ec3d001a186ab5147a23aa94e6896e2bd67c Mon Sep 17 00:00:00 2001 From: zadjadr Date: Mon, 4 Sep 2023 18:02:20 +0200 Subject: [PATCH 5/9] Remove deprecated cni option --- .../addons/networking.cilium.io/k8s-1.16-v1.14.yaml.template | 1 - 1 file changed, 1 deletion(-) diff --git a/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.14.yaml.template b/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.14.yaml.template index 44ab3a5256..9c52451619 100644 --- a/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.14.yaml.template +++ b/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.14.yaml.template @@ -264,7 +264,6 @@ data: {{ if eq . "eni" }} enable-endpoint-routes: "true" auto-create-cilium-node-resource: "true" - blacklist-conflicting-routes: "false" eni-tags: "{{ CloudLabels }}" {{ end }} {{ end }} From b0e12aa60d3ad77a7f5e7dcc22e8c820d1b0636b Mon Sep 17 00:00:00 2001 From: zadjadr Date: Tue, 5 Sep 2023 12:54:02 +0200 Subject: [PATCH 6/9] Revert addition of readiness probe from cilium-operator --- .../networking.cilium.io/k8s-1.16-v1.14.yaml.template | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.14.yaml.template b/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.14.yaml.template index 9c52451619..a488fb7490 100644 --- a/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.14.yaml.template +++ b/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.14.yaml.template @@ -1514,16 +1514,6 @@ spec: initialDelaySeconds: 60 periodSeconds: 10 timeoutSeconds: 3 - readinessProbe: - httpGet: - host: '{{- if IsIPv6Only -}}::1{{- else -}}127.0.0.1{{- end -}}' - path: /healthz - port: {{ $operatorHealthPort }} - scheme: HTTP - initialDelaySeconds: 0 - periodSeconds: 5 - timeoutSeconds: 3 - failureThreshold: 5 terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - mountPath: /tmp/cilium/config-map From fdb601cefa2f564df83f2774412bfeaa30cb340c Mon Sep 17 00:00:00 2001 From: zadjadr Date: Mon, 11 Sep 2023 00:20:53 +0200 Subject: [PATCH 7/9] cilium: Set correct affinity & update strategy --- .../k8s-1.16-v1.14.yaml.template | 24 ++++++++----------- 1 file changed, 10 insertions(+), 14 deletions(-) diff --git a/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.14.yaml.template b/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.14.yaml.template index a488fb7490..2d2748e881 100644 --- a/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.14.yaml.template +++ b/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.14.yaml.template @@ -925,9 +925,7
@@ spec: k8s-app: cilium kubernetes.io/cluster-service: "true" updateStrategy: - rollingUpdate: - maxUnavailable: 2 - type: RollingUpdate + type: OnDelete template: metadata: annotations: @@ -946,6 +944,15 @@ spec: app.kubernetes.io/part-of: cilium kubernetes.io/cluster-service: "true" spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: In + values: + - linux containers: - name: cilium-agent image: "{{ or .Registry "quay.io" }}/cilium/cilium:{{ .Version }}" @@ -1308,15 +1315,6 @@ spec: serviceAccountName: cilium terminationGracePeriodSeconds: 1 hostNetwork: true - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - k8s-app: cilium - topologyKey: kubernetes.io/hostname - nodeSelector: - kubernetes.io/os: linux tolerations: - operator: Exists volumes: @@ -1647,8 +1645,6 @@ spec: matchLabels: k8s-app: cilium topologyKey: kubernetes.io/hostname - nodeSelector: - kubernetes.io/os: linux restartPolicy: Always serviceAccount: hubble-relay serviceAccountName: hubble-relay From 30aa24f6debec554233997423f71b5b3f802c347 Mon Sep 17 00:00:00 2001 From: zadjadr Date: Tue, 24 Oct 2023 16:59:21 +0200 Subject: [PATCH 8/9] Update to Cilium 1.14.3 --- pkg/apis/kops/validation/validation_test.go | 6 +++--- pkg/model/components/cilium.go | 2 +- .../networking.cilium.io/k8s-1.16-v1.14.yaml.template | 2 +- .../tests/bootstrapchannelbuilder/cilium/manifest.yaml | 4 ++-- .../metrics-server/insecure-1.19/manifest.yaml | 4 ++-- .../metrics-server/secure-1.19/manifest.yaml | 4 ++-- 6 files changed, 11 insertions(+), 11 deletions(-) diff --git a/pkg/apis/kops/validation/validation_test.go b/pkg/apis/kops/validation/validation_test.go index 537994f9e8..cd68cd86cb 100644 --- a/pkg/apis/kops/validation/validation_test.go +++ b/pkg/apis/kops/validation/validation_test.go @@ -963,7 +963,7 @@ func Test_Validate_Cilium(t *testing.T) { }, { Cilium: kops.CiliumNetworkingSpec{ - Version: "v1.14.2", + Version: "v1.14.3", Ingress: &kops.CiliumIngressSpec{ Enabled: fi.PtrTo(true), DefaultLoadBalancerMode: "bad-value", @@ -973,7 +973,7 @@ func Test_Validate_Cilium(t *testing.T) { }, { Cilium: kops.CiliumNetworkingSpec{ - Version: "v1.14.2", + Version: "v1.14.3", Ingress: &kops.CiliumIngressSpec{ Enabled: fi.PtrTo(true), DefaultLoadBalancerMode: "dedicated", @@ -982,7 +982,7 @@ func Test_Validate_Cilium(t *testing.T) { }, { Cilium: kops.CiliumNetworkingSpec{ - Version: "v1.14.2", + Version: "v1.14.3", Hubble: &kops.HubbleSpec{ Enabled: fi.PtrTo(true), }, diff --git a/pkg/model/components/cilium.go b/pkg/model/components/cilium.go index 851e0b719b..6b12303c79 100644 --- a/pkg/model/components/cilium.go +++ b/pkg/model/components/cilium.go @@ -40,7 +40,7 @@ func (b *CiliumOptionsBuilder) BuildOptions(o interface{}) error { } if c.Version == "" { - c.Version = "v1.14.2" + c.Version = "v1.14.3" } if c.EnableEndpointHealthChecking == nil { diff --git a/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.14.yaml.template b/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.14.yaml.template index 2d2748e881..d8837d7456 100644 --- a/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.14.yaml.template +++ b/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.14.yaml.template @@ -316,7 +316,7 @@ data: ingress-lb-annotation-prefixes: "{{ .Ingress.LoadBalancerAnnotationPrefixes }}" {{ end }} {{ end 
}} - + # Tell the agent to generate and write a CNI configuration file write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist cni-exclusive: "true" diff --git a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/cilium/manifest.yaml b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/cilium/manifest.yaml index dd9d29f14e..4c90f009a6 100644 --- a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/cilium/manifest.yaml +++ b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/cilium/manifest.yaml @@ -98,8 +98,8 @@ spec: k8s-addon: storage-aws.addons.k8s.io version: 9.99.0 - id: k8s-1.16 - manifest: networking.cilium.io/k8s-1.16-v1.13.yaml - manifestHash: 166325a914768c7916145fb5569a8673c50e90e74661391e63854fcf6a28daab + manifest: networking.cilium.io/k8s-1.16-v1.14.yaml + manifestHash: 0bd17ce3cf13710024d3ef8ba18921b9c67b68c1a78a41872ec196f3808c7095 name: networking.cilium.io needsRollingUpdate: all selector: diff --git a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/metrics-server/insecure-1.19/manifest.yaml b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/metrics-server/insecure-1.19/manifest.yaml index 6f27e94b8c..1b3280a1c0 100644 --- a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/metrics-server/insecure-1.19/manifest.yaml +++ b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/metrics-server/insecure-1.19/manifest.yaml @@ -112,8 +112,8 @@ spec: k8s-addon: storage-aws.addons.k8s.io version: 9.99.0 - id: k8s-1.16 - manifest: networking.cilium.io/k8s-1.16-v1.13.yaml - manifestHash: 166325a914768c7916145fb5569a8673c50e90e74661391e63854fcf6a28daab + manifest: networking.cilium.io/k8s-1.16-v1.14.yaml + manifestHash: 0bd17ce3cf13710024d3ef8ba18921b9c67b68c1a78a41872ec196f3808c7095 name: networking.cilium.io needsRollingUpdate: all selector: diff --git a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/metrics-server/secure-1.19/manifest.yaml b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/metrics-server/secure-1.19/manifest.yaml index 94f1083de3..0a760eea31 100644 --- a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/metrics-server/secure-1.19/manifest.yaml +++ b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/metrics-server/secure-1.19/manifest.yaml @@ -169,8 +169,8 @@ spec: k8s-addon: storage-aws.addons.k8s.io version: 9.99.0 - id: k8s-1.16 - manifest: networking.cilium.io/k8s-1.16-v1.13.yaml - manifestHash: 166325a914768c7916145fb5569a8673c50e90e74661391e63854fcf6a28daab + manifest: networking.cilium.io/k8s-1.16-v1.14.yaml + manifestHash: 0bd17ce3cf13710024d3ef8ba18921b9c67b68c1a78a41872ec196f3808c7095 name: networking.cilium.io needsRollingUpdate: all selector: From 2cd697aef2af30f47a7ba2b25fe8a7bc4245da97 Mon Sep 17 00:00:00 2001 From: zadjadr Date: Tue, 24 Oct 2023 17:09:19 +0200 Subject: [PATCH 9/9] hack/update-expected.sh --- ...s_s3_object_cluster-completed.spec_content | 2 +- ...-ipv6.example.com-addons-bootstrap_content | 4 +- ...dons-networking.cilium.io-k8s-1.16_content | 272 +++++++++++--- .../minimal-ipv6-cilium/kubernetes.tf | 2 +- ...des.minimal-warmpool.example.com_user_data | 2 +- ...s_s3_object_cluster-completed.spec_content | 2 +- ...mpool.example.com-addons-bootstrap_content | 4 +- ...dons-networking.cilium.io-k8s-1.16_content | 271 +++++++++++--- .../aws_s3_object_nodeupconfig-nodes_content | 4 +- .../minimal-warmpool/kubernetes.tf | 2 +- ...s_s3_object_cluster-completed.spec_content | 2 +- ...minimal.k8s.local-addons-bootstrap_content | 4 +- ...dons-networking.cilium.io-k8s-1.16_content | 271 +++++++++++--- .../minimal_scaleway/kubernetes.tf | 
2 +- ...s_s3_object_cluster-completed.spec_content | 2 +- ...ilium.example.com-addons-bootstrap_content | 4 +- ...dons-networking.cilium.io-k8s-1.16_content | 291 +++++++++++++-- .../privatecilium-eni/kubernetes.tf | 2 +- ...s_s3_object_cluster-completed.spec_content | 2 +- ...ilium.example.com-addons-bootstrap_content | 4 +- ...dons-networking.cilium.io-k8s-1.16_content | 271 +++++++++++--- .../privatecilium/kubernetes.tf | 2 +- ...s_s3_object_cluster-completed.spec_content | 2 +- ...ilium.example.com-addons-bootstrap_content | 4 +- ...dons-networking.cilium.io-k8s-1.16_content | 352 +++++++++++++++--- .../privatecilium2/kubernetes.tf | 2 +- ...s_s3_object_cluster-completed.spec_content | 2 +- ...anced.example.com-addons-bootstrap_content | 4 +- ...dons-networking.cilium.io-k8s-1.16_content | 293 ++++++++++++--- .../privateciliumadvanced/kubernetes.tf | 2 +- .../cilium/manifest.yaml | 2 +- .../insecure-1.19/manifest.yaml | 2 +- .../metrics-server/secure-1.19/manifest.yaml | 2 +- 33 files changed, 1738 insertions(+), 351 deletions(-) diff --git a/tests/integration/update_cluster/minimal-ipv6-cilium/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/minimal-ipv6-cilium/data/aws_s3_object_cluster-completed.spec_content index 92406d3cbc..8db1b475dc 100644 --- a/tests/integration/update_cluster/minimal-ipv6-cilium/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/minimal-ipv6-cilium/data/aws_s3_object_cluster-completed.spec_content @@ -226,7 +226,7 @@ spec: sidecarIstioProxyImage: cilium/istio_proxy toFqdnsDnsRejectResponseCode: refused tunnel: disabled - version: v1.13.5 + version: v1.14.3 nodeTerminationHandler: cpuRequest: 50m enableRebalanceDraining: false diff --git a/tests/integration/update_cluster/minimal-ipv6-cilium/data/aws_s3_object_minimal-ipv6.example.com-addons-bootstrap_content b/tests/integration/update_cluster/minimal-ipv6-cilium/data/aws_s3_object_minimal-ipv6.example.com-addons-bootstrap_content index f5ec4509f5..ea762272f4 100644 --- a/tests/integration/update_cluster/minimal-ipv6-cilium/data/aws_s3_object_minimal-ipv6.example.com-addons-bootstrap_content +++ b/tests/integration/update_cluster/minimal-ipv6-cilium/data/aws_s3_object_minimal-ipv6.example.com-addons-bootstrap_content @@ -105,8 +105,8 @@ spec: k8s-addon: storage-aws.addons.k8s.io version: 9.99.0 - id: k8s-1.16 - manifest: networking.cilium.io/k8s-1.16-v1.13.yaml - manifestHash: 33440a8acbacd86a9b5cd6c44eabf93e591e6cdfd0245feae791b75ddc579a3c + manifest: networking.cilium.io/k8s-1.16-v1.14.yaml + manifestHash: 5e9537b8396c3b141b62b590b619883148d224463969f32eddd9af9601e7b79e name: networking.cilium.io needsRollingUpdate: all selector: diff --git a/tests/integration/update_cluster/minimal-ipv6-cilium/data/aws_s3_object_minimal-ipv6.example.com-addons-networking.cilium.io-k8s-1.16_content b/tests/integration/update_cluster/minimal-ipv6-cilium/data/aws_s3_object_minimal-ipv6.example.com-addons-networking.cilium.io-k8s-1.16_content index 67095aa820..96533389f8 100644 --- a/tests/integration/update_cluster/minimal-ipv6-cilium/data/aws_s3_object_minimal-ipv6.example.com-addons-networking.cilium.io-k8s-1.16_content +++ b/tests/integration/update_cluster/minimal-ipv6-cilium/data/aws_s3_object_minimal-ipv6.example.com-addons-networking.cilium.io-k8s-1.16_content @@ -39,6 +39,8 @@ data: bpf-policy-map-max: "16384" cgroup-root: /run/cilium/cgroupv2 cluster-name: default + cni-exclusive: "true" + cni-log-file: /var/run/cilium/cilium-cni.log debug: 
"false" disable-cnp-status-updates: "true" disable-endpoint-crd: "false" @@ -57,14 +59,18 @@ data: identity-change-grace-period: 5s install-iptables-rules: "true" ipam: kubernetes - kube-proxy-replacement: partial + kube-proxy-replacement: "false" monitor-aggregation: medium nodes-gc-interval: 5m0s preallocate-bpf-maps: "false" + remove-cilium-node-taints: "true" + routing-mode: native + set-cilium-is-up-condition: "true" + set-cilium-node-taints: "true" sidecar-istio-proxy-image: cilium/istio_proxy tofqdns-dns-reject-response-code: refused tofqdns-enable-poller: "false" - tunnel: disabled + write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist kind: ConfigMap metadata: creationTimestamp: null @@ -84,6 +90,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium rules: @@ -131,7 +138,6 @@ rules: - ciliumclusterwideenvoyconfigs - ciliumclusterwidenetworkpolicies - ciliumegressgatewaypolicies - - ciliumegressnatpolicies - ciliumendpoints - ciliumendpointslices - ciliumenvoyconfigs @@ -139,6 +145,10 @@ rules: - ciliumlocalredirectpolicies - ciliumnetworkpolicies - ciliumnodes + - ciliumnodeconfigs + - ciliumcidrgroups + - ciliuml2announcementpolicies + - ciliumpodippools verbs: - list - watch @@ -178,6 +188,7 @@ rules: - ciliumclusterwidenetworkpolicies/status - ciliumendpoints/status - ciliumendpoints + - ciliuml2announcementpolicies/status verbs: - patch @@ -190,6 +201,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium-operator rules: @@ -201,6 +213,21 @@ rules: - get - list - watch + - delete +- apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - nodes + - nodes/status + verbs: + - patch - apiGroups: - discovery.k8s.io resources: @@ -212,8 +239,16 @@ rules: - apiGroups: - "" resources: - - nodes + - services/status verbs: + - update + - patch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get - list - watch - apiGroups: @@ -221,7 +256,6 @@ rules: resources: - services - endpoints - - namespaces verbs: - get - list @@ -309,7 +343,6 @@ rules: - ciliumclusterwideenvoyconfigs.cilium.io - ciliumclusterwidenetworkpolicies.cilium.io - ciliumegressgatewaypolicies.cilium.io - - ciliumegressnatpolicies.cilium.io - ciliumendpoints.cilium.io - ciliumendpointslices.cilium.io - ciliumenvoyconfigs.cilium.io @@ -318,6 +351,10 @@ rules: - ciliumlocalredirectpolicies.cilium.io - ciliumnetworkpolicies.cilium.io - ciliumnodes.cilium.io + - ciliumnodeconfigs.cilium.io + - ciliumcidrgroups.cilium.io + - ciliuml2announcementpolicies.cilium.io + - ciliumpodippools.cilium.io resources: - customresourcedefinitions verbs: @@ -326,10 +363,17 @@ rules: - cilium.io resources: - ciliumloadbalancerippools + - ciliumpodippools verbs: - get - list - watch +- apiGroups: + - cilium.io + resources: + - ciliumpodippools + verbs: + - create - apiGroups: - cilium.io resources: @@ -354,6 +398,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium roleRef: @@ -374,6 +419,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: 
cilium-operator roleRef: @@ -387,6 +433,51 @@ subjects: --- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium + role.kubernetes.io/networking: "1" + name: cilium-config-agent + namespace: kube-system +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium + role.kubernetes.io/networking: "1" + name: cilium-config-agent + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cilium-config-agent +subjects: +- kind: ServiceAccount + name: cilium + namespace: kube-system + +--- + apiVersion: apps/v1 kind: DaemonSet metadata: @@ -394,6 +485,8 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: cilium-agent + app.kubernetes.io/part-of: cilium k8s-app: cilium kubernetes.io/cluster-service: "true" role.kubernetes.io/networking: "1" @@ -408,6 +501,8 @@ spec: metadata: creationTimestamp: null labels: + app.kubernetes.io/name: cilium-agent + app.kubernetes.io/part-of: cilium k8s-app: cilium kops.k8s.io/managed-by: kops kubernetes.io/cluster-service: "true" @@ -455,14 +550,9 @@ spec: value: api.internal.minimal-ipv6.example.com - name: KUBERNETES_SERVICE_PORT value: "443" - image: quay.io/cilium/cilium:v1.13.5 + image: quay.io/cilium/cilium:v1.14.3 imagePullPolicy: IfNotPresent lifecycle: - postStart: - exec: - command: - - /cni-install.sh - - --cni-exclusive=true preStop: exec: command: @@ -481,6 +571,7 @@ spec: successThreshold: 1 timeoutSeconds: 5 name: cilium-agent + ports: null readinessProbe: failureThreshold: 3 httpGet: @@ -491,7 +582,6 @@ spec: path: /healthz port: 9879 scheme: HTTP - initialDelaySeconds: 5 periodSeconds: 30 successThreshold: 1 timeoutSeconds: 5 @@ -512,12 +602,14 @@ spec: port: 9879 scheme: HTTP periodSeconds: 2 - successThreshold: null + successThreshold: 1 terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - mountPath: /sys/fs/bpf mountPropagation: Bidirectional name: bpf-maps + - mountPath: /run/cilium/cgroupv2 + name: cilium-cgroup - mountPath: /var/run/cilium name: cilium-run - mountPath: /host/etc/cni/net.d @@ -533,25 +625,78 @@ spec: readOnly: true - mountPath: /run/xtables.lock name: xtables-lock + - mountPath: /tmp + name: tmp hostNetwork: true initContainers: - command: - - /install-plugin.sh - image: quay.io/cilium/cilium:v1.13.5 + - cilium + - build-config + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: KUBERNETES_SERVICE_HOST + value: api.internal.minimal-ipv6.example.com + - name: KUBERNETES_SERVICE_PORT + value: "443" + image: quay.io/cilium/cilium:v1.14.3 imagePullPolicy: IfNotPresent - name: install-cni-binaries - resources: - requests: - cpu: 100m - memory: 10Mi - securityContext: - capabilities: - drop: - - ALL - terminationMessagePath: /dev/termination-log + name: config terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - - mountPath: /host/opt/cni/bin + - mountPath: /tmp + name: tmp + - command: + - sh + - -ec + - | + cp 
/usr/bin/cilium-mount /hostbin/cilium-mount; + nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT; + rm /hostbin/cilium-mount + env: + - name: CGROUP_ROOT + value: /run/cilium/cgroupv2 + - name: BIN_PATH + value: /opt/cni/bin + image: quay.io/cilium/cilium:v1.14.3 + imagePullPolicy: IfNotPresent + name: mount-cgroup + securityContext: + privileged: true + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin + name: cni-path + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix; + nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix"; + rm /hostbin/cilium-sysctlfix + env: + - name: BIN_PATH + value: /opt/cni/bin + image: quay.io/cilium/cilium:v1.14.3 + imagePullPolicy: IfNotPresent + name: apply-sysctl-overwrites + securityContext: + privileged: true + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin name: cni-path - command: - /init-container.sh @@ -568,26 +713,43 @@ spec: key: clean-cilium-bpf-state name: cilium-config optional: true - image: quay.io/cilium/cilium:v1.13.5 + - name: KUBERNETES_SERVICE_HOST + value: api.internal.minimal-ipv6.example.com + - name: KUBERNETES_SERVICE_PORT + value: "443" + image: quay.io/cilium/cilium:v1.14.3 imagePullPolicy: IfNotPresent name: clean-cilium-state - resources: - limits: - memory: 100Mi - requests: - cpu: 100m - memory: 100Mi securityContext: privileged: true terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - mountPath: /sys/fs/bpf + mountPropagation: HostToContainer name: bpf-maps - mountPath: /run/cilium/cgroupv2 mountPropagation: HostToContainer name: cilium-cgroup - mountPath: /var/run/cilium name: cilium-run + - command: + - /install-plugin.sh + image: quay.io/cilium/cilium:v1.14.3 + imagePullPolicy: IfNotPresent + name: install-cni-binaries + resources: + requests: + cpu: 100m + memory: 10Mi + securityContext: + capabilities: + drop: + - ALL + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-path priorityClassName: system-node-critical restartPolicy: Always serviceAccount: cilium @@ -596,6 +758,8 @@ spec: tolerations: - operator: Exists volumes: + - emptyDir: {} + name: tmp - hostPath: path: /var/run/cilium type: DirectoryOrCreate @@ -604,14 +768,18 @@ spec: path: /sys/fs/bpf type: DirectoryOrCreate name: bpf-maps + - hostPath: + path: /proc + type: Directory + name: hostproc + - hostPath: + path: /run/cilium/cgroupv2 + type: DirectoryOrCreate + name: cilium-cgroup - hostPath: path: /opt/cni/bin type: DirectoryOrCreate name: cni-path - - hostPath: - path: /run/cilium/cgroupv2 - type: Directory - name: cilium-cgroup - hostPath: path: /etc/cni/net.d type: DirectoryOrCreate @@ -624,10 +792,22 @@ spec: type: FileOrCreate name: xtables-lock - name: clustermesh-secrets - secret: - defaultMode: 420 - optional: true - secretName: cilium-clustermesh + projected: + defaultMode: 256 + sources: + - secret: + name: cilium-clustermesh + optional: true + - secret: + items: + - key: tls.key + path: common-etcd-client.key + - key: tls.crt + path: common-etcd-client.crt + - key: ca.crt + path: common-etcd-client-ca.crt + name: clustermesh-apiserver-remote-cert + optional: true - configMap: name: cilium-config name: cilium-config-path @@ -643,6 +823,8 @@ metadata: labels: 
addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: cilium-operator + app.kubernetes.io/part-of: cilium io.cilium/app: operator name: cilium-operator role.kubernetes.io/networking: "1" @@ -663,6 +845,8 @@ spec: metadata: creationTimestamp: null labels: + app.kubernetes.io/name: cilium-operator + app.kubernetes.io/part-of: cilium io.cilium/app: operator kops.k8s.io/managed-by: kops name: cilium-operator @@ -705,11 +889,11 @@ spec: value: api.internal.minimal-ipv6.example.com - name: KUBERNETES_SERVICE_PORT value: "443" - image: quay.io/cilium/operator:v1.13.5 + image: quay.io/cilium/operator:v1.14.3 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - host: 127.0.0.1 + host: ::1 path: /healthz port: 9234 scheme: HTTP diff --git a/tests/integration/update_cluster/minimal-ipv6-cilium/kubernetes.tf b/tests/integration/update_cluster/minimal-ipv6-cilium/kubernetes.tf index b95b25f989..033e3572e2 100644 --- a/tests/integration/update_cluster/minimal-ipv6-cilium/kubernetes.tf +++ b/tests/integration/update_cluster/minimal-ipv6-cilium/kubernetes.tf @@ -944,7 +944,7 @@ resource "aws_s3_object" "minimal-ipv6-example-com-addons-limit-range-addons-k8s resource "aws_s3_object" "minimal-ipv6-example-com-addons-networking-cilium-io-k8s-1-16" { bucket = "testingBucket" content = file("${path.module}/data/aws_s3_object_minimal-ipv6.example.com-addons-networking.cilium.io-k8s-1.16_content") - key = "clusters.example.com/minimal-ipv6.example.com/addons/networking.cilium.io/k8s-1.16-v1.13.yaml" + key = "clusters.example.com/minimal-ipv6.example.com/addons/networking.cilium.io/k8s-1.16-v1.14.yaml" provider = aws.files server_side_encryption = "AES256" } diff --git a/tests/integration/update_cluster/minimal-warmpool/data/aws_launch_template_nodes.minimal-warmpool.example.com_user_data b/tests/integration/update_cluster/minimal-warmpool/data/aws_launch_template_nodes.minimal-warmpool.example.com_user_data index c43b43a3d0..4d593e5346 100644 --- a/tests/integration/update_cluster/minimal-warmpool/data/aws_launch_template_nodes.minimal-warmpool.example.com_user_data +++ b/tests/integration/update_cluster/minimal-warmpool/data/aws_launch_template_nodes.minimal-warmpool.example.com_user_data @@ -151,7 +151,7 @@ ConfigServer: - https://kops-controller.internal.minimal-warmpool.example.com:3988/ InstanceGroupName: nodes InstanceGroupRole: Node -NodeupConfigHash: ixr/jHtjzunYpmsBwkCwqeEL1lBBh7cpOWEMPi1HAvA= +NodeupConfigHash: Ic8Yx6WnZ6jJljDBQI2bf2kvkOboul3gKc16oTpHlDI= __EOF_KUBE_ENV diff --git a/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_cluster-completed.spec_content index 599d2ab743..8705a2072a 100644 --- a/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_cluster-completed.spec_content @@ -218,7 +218,7 @@ spec: sidecarIstioProxyImage: cilium/istio_proxy toFqdnsDnsRejectResponseCode: refused tunnel: vxlan - version: v1.13.5 + version: v1.14.3 nodeTerminationHandler: cpuRequest: 50m enableRebalanceDraining: false diff --git a/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_minimal-warmpool.example.com-addons-bootstrap_content b/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_minimal-warmpool.example.com-addons-bootstrap_content index 3b0f4c2339..37fee5155c 100644 --- 
a/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_minimal-warmpool.example.com-addons-bootstrap_content +++ b/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_minimal-warmpool.example.com-addons-bootstrap_content @@ -98,8 +98,8 @@ spec: k8s-addon: storage-aws.addons.k8s.io version: 9.99.0 - id: k8s-1.16 - manifest: networking.cilium.io/k8s-1.16-v1.13.yaml - manifestHash: 3ce725cc07a4344fb82f4666145c6dd4070d10217a9bf43939bada12094cce95 + manifest: networking.cilium.io/k8s-1.16-v1.14.yaml + manifestHash: d939c9dc17f34da1cf748f890373d6c0d474f5a08e022306c2feaa9f116f2781 name: networking.cilium.io needsRollingUpdate: all selector: diff --git a/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_minimal-warmpool.example.com-addons-networking.cilium.io-k8s-1.16_content b/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_minimal-warmpool.example.com-addons-networking.cilium.io-k8s-1.16_content index 8137a58172..75f3628afa 100644 --- a/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_minimal-warmpool.example.com-addons-networking.cilium.io-k8s-1.16_content +++ b/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_minimal-warmpool.example.com-addons-networking.cilium.io-k8s-1.16_content @@ -39,6 +39,8 @@ data: bpf-policy-map-max: "16384" cgroup-root: /run/cilium/cgroupv2 cluster-name: default + cni-exclusive: "true" + cni-log-file: /var/run/cilium/cilium-cni.log debug: "false" disable-cnp-status-updates: "true" disable-endpoint-crd: "false" @@ -57,14 +59,19 @@ data: identity-change-grace-period: 5s install-iptables-rules: "true" ipam: kubernetes - kube-proxy-replacement: partial + kube-proxy-replacement: "false" monitor-aggregation: medium nodes-gc-interval: 5m0s preallocate-bpf-maps: "false" + remove-cilium-node-taints: "true" + routing-mode: tunnel + set-cilium-is-up-condition: "true" + set-cilium-node-taints: "true" sidecar-istio-proxy-image: cilium/istio_proxy tofqdns-dns-reject-response-code: refused tofqdns-enable-poller: "false" - tunnel: vxlan + tunnel-protocol: vxlan + write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist kind: ConfigMap metadata: creationTimestamp: null @@ -84,6 +91,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium rules: @@ -131,7 +139,6 @@ rules: - ciliumclusterwideenvoyconfigs - ciliumclusterwidenetworkpolicies - ciliumegressgatewaypolicies - - ciliumegressnatpolicies - ciliumendpoints - ciliumendpointslices - ciliumenvoyconfigs @@ -139,6 +146,10 @@ rules: - ciliumlocalredirectpolicies - ciliumnetworkpolicies - ciliumnodes + - ciliumnodeconfigs + - ciliumcidrgroups + - ciliuml2announcementpolicies + - ciliumpodippools verbs: - list - watch @@ -178,6 +189,7 @@ rules: - ciliumclusterwidenetworkpolicies/status - ciliumendpoints/status - ciliumendpoints + - ciliuml2announcementpolicies/status verbs: - patch @@ -190,6 +202,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium-operator rules: @@ -201,6 +214,21 @@ rules: - get - list - watch + - delete +- apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - nodes + - nodes/status + verbs: + - patch - apiGroups: - discovery.k8s.io resources: @@ -212,8 +240,16 @@ rules: - apiGroups: - "" 
resources: - - nodes + - services/status verbs: + - update + - patch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get - list - watch - apiGroups: @@ -221,7 +257,6 @@ rules: resources: - services - endpoints - - namespaces verbs: - get - list @@ -309,7 +344,6 @@ rules: - ciliumclusterwideenvoyconfigs.cilium.io - ciliumclusterwidenetworkpolicies.cilium.io - ciliumegressgatewaypolicies.cilium.io - - ciliumegressnatpolicies.cilium.io - ciliumendpoints.cilium.io - ciliumendpointslices.cilium.io - ciliumenvoyconfigs.cilium.io @@ -318,6 +352,10 @@ rules: - ciliumlocalredirectpolicies.cilium.io - ciliumnetworkpolicies.cilium.io - ciliumnodes.cilium.io + - ciliumnodeconfigs.cilium.io + - ciliumcidrgroups.cilium.io + - ciliuml2announcementpolicies.cilium.io + - ciliumpodippools.cilium.io resources: - customresourcedefinitions verbs: @@ -326,10 +364,17 @@ rules: - cilium.io resources: - ciliumloadbalancerippools + - ciliumpodippools verbs: - get - list - watch +- apiGroups: + - cilium.io + resources: + - ciliumpodippools + verbs: + - create - apiGroups: - cilium.io resources: @@ -354,6 +399,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium roleRef: @@ -374,6 +420,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium-operator roleRef: @@ -387,6 +434,51 @@ subjects: --- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium + role.kubernetes.io/networking: "1" + name: cilium-config-agent + namespace: kube-system +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium + role.kubernetes.io/networking: "1" + name: cilium-config-agent + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cilium-config-agent +subjects: +- kind: ServiceAccount + name: cilium + namespace: kube-system + +--- + apiVersion: apps/v1 kind: DaemonSet metadata: @@ -394,6 +486,8 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: cilium-agent + app.kubernetes.io/part-of: cilium k8s-app: cilium kubernetes.io/cluster-service: "true" role.kubernetes.io/networking: "1" @@ -408,6 +502,8 @@ spec: metadata: creationTimestamp: null labels: + app.kubernetes.io/name: cilium-agent + app.kubernetes.io/part-of: cilium k8s-app: cilium kops.k8s.io/managed-by: kops kubernetes.io/cluster-service: "true" @@ -455,14 +551,9 @@ spec: value: api.internal.minimal-warmpool.example.com - name: KUBERNETES_SERVICE_PORT value: "443" - image: quay.io/cilium/cilium:v1.13.5 + image: quay.io/cilium/cilium:v1.14.3 imagePullPolicy: IfNotPresent lifecycle: - postStart: - exec: - command: - - /cni-install.sh - - --cni-exclusive=true preStop: exec: command: @@ -481,6 +572,7 @@ spec: successThreshold: 1 timeoutSeconds: 5 name: cilium-agent + ports: null readinessProbe: failureThreshold: 3 httpGet: @@ -491,7 +583,6 @@ spec: path: /healthz port: 
9879 scheme: HTTP - initialDelaySeconds: 5 periodSeconds: 30 successThreshold: 1 timeoutSeconds: 5 @@ -512,12 +603,14 @@ spec: port: 9879 scheme: HTTP periodSeconds: 2 - successThreshold: null + successThreshold: 1 terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - mountPath: /sys/fs/bpf mountPropagation: Bidirectional name: bpf-maps + - mountPath: /run/cilium/cgroupv2 + name: cilium-cgroup - mountPath: /var/run/cilium name: cilium-run - mountPath: /host/etc/cni/net.d @@ -533,25 +626,78 @@ spec: readOnly: true - mountPath: /run/xtables.lock name: xtables-lock + - mountPath: /tmp + name: tmp hostNetwork: true initContainers: - command: - - /install-plugin.sh - image: quay.io/cilium/cilium:v1.13.5 + - cilium + - build-config + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: KUBERNETES_SERVICE_HOST + value: api.internal.minimal-warmpool.example.com + - name: KUBERNETES_SERVICE_PORT + value: "443" + image: quay.io/cilium/cilium:v1.14.3 imagePullPolicy: IfNotPresent - name: install-cni-binaries - resources: - requests: - cpu: 100m - memory: 10Mi - securityContext: - capabilities: - drop: - - ALL - terminationMessagePath: /dev/termination-log + name: config terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - - mountPath: /host/opt/cni/bin + - mountPath: /tmp + name: tmp + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-mount /hostbin/cilium-mount; + nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT; + rm /hostbin/cilium-mount + env: + - name: CGROUP_ROOT + value: /run/cilium/cgroupv2 + - name: BIN_PATH + value: /opt/cni/bin + image: quay.io/cilium/cilium:v1.14.3 + imagePullPolicy: IfNotPresent + name: mount-cgroup + securityContext: + privileged: true + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin + name: cni-path + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix; + nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix"; + rm /hostbin/cilium-sysctlfix + env: + - name: BIN_PATH + value: /opt/cni/bin + image: quay.io/cilium/cilium:v1.14.3 + imagePullPolicy: IfNotPresent + name: apply-sysctl-overwrites + securityContext: + privileged: true + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin name: cni-path - command: - /init-container.sh @@ -568,26 +714,43 @@ spec: key: clean-cilium-bpf-state name: cilium-config optional: true - image: quay.io/cilium/cilium:v1.13.5 + - name: KUBERNETES_SERVICE_HOST + value: api.internal.minimal-warmpool.example.com + - name: KUBERNETES_SERVICE_PORT + value: "443" + image: quay.io/cilium/cilium:v1.14.3 imagePullPolicy: IfNotPresent name: clean-cilium-state - resources: - limits: - memory: 100Mi - requests: - cpu: 100m - memory: 100Mi securityContext: privileged: true terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - mountPath: /sys/fs/bpf + mountPropagation: HostToContainer name: bpf-maps - mountPath: /run/cilium/cgroupv2 mountPropagation: HostToContainer name: cilium-cgroup - mountPath: /var/run/cilium name: cilium-run + - command: + - /install-plugin.sh + image: quay.io/cilium/cilium:v1.14.3 + imagePullPolicy: IfNotPresent + name: install-cni-binaries + resources: + requests: + cpu: 100m + 
memory: 10Mi + securityContext: + capabilities: + drop: + - ALL + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-path priorityClassName: system-node-critical restartPolicy: Always serviceAccount: cilium @@ -596,6 +759,8 @@ spec: tolerations: - operator: Exists volumes: + - emptyDir: {} + name: tmp - hostPath: path: /var/run/cilium type: DirectoryOrCreate @@ -604,14 +769,18 @@ spec: path: /sys/fs/bpf type: DirectoryOrCreate name: bpf-maps + - hostPath: + path: /proc + type: Directory + name: hostproc + - hostPath: + path: /run/cilium/cgroupv2 + type: DirectoryOrCreate + name: cilium-cgroup - hostPath: path: /opt/cni/bin type: DirectoryOrCreate name: cni-path - - hostPath: - path: /run/cilium/cgroupv2 - type: Directory - name: cilium-cgroup - hostPath: path: /etc/cni/net.d type: DirectoryOrCreate @@ -624,10 +793,22 @@ spec: type: FileOrCreate name: xtables-lock - name: clustermesh-secrets - secret: - defaultMode: 420 - optional: true - secretName: cilium-clustermesh + projected: + defaultMode: 256 + sources: + - secret: + name: cilium-clustermesh + optional: true + - secret: + items: + - key: tls.key + path: common-etcd-client.key + - key: tls.crt + path: common-etcd-client.crt + - key: ca.crt + path: common-etcd-client-ca.crt + name: clustermesh-apiserver-remote-cert + optional: true - configMap: name: cilium-config name: cilium-config-path @@ -643,6 +824,8 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: cilium-operator + app.kubernetes.io/part-of: cilium io.cilium/app: operator name: cilium-operator role.kubernetes.io/networking: "1" @@ -663,6 +846,8 @@ spec: metadata: creationTimestamp: null labels: + app.kubernetes.io/name: cilium-operator + app.kubernetes.io/part-of: cilium io.cilium/app: operator kops.k8s.io/managed-by: kops name: cilium-operator @@ -705,7 +890,7 @@ spec: value: api.internal.minimal-warmpool.example.com - name: KUBERNETES_SERVICE_PORT value: "443" - image: quay.io/cilium/operator:v1.13.5 + image: quay.io/cilium/operator:v1.14.3 imagePullPolicy: IfNotPresent livenessProbe: httpGet: diff --git a/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_nodeupconfig-nodes_content b/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_nodeupconfig-nodes_content index 75052880d5..4d666fa278 100644 --- a/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_nodeupconfig-nodes_content +++ b/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_nodeupconfig-nodes_content @@ -60,8 +60,8 @@ containerdConfig: usesLegacyGossip: false usesNoneDNS: false warmPoolImages: -- quay.io/cilium/cilium:v1.13.5 -- quay.io/cilium/operator:v1.13.5 +- quay.io/cilium/cilium:v1.14.3 +- quay.io/cilium/operator:v1.14.3 - registry.k8s.io/kube-proxy:v1.26.0 - registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.14.1 - registry.k8s.io/provider-aws/cloud-controller-manager:v1.26.6 diff --git a/tests/integration/update_cluster/minimal-warmpool/kubernetes.tf b/tests/integration/update_cluster/minimal-warmpool/kubernetes.tf index add0853e60..b57d51c081 100644 --- a/tests/integration/update_cluster/minimal-warmpool/kubernetes.tf +++ b/tests/integration/update_cluster/minimal-warmpool/kubernetes.tf @@ -712,7 +712,7 @@ resource "aws_s3_object" "minimal-warmpool-example-com-addons-limit-range-addons resource "aws_s3_object" 
"minimal-warmpool-example-com-addons-networking-cilium-io-k8s-1-16" { bucket = "testingBucket" content = file("${path.module}/data/aws_s3_object_minimal-warmpool.example.com-addons-networking.cilium.io-k8s-1.16_content") - key = "clusters.example.com/minimal-warmpool.example.com/addons/networking.cilium.io/k8s-1.16-v1.13.yaml" + key = "clusters.example.com/minimal-warmpool.example.com/addons/networking.cilium.io/k8s-1.16-v1.14.yaml" provider = aws.files server_side_encryption = "AES256" } diff --git a/tests/integration/update_cluster/minimal_scaleway/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/minimal_scaleway/data/aws_s3_object_cluster-completed.spec_content index 33772d3893..1efebf0d00 100644 --- a/tests/integration/update_cluster/minimal_scaleway/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/minimal_scaleway/data/aws_s3_object_cluster-completed.spec_content @@ -199,7 +199,7 @@ spec: sidecarIstioProxyImage: cilium/istio_proxy toFqdnsDnsRejectResponseCode: refused tunnel: vxlan - version: v1.13.5 + version: v1.14.3 nonMasqueradeCIDR: 100.64.0.0/10 podCIDR: 100.96.0.0/11 secretStore: memfs://tests/scw-minimal.k8s.local/secrets diff --git a/tests/integration/update_cluster/minimal_scaleway/data/aws_s3_object_scw-minimal.k8s.local-addons-bootstrap_content b/tests/integration/update_cluster/minimal_scaleway/data/aws_s3_object_scw-minimal.k8s.local-addons-bootstrap_content index 98a6828ee2..c8e8962cbb 100644 --- a/tests/integration/update_cluster/minimal_scaleway/data/aws_s3_object_scw-minimal.k8s.local-addons-bootstrap_content +++ b/tests/integration/update_cluster/minimal_scaleway/data/aws_s3_object_scw-minimal.k8s.local-addons-bootstrap_content @@ -54,8 +54,8 @@ spec: k8s-addon: scaleway-csi-driver.addons.k8s.io version: 9.99.0 - id: k8s-1.16 - manifest: networking.cilium.io/k8s-1.16-v1.13.yaml - manifestHash: 0965eae063f29669172d217374bc812d27eab79b5e2daeeda759de9ba7fdfeb6 + manifest: networking.cilium.io/k8s-1.16-v1.14.yaml + manifestHash: 5ecc7aca559459d06d6474991ec3b6e034f75834c971213fec620aeafe31eb71 name: networking.cilium.io needsRollingUpdate: all selector: diff --git a/tests/integration/update_cluster/minimal_scaleway/data/aws_s3_object_scw-minimal.k8s.local-addons-networking.cilium.io-k8s-1.16_content b/tests/integration/update_cluster/minimal_scaleway/data/aws_s3_object_scw-minimal.k8s.local-addons-networking.cilium.io-k8s-1.16_content index 3d7ec9e62b..e78ea075fc 100644 --- a/tests/integration/update_cluster/minimal_scaleway/data/aws_s3_object_scw-minimal.k8s.local-addons-networking.cilium.io-k8s-1.16_content +++ b/tests/integration/update_cluster/minimal_scaleway/data/aws_s3_object_scw-minimal.k8s.local-addons-networking.cilium.io-k8s-1.16_content @@ -39,6 +39,8 @@ data: bpf-policy-map-max: "16384" cgroup-root: /run/cilium/cgroupv2 cluster-name: default + cni-exclusive: "true" + cni-log-file: /var/run/cilium/cilium-cni.log debug: "false" disable-cnp-status-updates: "true" disable-endpoint-crd: "false" @@ -57,14 +59,19 @@ data: identity-change-grace-period: 5s install-iptables-rules: "true" ipam: kubernetes - kube-proxy-replacement: strict + kube-proxy-replacement: "true" monitor-aggregation: medium nodes-gc-interval: 5m0s preallocate-bpf-maps: "false" + remove-cilium-node-taints: "true" + routing-mode: tunnel + set-cilium-is-up-condition: "true" + set-cilium-node-taints: "true" sidecar-istio-proxy-image: cilium/istio_proxy tofqdns-dns-reject-response-code: refused tofqdns-enable-poller: "false" 
- tunnel: vxlan + tunnel-protocol: vxlan + write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist kind: ConfigMap metadata: creationTimestamp: null @@ -84,6 +91,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium rules: @@ -131,7 +139,6 @@ rules: - ciliumclusterwideenvoyconfigs - ciliumclusterwidenetworkpolicies - ciliumegressgatewaypolicies - - ciliumegressnatpolicies - ciliumendpoints - ciliumendpointslices - ciliumenvoyconfigs @@ -139,6 +146,10 @@ rules: - ciliumlocalredirectpolicies - ciliumnetworkpolicies - ciliumnodes + - ciliumnodeconfigs + - ciliumcidrgroups + - ciliuml2announcementpolicies + - ciliumpodippools verbs: - list - watch @@ -178,6 +189,7 @@ rules: - ciliumclusterwidenetworkpolicies/status - ciliumendpoints/status - ciliumendpoints + - ciliuml2announcementpolicies/status verbs: - patch @@ -190,6 +202,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium-operator rules: @@ -201,6 +214,21 @@ rules: - get - list - watch + - delete +- apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - nodes + - nodes/status + verbs: + - patch - apiGroups: - discovery.k8s.io resources: @@ -212,8 +240,16 @@ rules: - apiGroups: - "" resources: - - nodes + - services/status verbs: + - update + - patch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get - list - watch - apiGroups: @@ -221,7 +257,6 @@ rules: resources: - services - endpoints - - namespaces verbs: - get - list @@ -309,7 +344,6 @@ rules: - ciliumclusterwideenvoyconfigs.cilium.io - ciliumclusterwidenetworkpolicies.cilium.io - ciliumegressgatewaypolicies.cilium.io - - ciliumegressnatpolicies.cilium.io - ciliumendpoints.cilium.io - ciliumendpointslices.cilium.io - ciliumenvoyconfigs.cilium.io @@ -318,6 +352,10 @@ rules: - ciliumlocalredirectpolicies.cilium.io - ciliumnetworkpolicies.cilium.io - ciliumnodes.cilium.io + - ciliumnodeconfigs.cilium.io + - ciliumcidrgroups.cilium.io + - ciliuml2announcementpolicies.cilium.io + - ciliumpodippools.cilium.io resources: - customresourcedefinitions verbs: @@ -326,10 +364,17 @@ rules: - cilium.io resources: - ciliumloadbalancerippools + - ciliumpodippools verbs: - get - list - watch +- apiGroups: + - cilium.io + resources: + - ciliumpodippools + verbs: + - create - apiGroups: - cilium.io resources: @@ -354,6 +399,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium roleRef: @@ -374,6 +420,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium-operator roleRef: @@ -387,6 +434,51 @@ subjects: --- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium + role.kubernetes.io/networking: "1" + name: cilium-config-agent + namespace: kube-system +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + creationTimestamp: 
null + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium + role.kubernetes.io/networking: "1" + name: cilium-config-agent + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cilium-config-agent +subjects: +- kind: ServiceAccount + name: cilium + namespace: kube-system + +--- + apiVersion: apps/v1 kind: DaemonSet metadata: @@ -394,6 +486,8 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: cilium-agent + app.kubernetes.io/part-of: cilium k8s-app: cilium kubernetes.io/cluster-service: "true" role.kubernetes.io/networking: "1" @@ -408,6 +502,8 @@ spec: metadata: creationTimestamp: null labels: + app.kubernetes.io/name: cilium-agent + app.kubernetes.io/part-of: cilium k8s-app: cilium kops.k8s.io/managed-by: kops kubernetes.io/cluster-service: "true" @@ -455,14 +551,9 @@ spec: value: api.internal.scw-minimal.k8s.local - name: KUBERNETES_SERVICE_PORT value: "443" - image: quay.io/cilium/cilium:v1.13.5 + image: quay.io/cilium/cilium:v1.14.3 imagePullPolicy: IfNotPresent lifecycle: - postStart: - exec: - command: - - /cni-install.sh - - --cni-exclusive=true preStop: exec: command: @@ -481,6 +572,7 @@ spec: successThreshold: 1 timeoutSeconds: 5 name: cilium-agent + ports: null readinessProbe: failureThreshold: 3 httpGet: @@ -491,7 +583,6 @@ spec: path: /healthz port: 9879 scheme: HTTP - initialDelaySeconds: 5 periodSeconds: 30 successThreshold: 1 timeoutSeconds: 5 @@ -512,12 +603,14 @@ spec: port: 9879 scheme: HTTP periodSeconds: 2 - successThreshold: null + successThreshold: 1 terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - mountPath: /sys/fs/bpf mountPropagation: Bidirectional name: bpf-maps + - mountPath: /run/cilium/cgroupv2 + name: cilium-cgroup - mountPath: /var/run/cilium name: cilium-run - mountPath: /host/etc/cni/net.d @@ -533,25 +626,78 @@ spec: readOnly: true - mountPath: /run/xtables.lock name: xtables-lock + - mountPath: /tmp + name: tmp hostNetwork: true initContainers: - command: - - /install-plugin.sh - image: quay.io/cilium/cilium:v1.13.5 + - cilium + - build-config + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: KUBERNETES_SERVICE_HOST + value: api.internal.scw-minimal.k8s.local + - name: KUBERNETES_SERVICE_PORT + value: "443" + image: quay.io/cilium/cilium:v1.14.3 imagePullPolicy: IfNotPresent - name: install-cni-binaries - resources: - requests: - cpu: 100m - memory: 10Mi - securityContext: - capabilities: - drop: - - ALL - terminationMessagePath: /dev/termination-log + name: config terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - - mountPath: /host/opt/cni/bin + - mountPath: /tmp + name: tmp + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-mount /hostbin/cilium-mount; + nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT; + rm /hostbin/cilium-mount + env: + - name: CGROUP_ROOT + value: /run/cilium/cgroupv2 + - name: BIN_PATH + value: /opt/cni/bin + image: quay.io/cilium/cilium:v1.14.3 + imagePullPolicy: IfNotPresent + name: mount-cgroup + securityContext: + privileged: true + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin + name: cni-path + - command: 
+ - sh + - -ec + - | + cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix; + nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix"; + rm /hostbin/cilium-sysctlfix + env: + - name: BIN_PATH + value: /opt/cni/bin + image: quay.io/cilium/cilium:v1.14.3 + imagePullPolicy: IfNotPresent + name: apply-sysctl-overwrites + securityContext: + privileged: true + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin name: cni-path - command: - /init-container.sh @@ -568,26 +714,43 @@ spec: key: clean-cilium-bpf-state name: cilium-config optional: true - image: quay.io/cilium/cilium:v1.13.5 + - name: KUBERNETES_SERVICE_HOST + value: api.internal.scw-minimal.k8s.local + - name: KUBERNETES_SERVICE_PORT + value: "443" + image: quay.io/cilium/cilium:v1.14.3 imagePullPolicy: IfNotPresent name: clean-cilium-state - resources: - limits: - memory: 100Mi - requests: - cpu: 100m - memory: 100Mi securityContext: privileged: true terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - mountPath: /sys/fs/bpf + mountPropagation: HostToContainer name: bpf-maps - mountPath: /run/cilium/cgroupv2 mountPropagation: HostToContainer name: cilium-cgroup - mountPath: /var/run/cilium name: cilium-run + - command: + - /install-plugin.sh + image: quay.io/cilium/cilium:v1.14.3 + imagePullPolicy: IfNotPresent + name: install-cni-binaries + resources: + requests: + cpu: 100m + memory: 10Mi + securityContext: + capabilities: + drop: + - ALL + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-path priorityClassName: system-node-critical restartPolicy: Always serviceAccount: cilium @@ -596,6 +759,8 @@ spec: tolerations: - operator: Exists volumes: + - emptyDir: {} + name: tmp - hostPath: path: /var/run/cilium type: DirectoryOrCreate @@ -604,14 +769,18 @@ spec: path: /sys/fs/bpf type: DirectoryOrCreate name: bpf-maps + - hostPath: + path: /proc + type: Directory + name: hostproc + - hostPath: + path: /run/cilium/cgroupv2 + type: DirectoryOrCreate + name: cilium-cgroup - hostPath: path: /opt/cni/bin type: DirectoryOrCreate name: cni-path - - hostPath: - path: /run/cilium/cgroupv2 - type: Directory - name: cilium-cgroup - hostPath: path: /etc/cni/net.d type: DirectoryOrCreate @@ -624,10 +793,22 @@ spec: type: FileOrCreate name: xtables-lock - name: clustermesh-secrets - secret: - defaultMode: 420 - optional: true - secretName: cilium-clustermesh + projected: + defaultMode: 256 + sources: + - secret: + name: cilium-clustermesh + optional: true + - secret: + items: + - key: tls.key + path: common-etcd-client.key + - key: tls.crt + path: common-etcd-client.crt + - key: ca.crt + path: common-etcd-client-ca.crt + name: clustermesh-apiserver-remote-cert + optional: true - configMap: name: cilium-config name: cilium-config-path @@ -643,6 +824,8 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: cilium-operator + app.kubernetes.io/part-of: cilium io.cilium/app: operator name: cilium-operator role.kubernetes.io/networking: "1" @@ -663,6 +846,8 @@ spec: metadata: creationTimestamp: null labels: + app.kubernetes.io/name: cilium-operator + app.kubernetes.io/part-of: cilium io.cilium/app: operator kops.k8s.io/managed-by: kops name: cilium-operator @@ -705,7 +890,7 @@ spec: value: api.internal.scw-minimal.k8s.local - name: KUBERNETES_SERVICE_PORT value: "443" - image: 
quay.io/cilium/operator:v1.13.5 + image: quay.io/cilium/operator:v1.14.3 imagePullPolicy: IfNotPresent livenessProbe: httpGet: diff --git a/tests/integration/update_cluster/minimal_scaleway/kubernetes.tf b/tests/integration/update_cluster/minimal_scaleway/kubernetes.tf index 86370f4f65..2b726d3d8d 100644 --- a/tests/integration/update_cluster/minimal_scaleway/kubernetes.tf +++ b/tests/integration/update_cluster/minimal_scaleway/kubernetes.tf @@ -149,7 +149,7 @@ resource "aws_s3_object" "scw-minimal-k8s-local-addons-limit-range-addons-k8s-io resource "aws_s3_object" "scw-minimal-k8s-local-addons-networking-cilium-io-k8s-1-16" { bucket = "testingBucket" content = file("${path.module}/data/aws_s3_object_scw-minimal.k8s.local-addons-networking.cilium.io-k8s-1.16_content") - key = "tests/scw-minimal.k8s.local/addons/networking.cilium.io/k8s-1.16-v1.13.yaml" + key = "tests/scw-minimal.k8s.local/addons/networking.cilium.io/k8s-1.16-v1.14.yaml" provider = aws.files server_side_encryption = "AES256" } diff --git a/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_cluster-completed.spec_content index dec90d094e..b11ac971c3 100644 --- a/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_cluster-completed.spec_content @@ -220,7 +220,7 @@ spec: sidecarIstioProxyImage: cilium/istio_proxy toFqdnsDnsRejectResponseCode: refused tunnel: disabled - version: v1.13.5 + version: v1.14.3 nodeTerminationHandler: cpuRequest: 50m enableRebalanceDraining: false diff --git a/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_privatecilium.example.com-addons-bootstrap_content b/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_privatecilium.example.com-addons-bootstrap_content index 71707900d6..5b31df378d 100644 --- a/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_privatecilium.example.com-addons-bootstrap_content +++ b/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_privatecilium.example.com-addons-bootstrap_content @@ -98,8 +98,8 @@ spec: k8s-addon: storage-aws.addons.k8s.io version: 9.99.0 - id: k8s-1.16 - manifest: networking.cilium.io/k8s-1.16-v1.13.yaml - manifestHash: f477be8a0899c266e8a71d80fc70ddd61b6564455ce75560b877d92c6f12a762 + manifest: networking.cilium.io/k8s-1.16-v1.14.yaml + manifestHash: dc0ffacc5b54ff7ce6d48ad648b291624ae47bbd80cbdd5268f48bc866a6cf3e name: networking.cilium.io needsRollingUpdate: all selector: diff --git a/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_privatecilium.example.com-addons-networking.cilium.io-k8s-1.16_content b/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_privatecilium.example.com-addons-networking.cilium.io-k8s-1.16_content index cead27d822..1eb13148b5 100644 --- a/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_privatecilium.example.com-addons-networking.cilium.io-k8s-1.16_content +++ b/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_privatecilium.example.com-addons-networking.cilium.io-k8s-1.16_content @@ -29,7 +29,6 @@ data: agent-health-port: "9879" auto-create-cilium-node-resource: "true" auto-direct-node-routes: "false" - blacklist-conflicting-routes: "false" bpf-ct-global-any-max: "262144" bpf-ct-global-tcp-max: "524288" bpf-lb-algorithm: 
random @@ -41,6 +40,8 @@ data: bpf-policy-map-max: "16384" cgroup-root: /run/cilium/cgroupv2 cluster-name: default + cni-exclusive: "true" + cni-log-file: /var/run/cilium/cilium-cni.log debug: "false" disable-cnp-status-updates: "true" disable-endpoint-crd: "false" @@ -56,18 +57,23 @@ data: enable-remote-node-identity: "true" enable-service-topology: "false" enable-unreachable-routes: "false" + eni-tags: KubernetesCluster=privatecilium.example.com identity-allocation-mode: crd identity-change-grace-period: 5s install-iptables-rules: "true" ipam: eni - kube-proxy-replacement: partial + kube-proxy-replacement: "false" monitor-aggregation: medium nodes-gc-interval: 5m0s preallocate-bpf-maps: "false" + remove-cilium-node-taints: "true" + routing-mode: native + set-cilium-is-up-condition: "true" + set-cilium-node-taints: "true" sidecar-istio-proxy-image: cilium/istio_proxy tofqdns-dns-reject-response-code: refused tofqdns-enable-poller: "false" - tunnel: disabled + write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist kind: ConfigMap metadata: creationTimestamp: null @@ -87,6 +93,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium rules: @@ -134,7 +141,6 @@ rules: - ciliumclusterwideenvoyconfigs - ciliumclusterwidenetworkpolicies - ciliumegressgatewaypolicies - - ciliumegressnatpolicies - ciliumendpoints - ciliumendpointslices - ciliumenvoyconfigs @@ -142,6 +148,10 @@ rules: - ciliumlocalredirectpolicies - ciliumnetworkpolicies - ciliumnodes + - ciliumnodeconfigs + - ciliumcidrgroups + - ciliuml2announcementpolicies + - ciliumpodippools verbs: - list - watch @@ -181,6 +191,7 @@ rules: - ciliumclusterwidenetworkpolicies/status - ciliumendpoints/status - ciliumendpoints + - ciliuml2announcementpolicies/status verbs: - patch @@ -193,6 +204,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium-operator rules: @@ -204,6 +216,21 @@ rules: - get - list - watch + - delete +- apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - nodes + - nodes/status + verbs: + - patch - apiGroups: - discovery.k8s.io resources: @@ -215,8 +242,16 @@ rules: - apiGroups: - "" resources: - - nodes + - services/status verbs: + - update + - patch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get - list - watch - apiGroups: @@ -224,7 +259,6 @@ rules: resources: - services - endpoints - - namespaces verbs: - get - list @@ -312,7 +346,6 @@ rules: - ciliumclusterwideenvoyconfigs.cilium.io - ciliumclusterwidenetworkpolicies.cilium.io - ciliumegressgatewaypolicies.cilium.io - - ciliumegressnatpolicies.cilium.io - ciliumendpoints.cilium.io - ciliumendpointslices.cilium.io - ciliumenvoyconfigs.cilium.io @@ -321,6 +354,10 @@ rules: - ciliumlocalredirectpolicies.cilium.io - ciliumnetworkpolicies.cilium.io - ciliumnodes.cilium.io + - ciliumnodeconfigs.cilium.io + - ciliumcidrgroups.cilium.io + - ciliuml2announcementpolicies.cilium.io + - ciliumpodippools.cilium.io resources: - customresourcedefinitions verbs: @@ -329,10 +366,17 @@ rules: - cilium.io resources: - ciliumloadbalancerippools + - ciliumpodippools verbs: - get - list - watch +- apiGroups: + - cilium.io + resources: + - ciliumpodippools + verbs: + - create - apiGroups: - cilium.io resources: @@ -357,6 +401,7 @@ metadata: labels: 
addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium roleRef: @@ -377,6 +422,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium-operator roleRef: @@ -390,6 +436,51 @@ subjects: --- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium + role.kubernetes.io/networking: "1" + name: cilium-config-agent + namespace: kube-system +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium + role.kubernetes.io/networking: "1" + name: cilium-config-agent + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cilium-config-agent +subjects: +- kind: ServiceAccount + name: cilium + namespace: kube-system + +--- + apiVersion: apps/v1 kind: DaemonSet metadata: @@ -397,6 +488,8 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: cilium-agent + app.kubernetes.io/part-of: cilium k8s-app: cilium kubernetes.io/cluster-service: "true" role.kubernetes.io/networking: "1" @@ -411,6 +504,8 @@ spec: metadata: creationTimestamp: null labels: + app.kubernetes.io/name: cilium-agent + app.kubernetes.io/part-of: cilium k8s-app: cilium kops.k8s.io/managed-by: kops kubernetes.io/cluster-service: "true" @@ -458,14 +553,34 @@ spec: value: api.internal.privatecilium.example.com - name: KUBERNETES_SERVICE_PORT value: "443" - image: quay.io/cilium/cilium:v1.13.5 + image: quay.io/cilium/cilium:v1.14.3 imagePullPolicy: IfNotPresent lifecycle: postStart: exec: command: - - /cni-install.sh - - --cni-exclusive=true + - bash + - -c + - | + set -o errexit + set -o pipefail + set -o nounset + + # When running in AWS ENI mode, it's likely that 'aws-node' has + # had a chance to install SNAT iptables rules. These can result + # in dropped traffic, so we should attempt to remove them. + # We do it using a 'postStart' hook since this may need to run + # for nodes which might have already been init'ed but may still + # have dangling rules. This is safe because there are no + # dependencies on anything that is part of the startup script + # itself, and can be safely run multiple times per node (e.g. in + # case of a restart). + if [[ "$(iptables-save | grep -c AWS-SNAT-CHAIN)" != "0" ]]; + then + echo 'Deleting iptables rules created by the AWS CNI VPC plugin' + iptables-save | grep -v AWS-SNAT-CHAIN | iptables-restore + fi + echo 'Done!' 
preStop: exec: command: @@ -484,6 +599,7 @@ spec: successThreshold: 1 timeoutSeconds: 5 name: cilium-agent + ports: null readinessProbe: failureThreshold: 3 httpGet: @@ -494,7 +610,6 @@ spec: path: /healthz port: 9879 scheme: HTTP - initialDelaySeconds: 5 periodSeconds: 30 successThreshold: 1 timeoutSeconds: 5 @@ -515,12 +630,14 @@ spec: port: 9879 scheme: HTTP periodSeconds: 2 - successThreshold: null + successThreshold: 1 terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - mountPath: /sys/fs/bpf mountPropagation: Bidirectional name: bpf-maps + - mountPath: /run/cilium/cgroupv2 + name: cilium-cgroup - mountPath: /var/run/cilium name: cilium-run - mountPath: /host/etc/cni/net.d @@ -536,25 +653,78 @@ spec: readOnly: true - mountPath: /run/xtables.lock name: xtables-lock + - mountPath: /tmp + name: tmp hostNetwork: true initContainers: - command: - - /install-plugin.sh - image: quay.io/cilium/cilium:v1.13.5 + - cilium + - build-config + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: KUBERNETES_SERVICE_HOST + value: api.internal.privatecilium.example.com + - name: KUBERNETES_SERVICE_PORT + value: "443" + image: quay.io/cilium/cilium:v1.14.3 imagePullPolicy: IfNotPresent - name: install-cni-binaries - resources: - requests: - cpu: 100m - memory: 10Mi - securityContext: - capabilities: - drop: - - ALL - terminationMessagePath: /dev/termination-log + name: config terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - - mountPath: /host/opt/cni/bin + - mountPath: /tmp + name: tmp + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-mount /hostbin/cilium-mount; + nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT; + rm /hostbin/cilium-mount + env: + - name: CGROUP_ROOT + value: /run/cilium/cgroupv2 + - name: BIN_PATH + value: /opt/cni/bin + image: quay.io/cilium/cilium:v1.14.3 + imagePullPolicy: IfNotPresent + name: mount-cgroup + securityContext: + privileged: true + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin + name: cni-path + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix; + nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix"; + rm /hostbin/cilium-sysctlfix + env: + - name: BIN_PATH + value: /opt/cni/bin + image: quay.io/cilium/cilium:v1.14.3 + imagePullPolicy: IfNotPresent + name: apply-sysctl-overwrites + securityContext: + privileged: true + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin name: cni-path - command: - /init-container.sh @@ -571,26 +741,43 @@ spec: key: clean-cilium-bpf-state name: cilium-config optional: true - image: quay.io/cilium/cilium:v1.13.5 + - name: KUBERNETES_SERVICE_HOST + value: api.internal.privatecilium.example.com + - name: KUBERNETES_SERVICE_PORT + value: "443" + image: quay.io/cilium/cilium:v1.14.3 imagePullPolicy: IfNotPresent name: clean-cilium-state - resources: - limits: - memory: 100Mi - requests: - cpu: 100m - memory: 100Mi securityContext: privileged: true terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - mountPath: /sys/fs/bpf + mountPropagation: HostToContainer name: bpf-maps - mountPath: /run/cilium/cgroupv2 mountPropagation: HostToContainer name: cilium-cgroup - mountPath: 
/var/run/cilium name: cilium-run + - command: + - /install-plugin.sh + image: quay.io/cilium/cilium:v1.14.3 + imagePullPolicy: IfNotPresent + name: install-cni-binaries + resources: + requests: + cpu: 100m + memory: 10Mi + securityContext: + capabilities: + drop: + - ALL + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-path priorityClassName: system-node-critical restartPolicy: Always serviceAccount: cilium @@ -599,6 +786,8 @@ spec: tolerations: - operator: Exists volumes: + - emptyDir: {} + name: tmp - hostPath: path: /var/run/cilium type: DirectoryOrCreate @@ -607,14 +796,18 @@ spec: path: /sys/fs/bpf type: DirectoryOrCreate name: bpf-maps + - hostPath: + path: /proc + type: Directory + name: hostproc + - hostPath: + path: /run/cilium/cgroupv2 + type: DirectoryOrCreate + name: cilium-cgroup - hostPath: path: /opt/cni/bin type: DirectoryOrCreate name: cni-path - - hostPath: - path: /run/cilium/cgroupv2 - type: Directory - name: cilium-cgroup - hostPath: path: /etc/cni/net.d type: DirectoryOrCreate @@ -627,10 +820,22 @@ spec: type: FileOrCreate name: xtables-lock - name: clustermesh-secrets - secret: - defaultMode: 420 - optional: true - secretName: cilium-clustermesh + projected: + defaultMode: 256 + sources: + - secret: + name: cilium-clustermesh + optional: true + - secret: + items: + - key: tls.key + path: common-etcd-client.key + - key: tls.crt + path: common-etcd-client.crt + - key: ca.crt + path: common-etcd-client-ca.crt + name: clustermesh-apiserver-remote-cert + optional: true - configMap: name: cilium-config name: cilium-config-path @@ -646,6 +851,8 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: cilium-operator + app.kubernetes.io/part-of: cilium io.cilium/app: operator name: cilium-operator role.kubernetes.io/networking: "1" @@ -666,6 +873,8 @@ spec: metadata: creationTimestamp: null labels: + app.kubernetes.io/name: cilium-operator + app.kubernetes.io/part-of: cilium io.cilium/app: operator kops.k8s.io/managed-by: kops name: cilium-operator @@ -708,7 +917,7 @@ spec: value: api.internal.privatecilium.example.com - name: KUBERNETES_SERVICE_PORT value: "443" - image: quay.io/cilium/operator:v1.13.5 + image: quay.io/cilium/operator:v1.14.3 imagePullPolicy: IfNotPresent livenessProbe: httpGet: diff --git a/tests/integration/update_cluster/privatecilium-eni/kubernetes.tf b/tests/integration/update_cluster/privatecilium-eni/kubernetes.tf index 71e93ebb1d..b0554750c3 100644 --- a/tests/integration/update_cluster/privatecilium-eni/kubernetes.tf +++ b/tests/integration/update_cluster/privatecilium-eni/kubernetes.tf @@ -1032,7 +1032,7 @@ resource "aws_s3_object" "privatecilium-example-com-addons-limit-range-addons-k8 resource "aws_s3_object" "privatecilium-example-com-addons-networking-cilium-io-k8s-1-16" { bucket = "testingBucket" content = file("${path.module}/data/aws_s3_object_privatecilium.example.com-addons-networking.cilium.io-k8s-1.16_content") - key = "clusters.example.com/privatecilium.example.com/addons/networking.cilium.io/k8s-1.16-v1.13.yaml" + key = "clusters.example.com/privatecilium.example.com/addons/networking.cilium.io/k8s-1.16-v1.14.yaml" provider = aws.files server_side_encryption = "AES256" } diff --git a/tests/integration/update_cluster/privatecilium/data/aws_s3_object_cluster-completed.spec_content 
b/tests/integration/update_cluster/privatecilium/data/aws_s3_object_cluster-completed.spec_content index bf5024fe76..f0d079561b 100644 --- a/tests/integration/update_cluster/privatecilium/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/privatecilium/data/aws_s3_object_cluster-completed.spec_content @@ -228,7 +228,7 @@ spec: sidecarIstioProxyImage: cilium/istio_proxy toFqdnsDnsRejectResponseCode: refused tunnel: vxlan - version: v1.13.5 + version: v1.14.3 nodeTerminationHandler: cpuRequest: 50m enableRebalanceDraining: false diff --git a/tests/integration/update_cluster/privatecilium/data/aws_s3_object_privatecilium.example.com-addons-bootstrap_content b/tests/integration/update_cluster/privatecilium/data/aws_s3_object_privatecilium.example.com-addons-bootstrap_content index 61596698b6..9a959b39ab 100644 --- a/tests/integration/update_cluster/privatecilium/data/aws_s3_object_privatecilium.example.com-addons-bootstrap_content +++ b/tests/integration/update_cluster/privatecilium/data/aws_s3_object_privatecilium.example.com-addons-bootstrap_content @@ -98,8 +98,8 @@ spec: k8s-addon: storage-aws.addons.k8s.io version: 9.99.0 - id: k8s-1.16 - manifest: networking.cilium.io/k8s-1.16-v1.13.yaml - manifestHash: 1bcf01aca5730c31ac7b86d72831968485235c479566cf6a26da17ede4f0c351 + manifest: networking.cilium.io/k8s-1.16-v1.14.yaml + manifestHash: 0d92f3aaa5fcfde3239fba0d07f4d264580c460d4a5e9c2463f8e2b20434c479 name: networking.cilium.io needsRollingUpdate: all selector: diff --git a/tests/integration/update_cluster/privatecilium/data/aws_s3_object_privatecilium.example.com-addons-networking.cilium.io-k8s-1.16_content b/tests/integration/update_cluster/privatecilium/data/aws_s3_object_privatecilium.example.com-addons-networking.cilium.io-k8s-1.16_content index 41754e389f..d942bc1c28 100644 --- a/tests/integration/update_cluster/privatecilium/data/aws_s3_object_privatecilium.example.com-addons-networking.cilium.io-k8s-1.16_content +++ b/tests/integration/update_cluster/privatecilium/data/aws_s3_object_privatecilium.example.com-addons-networking.cilium.io-k8s-1.16_content @@ -39,6 +39,8 @@ data: bpf-policy-map-max: "16384" cgroup-root: /run/cilium/cgroupv2 cluster-name: default + cni-exclusive: "true" + cni-log-file: /var/run/cilium/cilium-cni.log debug: "false" disable-cnp-status-updates: "true" disable-endpoint-crd: "false" @@ -57,14 +59,19 @@ data: identity-change-grace-period: 5s install-iptables-rules: "true" ipam: kubernetes - kube-proxy-replacement: partial + kube-proxy-replacement: "false" monitor-aggregation: medium nodes-gc-interval: 5m0s preallocate-bpf-maps: "false" + remove-cilium-node-taints: "true" + routing-mode: tunnel + set-cilium-is-up-condition: "true" + set-cilium-node-taints: "true" sidecar-istio-proxy-image: cilium/istio_proxy tofqdns-dns-reject-response-code: refused tofqdns-enable-poller: "false" - tunnel: vxlan + tunnel-protocol: vxlan + write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist kind: ConfigMap metadata: creationTimestamp: null @@ -84,6 +91,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium rules: @@ -131,7 +139,6 @@ rules: - ciliumclusterwideenvoyconfigs - ciliumclusterwidenetworkpolicies - ciliumegressgatewaypolicies - - ciliumegressnatpolicies - ciliumendpoints - ciliumendpointslices - ciliumenvoyconfigs @@ -139,6 +146,10 @@ rules: - ciliumlocalredirectpolicies - 
ciliumnetworkpolicies - ciliumnodes + - ciliumnodeconfigs + - ciliumcidrgroups + - ciliuml2announcementpolicies + - ciliumpodippools verbs: - list - watch @@ -178,6 +189,7 @@ rules: - ciliumclusterwidenetworkpolicies/status - ciliumendpoints/status - ciliumendpoints + - ciliuml2announcementpolicies/status verbs: - patch @@ -190,6 +202,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium-operator rules: @@ -201,6 +214,21 @@ rules: - get - list - watch + - delete +- apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - nodes + - nodes/status + verbs: + - patch - apiGroups: - discovery.k8s.io resources: @@ -212,8 +240,16 @@ rules: - apiGroups: - "" resources: - - nodes + - services/status verbs: + - update + - patch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get - list - watch - apiGroups: @@ -221,7 +257,6 @@ rules: resources: - services - endpoints - - namespaces verbs: - get - list @@ -309,7 +344,6 @@ rules: - ciliumclusterwideenvoyconfigs.cilium.io - ciliumclusterwidenetworkpolicies.cilium.io - ciliumegressgatewaypolicies.cilium.io - - ciliumegressnatpolicies.cilium.io - ciliumendpoints.cilium.io - ciliumendpointslices.cilium.io - ciliumenvoyconfigs.cilium.io @@ -318,6 +352,10 @@ rules: - ciliumlocalredirectpolicies.cilium.io - ciliumnetworkpolicies.cilium.io - ciliumnodes.cilium.io + - ciliumnodeconfigs.cilium.io + - ciliumcidrgroups.cilium.io + - ciliuml2announcementpolicies.cilium.io + - ciliumpodippools.cilium.io resources: - customresourcedefinitions verbs: @@ -326,10 +364,17 @@ rules: - cilium.io resources: - ciliumloadbalancerippools + - ciliumpodippools verbs: - get - list - watch +- apiGroups: + - cilium.io + resources: + - ciliumpodippools + verbs: + - create - apiGroups: - cilium.io resources: @@ -354,6 +399,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium roleRef: @@ -374,6 +420,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium-operator roleRef: @@ -387,6 +434,51 @@ subjects: --- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium + role.kubernetes.io/networking: "1" + name: cilium-config-agent + namespace: kube-system +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium + role.kubernetes.io/networking: "1" + name: cilium-config-agent + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cilium-config-agent +subjects: +- kind: ServiceAccount + name: cilium + namespace: kube-system + +--- + apiVersion: apps/v1 kind: DaemonSet metadata: @@ -394,6 +486,8 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: cilium-agent + app.kubernetes.io/part-of: cilium 
k8s-app: cilium kubernetes.io/cluster-service: "true" role.kubernetes.io/networking: "1" @@ -412,6 +506,8 @@ spec: test3: awesome creationTimestamp: null labels: + app.kubernetes.io/name: cilium-agent + app.kubernetes.io/part-of: cilium k8s-app: cilium kops.k8s.io/managed-by: kops kubernetes.io/cluster-service: "true" @@ -459,14 +555,9 @@ spec: value: api.internal.privatecilium.example.com - name: KUBERNETES_SERVICE_PORT value: "443" - image: quay.io/cilium/cilium:v1.13.5 + image: quay.io/cilium/cilium:v1.14.3 imagePullPolicy: IfNotPresent lifecycle: - postStart: - exec: - command: - - /cni-install.sh - - --cni-exclusive=true preStop: exec: command: @@ -485,6 +576,7 @@ spec: successThreshold: 1 timeoutSeconds: 5 name: cilium-agent + ports: null readinessProbe: failureThreshold: 3 httpGet: @@ -495,7 +587,6 @@ spec: path: /healthz port: 9879 scheme: HTTP - initialDelaySeconds: 5 periodSeconds: 30 successThreshold: 1 timeoutSeconds: 5 @@ -516,12 +607,14 @@ spec: port: 9879 scheme: HTTP periodSeconds: 2 - successThreshold: null + successThreshold: 1 terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - mountPath: /sys/fs/bpf mountPropagation: Bidirectional name: bpf-maps + - mountPath: /run/cilium/cgroupv2 + name: cilium-cgroup - mountPath: /var/run/cilium name: cilium-run - mountPath: /host/etc/cni/net.d @@ -537,25 +630,78 @@ spec: readOnly: true - mountPath: /run/xtables.lock name: xtables-lock + - mountPath: /tmp + name: tmp hostNetwork: true initContainers: - command: - - /install-plugin.sh - image: quay.io/cilium/cilium:v1.13.5 + - cilium + - build-config + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: KUBERNETES_SERVICE_HOST + value: api.internal.privatecilium.example.com + - name: KUBERNETES_SERVICE_PORT + value: "443" + image: quay.io/cilium/cilium:v1.14.3 imagePullPolicy: IfNotPresent - name: install-cni-binaries - resources: - requests: - cpu: 100m - memory: 10Mi - securityContext: - capabilities: - drop: - - ALL - terminationMessagePath: /dev/termination-log + name: config terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - - mountPath: /host/opt/cni/bin + - mountPath: /tmp + name: tmp + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-mount /hostbin/cilium-mount; + nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT; + rm /hostbin/cilium-mount + env: + - name: CGROUP_ROOT + value: /run/cilium/cgroupv2 + - name: BIN_PATH + value: /opt/cni/bin + image: quay.io/cilium/cilium:v1.14.3 + imagePullPolicy: IfNotPresent + name: mount-cgroup + securityContext: + privileged: true + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin + name: cni-path + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix; + nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix"; + rm /hostbin/cilium-sysctlfix + env: + - name: BIN_PATH + value: /opt/cni/bin + image: quay.io/cilium/cilium:v1.14.3 + imagePullPolicy: IfNotPresent + name: apply-sysctl-overwrites + securityContext: + privileged: true + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin name: cni-path - command: - /init-container.sh @@ -572,26 +718,43 @@ spec: key: clean-cilium-bpf-state name: cilium-config 
optional: true - image: quay.io/cilium/cilium:v1.13.5 + - name: KUBERNETES_SERVICE_HOST + value: api.internal.privatecilium.example.com + - name: KUBERNETES_SERVICE_PORT + value: "443" + image: quay.io/cilium/cilium:v1.14.3 imagePullPolicy: IfNotPresent name: clean-cilium-state - resources: - limits: - memory: 100Mi - requests: - cpu: 100m - memory: 100Mi securityContext: privileged: true terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - mountPath: /sys/fs/bpf + mountPropagation: HostToContainer name: bpf-maps - mountPath: /run/cilium/cgroupv2 mountPropagation: HostToContainer name: cilium-cgroup - mountPath: /var/run/cilium name: cilium-run + - command: + - /install-plugin.sh + image: quay.io/cilium/cilium:v1.14.3 + imagePullPolicy: IfNotPresent + name: install-cni-binaries + resources: + requests: + cpu: 100m + memory: 10Mi + securityContext: + capabilities: + drop: + - ALL + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-path priorityClassName: system-node-critical restartPolicy: Always serviceAccount: cilium @@ -600,6 +763,8 @@ spec: tolerations: - operator: Exists volumes: + - emptyDir: {} + name: tmp - hostPath: path: /var/run/cilium type: DirectoryOrCreate @@ -608,14 +773,18 @@ spec: path: /sys/fs/bpf type: DirectoryOrCreate name: bpf-maps + - hostPath: + path: /proc + type: Directory + name: hostproc + - hostPath: + path: /run/cilium/cgroupv2 + type: DirectoryOrCreate + name: cilium-cgroup - hostPath: path: /opt/cni/bin type: DirectoryOrCreate name: cni-path - - hostPath: - path: /run/cilium/cgroupv2 - type: Directory - name: cilium-cgroup - hostPath: path: /etc/cni/net.d type: DirectoryOrCreate @@ -628,10 +797,22 @@ spec: type: FileOrCreate name: xtables-lock - name: clustermesh-secrets - secret: - defaultMode: 420 - optional: true - secretName: cilium-clustermesh + projected: + defaultMode: 256 + sources: + - secret: + name: cilium-clustermesh + optional: true + - secret: + items: + - key: tls.key + path: common-etcd-client.key + - key: tls.crt + path: common-etcd-client.crt + - key: ca.crt + path: common-etcd-client-ca.crt + name: clustermesh-apiserver-remote-cert + optional: true - configMap: name: cilium-config name: cilium-config-path @@ -647,6 +828,8 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: cilium-operator + app.kubernetes.io/part-of: cilium io.cilium/app: operator name: cilium-operator role.kubernetes.io/networking: "1" @@ -671,6 +854,8 @@ spec: test3: cilium-operator creationTimestamp: null labels: + app.kubernetes.io/name: cilium-operator + app.kubernetes.io/part-of: cilium io.cilium/app: operator kops.k8s.io/managed-by: kops name: cilium-operator @@ -713,7 +898,7 @@ spec: value: api.internal.privatecilium.example.com - name: KUBERNETES_SERVICE_PORT value: "443" - image: quay.io/cilium/operator:v1.13.5 + image: quay.io/cilium/operator:v1.14.3 imagePullPolicy: IfNotPresent livenessProbe: httpGet: diff --git a/tests/integration/update_cluster/privatecilium/kubernetes.tf b/tests/integration/update_cluster/privatecilium/kubernetes.tf index 71e93ebb1d..b0554750c3 100644 --- a/tests/integration/update_cluster/privatecilium/kubernetes.tf +++ b/tests/integration/update_cluster/privatecilium/kubernetes.tf @@ -1032,7 +1032,7 @@ resource "aws_s3_object" "privatecilium-example-com-addons-limit-range-addons-k8 resource "aws_s3_object" 
"privatecilium-example-com-addons-networking-cilium-io-k8s-1-16" { bucket = "testingBucket" content = file("${path.module}/data/aws_s3_object_privatecilium.example.com-addons-networking.cilium.io-k8s-1.16_content") - key = "clusters.example.com/privatecilium.example.com/addons/networking.cilium.io/k8s-1.16-v1.13.yaml" + key = "clusters.example.com/privatecilium.example.com/addons/networking.cilium.io/k8s-1.16-v1.14.yaml" provider = aws.files server_side_encryption = "AES256" } diff --git a/tests/integration/update_cluster/privatecilium2/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/privatecilium2/data/aws_s3_object_cluster-completed.spec_content index 3bcf34deea..097e64f09f 100644 --- a/tests/integration/update_cluster/privatecilium2/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/privatecilium2/data/aws_s3_object_cluster-completed.spec_content @@ -229,7 +229,7 @@ spec: sidecarIstioProxyImage: cilium/istio_proxy toFqdnsDnsRejectResponseCode: refused tunnel: vxlan - version: v1.13.5 + version: v1.14.3 nodeTerminationHandler: cpuRequest: 50m enableRebalanceDraining: false diff --git a/tests/integration/update_cluster/privatecilium2/data/aws_s3_object_privatecilium.example.com-addons-bootstrap_content b/tests/integration/update_cluster/privatecilium2/data/aws_s3_object_privatecilium.example.com-addons-bootstrap_content index 8270f07a17..fe46fb5a81 100644 --- a/tests/integration/update_cluster/privatecilium2/data/aws_s3_object_privatecilium.example.com-addons-bootstrap_content +++ b/tests/integration/update_cluster/privatecilium2/data/aws_s3_object_privatecilium.example.com-addons-bootstrap_content @@ -161,8 +161,8 @@ spec: k8s-addon: storage-aws.addons.k8s.io version: 9.99.0 - id: k8s-1.16 - manifest: networking.cilium.io/k8s-1.16-v1.13.yaml - manifestHash: ba5c764f4ddeb058c0dc7fd9287d445a6a3e8f186dbac9d63daf56770d81c24c + manifest: networking.cilium.io/k8s-1.16-v1.14.yaml + manifestHash: 3d77641c2e3c89adfb55bf74f41d865e0af29ff859eb75b2795cbe915d73b827 name: networking.cilium.io needsPKI: true needsRollingUpdate: all diff --git a/tests/integration/update_cluster/privatecilium2/data/aws_s3_object_privatecilium.example.com-addons-networking.cilium.io-k8s-1.16_content b/tests/integration/update_cluster/privatecilium2/data/aws_s3_object_privatecilium.example.com-addons-networking.cilium.io-k8s-1.16_content index 15d23aab41..d1a728fedf 100644 --- a/tests/integration/update_cluster/privatecilium2/data/aws_s3_object_privatecilium.example.com-addons-networking.cilium.io-k8s-1.16_content +++ b/tests/integration/update_cluster/privatecilium2/data/aws_s3_object_privatecilium.example.com-addons-networking.cilium.io-k8s-1.16_content @@ -53,6 +53,8 @@ data: cgroup-root: /run/cilium/cgroupv2 cluster-id: "253" cluster-name: privatecilium.example.com + cni-exclusive: "true" + cni-log-file: /var/run/cilium/cilium-cni.log debug: "false" disable-cnp-status-updates: "true" disable-endpoint-crd: "false" @@ -87,14 +89,19 @@ data: ingress-shared-lb-service-name: private-ingress install-iptables-rules: "true" ipam: kubernetes - kube-proxy-replacement: partial + kube-proxy-replacement: "false" monitor-aggregation: medium nodes-gc-interval: 5m0s preallocate-bpf-maps: "false" + remove-cilium-node-taints: "true" + routing-mode: tunnel + set-cilium-is-up-condition: "true" + set-cilium-node-taints: "true" sidecar-istio-proxy-image: cilium/istio_proxy tofqdns-dns-reject-response-code: refused tofqdns-enable-poller: "false" - tunnel: vxlan + 
tunnel-protocol: vxlan + write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist kind: ConfigMap metadata: creationTimestamp: null @@ -109,8 +116,9 @@ metadata: apiVersion: v1 data: - config.yaml: | - peer-service: unix:///var/run/cilium/hubble.sock + config.yaml: |- + cluster-name: "privatecilium.example.com" + peer-service: "hubble-peer.kube-system.svc.cluster.local:443" listen-address: :4245 disable-server-tls: true @@ -130,6 +138,31 @@ metadata: --- +apiVersion: v1 +kind: Service +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: hubble-peer + app.kubernetes.io/part-of: cilium + k8s-app: cilium + role.kubernetes.io/networking: "1" + name: hubble-peer + namespace: kube-system +spec: + internalTrafficPolicy: Local + ports: + - name: peer-service + port: 443 + protocol: TCP + targetPort: 4244 + selector: + k8s-app: cilium + +--- + apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -137,6 +170,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium rules: @@ -184,7 +218,6 @@ rules: - ciliumclusterwideenvoyconfigs - ciliumclusterwidenetworkpolicies - ciliumegressgatewaypolicies - - ciliumegressnatpolicies - ciliumendpoints - ciliumendpointslices - ciliumenvoyconfigs @@ -192,6 +225,10 @@ rules: - ciliumlocalredirectpolicies - ciliumnetworkpolicies - ciliumnodes + - ciliumnodeconfigs + - ciliumcidrgroups + - ciliuml2announcementpolicies + - ciliumpodippools verbs: - list - watch @@ -231,6 +268,7 @@ rules: - ciliumclusterwidenetworkpolicies/status - ciliumendpoints/status - ciliumendpoints + - ciliuml2announcementpolicies/status verbs: - patch @@ -243,6 +281,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium-operator rules: @@ -254,6 +293,21 @@ rules: - get - list - watch + - delete +- apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - nodes + - nodes/status + verbs: + - patch - apiGroups: - discovery.k8s.io resources: @@ -265,8 +319,16 @@ rules: - apiGroups: - "" resources: - - nodes + - services/status verbs: + - update + - patch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get - list - watch - apiGroups: @@ -274,7 +336,6 @@ rules: resources: - services - endpoints - - namespaces verbs: - get - list @@ -362,7 +423,6 @@ rules: - ciliumclusterwideenvoyconfigs.cilium.io - ciliumclusterwidenetworkpolicies.cilium.io - ciliumegressgatewaypolicies.cilium.io - - ciliumegressnatpolicies.cilium.io - ciliumendpoints.cilium.io - ciliumendpointslices.cilium.io - ciliumenvoyconfigs.cilium.io @@ -371,6 +431,10 @@ rules: - ciliumlocalredirectpolicies.cilium.io - ciliumnetworkpolicies.cilium.io - ciliumnodes.cilium.io + - ciliumnodeconfigs.cilium.io + - ciliumcidrgroups.cilium.io + - ciliuml2announcementpolicies.cilium.io + - ciliumpodippools.cilium.io resources: - customresourcedefinitions verbs: @@ -379,10 +443,17 @@ rules: - cilium.io resources: - ciliumloadbalancerippools + - ciliumpodippools verbs: - get - list - watch +- apiGroups: + - cilium.io + resources: + - ciliumpodippools + verbs: + - create - apiGroups: - cilium.io resources: @@ -422,6 +493,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io 
app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium roleRef: @@ -442,6 +514,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium-operator roleRef: @@ -455,6 +528,51 @@ subjects: --- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium + role.kubernetes.io/networking: "1" + name: cilium-config-agent + namespace: kube-system +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium + role.kubernetes.io/networking: "1" + name: cilium-config-agent + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cilium-config-agent +subjects: +- kind: ServiceAccount + name: cilium + namespace: kube-system + +--- + apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: @@ -567,6 +685,8 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: hubble-relay + app.kubernetes.io/part-of: cilium k8s-app: hubble-relay role.kubernetes.io/networking: "1" name: hubble-relay @@ -589,6 +709,8 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: cilium-agent + app.kubernetes.io/part-of: cilium k8s-app: cilium kubernetes.io/cluster-service: "true" role.kubernetes.io/networking: "1" @@ -603,6 +725,8 @@ spec: metadata: creationTimestamp: null labels: + app.kubernetes.io/name: cilium-agent + app.kubernetes.io/part-of: cilium k8s-app: cilium kops.k8s.io/managed-by: kops kubernetes.io/cluster-service: "true" @@ -650,14 +774,9 @@ spec: value: api.internal.privatecilium.example.com - name: KUBERNETES_SERVICE_PORT value: "443" - image: quay.io/cilium/cilium:v1.13.5 + image: quay.io/cilium/cilium:v1.14.3 imagePullPolicy: IfNotPresent lifecycle: - postStart: - exec: - command: - - /cni-install.sh - - --cni-exclusive=true preStop: exec: command: @@ -676,6 +795,11 @@ spec: successThreshold: 1 timeoutSeconds: 5 name: cilium-agent + ports: + - containerPort: 4244 + hostPort: 4244 + name: peer-service + protocol: TCP readinessProbe: failureThreshold: 3 httpGet: @@ -686,7 +810,6 @@ spec: path: /healthz port: 9879 scheme: HTTP - initialDelaySeconds: 5 periodSeconds: 30 successThreshold: 1 timeoutSeconds: 5 @@ -707,12 +830,14 @@ spec: port: 9879 scheme: HTTP periodSeconds: 2 - successThreshold: null + successThreshold: 1 terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - mountPath: /sys/fs/bpf mountPropagation: Bidirectional name: bpf-maps + - mountPath: /run/cilium/cgroupv2 + name: cilium-cgroup - mountPath: /var/run/cilium name: cilium-run - mountPath: /host/etc/cni/net.d @@ -728,28 +853,81 @@ spec: readOnly: true - mountPath: /run/xtables.lock name: xtables-lock + - mountPath: /tmp + name: tmp - mountPath: /var/lib/cilium/tls/hubble name: hubble-tls readOnly: true hostNetwork: true initContainers: - command: - - /install-plugin.sh - image: quay.io/cilium/cilium:v1.13.5 + - cilium + - build-config + env: + 
- name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: KUBERNETES_SERVICE_HOST + value: api.internal.privatecilium.example.com + - name: KUBERNETES_SERVICE_PORT + value: "443" + image: quay.io/cilium/cilium:v1.14.3 imagePullPolicy: IfNotPresent - name: install-cni-binaries - resources: - requests: - cpu: 100m - memory: 10Mi - securityContext: - capabilities: - drop: - - ALL - terminationMessagePath: /dev/termination-log + name: config terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - - mountPath: /host/opt/cni/bin + - mountPath: /tmp + name: tmp + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-mount /hostbin/cilium-mount; + nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT; + rm /hostbin/cilium-mount + env: + - name: CGROUP_ROOT + value: /run/cilium/cgroupv2 + - name: BIN_PATH + value: /opt/cni/bin + image: quay.io/cilium/cilium:v1.14.3 + imagePullPolicy: IfNotPresent + name: mount-cgroup + securityContext: + privileged: true + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin + name: cni-path + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix; + nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix"; + rm /hostbin/cilium-sysctlfix + env: + - name: BIN_PATH + value: /opt/cni/bin + image: quay.io/cilium/cilium:v1.14.3 + imagePullPolicy: IfNotPresent + name: apply-sysctl-overwrites + securityContext: + privileged: true + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin name: cni-path - command: - /init-container.sh @@ -766,26 +944,43 @@ spec: key: clean-cilium-bpf-state name: cilium-config optional: true - image: quay.io/cilium/cilium:v1.13.5 + - name: KUBERNETES_SERVICE_HOST + value: api.internal.privatecilium.example.com + - name: KUBERNETES_SERVICE_PORT + value: "443" + image: quay.io/cilium/cilium:v1.14.3 imagePullPolicy: IfNotPresent name: clean-cilium-state - resources: - limits: - memory: 100Mi - requests: - cpu: 100m - memory: 100Mi securityContext: privileged: true terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - mountPath: /sys/fs/bpf + mountPropagation: HostToContainer name: bpf-maps - mountPath: /run/cilium/cgroupv2 mountPropagation: HostToContainer name: cilium-cgroup - mountPath: /var/run/cilium name: cilium-run + - command: + - /install-plugin.sh + image: quay.io/cilium/cilium:v1.14.3 + imagePullPolicy: IfNotPresent + name: install-cni-binaries + resources: + requests: + cpu: 100m + memory: 10Mi + securityContext: + capabilities: + drop: + - ALL + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-path priorityClassName: system-node-critical restartPolicy: Always serviceAccount: cilium @@ -794,6 +989,8 @@ spec: tolerations: - operator: Exists volumes: + - emptyDir: {} + name: tmp - hostPath: path: /var/run/cilium type: DirectoryOrCreate @@ -802,14 +999,18 @@ spec: path: /sys/fs/bpf type: DirectoryOrCreate name: bpf-maps + - hostPath: + path: /proc + type: Directory + name: hostproc + - hostPath: + path: /run/cilium/cgroupv2 + type: DirectoryOrCreate + name: cilium-cgroup - hostPath: path: /opt/cni/bin type: DirectoryOrCreate 
name: cni-path - - hostPath: - path: /run/cilium/cgroupv2 - type: Directory - name: cilium-cgroup - hostPath: path: /etc/cni/net.d type: DirectoryOrCreate @@ -822,17 +1023,32 @@ spec: type: FileOrCreate name: xtables-lock - name: clustermesh-secrets - secret: - defaultMode: 420 - optional: true - secretName: cilium-clustermesh + projected: + defaultMode: 256 + sources: + - secret: + name: cilium-clustermesh + optional: true + - secret: + items: + - key: tls.key + path: common-etcd-client.key + - key: tls.crt + path: common-etcd-client.crt + - key: ca.crt + path: common-etcd-client-ca.crt + name: clustermesh-apiserver-remote-cert + optional: true - configMap: name: cilium-config name: cilium-config-path - name: hubble-tls - secret: - optional: true - secretName: hubble-server-certs + projected: + defaultMode: 256 + sources: + - secret: + name: hubble-server-certs + optional: true updateStrategy: type: OnDelete @@ -845,6 +1061,8 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: cilium-operator + app.kubernetes.io/part-of: cilium io.cilium/app: operator name: cilium-operator role.kubernetes.io/networking: "1" @@ -865,6 +1083,8 @@ spec: metadata: creationTimestamp: null labels: + app.kubernetes.io/name: cilium-operator + app.kubernetes.io/part-of: cilium io.cilium/app: operator kops.k8s.io/managed-by: kops name: cilium-operator @@ -907,7 +1127,7 @@ spec: value: api.internal.privatecilium.example.com - name: KUBERNETES_SERVICE_PORT value: "443" - image: quay.io/cilium/operator:v1.13.5 + image: quay.io/cilium/operator:v1.14.3 imagePullPolicy: IfNotPresent livenessProbe: httpGet: @@ -965,6 +1185,8 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: hubble-relay + app.kubernetes.io/part-of: cilium k8s-app: hubble-relay role.kubernetes.io/networking: "1" name: hubble-relay @@ -982,20 +1204,24 @@ spec: metadata: creationTimestamp: null labels: + app.kubernetes.io/name: hubble-relay + app.kubernetes.io/part-of: cilium k8s-app: hubble-relay kops.k8s.io/managed-by: kops spec: + affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + k8s-app: cilium + topologyKey: kubernetes.io/hostname containers: - args: - serve - - --peer-service=unix:///var/run/cilium/hubble.sock - - --listen-address=:4245 command: - hubble-relay - env: - - name: GODEBUG - value: x509ignoreCN=0 - image: quay.io/cilium/hubble-relay:v1.13.5 + image: quay.io/cilium/hubble-relay:v1.14.3 imagePullPolicy: IfNotPresent livenessProbe: tcpSocket: @@ -1007,11 +1233,15 @@ spec: readinessProbe: tcpSocket: port: grpc + securityContext: + capabilities: + drop: + - ALL + runAsGroup: 65532 + runAsNonRoot: true + runAsUser: 65532 terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - - mountPath: /var/run/cilium - name: hubble-sock-dir - readOnly: true - mountPath: /etc/hubble-relay name: config readOnly: true @@ -1019,6 +1249,8 @@ spec: name: tls readOnly: true restartPolicy: Always + securityContext: + fsGroup: 65532 serviceAccount: hubble-relay serviceAccountName: hubble-relay terminationGracePeriodSeconds: 0 @@ -1036,10 +1268,6 @@ spec: topologyKey: kubernetes.io/hostname whenUnsatisfiable: DoNotSchedule volumes: - - hostPath: - path: /var/run/cilium - type: Directory - name: hubble-sock-dir - configMap: items: - key: config.yaml @@ -1048,6 +1276,7 @@ spec: name: config - name: tls projected: + defaultMode: 256 sources: - secret: 
items: @@ -1089,6 +1318,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium k8s-app: cilium role.kubernetes.io/networking: "1" name: hubble-relay-client-certs diff --git a/tests/integration/update_cluster/privatecilium2/kubernetes.tf b/tests/integration/update_cluster/privatecilium2/kubernetes.tf index 6b22a6111e..10d9ea6f44 100644 --- a/tests/integration/update_cluster/privatecilium2/kubernetes.tf +++ b/tests/integration/update_cluster/privatecilium2/kubernetes.tf @@ -1048,7 +1048,7 @@ resource "aws_s3_object" "privatecilium-example-com-addons-limit-range-addons-k8 resource "aws_s3_object" "privatecilium-example-com-addons-networking-cilium-io-k8s-1-16" { bucket = "testingBucket" content = file("${path.module}/data/aws_s3_object_privatecilium.example.com-addons-networking.cilium.io-k8s-1.16_content") - key = "clusters.example.com/privatecilium.example.com/addons/networking.cilium.io/k8s-1.16-v1.13.yaml" + key = "clusters.example.com/privatecilium.example.com/addons/networking.cilium.io/k8s-1.16-v1.14.yaml" provider = aws.files server_side_encryption = "AES256" } diff --git a/tests/integration/update_cluster/privateciliumadvanced/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/privateciliumadvanced/data/aws_s3_object_cluster-completed.spec_content index 6cdad8fed9..9c0e1e7d75 100644 --- a/tests/integration/update_cluster/privateciliumadvanced/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/privateciliumadvanced/data/aws_s3_object_cluster-completed.spec_content @@ -232,7 +232,7 @@ spec: sidecarIstioProxyImage: cilium/istio_proxy toFqdnsDnsRejectResponseCode: refused tunnel: disabled - version: v1.13.5 + version: v1.14.3 nodeTerminationHandler: cpuRequest: 50m enableRebalanceDraining: false diff --git a/tests/integration/update_cluster/privateciliumadvanced/data/aws_s3_object_privateciliumadvanced.example.com-addons-bootstrap_content b/tests/integration/update_cluster/privateciliumadvanced/data/aws_s3_object_privateciliumadvanced.example.com-addons-bootstrap_content index 4dc20bdb4d..aece7a3747 100644 --- a/tests/integration/update_cluster/privateciliumadvanced/data/aws_s3_object_privateciliumadvanced.example.com-addons-bootstrap_content +++ b/tests/integration/update_cluster/privateciliumadvanced/data/aws_s3_object_privateciliumadvanced.example.com-addons-bootstrap_content @@ -98,8 +98,8 @@ spec: k8s-addon: storage-aws.addons.k8s.io version: 9.99.0 - id: k8s-1.16 - manifest: networking.cilium.io/k8s-1.16-v1.13.yaml - manifestHash: c6b553b26348b9bda91297615c885dfeb20ec41a56cdeedcf433255bd62d4d58 + manifest: networking.cilium.io/k8s-1.16-v1.14.yaml + manifestHash: 1dc85a0c4d6148f60695875f169977272f69564eb1ee8a5cf6c4c7687376449d name: networking.cilium.io needsRollingUpdate: all selector: diff --git a/tests/integration/update_cluster/privateciliumadvanced/data/aws_s3_object_privateciliumadvanced.example.com-addons-networking.cilium.io-k8s-1.16_content b/tests/integration/update_cluster/privateciliumadvanced/data/aws_s3_object_privateciliumadvanced.example.com-addons-networking.cilium.io-k8s-1.16_content index c6f3d54f17..65c81dd427 100644 --- a/tests/integration/update_cluster/privateciliumadvanced/data/aws_s3_object_privateciliumadvanced.example.com-addons-networking.cilium.io-k8s-1.16_content +++ 
b/tests/integration/update_cluster/privateciliumadvanced/data/aws_s3_object_privateciliumadvanced.example.com-addons-networking.cilium.io-k8s-1.16_content @@ -29,7 +29,6 @@ data: agent-health-port: "9879" auto-create-cilium-node-resource: "true" auto-direct-node-routes: "false" - blacklist-conflicting-routes: "false" bpf-ct-global-any-max: "262144" bpf-ct-global-tcp-max: "524288" bpf-lb-algorithm: random @@ -41,6 +40,8 @@ data: bpf-policy-map-max: "16384" cgroup-root: /run/cilium/cgroupv2 cluster-name: default + cni-exclusive: "true" + cni-log-file: /var/run/cilium/cilium-cni.log debug: "false" disable-cnp-status-updates: "true" disable-endpoint-crd: "false" @@ -57,6 +58,7 @@ data: enable-remote-node-identity: "true" enable-service-topology: "false" enable-unreachable-routes: "false" + eni-tags: KubernetesCluster=privateciliumadvanced.example.com etcd-config: |- --- endpoints: @@ -69,16 +71,20 @@ data: identity-change-grace-period: 5s install-iptables-rules: "true" ipam: eni - kube-proxy-replacement: strict + kube-proxy-replacement: "true" kvstore: etcd kvstore-opt: '{"etcd.config": "/var/lib/etcd-config/etcd.config"}' monitor-aggregation: medium nodes-gc-interval: 5m0s preallocate-bpf-maps: "false" + remove-cilium-node-taints: "true" + routing-mode: native + set-cilium-is-up-condition: "true" + set-cilium-node-taints: "true" sidecar-istio-proxy-image: cilium/istio_proxy tofqdns-dns-reject-response-code: refused tofqdns-enable-poller: "false" - tunnel: disabled + write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist kind: ConfigMap metadata: creationTimestamp: null @@ -98,6 +104,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium rules: @@ -145,7 +152,6 @@ rules: - ciliumclusterwideenvoyconfigs - ciliumclusterwidenetworkpolicies - ciliumegressgatewaypolicies - - ciliumegressnatpolicies - ciliumendpoints - ciliumendpointslices - ciliumenvoyconfigs @@ -153,6 +159,10 @@ rules: - ciliumlocalredirectpolicies - ciliumnetworkpolicies - ciliumnodes + - ciliumnodeconfigs + - ciliumcidrgroups + - ciliuml2announcementpolicies + - ciliumpodippools verbs: - list - watch @@ -192,6 +202,7 @@ rules: - ciliumclusterwidenetworkpolicies/status - ciliumendpoints/status - ciliumendpoints + - ciliuml2announcementpolicies/status verbs: - patch @@ -204,6 +215,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium-operator rules: @@ -215,6 +227,21 @@ rules: - get - list - watch + - delete +- apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - nodes + - nodes/status + verbs: + - patch - apiGroups: - discovery.k8s.io resources: @@ -226,8 +253,16 @@ rules: - apiGroups: - "" resources: - - nodes + - services/status verbs: + - update + - patch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get - list - watch - apiGroups: @@ -235,7 +270,6 @@ rules: resources: - services - endpoints - - namespaces verbs: - get - list @@ -323,7 +357,6 @@ rules: - ciliumclusterwideenvoyconfigs.cilium.io - ciliumclusterwidenetworkpolicies.cilium.io - ciliumegressgatewaypolicies.cilium.io - - ciliumegressnatpolicies.cilium.io - ciliumendpoints.cilium.io - ciliumendpointslices.cilium.io - ciliumenvoyconfigs.cilium.io @@ -332,6 +365,10 @@ rules: - ciliumlocalredirectpolicies.cilium.io - 
ciliumnetworkpolicies.cilium.io - ciliumnodes.cilium.io + - ciliumnodeconfigs.cilium.io + - ciliumcidrgroups.cilium.io + - ciliuml2announcementpolicies.cilium.io + - ciliumpodippools.cilium.io resources: - customresourcedefinitions verbs: @@ -340,10 +377,17 @@ rules: - cilium.io resources: - ciliumloadbalancerippools + - ciliumpodippools verbs: - get - list - watch +- apiGroups: + - cilium.io + resources: + - ciliumpodippools + verbs: + - create - apiGroups: - cilium.io resources: @@ -368,6 +412,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium roleRef: @@ -388,6 +433,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium-operator roleRef: @@ -401,6 +447,51 @@ subjects: --- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium + role.kubernetes.io/networking: "1" + name: cilium-config-agent + namespace: kube-system +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium + role.kubernetes.io/networking: "1" + name: cilium-config-agent + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cilium-config-agent +subjects: +- kind: ServiceAccount + name: cilium + namespace: kube-system + +--- + apiVersion: apps/v1 kind: DaemonSet metadata: @@ -408,6 +499,8 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: cilium-agent + app.kubernetes.io/part-of: cilium k8s-app: cilium kubernetes.io/cluster-service: "true" role.kubernetes.io/networking: "1" @@ -422,6 +515,8 @@ spec: metadata: creationTimestamp: null labels: + app.kubernetes.io/name: cilium-agent + app.kubernetes.io/part-of: cilium k8s-app: cilium kops.k8s.io/managed-by: kops kubernetes.io/cluster-service: "true" @@ -469,14 +564,34 @@ spec: value: api.internal.privateciliumadvanced.example.com - name: KUBERNETES_SERVICE_PORT value: "443" - image: quay.io/cilium/cilium:v1.13.5 + image: quay.io/cilium/cilium:v1.14.3 imagePullPolicy: IfNotPresent lifecycle: postStart: exec: command: - - /cni-install.sh - - --cni-exclusive=true + - bash + - -c + - | + set -o errexit + set -o pipefail + set -o nounset + + # When running in AWS ENI mode, it's likely that 'aws-node' has + # had a chance to install SNAT iptables rules. These can result + # in dropped traffic, so we should attempt to remove them. + # We do it using a 'postStart' hook since this may need to run + # for nodes which might have already been init'ed but may still + # have dangling rules. This is safe because there are no + # dependencies on anything that is part of the startup script + # itself, and can be safely run multiple times per node (e.g. in + # case of a restart). 
+ if [[ "$(iptables-save | grep -c AWS-SNAT-CHAIN)" != "0" ]]; + then + echo 'Deleting iptables rules created by the AWS CNI VPC plugin' + iptables-save | grep -v AWS-SNAT-CHAIN | iptables-restore + fi + echo 'Done!' preStop: exec: command: @@ -495,6 +610,7 @@ spec: successThreshold: 1 timeoutSeconds: 5 name: cilium-agent + ports: null readinessProbe: failureThreshold: 3 httpGet: @@ -505,7 +621,6 @@ spec: path: /healthz port: 9879 scheme: HTTP - initialDelaySeconds: 5 periodSeconds: 30 successThreshold: 1 timeoutSeconds: 5 @@ -526,12 +641,14 @@ spec: port: 9879 scheme: HTTP periodSeconds: 2 - successThreshold: null + successThreshold: 1 terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - mountPath: /sys/fs/bpf mountPropagation: Bidirectional name: bpf-maps + - mountPath: /run/cilium/cgroupv2 + name: cilium-cgroup - mountPath: /var/run/cilium name: cilium-run - mountPath: /host/etc/cni/net.d @@ -553,25 +670,78 @@ spec: readOnly: true - mountPath: /run/xtables.lock name: xtables-lock + - mountPath: /tmp + name: tmp hostNetwork: true initContainers: - command: - - /install-plugin.sh - image: quay.io/cilium/cilium:v1.13.5 + - cilium + - build-config + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: KUBERNETES_SERVICE_HOST + value: api.internal.privateciliumadvanced.example.com + - name: KUBERNETES_SERVICE_PORT + value: "443" + image: quay.io/cilium/cilium:v1.14.3 imagePullPolicy: IfNotPresent - name: install-cni-binaries - resources: - requests: - cpu: 100m - memory: 10Mi - securityContext: - capabilities: - drop: - - ALL - terminationMessagePath: /dev/termination-log + name: config terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - - mountPath: /host/opt/cni/bin + - mountPath: /tmp + name: tmp + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-mount /hostbin/cilium-mount; + nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT; + rm /hostbin/cilium-mount + env: + - name: CGROUP_ROOT + value: /run/cilium/cgroupv2 + - name: BIN_PATH + value: /opt/cni/bin + image: quay.io/cilium/cilium:v1.14.3 + imagePullPolicy: IfNotPresent + name: mount-cgroup + securityContext: + privileged: true + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin + name: cni-path + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix; + nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix"; + rm /hostbin/cilium-sysctlfix + env: + - name: BIN_PATH + value: /opt/cni/bin + image: quay.io/cilium/cilium:v1.14.3 + imagePullPolicy: IfNotPresent + name: apply-sysctl-overwrites + securityContext: + privileged: true + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin name: cni-path - command: - /init-container.sh @@ -588,26 +758,43 @@ spec: key: clean-cilium-bpf-state name: cilium-config optional: true - image: quay.io/cilium/cilium:v1.13.5 + - name: KUBERNETES_SERVICE_HOST + value: api.internal.privateciliumadvanced.example.com + - name: KUBERNETES_SERVICE_PORT + value: "443" + image: quay.io/cilium/cilium:v1.14.3 imagePullPolicy: IfNotPresent name: clean-cilium-state - resources: - limits: - memory: 100Mi - requests: - cpu: 100m - memory: 100Mi securityContext: privileged: true 
terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - mountPath: /sys/fs/bpf + mountPropagation: HostToContainer name: bpf-maps - mountPath: /run/cilium/cgroupv2 mountPropagation: HostToContainer name: cilium-cgroup - mountPath: /var/run/cilium name: cilium-run + - command: + - /install-plugin.sh + image: quay.io/cilium/cilium:v1.14.3 + imagePullPolicy: IfNotPresent + name: install-cni-binaries + resources: + requests: + cpu: 100m + memory: 10Mi + securityContext: + capabilities: + drop: + - ALL + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-path priorityClassName: system-node-critical restartPolicy: Always serviceAccount: cilium @@ -616,6 +803,8 @@ spec: tolerations: - operator: Exists volumes: + - emptyDir: {} + name: tmp - hostPath: path: /var/run/cilium type: DirectoryOrCreate @@ -624,14 +813,18 @@ spec: path: /sys/fs/bpf type: DirectoryOrCreate name: bpf-maps + - hostPath: + path: /proc + type: Directory + name: hostproc + - hostPath: + path: /run/cilium/cgroupv2 + type: DirectoryOrCreate + name: cilium-cgroup - hostPath: path: /opt/cni/bin type: DirectoryOrCreate name: cni-path - - hostPath: - path: /run/cilium/cgroupv2 - type: Directory - name: cilium-cgroup - hostPath: path: /etc/cni/net.d type: DirectoryOrCreate @@ -644,7 +837,7 @@ spec: type: FileOrCreate name: xtables-lock - configMap: - defaultMode: 420 + defaultMode: 256 items: - key: etcd-config path: etcd.config @@ -655,10 +848,22 @@ spec: type: Directory name: etcd-secrets - name: clustermesh-secrets - secret: - defaultMode: 420 - optional: true - secretName: cilium-clustermesh + projected: + defaultMode: 256 + sources: + - secret: + name: cilium-clustermesh + optional: true + - secret: + items: + - key: tls.key + path: common-etcd-client.key + - key: tls.crt + path: common-etcd-client.crt + - key: ca.crt + path: common-etcd-client-ca.crt + name: clustermesh-apiserver-remote-cert + optional: true - configMap: name: cilium-config name: cilium-config-path @@ -674,6 +879,8 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: cilium-operator + app.kubernetes.io/part-of: cilium io.cilium/app: operator name: cilium-operator role.kubernetes.io/networking: "1" @@ -694,6 +901,8 @@ spec: metadata: creationTimestamp: null labels: + app.kubernetes.io/name: cilium-operator + app.kubernetes.io/part-of: cilium io.cilium/app: operator kops.k8s.io/managed-by: kops name: cilium-operator @@ -736,7 +945,7 @@ spec: value: api.internal.privateciliumadvanced.example.com - name: KUBERNETES_SERVICE_PORT value: "443" - image: quay.io/cilium/operator:v1.13.5 + image: quay.io/cilium/operator:v1.14.3 imagePullPolicy: IfNotPresent livenessProbe: httpGet: diff --git a/tests/integration/update_cluster/privateciliumadvanced/kubernetes.tf b/tests/integration/update_cluster/privateciliumadvanced/kubernetes.tf index 5ae576329e..5d60a67f38 100644 --- a/tests/integration/update_cluster/privateciliumadvanced/kubernetes.tf +++ b/tests/integration/update_cluster/privateciliumadvanced/kubernetes.tf @@ -1065,7 +1065,7 @@ resource "aws_s3_object" "privateciliumadvanced-example-com-addons-limit-range-a resource "aws_s3_object" "privateciliumadvanced-example-com-addons-networking-cilium-io-k8s-1-16" { bucket = "testingBucket" content = file("${path.module}/data/aws_s3_object_privateciliumadvanced.example.com-addons-networking.cilium.io-k8s-1.16_content") - key = 
"clusters.example.com/privateciliumadvanced.example.com/addons/networking.cilium.io/k8s-1.16-v1.13.yaml" + key = "clusters.example.com/privateciliumadvanced.example.com/addons/networking.cilium.io/k8s-1.16-v1.14.yaml" provider = aws.files server_side_encryption = "AES256" } diff --git a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/cilium/manifest.yaml b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/cilium/manifest.yaml index 4c90f009a6..5e6b6fb335 100644 --- a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/cilium/manifest.yaml +++ b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/cilium/manifest.yaml @@ -99,7 +99,7 @@ spec: version: 9.99.0 - id: k8s-1.16 manifest: networking.cilium.io/k8s-1.16-v1.14.yaml - manifestHash: 0bd17ce3cf13710024d3ef8ba18921b9c67b68c1a78a41872ec196f3808c7095 + manifestHash: 2f32492b13ce87032e506c9b7977b78214ee645513c92b6fa7668df8022fd183 name: networking.cilium.io needsRollingUpdate: all selector: diff --git a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/metrics-server/insecure-1.19/manifest.yaml b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/metrics-server/insecure-1.19/manifest.yaml index 1b3280a1c0..40e0b93316 100644 --- a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/metrics-server/insecure-1.19/manifest.yaml +++ b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/metrics-server/insecure-1.19/manifest.yaml @@ -113,7 +113,7 @@ spec: version: 9.99.0 - id: k8s-1.16 manifest: networking.cilium.io/k8s-1.16-v1.14.yaml - manifestHash: 0bd17ce3cf13710024d3ef8ba18921b9c67b68c1a78a41872ec196f3808c7095 + manifestHash: 2f32492b13ce87032e506c9b7977b78214ee645513c92b6fa7668df8022fd183 name: networking.cilium.io needsRollingUpdate: all selector: diff --git a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/metrics-server/secure-1.19/manifest.yaml b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/metrics-server/secure-1.19/manifest.yaml index 0a760eea31..021076650f 100644 --- a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/metrics-server/secure-1.19/manifest.yaml +++ b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/metrics-server/secure-1.19/manifest.yaml @@ -170,7 +170,7 @@ spec: version: 9.99.0 - id: k8s-1.16 manifest: networking.cilium.io/k8s-1.16-v1.14.yaml - manifestHash: 0bd17ce3cf13710024d3ef8ba18921b9c67b68c1a78a41872ec196f3808c7095 + manifestHash: 2f32492b13ce87032e506c9b7977b78214ee645513c92b6fa7668df8022fd183 name: networking.cilium.io needsRollingUpdate: all selector: