diff --git a/pkg/apis/kops/validation/validation.go b/pkg/apis/kops/validation/validation.go index af2b5c6f0d..c5354de5ef 100644 --- a/pkg/apis/kops/validation/validation.go +++ b/pkg/apis/kops/validation/validation.go @@ -1262,8 +1262,8 @@ func validateNetworkingCilium(cluster *kops.Cluster, v *kops.CiliumNetworkingSpe allErrs = append(allErrs, field.Invalid(versionFld, v.Version, "Could not parse as semantic version")) } - if version.Minor != 13 { - allErrs = append(allErrs, field.Invalid(versionFld, v.Version, "Only version 1.13 is supported")) + if version.Minor != 14 { + allErrs = append(allErrs, field.Invalid(versionFld, v.Version, "Only version 1.14 is supported")) } if v.Hubble != nil && fi.ValueOf(v.Hubble.Enabled) { @@ -1311,15 +1311,6 @@ func validateNetworkingCilium(cluster *kops.Cluster, v *kops.CiliumNetworkingSpe } allErrs = append(allErrs, IsValidValue(fldPath.Child("encryptionType"), &v.EncryptionType, []kops.CiliumEncryptionType{kops.CiliumEncryptionTypeIPSec, kops.CiliumEncryptionTypeWireguard})...) - - if v.EncryptionType == "wireguard" { - // Cilium with Wireguard integration follow-up --> https://github.com/cilium/cilium/issues/15462. - // The following rule of validation should be deleted as this combination - // will be supported on future releases of Cilium (>= v1.11.0). - if fi.ValueOf(v.EnableL7Proxy) { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("enableL7Proxy"), "L7 proxy cannot be enabled if wireguard is enabled.")) - } - } } if fi.ValueOf(v.EnableL7Proxy) && v.InstallIptablesRules != nil && !*v.InstallIptablesRules { diff --git a/pkg/apis/kops/validation/validation_test.go b/pkg/apis/kops/validation/validation_test.go index 278742a57b..d8e8d05b43 100644 --- a/pkg/apis/kops/validation/validation_test.go +++ b/pkg/apis/kops/validation/validation_test.go @@ -1023,7 +1023,7 @@ func Test_Validate_Cilium(t *testing.T) { }, { Cilium: kops.CiliumNetworkingSpec{ - Version: "v1.13.5", + Version: "v1.14.3", Ingress: &kops.CiliumIngressSpec{ Enabled: fi.PtrTo(true), DefaultLoadBalancerMode: "bad-value", @@ -1033,7 +1033,7 @@ func Test_Validate_Cilium(t *testing.T) { }, { Cilium: kops.CiliumNetworkingSpec{ - Version: "v1.13.5", + Version: "v1.14.3", Ingress: &kops.CiliumIngressSpec{ Enabled: fi.PtrTo(true), DefaultLoadBalancerMode: "dedicated", @@ -1042,7 +1042,7 @@ func Test_Validate_Cilium(t *testing.T) { }, { Cilium: kops.CiliumNetworkingSpec{ - Version: "v1.13.5", + Version: "v1.14.3", Hubble: &kops.HubbleSpec{ Enabled: fi.PtrTo(true), }, diff --git a/pkg/model/components/cilium.go b/pkg/model/components/cilium.go index f7b8274e56..6b12303c79 100644 --- a/pkg/model/components/cilium.go +++ b/pkg/model/components/cilium.go @@ -40,7 +40,7 @@ func (b *CiliumOptionsBuilder) BuildOptions(o interface{}) error { } if c.Version == "" { - c.Version = "v1.13.5" + c.Version = "v1.14.3" } if c.EnableEndpointHealthChecking == nil { diff --git a/tests/integration/update_cluster/minimal-ipv6-cilium/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/minimal-ipv6-cilium/data/aws_s3_object_cluster-completed.spec_content index 92406d3cbc..8db1b475dc 100644 --- a/tests/integration/update_cluster/minimal-ipv6-cilium/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/minimal-ipv6-cilium/data/aws_s3_object_cluster-completed.spec_content @@ -226,7 +226,7 @@ spec: sidecarIstioProxyImage: cilium/istio_proxy toFqdnsDnsRejectResponseCode: refused tunnel: disabled - version: v1.13.5 + version: v1.14.3 
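Review note on the validation change in pkg/apis/kops/validation/validation.go above: kops pins the supported Cilium minor release, so this bump rejects anything outside the 1.14 series, and the wireguard guard is dropped because, per the deleted comment (cilium/cilium#15462), L7 proxy alongside WireGuard is supported in Cilium >= v1.11.0. A minimal standalone sketch of the equivalent gate, assuming the blang/semver/v4 tolerant parser; validateCiliumVersion is a hypothetical helper, not the actual kops function:

package main

import (
	"fmt"

	"github.com/blang/semver/v4"
)

// validateCiliumVersion mirrors the check in the hunk above: parse the
// version (ParseTolerant accepts the leading "v" in tags such as
// "v1.14.3") and require the 1.14 minor series.
func validateCiliumVersion(s string) error {
	v, err := semver.ParseTolerant(s)
	if err != nil {
		return fmt.Errorf("%q: could not parse as semantic version", s)
	}
	if v.Minor != 14 {
		return fmt.Errorf("%q: only version 1.14 is supported", s)
	}
	return nil
}

func main() {
	for _, s := range []string{"v1.14.3", "v1.13.5", "nonsense"} {
		fmt.Printf("%-8s -> %v\n", s, validateCiliumVersion(s))
	}
}
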
nodeTerminationHandler: cpuRequest: 50m enableRebalanceDraining: false diff --git a/tests/integration/update_cluster/minimal-ipv6-cilium/data/aws_s3_object_minimal-ipv6.example.com-addons-bootstrap_content b/tests/integration/update_cluster/minimal-ipv6-cilium/data/aws_s3_object_minimal-ipv6.example.com-addons-bootstrap_content index f5ec4509f5..ea762272f4 100644 --- a/tests/integration/update_cluster/minimal-ipv6-cilium/data/aws_s3_object_minimal-ipv6.example.com-addons-bootstrap_content +++ b/tests/integration/update_cluster/minimal-ipv6-cilium/data/aws_s3_object_minimal-ipv6.example.com-addons-bootstrap_content @@ -105,8 +105,8 @@ spec: k8s-addon: storage-aws.addons.k8s.io version: 9.99.0 - id: k8s-1.16 - manifest: networking.cilium.io/k8s-1.16-v1.13.yaml - manifestHash: 33440a8acbacd86a9b5cd6c44eabf93e591e6cdfd0245feae791b75ddc579a3c + manifest: networking.cilium.io/k8s-1.16-v1.14.yaml + manifestHash: 5e9537b8396c3b141b62b590b619883148d224463969f32eddd9af9601e7b79e name: networking.cilium.io needsRollingUpdate: all selector: diff --git a/tests/integration/update_cluster/minimal-ipv6-cilium/data/aws_s3_object_minimal-ipv6.example.com-addons-networking.cilium.io-k8s-1.16_content b/tests/integration/update_cluster/minimal-ipv6-cilium/data/aws_s3_object_minimal-ipv6.example.com-addons-networking.cilium.io-k8s-1.16_content index 67095aa820..96533389f8 100644 --- a/tests/integration/update_cluster/minimal-ipv6-cilium/data/aws_s3_object_minimal-ipv6.example.com-addons-networking.cilium.io-k8s-1.16_content +++ b/tests/integration/update_cluster/minimal-ipv6-cilium/data/aws_s3_object_minimal-ipv6.example.com-addons-networking.cilium.io-k8s-1.16_content @@ -39,6 +39,8 @@ data: bpf-policy-map-max: "16384" cgroup-root: /run/cilium/cgroupv2 cluster-name: default + cni-exclusive: "true" + cni-log-file: /var/run/cilium/cilium-cni.log debug: "false" disable-cnp-status-updates: "true" disable-endpoint-crd: "false" @@ -57,14 +59,18 @@ data: identity-change-grace-period: 5s install-iptables-rules: "true" ipam: kubernetes - kube-proxy-replacement: partial + kube-proxy-replacement: "false" monitor-aggregation: medium nodes-gc-interval: 5m0s preallocate-bpf-maps: "false" + remove-cilium-node-taints: "true" + routing-mode: native + set-cilium-is-up-condition: "true" + set-cilium-node-taints: "true" sidecar-istio-proxy-image: cilium/istio_proxy tofqdns-dns-reject-response-code: refused tofqdns-enable-poller: "false" - tunnel: disabled + write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist kind: ConfigMap metadata: creationTimestamp: null @@ -84,6 +90,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium rules: @@ -131,7 +138,6 @@ rules: - ciliumclusterwideenvoyconfigs - ciliumclusterwidenetworkpolicies - ciliumegressgatewaypolicies - - ciliumegressnatpolicies - ciliumendpoints - ciliumendpointslices - ciliumenvoyconfigs @@ -139,6 +145,10 @@ rules: - ciliumlocalredirectpolicies - ciliumnetworkpolicies - ciliumnodes + - ciliumnodeconfigs + - ciliumcidrgroups + - ciliuml2announcementpolicies + - ciliumpodippools verbs: - list - watch @@ -178,6 +188,7 @@ rules: - ciliumclusterwidenetworkpolicies/status - ciliumendpoints/status - ciliumendpoints + - ciliuml2announcementpolicies/status verbs: - patch @@ -190,6 +201,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium 
role.kubernetes.io/networking: "1" name: cilium-operator rules: @@ -201,6 +213,21 @@ rules: - get - list - watch + - delete +- apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - nodes + - nodes/status + verbs: + - patch - apiGroups: - discovery.k8s.io resources: @@ -212,8 +239,16 @@ rules: - apiGroups: - "" resources: - - nodes + - services/status verbs: + - update + - patch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get - list - watch - apiGroups: @@ -221,7 +256,6 @@ rules: resources: - services - endpoints - - namespaces verbs: - get - list @@ -309,7 +343,6 @@ rules: - ciliumclusterwideenvoyconfigs.cilium.io - ciliumclusterwidenetworkpolicies.cilium.io - ciliumegressgatewaypolicies.cilium.io - - ciliumegressnatpolicies.cilium.io - ciliumendpoints.cilium.io - ciliumendpointslices.cilium.io - ciliumenvoyconfigs.cilium.io @@ -318,6 +351,10 @@ rules: - ciliumlocalredirectpolicies.cilium.io - ciliumnetworkpolicies.cilium.io - ciliumnodes.cilium.io + - ciliumnodeconfigs.cilium.io + - ciliumcidrgroups.cilium.io + - ciliuml2announcementpolicies.cilium.io + - ciliumpodippools.cilium.io resources: - customresourcedefinitions verbs: @@ -326,10 +363,17 @@ rules: - cilium.io resources: - ciliumloadbalancerippools + - ciliumpodippools verbs: - get - list - watch +- apiGroups: + - cilium.io + resources: + - ciliumpodippools + verbs: + - create - apiGroups: - cilium.io resources: @@ -354,6 +398,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium roleRef: @@ -374,6 +419,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium-operator roleRef: @@ -387,6 +433,51 @@ subjects: --- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium + role.kubernetes.io/networking: "1" + name: cilium-config-agent + namespace: kube-system +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium + role.kubernetes.io/networking: "1" + name: cilium-config-agent + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cilium-config-agent +subjects: +- kind: ServiceAccount + name: cilium + namespace: kube-system + +--- + apiVersion: apps/v1 kind: DaemonSet metadata: @@ -394,6 +485,8 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: cilium-agent + app.kubernetes.io/part-of: cilium k8s-app: cilium kubernetes.io/cluster-service: "true" role.kubernetes.io/networking: "1" @@ -408,6 +501,8 @@ spec: metadata: creationTimestamp: null labels: + app.kubernetes.io/name: cilium-agent + app.kubernetes.io/part-of: cilium k8s-app: cilium kops.k8s.io/managed-by: kops kubernetes.io/cluster-service: "true" @@ -455,14 +550,9 @@ spec: value: api.internal.minimal-ipv6.example.com - name: KUBERNETES_SERVICE_PORT value: "443" - image: quay.io/cilium/cilium:v1.13.5 + image: 
quay.io/cilium/cilium:v1.14.3 imagePullPolicy: IfNotPresent lifecycle: - postStart: - exec: - command: - - /cni-install.sh - - --cni-exclusive=true preStop: exec: command: @@ -481,6 +571,7 @@ spec: successThreshold: 1 timeoutSeconds: 5 name: cilium-agent + ports: null readinessProbe: failureThreshold: 3 httpGet: @@ -491,7 +582,6 @@ spec: path: /healthz port: 9879 scheme: HTTP - initialDelaySeconds: 5 periodSeconds: 30 successThreshold: 1 timeoutSeconds: 5 @@ -512,12 +602,14 @@ spec: port: 9879 scheme: HTTP periodSeconds: 2 - successThreshold: null + successThreshold: 1 terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - mountPath: /sys/fs/bpf mountPropagation: Bidirectional name: bpf-maps + - mountPath: /run/cilium/cgroupv2 + name: cilium-cgroup - mountPath: /var/run/cilium name: cilium-run - mountPath: /host/etc/cni/net.d @@ -533,25 +625,78 @@ spec: readOnly: true - mountPath: /run/xtables.lock name: xtables-lock + - mountPath: /tmp + name: tmp hostNetwork: true initContainers: - command: - - /install-plugin.sh - image: quay.io/cilium/cilium:v1.13.5 + - cilium + - build-config + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: KUBERNETES_SERVICE_HOST + value: api.internal.minimal-ipv6.example.com + - name: KUBERNETES_SERVICE_PORT + value: "443" + image: quay.io/cilium/cilium:v1.14.3 imagePullPolicy: IfNotPresent - name: install-cni-binaries - resources: - requests: - cpu: 100m - memory: 10Mi - securityContext: - capabilities: - drop: - - ALL - terminationMessagePath: /dev/termination-log + name: config terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - - mountPath: /host/opt/cni/bin + - mountPath: /tmp + name: tmp + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-mount /hostbin/cilium-mount; + nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT; + rm /hostbin/cilium-mount + env: + - name: CGROUP_ROOT + value: /run/cilium/cgroupv2 + - name: BIN_PATH + value: /opt/cni/bin + image: quay.io/cilium/cilium:v1.14.3 + imagePullPolicy: IfNotPresent + name: mount-cgroup + securityContext: + privileged: true + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin + name: cni-path + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix; + nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix"; + rm /hostbin/cilium-sysctlfix + env: + - name: BIN_PATH + value: /opt/cni/bin + image: quay.io/cilium/cilium:v1.14.3 + imagePullPolicy: IfNotPresent + name: apply-sysctl-overwrites + securityContext: + privileged: true + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin name: cni-path - command: - /init-container.sh @@ -568,26 +713,43 @@ spec: key: clean-cilium-bpf-state name: cilium-config optional: true - image: quay.io/cilium/cilium:v1.13.5 + - name: KUBERNETES_SERVICE_HOST + value: api.internal.minimal-ipv6.example.com + - name: KUBERNETES_SERVICE_PORT + value: "443" + image: quay.io/cilium/cilium:v1.14.3 imagePullPolicy: IfNotPresent name: clean-cilium-state - resources: - limits: - memory: 100Mi - requests: - cpu: 100m - memory: 100Mi securityContext: privileged: true terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - mountPath: /sys/fs/bpf + 
mountPropagation: HostToContainer name: bpf-maps - mountPath: /run/cilium/cgroupv2 mountPropagation: HostToContainer name: cilium-cgroup - mountPath: /var/run/cilium name: cilium-run + - command: + - /install-plugin.sh + image: quay.io/cilium/cilium:v1.14.3 + imagePullPolicy: IfNotPresent + name: install-cni-binaries + resources: + requests: + cpu: 100m + memory: 10Mi + securityContext: + capabilities: + drop: + - ALL + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-path priorityClassName: system-node-critical restartPolicy: Always serviceAccount: cilium @@ -596,6 +758,8 @@ spec: tolerations: - operator: Exists volumes: + - emptyDir: {} + name: tmp - hostPath: path: /var/run/cilium type: DirectoryOrCreate @@ -604,14 +768,18 @@ spec: path: /sys/fs/bpf type: DirectoryOrCreate name: bpf-maps + - hostPath: + path: /proc + type: Directory + name: hostproc + - hostPath: + path: /run/cilium/cgroupv2 + type: DirectoryOrCreate + name: cilium-cgroup - hostPath: path: /opt/cni/bin type: DirectoryOrCreate name: cni-path - - hostPath: - path: /run/cilium/cgroupv2 - type: Directory - name: cilium-cgroup - hostPath: path: /etc/cni/net.d type: DirectoryOrCreate @@ -624,10 +792,22 @@ spec: type: FileOrCreate name: xtables-lock - name: clustermesh-secrets - secret: - defaultMode: 420 - optional: true - secretName: cilium-clustermesh + projected: + defaultMode: 256 + sources: + - secret: + name: cilium-clustermesh + optional: true + - secret: + items: + - key: tls.key + path: common-etcd-client.key + - key: tls.crt + path: common-etcd-client.crt + - key: ca.crt + path: common-etcd-client-ca.crt + name: clustermesh-apiserver-remote-cert + optional: true - configMap: name: cilium-config name: cilium-config-path @@ -643,6 +823,8 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: cilium-operator + app.kubernetes.io/part-of: cilium io.cilium/app: operator name: cilium-operator role.kubernetes.io/networking: "1" @@ -663,6 +845,8 @@ spec: metadata: creationTimestamp: null labels: + app.kubernetes.io/name: cilium-operator + app.kubernetes.io/part-of: cilium io.cilium/app: operator kops.k8s.io/managed-by: kops name: cilium-operator @@ -705,11 +889,11 @@ spec: value: api.internal.minimal-ipv6.example.com - name: KUBERNETES_SERVICE_PORT value: "443" - image: quay.io/cilium/operator:v1.13.5 + image: quay.io/cilium/operator:v1.14.3 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - host: 127.0.0.1 + host: ::1 path: /healthz port: 9234 scheme: HTTP diff --git a/tests/integration/update_cluster/minimal-ipv6-cilium/kubernetes.tf b/tests/integration/update_cluster/minimal-ipv6-cilium/kubernetes.tf index b95b25f989..033e3572e2 100644 --- a/tests/integration/update_cluster/minimal-ipv6-cilium/kubernetes.tf +++ b/tests/integration/update_cluster/minimal-ipv6-cilium/kubernetes.tf @@ -944,7 +944,7 @@ resource "aws_s3_object" "minimal-ipv6-example-com-addons-limit-range-addons-k8s resource "aws_s3_object" "minimal-ipv6-example-com-addons-networking-cilium-io-k8s-1-16" { bucket = "testingBucket" content = file("${path.module}/data/aws_s3_object_minimal-ipv6.example.com-addons-networking.cilium.io-k8s-1.16_content") - key = "clusters.example.com/minimal-ipv6.example.com/addons/networking.cilium.io/k8s-1.16-v1.13.yaml" + key = "clusters.example.com/minimal-ipv6.example.com/addons/networking.cilium.io/k8s-1.16-v1.14.yaml" provider = aws.files 
server_side_encryption = "AES256" } diff --git a/tests/integration/update_cluster/minimal-warmpool/data/aws_launch_template_nodes.minimal-warmpool.example.com_user_data b/tests/integration/update_cluster/minimal-warmpool/data/aws_launch_template_nodes.minimal-warmpool.example.com_user_data index c43b43a3d0..4d593e5346 100644 --- a/tests/integration/update_cluster/minimal-warmpool/data/aws_launch_template_nodes.minimal-warmpool.example.com_user_data +++ b/tests/integration/update_cluster/minimal-warmpool/data/aws_launch_template_nodes.minimal-warmpool.example.com_user_data @@ -151,7 +151,7 @@ ConfigServer: - https://kops-controller.internal.minimal-warmpool.example.com:3988/ InstanceGroupName: nodes InstanceGroupRole: Node -NodeupConfigHash: ixr/jHtjzunYpmsBwkCwqeEL1lBBh7cpOWEMPi1HAvA= +NodeupConfigHash: Ic8Yx6WnZ6jJljDBQI2bf2kvkOboul3gKc16oTpHlDI= __EOF_KUBE_ENV diff --git a/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_cluster-completed.spec_content index 599d2ab743..8705a2072a 100644 --- a/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_cluster-completed.spec_content @@ -218,7 +218,7 @@ spec: sidecarIstioProxyImage: cilium/istio_proxy toFqdnsDnsRejectResponseCode: refused tunnel: vxlan - version: v1.13.5 + version: v1.14.3 nodeTerminationHandler: cpuRequest: 50m enableRebalanceDraining: false diff --git a/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_minimal-warmpool.example.com-addons-bootstrap_content b/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_minimal-warmpool.example.com-addons-bootstrap_content index 3b0f4c2339..37fee5155c 100644 --- a/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_minimal-warmpool.example.com-addons-bootstrap_content +++ b/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_minimal-warmpool.example.com-addons-bootstrap_content @@ -98,8 +98,8 @@ spec: k8s-addon: storage-aws.addons.k8s.io version: 9.99.0 - id: k8s-1.16 - manifest: networking.cilium.io/k8s-1.16-v1.13.yaml - manifestHash: 3ce725cc07a4344fb82f4666145c6dd4070d10217a9bf43939bada12094cce95 + manifest: networking.cilium.io/k8s-1.16-v1.14.yaml + manifestHash: d939c9dc17f34da1cf748f890373d6c0d474f5a08e022306c2feaa9f116f2781 name: networking.cilium.io needsRollingUpdate: all selector: diff --git a/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_minimal-warmpool.example.com-addons-networking.cilium.io-k8s-1.16_content b/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_minimal-warmpool.example.com-addons-networking.cilium.io-k8s-1.16_content index 8137a58172..75f3628afa 100644 --- a/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_minimal-warmpool.example.com-addons-networking.cilium.io-k8s-1.16_content +++ b/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_minimal-warmpool.example.com-addons-networking.cilium.io-k8s-1.16_content @@ -39,6 +39,8 @@ data: bpf-policy-map-max: "16384" cgroup-root: /run/cilium/cgroupv2 cluster-name: default + cni-exclusive: "true" + cni-log-file: /var/run/cilium/cilium-cni.log debug: "false" disable-cnp-status-updates: "true" disable-endpoint-crd: "false" @@ -57,14 +59,19 @@ data: identity-change-grace-period: 5s install-iptables-rules: "true" ipam: kubernetes - 
kube-proxy-replacement: partial + kube-proxy-replacement: "false" monitor-aggregation: medium nodes-gc-interval: 5m0s preallocate-bpf-maps: "false" + remove-cilium-node-taints: "true" + routing-mode: tunnel + set-cilium-is-up-condition: "true" + set-cilium-node-taints: "true" sidecar-istio-proxy-image: cilium/istio_proxy tofqdns-dns-reject-response-code: refused tofqdns-enable-poller: "false" - tunnel: vxlan + tunnel-protocol: vxlan + write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist kind: ConfigMap metadata: creationTimestamp: null @@ -84,6 +91,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium rules: @@ -131,7 +139,6 @@ rules: - ciliumclusterwideenvoyconfigs - ciliumclusterwidenetworkpolicies - ciliumegressgatewaypolicies - - ciliumegressnatpolicies - ciliumendpoints - ciliumendpointslices - ciliumenvoyconfigs @@ -139,6 +146,10 @@ rules: - ciliumlocalredirectpolicies - ciliumnetworkpolicies - ciliumnodes + - ciliumnodeconfigs + - ciliumcidrgroups + - ciliuml2announcementpolicies + - ciliumpodippools verbs: - list - watch @@ -178,6 +189,7 @@ rules: - ciliumclusterwidenetworkpolicies/status - ciliumendpoints/status - ciliumendpoints + - ciliuml2announcementpolicies/status verbs: - patch @@ -190,6 +202,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium-operator rules: @@ -201,6 +214,21 @@ rules: - get - list - watch + - delete +- apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - nodes + - nodes/status + verbs: + - patch - apiGroups: - discovery.k8s.io resources: @@ -212,8 +240,16 @@ rules: - apiGroups: - "" resources: - - nodes + - services/status verbs: + - update + - patch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get - list - watch - apiGroups: @@ -221,7 +257,6 @@ rules: resources: - services - endpoints - - namespaces verbs: - get - list @@ -309,7 +344,6 @@ rules: - ciliumclusterwideenvoyconfigs.cilium.io - ciliumclusterwidenetworkpolicies.cilium.io - ciliumegressgatewaypolicies.cilium.io - - ciliumegressnatpolicies.cilium.io - ciliumendpoints.cilium.io - ciliumendpointslices.cilium.io - ciliumenvoyconfigs.cilium.io @@ -318,6 +352,10 @@ rules: - ciliumlocalredirectpolicies.cilium.io - ciliumnetworkpolicies.cilium.io - ciliumnodes.cilium.io + - ciliumnodeconfigs.cilium.io + - ciliumcidrgroups.cilium.io + - ciliuml2announcementpolicies.cilium.io + - ciliumpodippools.cilium.io resources: - customresourcedefinitions verbs: @@ -326,10 +364,17 @@ rules: - cilium.io resources: - ciliumloadbalancerippools + - ciliumpodippools verbs: - get - list - watch +- apiGroups: + - cilium.io + resources: + - ciliumpodippools + verbs: + - create - apiGroups: - cilium.io resources: @@ -354,6 +399,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium roleRef: @@ -374,6 +420,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium-operator roleRef: @@ -387,6 +434,51 @@ subjects: --- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + creationTimestamp: null + labels: + 
addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium + role.kubernetes.io/networking: "1" + name: cilium-config-agent + namespace: kube-system +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium + role.kubernetes.io/networking: "1" + name: cilium-config-agent + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cilium-config-agent +subjects: +- kind: ServiceAccount + name: cilium + namespace: kube-system + +--- + apiVersion: apps/v1 kind: DaemonSet metadata: @@ -394,6 +486,8 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: cilium-agent + app.kubernetes.io/part-of: cilium k8s-app: cilium kubernetes.io/cluster-service: "true" role.kubernetes.io/networking: "1" @@ -408,6 +502,8 @@ spec: metadata: creationTimestamp: null labels: + app.kubernetes.io/name: cilium-agent + app.kubernetes.io/part-of: cilium k8s-app: cilium kops.k8s.io/managed-by: kops kubernetes.io/cluster-service: "true" @@ -455,14 +551,9 @@ spec: value: api.internal.minimal-warmpool.example.com - name: KUBERNETES_SERVICE_PORT value: "443" - image: quay.io/cilium/cilium:v1.13.5 + image: quay.io/cilium/cilium:v1.14.3 imagePullPolicy: IfNotPresent lifecycle: - postStart: - exec: - command: - - /cni-install.sh - - --cni-exclusive=true preStop: exec: command: @@ -481,6 +572,7 @@ spec: successThreshold: 1 timeoutSeconds: 5 name: cilium-agent + ports: null readinessProbe: failureThreshold: 3 httpGet: @@ -491,7 +583,6 @@ spec: path: /healthz port: 9879 scheme: HTTP - initialDelaySeconds: 5 periodSeconds: 30 successThreshold: 1 timeoutSeconds: 5 @@ -512,12 +603,14 @@ spec: port: 9879 scheme: HTTP periodSeconds: 2 - successThreshold: null + successThreshold: 1 terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - mountPath: /sys/fs/bpf mountPropagation: Bidirectional name: bpf-maps + - mountPath: /run/cilium/cgroupv2 + name: cilium-cgroup - mountPath: /var/run/cilium name: cilium-run - mountPath: /host/etc/cni/net.d @@ -533,25 +626,78 @@ spec: readOnly: true - mountPath: /run/xtables.lock name: xtables-lock + - mountPath: /tmp + name: tmp hostNetwork: true initContainers: - command: - - /install-plugin.sh - image: quay.io/cilium/cilium:v1.13.5 + - cilium + - build-config + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: KUBERNETES_SERVICE_HOST + value: api.internal.minimal-warmpool.example.com + - name: KUBERNETES_SERVICE_PORT + value: "443" + image: quay.io/cilium/cilium:v1.14.3 imagePullPolicy: IfNotPresent - name: install-cni-binaries - resources: - requests: - cpu: 100m - memory: 10Mi - securityContext: - capabilities: - drop: - - ALL - terminationMessagePath: /dev/termination-log + name: config terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - - mountPath: /host/opt/cni/bin + - mountPath: /tmp + name: tmp + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-mount /hostbin/cilium-mount; + nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT; + 
rm /hostbin/cilium-mount + env: + - name: CGROUP_ROOT + value: /run/cilium/cgroupv2 + - name: BIN_PATH + value: /opt/cni/bin + image: quay.io/cilium/cilium:v1.14.3 + imagePullPolicy: IfNotPresent + name: mount-cgroup + securityContext: + privileged: true + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin + name: cni-path + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix; + nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix"; + rm /hostbin/cilium-sysctlfix + env: + - name: BIN_PATH + value: /opt/cni/bin + image: quay.io/cilium/cilium:v1.14.3 + imagePullPolicy: IfNotPresent + name: apply-sysctl-overwrites + securityContext: + privileged: true + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin name: cni-path - command: - /init-container.sh @@ -568,26 +714,43 @@ spec: key: clean-cilium-bpf-state name: cilium-config optional: true - image: quay.io/cilium/cilium:v1.13.5 + - name: KUBERNETES_SERVICE_HOST + value: api.internal.minimal-warmpool.example.com + - name: KUBERNETES_SERVICE_PORT + value: "443" + image: quay.io/cilium/cilium:v1.14.3 imagePullPolicy: IfNotPresent name: clean-cilium-state - resources: - limits: - memory: 100Mi - requests: - cpu: 100m - memory: 100Mi securityContext: privileged: true terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - mountPath: /sys/fs/bpf + mountPropagation: HostToContainer name: bpf-maps - mountPath: /run/cilium/cgroupv2 mountPropagation: HostToContainer name: cilium-cgroup - mountPath: /var/run/cilium name: cilium-run + - command: + - /install-plugin.sh + image: quay.io/cilium/cilium:v1.14.3 + imagePullPolicy: IfNotPresent + name: install-cni-binaries + resources: + requests: + cpu: 100m + memory: 10Mi + securityContext: + capabilities: + drop: + - ALL + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-path priorityClassName: system-node-critical restartPolicy: Always serviceAccount: cilium @@ -596,6 +759,8 @@ spec: tolerations: - operator: Exists volumes: + - emptyDir: {} + name: tmp - hostPath: path: /var/run/cilium type: DirectoryOrCreate @@ -604,14 +769,18 @@ spec: path: /sys/fs/bpf type: DirectoryOrCreate name: bpf-maps + - hostPath: + path: /proc + type: Directory + name: hostproc + - hostPath: + path: /run/cilium/cgroupv2 + type: DirectoryOrCreate + name: cilium-cgroup - hostPath: path: /opt/cni/bin type: DirectoryOrCreate name: cni-path - - hostPath: - path: /run/cilium/cgroupv2 - type: Directory - name: cilium-cgroup - hostPath: path: /etc/cni/net.d type: DirectoryOrCreate @@ -624,10 +793,22 @@ spec: type: FileOrCreate name: xtables-lock - name: clustermesh-secrets - secret: - defaultMode: 420 - optional: true - secretName: cilium-clustermesh + projected: + defaultMode: 256 + sources: + - secret: + name: cilium-clustermesh + optional: true + - secret: + items: + - key: tls.key + path: common-etcd-client.key + - key: tls.crt + path: common-etcd-client.crt + - key: ca.crt + path: common-etcd-client-ca.crt + name: clustermesh-apiserver-remote-cert + optional: true - configMap: name: cilium-config name: cilium-config-path @@ -643,6 +824,8 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: cilium-operator + app.kubernetes.io/part-of: cilium 
io.cilium/app: operator name: cilium-operator role.kubernetes.io/networking: "1" @@ -663,6 +846,8 @@ spec: metadata: creationTimestamp: null labels: + app.kubernetes.io/name: cilium-operator + app.kubernetes.io/part-of: cilium io.cilium/app: operator kops.k8s.io/managed-by: kops name: cilium-operator @@ -705,7 +890,7 @@ spec: value: api.internal.minimal-warmpool.example.com - name: KUBERNETES_SERVICE_PORT value: "443" - image: quay.io/cilium/operator:v1.13.5 + image: quay.io/cilium/operator:v1.14.3 imagePullPolicy: IfNotPresent livenessProbe: httpGet: diff --git a/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_nodeupconfig-nodes_content b/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_nodeupconfig-nodes_content index 75052880d5..4d666fa278 100644 --- a/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_nodeupconfig-nodes_content +++ b/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_nodeupconfig-nodes_content @@ -60,8 +60,8 @@ containerdConfig: usesLegacyGossip: false usesNoneDNS: false warmPoolImages: -- quay.io/cilium/cilium:v1.13.5 -- quay.io/cilium/operator:v1.13.5 +- quay.io/cilium/cilium:v1.14.3 +- quay.io/cilium/operator:v1.14.3 - registry.k8s.io/kube-proxy:v1.26.0 - registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.14.1 - registry.k8s.io/provider-aws/cloud-controller-manager:v1.26.6 diff --git a/tests/integration/update_cluster/minimal-warmpool/kubernetes.tf b/tests/integration/update_cluster/minimal-warmpool/kubernetes.tf index add0853e60..b57d51c081 100644 --- a/tests/integration/update_cluster/minimal-warmpool/kubernetes.tf +++ b/tests/integration/update_cluster/minimal-warmpool/kubernetes.tf @@ -712,7 +712,7 @@ resource "aws_s3_object" "minimal-warmpool-example-com-addons-limit-range-addons resource "aws_s3_object" "minimal-warmpool-example-com-addons-networking-cilium-io-k8s-1-16" { bucket = "testingBucket" content = file("${path.module}/data/aws_s3_object_minimal-warmpool.example.com-addons-networking.cilium.io-k8s-1.16_content") - key = "clusters.example.com/minimal-warmpool.example.com/addons/networking.cilium.io/k8s-1.16-v1.13.yaml" + key = "clusters.example.com/minimal-warmpool.example.com/addons/networking.cilium.io/k8s-1.16-v1.14.yaml" provider = aws.files server_side_encryption = "AES256" } diff --git a/tests/integration/update_cluster/minimal_scaleway/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/minimal_scaleway/data/aws_s3_object_cluster-completed.spec_content index 33772d3893..1efebf0d00 100644 --- a/tests/integration/update_cluster/minimal_scaleway/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/minimal_scaleway/data/aws_s3_object_cluster-completed.spec_content @@ -199,7 +199,7 @@ spec: sidecarIstioProxyImage: cilium/istio_proxy toFqdnsDnsRejectResponseCode: refused tunnel: vxlan - version: v1.13.5 + version: v1.14.3 nonMasqueradeCIDR: 100.64.0.0/10 podCIDR: 100.96.0.0/11 secretStore: memfs://tests/scw-minimal.k8s.local/secrets diff --git a/tests/integration/update_cluster/minimal_scaleway/data/aws_s3_object_scw-minimal.k8s.local-addons-bootstrap_content b/tests/integration/update_cluster/minimal_scaleway/data/aws_s3_object_scw-minimal.k8s.local-addons-bootstrap_content index 98a6828ee2..c8e8962cbb 100644 --- a/tests/integration/update_cluster/minimal_scaleway/data/aws_s3_object_scw-minimal.k8s.local-addons-bootstrap_content +++ 
b/tests/integration/update_cluster/minimal_scaleway/data/aws_s3_object_scw-minimal.k8s.local-addons-bootstrap_content @@ -54,8 +54,8 @@ spec: k8s-addon: scaleway-csi-driver.addons.k8s.io version: 9.99.0 - id: k8s-1.16 - manifest: networking.cilium.io/k8s-1.16-v1.13.yaml - manifestHash: 0965eae063f29669172d217374bc812d27eab79b5e2daeeda759de9ba7fdfeb6 + manifest: networking.cilium.io/k8s-1.16-v1.14.yaml + manifestHash: 5ecc7aca559459d06d6474991ec3b6e034f75834c971213fec620aeafe31eb71 name: networking.cilium.io needsRollingUpdate: all selector: diff --git a/tests/integration/update_cluster/minimal_scaleway/data/aws_s3_object_scw-minimal.k8s.local-addons-networking.cilium.io-k8s-1.16_content b/tests/integration/update_cluster/minimal_scaleway/data/aws_s3_object_scw-minimal.k8s.local-addons-networking.cilium.io-k8s-1.16_content index 3d7ec9e62b..e78ea075fc 100644 --- a/tests/integration/update_cluster/minimal_scaleway/data/aws_s3_object_scw-minimal.k8s.local-addons-networking.cilium.io-k8s-1.16_content +++ b/tests/integration/update_cluster/minimal_scaleway/data/aws_s3_object_scw-minimal.k8s.local-addons-networking.cilium.io-k8s-1.16_content @@ -39,6 +39,8 @@ data: bpf-policy-map-max: "16384" cgroup-root: /run/cilium/cgroupv2 cluster-name: default + cni-exclusive: "true" + cni-log-file: /var/run/cilium/cilium-cni.log debug: "false" disable-cnp-status-updates: "true" disable-endpoint-crd: "false" @@ -57,14 +59,19 @@ data: identity-change-grace-period: 5s install-iptables-rules: "true" ipam: kubernetes - kube-proxy-replacement: strict + kube-proxy-replacement: "true" monitor-aggregation: medium nodes-gc-interval: 5m0s preallocate-bpf-maps: "false" + remove-cilium-node-taints: "true" + routing-mode: tunnel + set-cilium-is-up-condition: "true" + set-cilium-node-taints: "true" sidecar-istio-proxy-image: cilium/istio_proxy tofqdns-dns-reject-response-code: refused tofqdns-enable-poller: "false" - tunnel: vxlan + tunnel-protocol: vxlan + write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist kind: ConfigMap metadata: creationTimestamp: null @@ -84,6 +91,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium rules: @@ -131,7 +139,6 @@ rules: - ciliumclusterwideenvoyconfigs - ciliumclusterwidenetworkpolicies - ciliumegressgatewaypolicies - - ciliumegressnatpolicies - ciliumendpoints - ciliumendpointslices - ciliumenvoyconfigs @@ -139,6 +146,10 @@ rules: - ciliumlocalredirectpolicies - ciliumnetworkpolicies - ciliumnodes + - ciliumnodeconfigs + - ciliumcidrgroups + - ciliuml2announcementpolicies + - ciliumpodippools verbs: - list - watch @@ -178,6 +189,7 @@ rules: - ciliumclusterwidenetworkpolicies/status - ciliumendpoints/status - ciliumendpoints + - ciliuml2announcementpolicies/status verbs: - patch @@ -190,6 +202,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium-operator rules: @@ -201,6 +214,21 @@ rules: - get - list - watch + - delete +- apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - nodes + - nodes/status + verbs: + - patch - apiGroups: - discovery.k8s.io resources: @@ -212,8 +240,16 @@ rules: - apiGroups: - "" resources: - - nodes + - services/status verbs: + - update + - patch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get - list - watch - 
apiGroups: @@ -221,7 +257,6 @@ rules: resources: - services - endpoints - - namespaces verbs: - get - list @@ -309,7 +344,6 @@ rules: - ciliumclusterwideenvoyconfigs.cilium.io - ciliumclusterwidenetworkpolicies.cilium.io - ciliumegressgatewaypolicies.cilium.io - - ciliumegressnatpolicies.cilium.io - ciliumendpoints.cilium.io - ciliumendpointslices.cilium.io - ciliumenvoyconfigs.cilium.io @@ -318,6 +352,10 @@ rules: - ciliumlocalredirectpolicies.cilium.io - ciliumnetworkpolicies.cilium.io - ciliumnodes.cilium.io + - ciliumnodeconfigs.cilium.io + - ciliumcidrgroups.cilium.io + - ciliuml2announcementpolicies.cilium.io + - ciliumpodippools.cilium.io resources: - customresourcedefinitions verbs: @@ -326,10 +364,17 @@ rules: - cilium.io resources: - ciliumloadbalancerippools + - ciliumpodippools verbs: - get - list - watch +- apiGroups: + - cilium.io + resources: + - ciliumpodippools + verbs: + - create - apiGroups: - cilium.io resources: @@ -354,6 +399,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium roleRef: @@ -374,6 +420,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium-operator roleRef: @@ -387,6 +434,51 @@ subjects: --- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium + role.kubernetes.io/networking: "1" + name: cilium-config-agent + namespace: kube-system +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium + role.kubernetes.io/networking: "1" + name: cilium-config-agent + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cilium-config-agent +subjects: +- kind: ServiceAccount + name: cilium + namespace: kube-system + +--- + apiVersion: apps/v1 kind: DaemonSet metadata: @@ -394,6 +486,8 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: cilium-agent + app.kubernetes.io/part-of: cilium k8s-app: cilium kubernetes.io/cluster-service: "true" role.kubernetes.io/networking: "1" @@ -408,6 +502,8 @@ spec: metadata: creationTimestamp: null labels: + app.kubernetes.io/name: cilium-agent + app.kubernetes.io/part-of: cilium k8s-app: cilium kops.k8s.io/managed-by: kops kubernetes.io/cluster-service: "true" @@ -455,14 +551,9 @@ spec: value: api.internal.scw-minimal.k8s.local - name: KUBERNETES_SERVICE_PORT value: "443" - image: quay.io/cilium/cilium:v1.13.5 + image: quay.io/cilium/cilium:v1.14.3 imagePullPolicy: IfNotPresent lifecycle: - postStart: - exec: - command: - - /cni-install.sh - - --cni-exclusive=true preStop: exec: command: @@ -481,6 +572,7 @@ spec: successThreshold: 1 timeoutSeconds: 5 name: cilium-agent + ports: null readinessProbe: failureThreshold: 3 httpGet: @@ -491,7 +583,6 @@ spec: path: /healthz port: 9879 scheme: HTTP - initialDelaySeconds: 5 periodSeconds: 30 successThreshold: 1 timeoutSeconds: 5 @@ -512,12 +603,14 @@ spec: port: 9879 scheme: HTTP 
periodSeconds: 2 - successThreshold: null + successThreshold: 1 terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - mountPath: /sys/fs/bpf mountPropagation: Bidirectional name: bpf-maps + - mountPath: /run/cilium/cgroupv2 + name: cilium-cgroup - mountPath: /var/run/cilium name: cilium-run - mountPath: /host/etc/cni/net.d @@ -533,25 +626,78 @@ spec: readOnly: true - mountPath: /run/xtables.lock name: xtables-lock + - mountPath: /tmp + name: tmp hostNetwork: true initContainers: - command: - - /install-plugin.sh - image: quay.io/cilium/cilium:v1.13.5 + - cilium + - build-config + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: KUBERNETES_SERVICE_HOST + value: api.internal.scw-minimal.k8s.local + - name: KUBERNETES_SERVICE_PORT + value: "443" + image: quay.io/cilium/cilium:v1.14.3 imagePullPolicy: IfNotPresent - name: install-cni-binaries - resources: - requests: - cpu: 100m - memory: 10Mi - securityContext: - capabilities: - drop: - - ALL - terminationMessagePath: /dev/termination-log + name: config terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - - mountPath: /host/opt/cni/bin + - mountPath: /tmp + name: tmp + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-mount /hostbin/cilium-mount; + nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT; + rm /hostbin/cilium-mount + env: + - name: CGROUP_ROOT + value: /run/cilium/cgroupv2 + - name: BIN_PATH + value: /opt/cni/bin + image: quay.io/cilium/cilium:v1.14.3 + imagePullPolicy: IfNotPresent + name: mount-cgroup + securityContext: + privileged: true + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin + name: cni-path + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix; + nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix"; + rm /hostbin/cilium-sysctlfix + env: + - name: BIN_PATH + value: /opt/cni/bin + image: quay.io/cilium/cilium:v1.14.3 + imagePullPolicy: IfNotPresent + name: apply-sysctl-overwrites + securityContext: + privileged: true + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin name: cni-path - command: - /init-container.sh @@ -568,26 +714,43 @@ spec: key: clean-cilium-bpf-state name: cilium-config optional: true - image: quay.io/cilium/cilium:v1.13.5 + - name: KUBERNETES_SERVICE_HOST + value: api.internal.scw-minimal.k8s.local + - name: KUBERNETES_SERVICE_PORT + value: "443" + image: quay.io/cilium/cilium:v1.14.3 imagePullPolicy: IfNotPresent name: clean-cilium-state - resources: - limits: - memory: 100Mi - requests: - cpu: 100m - memory: 100Mi securityContext: privileged: true terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - mountPath: /sys/fs/bpf + mountPropagation: HostToContainer name: bpf-maps - mountPath: /run/cilium/cgroupv2 mountPropagation: HostToContainer name: cilium-cgroup - mountPath: /var/run/cilium name: cilium-run + - command: + - /install-plugin.sh + image: quay.io/cilium/cilium:v1.14.3 + imagePullPolicy: IfNotPresent + name: install-cni-binaries + resources: + requests: + cpu: 100m + memory: 10Mi + securityContext: + capabilities: + drop: + - ALL + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: FallbackToLogsOnError + 
volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-path priorityClassName: system-node-critical restartPolicy: Always serviceAccount: cilium @@ -596,6 +759,8 @@ spec: tolerations: - operator: Exists volumes: + - emptyDir: {} + name: tmp - hostPath: path: /var/run/cilium type: DirectoryOrCreate @@ -604,14 +769,18 @@ spec: path: /sys/fs/bpf type: DirectoryOrCreate name: bpf-maps + - hostPath: + path: /proc + type: Directory + name: hostproc + - hostPath: + path: /run/cilium/cgroupv2 + type: DirectoryOrCreate + name: cilium-cgroup - hostPath: path: /opt/cni/bin type: DirectoryOrCreate name: cni-path - - hostPath: - path: /run/cilium/cgroupv2 - type: Directory - name: cilium-cgroup - hostPath: path: /etc/cni/net.d type: DirectoryOrCreate @@ -624,10 +793,22 @@ spec: type: FileOrCreate name: xtables-lock - name: clustermesh-secrets - secret: - defaultMode: 420 - optional: true - secretName: cilium-clustermesh + projected: + defaultMode: 256 + sources: + - secret: + name: cilium-clustermesh + optional: true + - secret: + items: + - key: tls.key + path: common-etcd-client.key + - key: tls.crt + path: common-etcd-client.crt + - key: ca.crt + path: common-etcd-client-ca.crt + name: clustermesh-apiserver-remote-cert + optional: true - configMap: name: cilium-config name: cilium-config-path @@ -643,6 +824,8 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: cilium-operator + app.kubernetes.io/part-of: cilium io.cilium/app: operator name: cilium-operator role.kubernetes.io/networking: "1" @@ -663,6 +846,8 @@ spec: metadata: creationTimestamp: null labels: + app.kubernetes.io/name: cilium-operator + app.kubernetes.io/part-of: cilium io.cilium/app: operator kops.k8s.io/managed-by: kops name: cilium-operator @@ -705,7 +890,7 @@ spec: value: api.internal.scw-minimal.k8s.local - name: KUBERNETES_SERVICE_PORT value: "443" - image: quay.io/cilium/operator:v1.13.5 + image: quay.io/cilium/operator:v1.14.3 imagePullPolicy: IfNotPresent livenessProbe: httpGet: diff --git a/tests/integration/update_cluster/minimal_scaleway/kubernetes.tf b/tests/integration/update_cluster/minimal_scaleway/kubernetes.tf index 86370f4f65..2b726d3d8d 100644 --- a/tests/integration/update_cluster/minimal_scaleway/kubernetes.tf +++ b/tests/integration/update_cluster/minimal_scaleway/kubernetes.tf @@ -149,7 +149,7 @@ resource "aws_s3_object" "scw-minimal-k8s-local-addons-limit-range-addons-k8s-io resource "aws_s3_object" "scw-minimal-k8s-local-addons-networking-cilium-io-k8s-1-16" { bucket = "testingBucket" content = file("${path.module}/data/aws_s3_object_scw-minimal.k8s.local-addons-networking.cilium.io-k8s-1.16_content") - key = "tests/scw-minimal.k8s.local/addons/networking.cilium.io/k8s-1.16-v1.13.yaml" + key = "tests/scw-minimal.k8s.local/addons/networking.cilium.io/k8s-1.16-v1.14.yaml" provider = aws.files server_side_encryption = "AES256" } diff --git a/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_cluster-completed.spec_content index dec90d094e..b11ac971c3 100644 --- a/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_cluster-completed.spec_content @@ -220,7 +220,7 @@ spec: sidecarIstioProxyImage: cilium/istio_proxy toFqdnsDnsRejectResponseCode: refused tunnel: disabled - version: v1.13.5 + 
version: v1.14.3 nodeTerminationHandler: cpuRequest: 50m enableRebalanceDraining: false diff --git a/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_privatecilium.example.com-addons-bootstrap_content b/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_privatecilium.example.com-addons-bootstrap_content index 71707900d6..5b31df378d 100644 --- a/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_privatecilium.example.com-addons-bootstrap_content +++ b/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_privatecilium.example.com-addons-bootstrap_content @@ -98,8 +98,8 @@ spec: k8s-addon: storage-aws.addons.k8s.io version: 9.99.0 - id: k8s-1.16 - manifest: networking.cilium.io/k8s-1.16-v1.13.yaml - manifestHash: f477be8a0899c266e8a71d80fc70ddd61b6564455ce75560b877d92c6f12a762 + manifest: networking.cilium.io/k8s-1.16-v1.14.yaml + manifestHash: dc0ffacc5b54ff7ce6d48ad648b291624ae47bbd80cbdd5268f48bc866a6cf3e name: networking.cilium.io needsRollingUpdate: all selector: diff --git a/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_privatecilium.example.com-addons-networking.cilium.io-k8s-1.16_content b/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_privatecilium.example.com-addons-networking.cilium.io-k8s-1.16_content index cead27d822..1eb13148b5 100644 --- a/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_privatecilium.example.com-addons-networking.cilium.io-k8s-1.16_content +++ b/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_privatecilium.example.com-addons-networking.cilium.io-k8s-1.16_content @@ -29,7 +29,6 @@ data: agent-health-port: "9879" auto-create-cilium-node-resource: "true" auto-direct-node-routes: "false" - blacklist-conflicting-routes: "false" bpf-ct-global-any-max: "262144" bpf-ct-global-tcp-max: "524288" bpf-lb-algorithm: random @@ -41,6 +40,8 @@ data: bpf-policy-map-max: "16384" cgroup-root: /run/cilium/cgroupv2 cluster-name: default + cni-exclusive: "true" + cni-log-file: /var/run/cilium/cilium-cni.log debug: "false" disable-cnp-status-updates: "true" disable-endpoint-crd: "false" @@ -56,18 +57,23 @@ data: enable-remote-node-identity: "true" enable-service-topology: "false" enable-unreachable-routes: "false" + eni-tags: KubernetesCluster=privatecilium.example.com identity-allocation-mode: crd identity-change-grace-period: 5s install-iptables-rules: "true" ipam: eni - kube-proxy-replacement: partial + kube-proxy-replacement: "false" monitor-aggregation: medium nodes-gc-interval: 5m0s preallocate-bpf-maps: "false" + remove-cilium-node-taints: "true" + routing-mode: native + set-cilium-is-up-condition: "true" + set-cilium-node-taints: "true" sidecar-istio-proxy-image: cilium/istio_proxy tofqdns-dns-reject-response-code: refused tofqdns-enable-poller: "false" - tunnel: disabled + write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist kind: ConfigMap metadata: creationTimestamp: null @@ -87,6 +93,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium rules: @@ -134,7 +141,6 @@ rules: - ciliumclusterwideenvoyconfigs - ciliumclusterwidenetworkpolicies - ciliumegressgatewaypolicies - - ciliumegressnatpolicies - ciliumendpoints - ciliumendpointslices - ciliumenvoyconfigs @@ -142,6 +148,10 @@ rules: - ciliumlocalredirectpolicies - ciliumnetworkpolicies - ciliumnodes + - 
ciliumnodeconfigs + - ciliumcidrgroups + - ciliuml2announcementpolicies + - ciliumpodippools verbs: - list - watch @@ -181,6 +191,7 @@ rules: - ciliumclusterwidenetworkpolicies/status - ciliumendpoints/status - ciliumendpoints + - ciliuml2announcementpolicies/status verbs: - patch @@ -193,6 +204,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium-operator rules: @@ -204,6 +216,21 @@ rules: - get - list - watch + - delete +- apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - nodes + - nodes/status + verbs: + - patch - apiGroups: - discovery.k8s.io resources: @@ -215,8 +242,16 @@ rules: - apiGroups: - "" resources: - - nodes + - services/status verbs: + - update + - patch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get - list - watch - apiGroups: @@ -224,7 +259,6 @@ rules: resources: - services - endpoints - - namespaces verbs: - get - list @@ -312,7 +346,6 @@ rules: - ciliumclusterwideenvoyconfigs.cilium.io - ciliumclusterwidenetworkpolicies.cilium.io - ciliumegressgatewaypolicies.cilium.io - - ciliumegressnatpolicies.cilium.io - ciliumendpoints.cilium.io - ciliumendpointslices.cilium.io - ciliumenvoyconfigs.cilium.io @@ -321,6 +354,10 @@ rules: - ciliumlocalredirectpolicies.cilium.io - ciliumnetworkpolicies.cilium.io - ciliumnodes.cilium.io + - ciliumnodeconfigs.cilium.io + - ciliumcidrgroups.cilium.io + - ciliuml2announcementpolicies.cilium.io + - ciliumpodippools.cilium.io resources: - customresourcedefinitions verbs: @@ -329,10 +366,17 @@ rules: - cilium.io resources: - ciliumloadbalancerippools + - ciliumpodippools verbs: - get - list - watch +- apiGroups: + - cilium.io + resources: + - ciliumpodippools + verbs: + - create - apiGroups: - cilium.io resources: @@ -357,6 +401,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium roleRef: @@ -377,6 +422,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium-operator roleRef: @@ -390,6 +436,51 @@ subjects: --- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium + role.kubernetes.io/networking: "1" + name: cilium-config-agent + namespace: kube-system +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium + role.kubernetes.io/networking: "1" + name: cilium-config-agent + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cilium-config-agent +subjects: +- kind: ServiceAccount + name: cilium + namespace: kube-system + +--- + apiVersion: apps/v1 kind: DaemonSet metadata: @@ -397,6 +488,8 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: cilium-agent + app.kubernetes.io/part-of: cilium k8s-app: cilium 
kubernetes.io/cluster-service: "true" role.kubernetes.io/networking: "1" @@ -411,6 +504,8 @@ spec: metadata: creationTimestamp: null labels: + app.kubernetes.io/name: cilium-agent + app.kubernetes.io/part-of: cilium k8s-app: cilium kops.k8s.io/managed-by: kops kubernetes.io/cluster-service: "true" @@ -458,14 +553,34 @@ spec: value: api.internal.privatecilium.example.com - name: KUBERNETES_SERVICE_PORT value: "443" - image: quay.io/cilium/cilium:v1.13.5 + image: quay.io/cilium/cilium:v1.14.3 imagePullPolicy: IfNotPresent lifecycle: postStart: exec: command: - - /cni-install.sh - - --cni-exclusive=true + - bash + - -c + - | + set -o errexit + set -o pipefail + set -o nounset + + # When running in AWS ENI mode, it's likely that 'aws-node' has + # had a chance to install SNAT iptables rules. These can result + # in dropped traffic, so we should attempt to remove them. + # We do it using a 'postStart' hook since this may need to run + # for nodes which might have already been init'ed but may still + # have dangling rules. This is safe because there are no + # dependencies on anything that is part of the startup script + # itself, and can be safely run multiple times per node (e.g. in + # case of a restart). + if [[ "$(iptables-save | grep -c AWS-SNAT-CHAIN)" != "0" ]]; + then + echo 'Deleting iptables rules created by the AWS CNI VPC plugin' + iptables-save | grep -v AWS-SNAT-CHAIN | iptables-restore + fi + echo 'Done!' preStop: exec: command: @@ -484,6 +599,7 @@ spec: successThreshold: 1 timeoutSeconds: 5 name: cilium-agent + ports: null readinessProbe: failureThreshold: 3 httpGet: @@ -494,7 +610,6 @@ spec: path: /healthz port: 9879 scheme: HTTP - initialDelaySeconds: 5 periodSeconds: 30 successThreshold: 1 timeoutSeconds: 5 @@ -515,12 +630,14 @@ spec: port: 9879 scheme: HTTP periodSeconds: 2 - successThreshold: null + successThreshold: 1 terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - mountPath: /sys/fs/bpf mountPropagation: Bidirectional name: bpf-maps + - mountPath: /run/cilium/cgroupv2 + name: cilium-cgroup - mountPath: /var/run/cilium name: cilium-run - mountPath: /host/etc/cni/net.d @@ -536,25 +653,78 @@ spec: readOnly: true - mountPath: /run/xtables.lock name: xtables-lock + - mountPath: /tmp + name: tmp hostNetwork: true initContainers: - command: - - /install-plugin.sh - image: quay.io/cilium/cilium:v1.13.5 + - cilium + - build-config + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: KUBERNETES_SERVICE_HOST + value: api.internal.privatecilium.example.com + - name: KUBERNETES_SERVICE_PORT + value: "443" + image: quay.io/cilium/cilium:v1.14.3 imagePullPolicy: IfNotPresent - name: install-cni-binaries - resources: - requests: - cpu: 100m - memory: 10Mi - securityContext: - capabilities: - drop: - - ALL - terminationMessagePath: /dev/termination-log + name: config terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - - mountPath: /host/opt/cni/bin + - mountPath: /tmp + name: tmp + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-mount /hostbin/cilium-mount; + nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT; + rm /hostbin/cilium-mount + env: + - name: CGROUP_ROOT + value: /run/cilium/cgroupv2 + - name: BIN_PATH + value: /opt/cni/bin + image: quay.io/cilium/cilium:v1.14.3 + imagePullPolicy: IfNotPresent + name: mount-cgroup + securityContext: 
+ privileged: true + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin + name: cni-path + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix; + nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix"; + rm /hostbin/cilium-sysctlfix + env: + - name: BIN_PATH + value: /opt/cni/bin + image: quay.io/cilium/cilium:v1.14.3 + imagePullPolicy: IfNotPresent + name: apply-sysctl-overwrites + securityContext: + privileged: true + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin name: cni-path - command: - /init-container.sh @@ -571,26 +741,43 @@ spec: key: clean-cilium-bpf-state name: cilium-config optional: true - image: quay.io/cilium/cilium:v1.13.5 + - name: KUBERNETES_SERVICE_HOST + value: api.internal.privatecilium.example.com + - name: KUBERNETES_SERVICE_PORT + value: "443" + image: quay.io/cilium/cilium:v1.14.3 imagePullPolicy: IfNotPresent name: clean-cilium-state - resources: - limits: - memory: 100Mi - requests: - cpu: 100m - memory: 100Mi securityContext: privileged: true terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - mountPath: /sys/fs/bpf + mountPropagation: HostToContainer name: bpf-maps - mountPath: /run/cilium/cgroupv2 mountPropagation: HostToContainer name: cilium-cgroup - mountPath: /var/run/cilium name: cilium-run + - command: + - /install-plugin.sh + image: quay.io/cilium/cilium:v1.14.3 + imagePullPolicy: IfNotPresent + name: install-cni-binaries + resources: + requests: + cpu: 100m + memory: 10Mi + securityContext: + capabilities: + drop: + - ALL + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-path priorityClassName: system-node-critical restartPolicy: Always serviceAccount: cilium @@ -599,6 +786,8 @@ spec: tolerations: - operator: Exists volumes: + - emptyDir: {} + name: tmp - hostPath: path: /var/run/cilium type: DirectoryOrCreate @@ -607,14 +796,18 @@ spec: path: /sys/fs/bpf type: DirectoryOrCreate name: bpf-maps + - hostPath: + path: /proc + type: Directory + name: hostproc + - hostPath: + path: /run/cilium/cgroupv2 + type: DirectoryOrCreate + name: cilium-cgroup - hostPath: path: /opt/cni/bin type: DirectoryOrCreate name: cni-path - - hostPath: - path: /run/cilium/cgroupv2 - type: Directory - name: cilium-cgroup - hostPath: path: /etc/cni/net.d type: DirectoryOrCreate @@ -627,10 +820,22 @@ spec: type: FileOrCreate name: xtables-lock - name: clustermesh-secrets - secret: - defaultMode: 420 - optional: true - secretName: cilium-clustermesh + projected: + defaultMode: 256 + sources: + - secret: + name: cilium-clustermesh + optional: true + - secret: + items: + - key: tls.key + path: common-etcd-client.key + - key: tls.crt + path: common-etcd-client.crt + - key: ca.crt + path: common-etcd-client-ca.crt + name: clustermesh-apiserver-remote-cert + optional: true - configMap: name: cilium-config name: cilium-config-path @@ -646,6 +851,8 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: cilium-operator + app.kubernetes.io/part-of: cilium io.cilium/app: operator name: cilium-operator role.kubernetes.io/networking: "1" @@ -666,6 +873,8 @@ spec: metadata: creationTimestamp: null labels: + app.kubernetes.io/name: cilium-operator + app.kubernetes.io/part-of: cilium io.cilium/app: 
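The two init containers added here (mount-cgroup and apply-sysctl-overwrites) share one technique: copy a helper binary from the image into the host's CNI bin directory, nsenter into PID 1's namespaces so the helper runs against the host (mounting cgroup2 at /run/cilium/cgroupv2, or applying Cilium's sysctl overrides), then delete the helper again. Condensed from the hunk above:

    initContainers:
      - name: mount-cgroup
        command:
          - sh
          - -ec
          - |
            cp /usr/bin/cilium-mount /hostbin/cilium-mount            # stage helper on the host
            nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt \
              "${BIN_PATH}/cilium-mount" $CGROUP_ROOT                 # run it in the host's namespaces
            rm /hostbin/cilium-mount                                  # clean up the helper
        securityContext:
          privileged: true

This is also why both containers need privileged: true and why the DaemonSet gains the new hostproc hostPath volume.
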
operator kops.k8s.io/managed-by: kops name: cilium-operator @@ -708,7 +917,7 @@ spec: value: api.internal.privatecilium.example.com - name: KUBERNETES_SERVICE_PORT value: "443" - image: quay.io/cilium/operator:v1.13.5 + image: quay.io/cilium/operator:v1.14.3 imagePullPolicy: IfNotPresent livenessProbe: httpGet: diff --git a/tests/integration/update_cluster/privatecilium-eni/kubernetes.tf b/tests/integration/update_cluster/privatecilium-eni/kubernetes.tf index 71e93ebb1d..b0554750c3 100644 --- a/tests/integration/update_cluster/privatecilium-eni/kubernetes.tf +++ b/tests/integration/update_cluster/privatecilium-eni/kubernetes.tf @@ -1032,7 +1032,7 @@ resource "aws_s3_object" "privatecilium-example-com-addons-limit-range-addons-k8 resource "aws_s3_object" "privatecilium-example-com-addons-networking-cilium-io-k8s-1-16" { bucket = "testingBucket" content = file("${path.module}/data/aws_s3_object_privatecilium.example.com-addons-networking.cilium.io-k8s-1.16_content") - key = "clusters.example.com/privatecilium.example.com/addons/networking.cilium.io/k8s-1.16-v1.13.yaml" + key = "clusters.example.com/privatecilium.example.com/addons/networking.cilium.io/k8s-1.16-v1.14.yaml" provider = aws.files server_side_encryption = "AES256" } diff --git a/tests/integration/update_cluster/privatecilium/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/privatecilium/data/aws_s3_object_cluster-completed.spec_content index bf5024fe76..f0d079561b 100644 --- a/tests/integration/update_cluster/privatecilium/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/privatecilium/data/aws_s3_object_cluster-completed.spec_content @@ -228,7 +228,7 @@ spec: sidecarIstioProxyImage: cilium/istio_proxy toFqdnsDnsRejectResponseCode: refused tunnel: vxlan - version: v1.13.5 + version: v1.14.3 nodeTerminationHandler: cpuRequest: 50m enableRebalanceDraining: false diff --git a/tests/integration/update_cluster/privatecilium/data/aws_s3_object_privatecilium.example.com-addons-bootstrap_content b/tests/integration/update_cluster/privatecilium/data/aws_s3_object_privatecilium.example.com-addons-bootstrap_content index 61596698b6..9a959b39ab 100644 --- a/tests/integration/update_cluster/privatecilium/data/aws_s3_object_privatecilium.example.com-addons-bootstrap_content +++ b/tests/integration/update_cluster/privatecilium/data/aws_s3_object_privatecilium.example.com-addons-bootstrap_content @@ -98,8 +98,8 @@ spec: k8s-addon: storage-aws.addons.k8s.io version: 9.99.0 - id: k8s-1.16 - manifest: networking.cilium.io/k8s-1.16-v1.13.yaml - manifestHash: 1bcf01aca5730c31ac7b86d72831968485235c479566cf6a26da17ede4f0c351 + manifest: networking.cilium.io/k8s-1.16-v1.14.yaml + manifestHash: 0d92f3aaa5fcfde3239fba0d07f4d264580c460d4a5e9c2463f8e2b20434c479 name: networking.cilium.io needsRollingUpdate: all selector: diff --git a/tests/integration/update_cluster/privatecilium/data/aws_s3_object_privatecilium.example.com-addons-networking.cilium.io-k8s-1.16_content b/tests/integration/update_cluster/privatecilium/data/aws_s3_object_privatecilium.example.com-addons-networking.cilium.io-k8s-1.16_content index 41754e389f..d942bc1c28 100644 --- a/tests/integration/update_cluster/privatecilium/data/aws_s3_object_privatecilium.example.com-addons-networking.cilium.io-k8s-1.16_content +++ b/tests/integration/update_cluster/privatecilium/data/aws_s3_object_privatecilium.example.com-addons-networking.cilium.io-k8s-1.16_content @@ -39,6 +39,8 @@ data: bpf-policy-map-max: "16384" cgroup-root: 
/run/cilium/cgroupv2 cluster-name: default + cni-exclusive: "true" + cni-log-file: /var/run/cilium/cilium-cni.log debug: "false" disable-cnp-status-updates: "true" disable-endpoint-crd: "false" @@ -57,14 +59,19 @@ data: identity-change-grace-period: 5s install-iptables-rules: "true" ipam: kubernetes - kube-proxy-replacement: partial + kube-proxy-replacement: "false" monitor-aggregation: medium nodes-gc-interval: 5m0s preallocate-bpf-maps: "false" + remove-cilium-node-taints: "true" + routing-mode: tunnel + set-cilium-is-up-condition: "true" + set-cilium-node-taints: "true" sidecar-istio-proxy-image: cilium/istio_proxy tofqdns-dns-reject-response-code: refused tofqdns-enable-poller: "false" - tunnel: vxlan + tunnel-protocol: vxlan + write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist kind: ConfigMap metadata: creationTimestamp: null @@ -84,6 +91,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium rules: @@ -131,7 +139,6 @@ rules: - ciliumclusterwideenvoyconfigs - ciliumclusterwidenetworkpolicies - ciliumegressgatewaypolicies - - ciliumegressnatpolicies - ciliumendpoints - ciliumendpointslices - ciliumenvoyconfigs @@ -139,6 +146,10 @@ rules: - ciliumlocalredirectpolicies - ciliumnetworkpolicies - ciliumnodes + - ciliumnodeconfigs + - ciliumcidrgroups + - ciliuml2announcementpolicies + - ciliumpodippools verbs: - list - watch @@ -178,6 +189,7 @@ rules: - ciliumclusterwidenetworkpolicies/status - ciliumendpoints/status - ciliumendpoints + - ciliuml2announcementpolicies/status verbs: - patch @@ -190,6 +202,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium-operator rules: @@ -201,6 +214,21 @@ rules: - get - list - watch + - delete +- apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - nodes + - nodes/status + verbs: + - patch - apiGroups: - discovery.k8s.io resources: @@ -212,8 +240,16 @@ rules: - apiGroups: - "" resources: - - nodes + - services/status verbs: + - update + - patch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get - list - watch - apiGroups: @@ -221,7 +257,6 @@ rules: resources: - services - endpoints - - namespaces verbs: - get - list @@ -309,7 +344,6 @@ rules: - ciliumclusterwideenvoyconfigs.cilium.io - ciliumclusterwidenetworkpolicies.cilium.io - ciliumegressgatewaypolicies.cilium.io - - ciliumegressnatpolicies.cilium.io - ciliumendpoints.cilium.io - ciliumendpointslices.cilium.io - ciliumenvoyconfigs.cilium.io @@ -318,6 +352,10 @@ rules: - ciliumlocalredirectpolicies.cilium.io - ciliumnetworkpolicies.cilium.io - ciliumnodes.cilium.io + - ciliumnodeconfigs.cilium.io + - ciliumcidrgroups.cilium.io + - ciliuml2announcementpolicies.cilium.io + - ciliumpodippools.cilium.io resources: - customresourcedefinitions verbs: @@ -326,10 +364,17 @@ rules: - cilium.io resources: - ciliumloadbalancerippools + - ciliumpodippools verbs: - get - list - watch +- apiGroups: + - cilium.io + resources: + - ciliumpodippools + verbs: + - create - apiGroups: - cilium.io resources: @@ -354,6 +399,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium roleRef: @@ -374,6 +420,7 @@ metadata: labels: addon.kops.k8s.io/name: 
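Across all of these fixture ConfigMaps the same 1.13 → 1.14 key migration applies; collected in one place for reference (values illustrative):

    # Cilium 1.13                          Cilium 1.14
    # tunnel: vxlan | geneve          ->   routing-mode: tunnel
    #                                      tunnel-protocol: vxlan | geneve
    # tunnel: disabled                ->   routing-mode: native
    # kube-proxy-replacement: partial ->   kube-proxy-replacement: "false"
    # kube-proxy-replacement: strict  ->   kube-proxy-replacement: "true"

The new remove-cilium-node-taints / set-cilium-node-taints / set-cilium-is-up-condition keys move node taint and readiness-condition handling into the operator.
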
networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium-operator roleRef: @@ -387,6 +434,51 @@ subjects: --- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium + role.kubernetes.io/networking: "1" + name: cilium-config-agent + namespace: kube-system +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium + role.kubernetes.io/networking: "1" + name: cilium-config-agent + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cilium-config-agent +subjects: +- kind: ServiceAccount + name: cilium + namespace: kube-system + +--- + apiVersion: apps/v1 kind: DaemonSet metadata: @@ -394,6 +486,8 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: cilium-agent + app.kubernetes.io/part-of: cilium k8s-app: cilium kubernetes.io/cluster-service: "true" role.kubernetes.io/networking: "1" @@ -412,6 +506,8 @@ spec: test3: awesome creationTimestamp: null labels: + app.kubernetes.io/name: cilium-agent + app.kubernetes.io/part-of: cilium k8s-app: cilium kops.k8s.io/managed-by: kops kubernetes.io/cluster-service: "true" @@ -459,14 +555,9 @@ spec: value: api.internal.privatecilium.example.com - name: KUBERNETES_SERVICE_PORT value: "443" - image: quay.io/cilium/cilium:v1.13.5 + image: quay.io/cilium/cilium:v1.14.3 imagePullPolicy: IfNotPresent lifecycle: - postStart: - exec: - command: - - /cni-install.sh - - --cni-exclusive=true preStop: exec: command: @@ -485,6 +576,7 @@ spec: successThreshold: 1 timeoutSeconds: 5 name: cilium-agent + ports: null readinessProbe: failureThreshold: 3 httpGet: @@ -495,7 +587,6 @@ spec: path: /healthz port: 9879 scheme: HTTP - initialDelaySeconds: 5 periodSeconds: 30 successThreshold: 1 timeoutSeconds: 5 @@ -516,12 +607,14 @@ spec: port: 9879 scheme: HTTP periodSeconds: 2 - successThreshold: null + successThreshold: 1 terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - mountPath: /sys/fs/bpf mountPropagation: Bidirectional name: bpf-maps + - mountPath: /run/cilium/cgroupv2 + name: cilium-cgroup - mountPath: /var/run/cilium name: cilium-run - mountPath: /host/etc/cni/net.d @@ -537,25 +630,78 @@ spec: readOnly: true - mountPath: /run/xtables.lock name: xtables-lock + - mountPath: /tmp + name: tmp hostNetwork: true initContainers: - command: - - /install-plugin.sh - image: quay.io/cilium/cilium:v1.13.5 + - cilium + - build-config + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: KUBERNETES_SERVICE_HOST + value: api.internal.privatecilium.example.com + - name: KUBERNETES_SERVICE_PORT + value: "443" + image: quay.io/cilium/cilium:v1.14.3 imagePullPolicy: IfNotPresent - name: install-cni-binaries - resources: - requests: - cpu: 100m - memory: 10Mi - securityContext: - capabilities: - drop: - - ALL - terminationMessagePath: /dev/termination-log + name: config terminationMessagePolicy: 
FallbackToLogsOnError volumeMounts: - - mountPath: /host/opt/cni/bin + - mountPath: /tmp + name: tmp + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-mount /hostbin/cilium-mount; + nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT; + rm /hostbin/cilium-mount + env: + - name: CGROUP_ROOT + value: /run/cilium/cgroupv2 + - name: BIN_PATH + value: /opt/cni/bin + image: quay.io/cilium/cilium:v1.14.3 + imagePullPolicy: IfNotPresent + name: mount-cgroup + securityContext: + privileged: true + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin + name: cni-path + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix; + nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix"; + rm /hostbin/cilium-sysctlfix + env: + - name: BIN_PATH + value: /opt/cni/bin + image: quay.io/cilium/cilium:v1.14.3 + imagePullPolicy: IfNotPresent + name: apply-sysctl-overwrites + securityContext: + privileged: true + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin name: cni-path - command: - /init-container.sh @@ -572,26 +718,43 @@ spec: key: clean-cilium-bpf-state name: cilium-config optional: true - image: quay.io/cilium/cilium:v1.13.5 + - name: KUBERNETES_SERVICE_HOST + value: api.internal.privatecilium.example.com + - name: KUBERNETES_SERVICE_PORT + value: "443" + image: quay.io/cilium/cilium:v1.14.3 imagePullPolicy: IfNotPresent name: clean-cilium-state - resources: - limits: - memory: 100Mi - requests: - cpu: 100m - memory: 100Mi securityContext: privileged: true terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - mountPath: /sys/fs/bpf + mountPropagation: HostToContainer name: bpf-maps - mountPath: /run/cilium/cgroupv2 mountPropagation: HostToContainer name: cilium-cgroup - mountPath: /var/run/cilium name: cilium-run + - command: + - /install-plugin.sh + image: quay.io/cilium/cilium:v1.14.3 + imagePullPolicy: IfNotPresent + name: install-cni-binaries + resources: + requests: + cpu: 100m + memory: 10Mi + securityContext: + capabilities: + drop: + - ALL + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-path priorityClassName: system-node-critical restartPolicy: Always serviceAccount: cilium @@ -600,6 +763,8 @@ spec: tolerations: - operator: Exists volumes: + - emptyDir: {} + name: tmp - hostPath: path: /var/run/cilium type: DirectoryOrCreate @@ -608,14 +773,18 @@ spec: path: /sys/fs/bpf type: DirectoryOrCreate name: bpf-maps + - hostPath: + path: /proc + type: Directory + name: hostproc + - hostPath: + path: /run/cilium/cgroupv2 + type: DirectoryOrCreate + name: cilium-cgroup - hostPath: path: /opt/cni/bin type: DirectoryOrCreate name: cni-path - - hostPath: - path: /run/cilium/cgroupv2 - type: Directory - name: cilium-cgroup - hostPath: path: /etc/cni/net.d type: DirectoryOrCreate @@ -628,10 +797,22 @@ spec: type: FileOrCreate name: xtables-lock - name: clustermesh-secrets - secret: - defaultMode: 420 - optional: true - secretName: cilium-clustermesh + projected: + defaultMode: 256 + sources: + - secret: + name: cilium-clustermesh + optional: true + - secret: + items: + - key: tls.key + path: common-etcd-client.key + - key: tls.crt + path: common-etcd-client.crt + - key: ca.crt + path: common-etcd-client-ca.crt + name: 
clustermesh-apiserver-remote-cert + optional: true - configMap: name: cilium-config name: cilium-config-path @@ -647,6 +828,8 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: cilium-operator + app.kubernetes.io/part-of: cilium io.cilium/app: operator name: cilium-operator role.kubernetes.io/networking: "1" @@ -671,6 +854,8 @@ spec: test3: cilium-operator creationTimestamp: null labels: + app.kubernetes.io/name: cilium-operator + app.kubernetes.io/part-of: cilium io.cilium/app: operator kops.k8s.io/managed-by: kops name: cilium-operator @@ -713,7 +898,7 @@ spec: value: api.internal.privatecilium.example.com - name: KUBERNETES_SERVICE_PORT value: "443" - image: quay.io/cilium/operator:v1.13.5 + image: quay.io/cilium/operator:v1.14.3 imagePullPolicy: IfNotPresent livenessProbe: httpGet: diff --git a/tests/integration/update_cluster/privatecilium/kubernetes.tf b/tests/integration/update_cluster/privatecilium/kubernetes.tf index 71e93ebb1d..b0554750c3 100644 --- a/tests/integration/update_cluster/privatecilium/kubernetes.tf +++ b/tests/integration/update_cluster/privatecilium/kubernetes.tf @@ -1032,7 +1032,7 @@ resource "aws_s3_object" "privatecilium-example-com-addons-limit-range-addons-k8 resource "aws_s3_object" "privatecilium-example-com-addons-networking-cilium-io-k8s-1-16" { bucket = "testingBucket" content = file("${path.module}/data/aws_s3_object_privatecilium.example.com-addons-networking.cilium.io-k8s-1.16_content") - key = "clusters.example.com/privatecilium.example.com/addons/networking.cilium.io/k8s-1.16-v1.13.yaml" + key = "clusters.example.com/privatecilium.example.com/addons/networking.cilium.io/k8s-1.16-v1.14.yaml" provider = aws.files server_side_encryption = "AES256" } diff --git a/tests/integration/update_cluster/privatecilium2/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/privatecilium2/data/aws_s3_object_cluster-completed.spec_content index 3bcf34deea..097e64f09f 100644 --- a/tests/integration/update_cluster/privatecilium2/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/privatecilium2/data/aws_s3_object_cluster-completed.spec_content @@ -229,7 +229,7 @@ spec: sidecarIstioProxyImage: cilium/istio_proxy toFqdnsDnsRejectResponseCode: refused tunnel: vxlan - version: v1.13.5 + version: v1.14.3 nodeTerminationHandler: cpuRequest: 50m enableRebalanceDraining: false diff --git a/tests/integration/update_cluster/privatecilium2/data/aws_s3_object_privatecilium.example.com-addons-bootstrap_content b/tests/integration/update_cluster/privatecilium2/data/aws_s3_object_privatecilium.example.com-addons-bootstrap_content index 8270f07a17..fe46fb5a81 100644 --- a/tests/integration/update_cluster/privatecilium2/data/aws_s3_object_privatecilium.example.com-addons-bootstrap_content +++ b/tests/integration/update_cluster/privatecilium2/data/aws_s3_object_privatecilium.example.com-addons-bootstrap_content @@ -161,8 +161,8 @@ spec: k8s-addon: storage-aws.addons.k8s.io version: 9.99.0 - id: k8s-1.16 - manifest: networking.cilium.io/k8s-1.16-v1.13.yaml - manifestHash: ba5c764f4ddeb058c0dc7fd9287d445a6a3e8f186dbac9d63daf56770d81c24c + manifest: networking.cilium.io/k8s-1.16-v1.14.yaml + manifestHash: 3d77641c2e3c89adfb55bf74f41d865e0af29ff859eb75b2795cbe915d73b827 name: networking.cilium.io needsPKI: true needsRollingUpdate: all diff --git 
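One easy-to-miss change in the volumes hunks above: clustermesh-secrets moves from a plain secret volume with defaultMode 420 (decimal for octal 0644) to a projected volume with defaultMode 256 (octal 0400), so the mesh and etcd client keys become readable by the owner only, and the optional clustermesh-apiserver-remote-cert items are layered into the same mount:

    - name: clustermesh-secrets        # shape per the hunk above
      projected:
        defaultMode: 256               # 256 decimal == 0400 octal: owner read-only
        sources:
          - secret:
              name: cilium-clustermesh
              optional: true
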
a/tests/integration/update_cluster/privatecilium2/data/aws_s3_object_privatecilium.example.com-addons-networking.cilium.io-k8s-1.16_content b/tests/integration/update_cluster/privatecilium2/data/aws_s3_object_privatecilium.example.com-addons-networking.cilium.io-k8s-1.16_content index 15d23aab41..d1a728fedf 100644 --- a/tests/integration/update_cluster/privatecilium2/data/aws_s3_object_privatecilium.example.com-addons-networking.cilium.io-k8s-1.16_content +++ b/tests/integration/update_cluster/privatecilium2/data/aws_s3_object_privatecilium.example.com-addons-networking.cilium.io-k8s-1.16_content @@ -53,6 +53,8 @@ data: cgroup-root: /run/cilium/cgroupv2 cluster-id: "253" cluster-name: privatecilium.example.com + cni-exclusive: "true" + cni-log-file: /var/run/cilium/cilium-cni.log debug: "false" disable-cnp-status-updates: "true" disable-endpoint-crd: "false" @@ -87,14 +89,19 @@ data: ingress-shared-lb-service-name: private-ingress install-iptables-rules: "true" ipam: kubernetes - kube-proxy-replacement: partial + kube-proxy-replacement: "false" monitor-aggregation: medium nodes-gc-interval: 5m0s preallocate-bpf-maps: "false" + remove-cilium-node-taints: "true" + routing-mode: tunnel + set-cilium-is-up-condition: "true" + set-cilium-node-taints: "true" sidecar-istio-proxy-image: cilium/istio_proxy tofqdns-dns-reject-response-code: refused tofqdns-enable-poller: "false" - tunnel: vxlan + tunnel-protocol: vxlan + write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist kind: ConfigMap metadata: creationTimestamp: null @@ -109,8 +116,9 @@ metadata: apiVersion: v1 data: - config.yaml: | - peer-service: unix:///var/run/cilium/hubble.sock + config.yaml: |- + cluster-name: "privatecilium.example.com" + peer-service: "hubble-peer.kube-system.svc.cluster.local:443" listen-address: :4245 disable-server-tls: true @@ -130,6 +138,31 @@ metadata: --- +apiVersion: v1 +kind: Service +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: hubble-peer + app.kubernetes.io/part-of: cilium + k8s-app: cilium + role.kubernetes.io/networking: "1" + name: hubble-peer + namespace: kube-system +spec: + internalTrafficPolicy: Local + ports: + - name: peer-service + port: 443 + protocol: TCP + targetPort: 4244 + selector: + k8s-app: cilium + +--- + apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -137,6 +170,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium rules: @@ -184,7 +218,6 @@ rules: - ciliumclusterwideenvoyconfigs - ciliumclusterwidenetworkpolicies - ciliumegressgatewaypolicies - - ciliumegressnatpolicies - ciliumendpoints - ciliumendpointslices - ciliumenvoyconfigs @@ -192,6 +225,10 @@ rules: - ciliumlocalredirectpolicies - ciliumnetworkpolicies - ciliumnodes + - ciliumnodeconfigs + - ciliumcidrgroups + - ciliuml2announcementpolicies + - ciliumpodippools verbs: - list - watch @@ -231,6 +268,7 @@ rules: - ciliumclusterwidenetworkpolicies/status - ciliumendpoints/status - ciliumendpoints + - ciliuml2announcementpolicies/status verbs: - patch @@ -243,6 +281,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium-operator rules: @@ -254,6 +293,21 @@ rules: - get - list - watch + - delete +- apiGroups: + - "" + resources: 
+ - nodes + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - nodes + - nodes/status + verbs: + - patch - apiGroups: - discovery.k8s.io resources: @@ -265,8 +319,16 @@ rules: - apiGroups: - "" resources: - - nodes + - services/status verbs: + - update + - patch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get - list - watch - apiGroups: @@ -274,7 +336,6 @@ rules: resources: - services - endpoints - - namespaces verbs: - get - list @@ -362,7 +423,6 @@ rules: - ciliumclusterwideenvoyconfigs.cilium.io - ciliumclusterwidenetworkpolicies.cilium.io - ciliumegressgatewaypolicies.cilium.io - - ciliumegressnatpolicies.cilium.io - ciliumendpoints.cilium.io - ciliumendpointslices.cilium.io - ciliumenvoyconfigs.cilium.io @@ -371,6 +431,10 @@ rules: - ciliumlocalredirectpolicies.cilium.io - ciliumnetworkpolicies.cilium.io - ciliumnodes.cilium.io + - ciliumnodeconfigs.cilium.io + - ciliumcidrgroups.cilium.io + - ciliuml2announcementpolicies.cilium.io + - ciliumpodippools.cilium.io resources: - customresourcedefinitions verbs: @@ -379,10 +443,17 @@ rules: - cilium.io resources: - ciliumloadbalancerippools + - ciliumpodippools verbs: - get - list - watch +- apiGroups: + - cilium.io + resources: + - ciliumpodippools + verbs: + - create - apiGroups: - cilium.io resources: @@ -422,6 +493,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium roleRef: @@ -442,6 +514,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium-operator roleRef: @@ -455,6 +528,51 @@ subjects: --- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium + role.kubernetes.io/networking: "1" + name: cilium-config-agent + namespace: kube-system +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium + role.kubernetes.io/networking: "1" + name: cilium-config-agent + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cilium-config-agent +subjects: +- kind: ServiceAccount + name: cilium + namespace: kube-system + +--- + apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: @@ -567,6 +685,8 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: hubble-relay + app.kubernetes.io/part-of: cilium k8s-app: hubble-relay role.kubernetes.io/networking: "1" name: hubble-relay @@ -589,6 +709,8 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: cilium-agent + app.kubernetes.io/part-of: cilium k8s-app: cilium kubernetes.io/cluster-service: "true" role.kubernetes.io/networking: "1" @@ -603,6 +725,8 @@ spec: metadata: creationTimestamp: null labels: + app.kubernetes.io/name: cilium-agent + app.kubernetes.io/part-of: cilium k8s-app: cilium kops.k8s.io/managed-by: kops kubernetes.io/cluster-service: "true" @@ -650,14 +774,9 @@ spec: value: 
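The cilium-operator ClusterRole additions here track upstream 1.14 behavior: the operator may now delete unmanaged kube-dns/coredns pods so they restart under Cilium's management, list/watch and patch nodes and nodes/status (taint removal and clearing the NetworkUnavailable condition), and update/patch services/status for LB IP allocation. Condensed from the hunks (not an additional grant):

    rules:
      - apiGroups: [""]
        resources: [pods]
        verbs: [delete]                # restart [core|kube]dns pods under Cilium
      - apiGroups: [""]
        resources: [nodes]
        verbs: [list, watch]
      - apiGroups: [""]
        resources: [nodes, nodes/status]
        verbs: [patch]                 # remove node taints, set NetworkUnavailable false
      - apiGroups: [""]
        resources: [services/status]
        verbs: [update, patch]         # LB IP allocation (e.g. BGP)
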
api.internal.privatecilium.example.com - name: KUBERNETES_SERVICE_PORT value: "443" - image: quay.io/cilium/cilium:v1.13.5 + image: quay.io/cilium/cilium:v1.14.3 imagePullPolicy: IfNotPresent lifecycle: - postStart: - exec: - command: - - /cni-install.sh - - --cni-exclusive=true preStop: exec: command: @@ -676,6 +795,11 @@ spec: successThreshold: 1 timeoutSeconds: 5 name: cilium-agent + ports: + - containerPort: 4244 + hostPort: 4244 + name: peer-service + protocol: TCP readinessProbe: failureThreshold: 3 httpGet: @@ -686,7 +810,6 @@ spec: path: /healthz port: 9879 scheme: HTTP - initialDelaySeconds: 5 periodSeconds: 30 successThreshold: 1 timeoutSeconds: 5 @@ -707,12 +830,14 @@ spec: port: 9879 scheme: HTTP periodSeconds: 2 - successThreshold: null + successThreshold: 1 terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - mountPath: /sys/fs/bpf mountPropagation: Bidirectional name: bpf-maps + - mountPath: /run/cilium/cgroupv2 + name: cilium-cgroup - mountPath: /var/run/cilium name: cilium-run - mountPath: /host/etc/cni/net.d @@ -728,28 +853,81 @@ spec: readOnly: true - mountPath: /run/xtables.lock name: xtables-lock + - mountPath: /tmp + name: tmp - mountPath: /var/lib/cilium/tls/hubble name: hubble-tls readOnly: true hostNetwork: true initContainers: - command: - - /install-plugin.sh - image: quay.io/cilium/cilium:v1.13.5 + - cilium + - build-config + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: KUBERNETES_SERVICE_HOST + value: api.internal.privatecilium.example.com + - name: KUBERNETES_SERVICE_PORT + value: "443" + image: quay.io/cilium/cilium:v1.14.3 imagePullPolicy: IfNotPresent - name: install-cni-binaries - resources: - requests: - cpu: 100m - memory: 10Mi - securityContext: - capabilities: - drop: - - ALL - terminationMessagePath: /dev/termination-log + name: config terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - - mountPath: /host/opt/cni/bin + - mountPath: /tmp + name: tmp + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-mount /hostbin/cilium-mount; + nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT; + rm /hostbin/cilium-mount + env: + - name: CGROUP_ROOT + value: /run/cilium/cgroupv2 + - name: BIN_PATH + value: /opt/cni/bin + image: quay.io/cilium/cilium:v1.14.3 + imagePullPolicy: IfNotPresent + name: mount-cgroup + securityContext: + privileged: true + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin + name: cni-path + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix; + nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix"; + rm /hostbin/cilium-sysctlfix + env: + - name: BIN_PATH + value: /opt/cni/bin + image: quay.io/cilium/cilium:v1.14.3 + imagePullPolicy: IfNotPresent + name: apply-sysctl-overwrites + securityContext: + privileged: true + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin name: cni-path - command: - /init-container.sh @@ -766,26 +944,43 @@ spec: key: clean-cilium-bpf-state name: cilium-config optional: true - image: quay.io/cilium/cilium:v1.13.5 + - name: KUBERNETES_SERVICE_HOST + value: api.internal.privatecilium.example.com + - name: KUBERNETES_SERVICE_PORT + value: "443" + image: 
quay.io/cilium/cilium:v1.14.3 imagePullPolicy: IfNotPresent name: clean-cilium-state - resources: - limits: - memory: 100Mi - requests: - cpu: 100m - memory: 100Mi securityContext: privileged: true terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - mountPath: /sys/fs/bpf + mountPropagation: HostToContainer name: bpf-maps - mountPath: /run/cilium/cgroupv2 mountPropagation: HostToContainer name: cilium-cgroup - mountPath: /var/run/cilium name: cilium-run + - command: + - /install-plugin.sh + image: quay.io/cilium/cilium:v1.14.3 + imagePullPolicy: IfNotPresent + name: install-cni-binaries + resources: + requests: + cpu: 100m + memory: 10Mi + securityContext: + capabilities: + drop: + - ALL + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-path priorityClassName: system-node-critical restartPolicy: Always serviceAccount: cilium @@ -794,6 +989,8 @@ spec: tolerations: - operator: Exists volumes: + - emptyDir: {} + name: tmp - hostPath: path: /var/run/cilium type: DirectoryOrCreate @@ -802,14 +999,18 @@ spec: path: /sys/fs/bpf type: DirectoryOrCreate name: bpf-maps + - hostPath: + path: /proc + type: Directory + name: hostproc + - hostPath: + path: /run/cilium/cgroupv2 + type: DirectoryOrCreate + name: cilium-cgroup - hostPath: path: /opt/cni/bin type: DirectoryOrCreate name: cni-path - - hostPath: - path: /run/cilium/cgroupv2 - type: Directory - name: cilium-cgroup - hostPath: path: /etc/cni/net.d type: DirectoryOrCreate @@ -822,17 +1023,32 @@ spec: type: FileOrCreate name: xtables-lock - name: clustermesh-secrets - secret: - defaultMode: 420 - optional: true - secretName: cilium-clustermesh + projected: + defaultMode: 256 + sources: + - secret: + name: cilium-clustermesh + optional: true + - secret: + items: + - key: tls.key + path: common-etcd-client.key + - key: tls.crt + path: common-etcd-client.crt + - key: ca.crt + path: common-etcd-client-ca.crt + name: clustermesh-apiserver-remote-cert + optional: true - configMap: name: cilium-config name: cilium-config-path - name: hubble-tls - secret: - optional: true - secretName: hubble-server-certs + projected: + defaultMode: 256 + sources: + - secret: + name: hubble-server-certs + optional: true updateStrategy: type: OnDelete @@ -845,6 +1061,8 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: cilium-operator + app.kubernetes.io/part-of: cilium io.cilium/app: operator name: cilium-operator role.kubernetes.io/networking: "1" @@ -865,6 +1083,8 @@ spec: metadata: creationTimestamp: null labels: + app.kubernetes.io/name: cilium-operator + app.kubernetes.io/part-of: cilium io.cilium/app: operator kops.k8s.io/managed-by: kops name: cilium-operator @@ -907,7 +1127,7 @@ spec: value: api.internal.privatecilium.example.com - name: KUBERNETES_SERVICE_PORT value: "443" - image: quay.io/cilium/operator:v1.13.5 + image: quay.io/cilium/operator:v1.14.3 imagePullPolicy: IfNotPresent livenessProbe: httpGet: @@ -965,6 +1185,8 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: hubble-relay + app.kubernetes.io/part-of: cilium k8s-app: hubble-relay role.kubernetes.io/networking: "1" name: hubble-relay @@ -982,20 +1204,24 @@ spec: metadata: creationTimestamp: null labels: + app.kubernetes.io/name: hubble-relay + app.kubernetes.io/part-of: cilium k8s-app: hubble-relay kops.k8s.io/managed-by: kops spec: 
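Hubble Relay no longer mounts the agent's unix socket. It now dials agents through the new hubble-peer Service (port 443, targetPort 4244, internalTrafficPolicy: Local, backed by the agent's peer-service hostPort), which is why the relay pod below gains podAffinity to a co-located cilium agent and the hubble-sock-dir hostPath disappears. The rendered relay config, per the earlier hunk (cluster name illustrative):

    cluster-name: "privatecilium.example.com"
    peer-service: "hubble-peer.kube-system.svc.cluster.local:443"   # -> agent hostPort 4244
    listen-address: :4245
    disable-server-tls: true
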
+ affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + k8s-app: cilium + topologyKey: kubernetes.io/hostname containers: - args: - serve - - --peer-service=unix:///var/run/cilium/hubble.sock - - --listen-address=:4245 command: - hubble-relay - env: - - name: GODEBUG - value: x509ignoreCN=0 - image: quay.io/cilium/hubble-relay:v1.13.5 + image: quay.io/cilium/hubble-relay:v1.14.3 imagePullPolicy: IfNotPresent livenessProbe: tcpSocket: @@ -1007,11 +1233,15 @@ spec: readinessProbe: tcpSocket: port: grpc + securityContext: + capabilities: + drop: + - ALL + runAsGroup: 65532 + runAsNonRoot: true + runAsUser: 65532 terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - - mountPath: /var/run/cilium - name: hubble-sock-dir - readOnly: true - mountPath: /etc/hubble-relay name: config readOnly: true @@ -1019,6 +1249,8 @@ spec: name: tls readOnly: true restartPolicy: Always + securityContext: + fsGroup: 65532 serviceAccount: hubble-relay serviceAccountName: hubble-relay terminationGracePeriodSeconds: 0 @@ -1036,10 +1268,6 @@ spec: topologyKey: kubernetes.io/hostname whenUnsatisfiable: DoNotSchedule volumes: - - hostPath: - path: /var/run/cilium - type: Directory - name: hubble-sock-dir - configMap: items: - key: config.yaml @@ -1048,6 +1276,7 @@ spec: name: config - name: tls projected: + defaultMode: 256 sources: - secret: items: @@ -1089,6 +1318,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium k8s-app: cilium role.kubernetes.io/networking: "1" name: hubble-relay-client-certs diff --git a/tests/integration/update_cluster/privatecilium2/kubernetes.tf b/tests/integration/update_cluster/privatecilium2/kubernetes.tf index 6b22a6111e..10d9ea6f44 100644 --- a/tests/integration/update_cluster/privatecilium2/kubernetes.tf +++ b/tests/integration/update_cluster/privatecilium2/kubernetes.tf @@ -1048,7 +1048,7 @@ resource "aws_s3_object" "privatecilium-example-com-addons-limit-range-addons-k8 resource "aws_s3_object" "privatecilium-example-com-addons-networking-cilium-io-k8s-1-16" { bucket = "testingBucket" content = file("${path.module}/data/aws_s3_object_privatecilium.example.com-addons-networking.cilium.io-k8s-1.16_content") - key = "clusters.example.com/privatecilium.example.com/addons/networking.cilium.io/k8s-1.16-v1.13.yaml" + key = "clusters.example.com/privatecilium.example.com/addons/networking.cilium.io/k8s-1.16-v1.14.yaml" provider = aws.files server_side_encryption = "AES256" } diff --git a/tests/integration/update_cluster/privateciliumadvanced/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/privateciliumadvanced/data/aws_s3_object_cluster-completed.spec_content index 6cdad8fed9..9c0e1e7d75 100644 --- a/tests/integration/update_cluster/privateciliumadvanced/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/privateciliumadvanced/data/aws_s3_object_cluster-completed.spec_content @@ -232,7 +232,7 @@ spec: sidecarIstioProxyImage: cilium/istio_proxy toFqdnsDnsRejectResponseCode: refused tunnel: disabled - version: v1.13.5 + version: v1.14.3 nodeTerminationHandler: cpuRequest: 50m enableRebalanceDraining: false diff --git a/tests/integration/update_cluster/privateciliumadvanced/data/aws_s3_object_privateciliumadvanced.example.com-addons-bootstrap_content 
b/tests/integration/update_cluster/privateciliumadvanced/data/aws_s3_object_privateciliumadvanced.example.com-addons-bootstrap_content index 4dc20bdb4d..aece7a3747 100644 --- a/tests/integration/update_cluster/privateciliumadvanced/data/aws_s3_object_privateciliumadvanced.example.com-addons-bootstrap_content +++ b/tests/integration/update_cluster/privateciliumadvanced/data/aws_s3_object_privateciliumadvanced.example.com-addons-bootstrap_content @@ -98,8 +98,8 @@ spec: k8s-addon: storage-aws.addons.k8s.io version: 9.99.0 - id: k8s-1.16 - manifest: networking.cilium.io/k8s-1.16-v1.13.yaml - manifestHash: c6b553b26348b9bda91297615c885dfeb20ec41a56cdeedcf433255bd62d4d58 + manifest: networking.cilium.io/k8s-1.16-v1.14.yaml + manifestHash: 1dc85a0c4d6148f60695875f169977272f69564eb1ee8a5cf6c4c7687376449d name: networking.cilium.io needsRollingUpdate: all selector: diff --git a/tests/integration/update_cluster/privateciliumadvanced/data/aws_s3_object_privateciliumadvanced.example.com-addons-networking.cilium.io-k8s-1.16_content b/tests/integration/update_cluster/privateciliumadvanced/data/aws_s3_object_privateciliumadvanced.example.com-addons-networking.cilium.io-k8s-1.16_content index c6f3d54f17..65c81dd427 100644 --- a/tests/integration/update_cluster/privateciliumadvanced/data/aws_s3_object_privateciliumadvanced.example.com-addons-networking.cilium.io-k8s-1.16_content +++ b/tests/integration/update_cluster/privateciliumadvanced/data/aws_s3_object_privateciliumadvanced.example.com-addons-networking.cilium.io-k8s-1.16_content @@ -29,7 +29,6 @@ data: agent-health-port: "9879" auto-create-cilium-node-resource: "true" auto-direct-node-routes: "false" - blacklist-conflicting-routes: "false" bpf-ct-global-any-max: "262144" bpf-ct-global-tcp-max: "524288" bpf-lb-algorithm: random @@ -41,6 +40,8 @@ data: bpf-policy-map-max: "16384" cgroup-root: /run/cilium/cgroupv2 cluster-name: default + cni-exclusive: "true" + cni-log-file: /var/run/cilium/cilium-cni.log debug: "false" disable-cnp-status-updates: "true" disable-endpoint-crd: "false" @@ -57,6 +58,7 @@ data: enable-remote-node-identity: "true" enable-service-topology: "false" enable-unreachable-routes: "false" + eni-tags: KubernetesCluster=privateciliumadvanced.example.com etcd-config: |- --- endpoints: @@ -69,16 +71,20 @@ data: identity-change-grace-period: 5s install-iptables-rules: "true" ipam: eni - kube-proxy-replacement: strict + kube-proxy-replacement: "true" kvstore: etcd kvstore-opt: '{"etcd.config": "/var/lib/etcd-config/etcd.config"}' monitor-aggregation: medium nodes-gc-interval: 5m0s preallocate-bpf-maps: "false" + remove-cilium-node-taints: "true" + routing-mode: native + set-cilium-is-up-condition: "true" + set-cilium-node-taints: "true" sidecar-istio-proxy-image: cilium/istio_proxy tofqdns-dns-reject-response-code: refused tofqdns-enable-poller: "false" - tunnel: disabled + write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist kind: ConfigMap metadata: creationTimestamp: null @@ -98,6 +104,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium rules: @@ -145,7 +152,6 @@ rules: - ciliumclusterwideenvoyconfigs - ciliumclusterwidenetworkpolicies - ciliumegressgatewaypolicies - - ciliumegressnatpolicies - ciliumendpoints - ciliumendpointslices - ciliumenvoyconfigs @@ -153,6 +159,10 @@ rules: - ciliumlocalredirectpolicies - ciliumnetworkpolicies - ciliumnodes + - ciliumnodeconfigs + - ciliumcidrgroups 
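In the advanced fixture (ENI IPAM plus an external etcd kvstore), note that the former strict kube-proxy replacement now renders in boolean form, and the new eni-tags key propagates the cluster's cloud labels so ENIs created by Cilium are tagged back to their owning cluster:

    kube-proxy-replacement: "true"      # 1.13 "strict" -> 1.14 boolean form
    eni-tags: KubernetesCluster=privateciliumadvanced.example.com   # from {{ CloudLabels }}
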
+ - ciliuml2announcementpolicies + - ciliumpodippools verbs: - list - watch @@ -192,6 +202,7 @@ rules: - ciliumclusterwidenetworkpolicies/status - ciliumendpoints/status - ciliumendpoints + - ciliuml2announcementpolicies/status verbs: - patch @@ -204,6 +215,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium-operator rules: @@ -215,6 +227,21 @@ rules: - get - list - watch + - delete +- apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - nodes + - nodes/status + verbs: + - patch - apiGroups: - discovery.k8s.io resources: @@ -226,8 +253,16 @@ rules: - apiGroups: - "" resources: - - nodes + - services/status verbs: + - update + - patch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get - list - watch - apiGroups: @@ -235,7 +270,6 @@ rules: resources: - services - endpoints - - namespaces verbs: - get - list @@ -323,7 +357,6 @@ rules: - ciliumclusterwideenvoyconfigs.cilium.io - ciliumclusterwidenetworkpolicies.cilium.io - ciliumegressgatewaypolicies.cilium.io - - ciliumegressnatpolicies.cilium.io - ciliumendpoints.cilium.io - ciliumendpointslices.cilium.io - ciliumenvoyconfigs.cilium.io @@ -332,6 +365,10 @@ rules: - ciliumlocalredirectpolicies.cilium.io - ciliumnetworkpolicies.cilium.io - ciliumnodes.cilium.io + - ciliumnodeconfigs.cilium.io + - ciliumcidrgroups.cilium.io + - ciliuml2announcementpolicies.cilium.io + - ciliumpodippools.cilium.io resources: - customresourcedefinitions verbs: @@ -340,10 +377,17 @@ rules: - cilium.io resources: - ciliumloadbalancerippools + - ciliumpodippools verbs: - get - list - watch +- apiGroups: + - cilium.io + resources: + - ciliumpodippools + verbs: + - create - apiGroups: - cilium.io resources: @@ -368,6 +412,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium roleRef: @@ -388,6 +433,7 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium role.kubernetes.io/networking: "1" name: cilium-operator roleRef: @@ -401,6 +447,51 @@ subjects: --- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium + role.kubernetes.io/networking: "1" + name: cilium-config-agent + namespace: kube-system +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium + role.kubernetes.io/networking: "1" + name: cilium-config-agent + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cilium-config-agent +subjects: +- kind: ServiceAccount + name: cilium + namespace: kube-system + +--- + apiVersion: apps/v1 kind: DaemonSet metadata: @@ -408,6 +499,8 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: cilium-agent + app.kubernetes.io/part-of: cilium k8s-app: cilium kubernetes.io/cluster-service: "true" role.kubernetes.io/networking: 
"1" @@ -422,6 +515,8 @@ spec: metadata: creationTimestamp: null labels: + app.kubernetes.io/name: cilium-agent + app.kubernetes.io/part-of: cilium k8s-app: cilium kops.k8s.io/managed-by: kops kubernetes.io/cluster-service: "true" @@ -469,14 +564,34 @@ spec: value: api.internal.privateciliumadvanced.example.com - name: KUBERNETES_SERVICE_PORT value: "443" - image: quay.io/cilium/cilium:v1.13.5 + image: quay.io/cilium/cilium:v1.14.3 imagePullPolicy: IfNotPresent lifecycle: postStart: exec: command: - - /cni-install.sh - - --cni-exclusive=true + - bash + - -c + - | + set -o errexit + set -o pipefail + set -o nounset + + # When running in AWS ENI mode, it's likely that 'aws-node' has + # had a chance to install SNAT iptables rules. These can result + # in dropped traffic, so we should attempt to remove them. + # We do it using a 'postStart' hook since this may need to run + # for nodes which might have already been init'ed but may still + # have dangling rules. This is safe because there are no + # dependencies on anything that is part of the startup script + # itself, and can be safely run multiple times per node (e.g. in + # case of a restart). + if [[ "$(iptables-save | grep -c AWS-SNAT-CHAIN)" != "0" ]]; + then + echo 'Deleting iptables rules created by the AWS CNI VPC plugin' + iptables-save | grep -v AWS-SNAT-CHAIN | iptables-restore + fi + echo 'Done!' preStop: exec: command: @@ -495,6 +610,7 @@ spec: successThreshold: 1 timeoutSeconds: 5 name: cilium-agent + ports: null readinessProbe: failureThreshold: 3 httpGet: @@ -505,7 +621,6 @@ spec: path: /healthz port: 9879 scheme: HTTP - initialDelaySeconds: 5 periodSeconds: 30 successThreshold: 1 timeoutSeconds: 5 @@ -526,12 +641,14 @@ spec: port: 9879 scheme: HTTP periodSeconds: 2 - successThreshold: null + successThreshold: 1 terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - mountPath: /sys/fs/bpf mountPropagation: Bidirectional name: bpf-maps + - mountPath: /run/cilium/cgroupv2 + name: cilium-cgroup - mountPath: /var/run/cilium name: cilium-run - mountPath: /host/etc/cni/net.d @@ -553,25 +670,78 @@ spec: readOnly: true - mountPath: /run/xtables.lock name: xtables-lock + - mountPath: /tmp + name: tmp hostNetwork: true initContainers: - command: - - /install-plugin.sh - image: quay.io/cilium/cilium:v1.13.5 + - cilium + - build-config + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: KUBERNETES_SERVICE_HOST + value: api.internal.privateciliumadvanced.example.com + - name: KUBERNETES_SERVICE_PORT + value: "443" + image: quay.io/cilium/cilium:v1.14.3 imagePullPolicy: IfNotPresent - name: install-cni-binaries - resources: - requests: - cpu: 100m - memory: 10Mi - securityContext: - capabilities: - drop: - - ALL - terminationMessagePath: /dev/termination-log + name: config terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - - mountPath: /host/opt/cni/bin + - mountPath: /tmp + name: tmp + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-mount /hostbin/cilium-mount; + nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT; + rm /hostbin/cilium-mount + env: + - name: CGROUP_ROOT + value: /run/cilium/cgroupv2 + - name: BIN_PATH + value: /opt/cni/bin + image: quay.io/cilium/cilium:v1.14.3 + imagePullPolicy: IfNotPresent + name: mount-cgroup + securityContext: + privileged: true + terminationMessagePolicy: 
FallbackToLogsOnError + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin + name: cni-path + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix; + nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix"; + rm /hostbin/cilium-sysctlfix + env: + - name: BIN_PATH + value: /opt/cni/bin + image: quay.io/cilium/cilium:v1.14.3 + imagePullPolicy: IfNotPresent + name: apply-sysctl-overwrites + securityContext: + privileged: true + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin name: cni-path - command: - /init-container.sh @@ -588,26 +758,43 @@ spec: key: clean-cilium-bpf-state name: cilium-config optional: true - image: quay.io/cilium/cilium:v1.13.5 + - name: KUBERNETES_SERVICE_HOST + value: api.internal.privateciliumadvanced.example.com + - name: KUBERNETES_SERVICE_PORT + value: "443" + image: quay.io/cilium/cilium:v1.14.3 imagePullPolicy: IfNotPresent name: clean-cilium-state - resources: - limits: - memory: 100Mi - requests: - cpu: 100m - memory: 100Mi securityContext: privileged: true terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - mountPath: /sys/fs/bpf + mountPropagation: HostToContainer name: bpf-maps - mountPath: /run/cilium/cgroupv2 mountPropagation: HostToContainer name: cilium-cgroup - mountPath: /var/run/cilium name: cilium-run + - command: + - /install-plugin.sh + image: quay.io/cilium/cilium:v1.14.3 + imagePullPolicy: IfNotPresent + name: install-cni-binaries + resources: + requests: + cpu: 100m + memory: 10Mi + securityContext: + capabilities: + drop: + - ALL + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-path priorityClassName: system-node-critical restartPolicy: Always serviceAccount: cilium @@ -616,6 +803,8 @@ spec: tolerations: - operator: Exists volumes: + - emptyDir: {} + name: tmp - hostPath: path: /var/run/cilium type: DirectoryOrCreate @@ -624,14 +813,18 @@ spec: path: /sys/fs/bpf type: DirectoryOrCreate name: bpf-maps + - hostPath: + path: /proc + type: Directory + name: hostproc + - hostPath: + path: /run/cilium/cgroupv2 + type: DirectoryOrCreate + name: cilium-cgroup - hostPath: path: /opt/cni/bin type: DirectoryOrCreate name: cni-path - - hostPath: - path: /run/cilium/cgroupv2 - type: Directory - name: cilium-cgroup - hostPath: path: /etc/cni/net.d type: DirectoryOrCreate @@ -644,7 +837,7 @@ spec: type: FileOrCreate name: xtables-lock - configMap: - defaultMode: 420 + defaultMode: 256 items: - key: etcd-config path: etcd.config @@ -655,10 +848,22 @@ spec: type: Directory name: etcd-secrets - name: clustermesh-secrets - secret: - defaultMode: 420 - optional: true - secretName: cilium-clustermesh + projected: + defaultMode: 256 + sources: + - secret: + name: cilium-clustermesh + optional: true + - secret: + items: + - key: tls.key + path: common-etcd-client.key + - key: tls.crt + path: common-etcd-client.crt + - key: ca.crt + path: common-etcd-client-ca.crt + name: clustermesh-apiserver-remote-cert + optional: true - configMap: name: cilium-config name: cilium-config-path @@ -674,6 +879,8 @@ metadata: labels: addon.kops.k8s.io/name: networking.cilium.io app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: cilium-operator + app.kubernetes.io/part-of: cilium io.cilium/app: operator name: cilium-operator role.kubernetes.io/networking: "1" @@ -694,6 +901,8 @@ spec: metadata: 
creationTimestamp: null labels: + app.kubernetes.io/name: cilium-operator + app.kubernetes.io/part-of: cilium io.cilium/app: operator kops.k8s.io/managed-by: kops name: cilium-operator @@ -736,7 +945,7 @@ spec: value: api.internal.privateciliumadvanced.example.com - name: KUBERNETES_SERVICE_PORT value: "443" - image: quay.io/cilium/operator:v1.13.5 + image: quay.io/cilium/operator:v1.14.3 imagePullPolicy: IfNotPresent livenessProbe: httpGet: diff --git a/tests/integration/update_cluster/privateciliumadvanced/kubernetes.tf b/tests/integration/update_cluster/privateciliumadvanced/kubernetes.tf index 5ae576329e..5d60a67f38 100644 --- a/tests/integration/update_cluster/privateciliumadvanced/kubernetes.tf +++ b/tests/integration/update_cluster/privateciliumadvanced/kubernetes.tf @@ -1065,7 +1065,7 @@ resource "aws_s3_object" "privateciliumadvanced-example-com-addons-limit-range-a resource "aws_s3_object" "privateciliumadvanced-example-com-addons-networking-cilium-io-k8s-1-16" { bucket = "testingBucket" content = file("${path.module}/data/aws_s3_object_privateciliumadvanced.example.com-addons-networking.cilium.io-k8s-1.16_content") - key = "clusters.example.com/privateciliumadvanced.example.com/addons/networking.cilium.io/k8s-1.16-v1.13.yaml" + key = "clusters.example.com/privateciliumadvanced.example.com/addons/networking.cilium.io/k8s-1.16-v1.14.yaml" provider = aws.files server_side_encryption = "AES256" } diff --git a/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.13.yaml.template b/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.14.yaml.template similarity index 73% rename from upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.13.yaml.template rename to upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.14.yaml.template index a6df0ca642..d8837d7456 100644 --- a/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.13.yaml.template +++ b/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.14.yaml.template @@ -1,6 +1,7 @@ {{ with .Networking.Cilium }} {{ $semver := (trimPrefix "v" .Version) }} {{ $healthPort := (ternary 9879 9876 (semverCompare ">=1.11.6" $semver)) }} +{{ $operatorHealthPort := 9234 }} {{- if CiliumSecret }} apiVersion: v1 kind: Secret @@ -190,7 +191,13 @@ data: # - disabled # - vxlan (default) # - geneve - tunnel: "{{ .Tunnel }}" + {{ if eq .Tunnel "disabled" }} + # This option enables native-routing mode, in place of tunnel=disabled, now deprecated. + routing-mode: "native" + {{ else }} + routing-mode: "tunnel" + tunnel-protocol: "{{ .Tunnel }}" + {{ end }} # Name of the cluster. Only relevant when building a mesh of clusters. cluster-name: "{{ .ClusterName }}" @@ -200,6 +207,10 @@ data: cluster-id: "{{ .ClusterID }}" {{ end }} + remove-cilium-node-taints: "true" + set-cilium-node-taints: "true" + set-cilium-is-up-condition: "true" + # DNS response code for rejecting DNS requests, # available options are "nameError" and "refused" tofqdns-dns-reject-response-code: "{{ .ToFQDNsDNSRejectResponseCode }}" @@ -246,14 +257,14 @@ data: enable-host-reachable-services: "{{ .EnableHostReachableServices }}" {{ end }} enable-node-port: "{{ .EnableNodePort }}" - kube-proxy-replacement: "{{- if .EnableNodePort -}}strict{{- else -}}partial{{- end -}}" + kube-proxy-replacement: "{{- if .EnableNodePort -}}true{{- else -}}false{{- end -}}" {{ with .IPAM }} ipam: {{ . }} {{ if eq . 
"eni" }} enable-endpoint-routes: "true" auto-create-cilium-node-resource: "true" - blacklist-conflicting-routes: "false" + eni-tags: "{{ CloudLabels }}" {{ end }} {{ end }} @@ -306,6 +317,11 @@ data: {{ end }} {{ end }} + # Tell the agent to generate and write a CNI configuration file + write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist + cni-exclusive: "true" + cni-log-file: "/var/run/cilium/cilium-cni.log" + {{ if WithDefaultBool .Hubble.Enabled false }} # Enable Hubble gRPC service. enable-hubble: "true" @@ -336,7 +352,8 @@ metadata: namespace: kube-system data: config.yaml: | - peer-service: unix:///var/run/cilium/hubble.sock + cluster-name: "{{ .ClusterName }}" + peer-service: "hubble-peer.kube-system.svc.cluster.local:443" listen-address: :4245 disable-server-tls: true @@ -344,13 +361,34 @@ data: tls-client-cert-file: /var/lib/hubble-relay/tls/client.crt tls-client-key-file: /var/lib/hubble-relay/tls/client.key tls-hubble-server-ca-files: /var/lib/hubble-relay/tls/hubble-server-ca.crt - +--- +# Source: cilium/templates/hubble/peer-service.yaml +apiVersion: v1 +kind: Service +metadata: + name: hubble-peer + namespace: kube-system + labels: + k8s-app: cilium + app.kubernetes.io/part-of: cilium + app.kubernetes.io/name: hubble-peer +spec: + selector: + k8s-app: cilium + ports: + - name: peer-service + port: 443 + protocol: TCP + targetPort: 4244 + internalTrafficPolicy: Local {{ end }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: cilium + labels: + app.kubernetes.io/part-of: cilium rules: - apiGroups: - networking.k8s.io @@ -396,7 +434,6 @@ rules: - ciliumclusterwideenvoyconfigs - ciliumclusterwidenetworkpolicies - ciliumegressgatewaypolicies - - ciliumegressnatpolicies - ciliumendpoints - ciliumendpointslices - ciliumenvoyconfigs @@ -404,6 +441,10 @@ rules: - ciliumlocalredirectpolicies - ciliumnetworkpolicies - ciliumnodes + - ciliumnodeconfigs + - ciliumcidrgroups + - ciliuml2announcementpolicies + - ciliumpodippools verbs: - list - watch @@ -444,6 +485,7 @@ rules: - ciliumclusterwidenetworkpolicies/status - ciliumendpoints/status - ciliumendpoints + - ciliuml2announcementpolicies/status verbs: - patch --- @@ -451,6 +493,8 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: cilium-operator + labels: + app.kubernetes.io/part-of: cilium rules: - apiGroups: - "" @@ -460,6 +504,25 @@ rules: - get - list - watch + # to automatically delete [core|kube]dns pods so that are starting to being + # managed by Cilium + - delete +- apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch +- apiGroups: + - "" + resources: + # To remove node taints + - nodes + # To set NetworkUnavailable false on startup + - nodes/status + verbs: + - patch - apiGroups: - discovery.k8s.io resources: @@ -471,8 +534,18 @@ rules: - apiGroups: - "" resources: - - nodes + # to perform LB IP allocation for BGP + - services/status verbs: + - update + - patch +- apiGroups: + - "" + resources: + # to check apiserver connectivity + - namespaces + verbs: + - get - list - watch - apiGroups: @@ -481,8 +554,6 @@ rules: # to perform the translation of a CNP that contains `ToGroup` to its endpoints - services - endpoints - # to check apiserver connectivity - - namespaces verbs: - get - list @@ -580,7 +651,6 @@ rules: - ciliumclusterwideenvoyconfigs.cilium.io - ciliumclusterwidenetworkpolicies.cilium.io - ciliumegressgatewaypolicies.cilium.io - - ciliumegressnatpolicies.cilium.io - ciliumendpoints.cilium.io - ciliumendpointslices.cilium.io - 
ciliumenvoyconfigs.cilium.io @@ -589,14 +659,25 @@ rules: - ciliumlocalredirectpolicies.cilium.io - ciliumnetworkpolicies.cilium.io - ciliumnodes.cilium.io + - ciliumnodeconfigs.cilium.io + - ciliumcidrgroups.cilium.io + - ciliuml2announcementpolicies.cilium.io + - ciliumpodippools.cilium.io - apiGroups: - cilium.io resources: - ciliumloadbalancerippools + - ciliumpodippools verbs: - get - list - watch +- apiGroups: + - cilium.io + resources: + - ciliumpodippools + verbs: + - create - apiGroups: - cilium.io resources: @@ -633,6 +714,8 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: cilium + labels: + app.kubernetes.io/part-of: cilium roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole @@ -646,6 +729,8 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: cilium-operator + labels: + app.kubernetes.io/part-of: cilium roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole @@ -654,6 +739,40 @@ subjects: - kind: ServiceAccount name: cilium-operator namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: cilium-config-agent + namespace: kube-system + labels: + app.kubernetes.io/part-of: cilium +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch +--- +# Source: cilium/templates/cilium-agent/rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cilium-config-agent + namespace: kube-system + labels: + app.kubernetes.io/part-of: cilium +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cilium-config-agent +subjects: + - kind: ServiceAccount + name: "cilium" + namespace: kube-system {{ if WithDefaultBool .Ingress.Enabled false }} --- # Source: cilium/templates/cilium-agent/role.yaml @@ -674,7 +793,6 @@ rules: - list - watch --- -# Source: cilium/templates/cilium-agent/rolebinding.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: @@ -779,6 +897,8 @@ metadata: namespace: kube-system labels: k8s-app: hubble-relay + app.kubernetes.io/name: hubble-relay + app.kubernetes.io/part-of: cilium spec: type: ClusterIP selector: @@ -792,11 +912,13 @@ spec: apiVersion: apps/v1 kind: DaemonSet metadata: - labels: - k8s-app: cilium - kubernetes.io/cluster-service: "true" name: cilium namespace: kube-system + labels: + k8s-app: cilium + app.kubernetes.io/part-of: cilium + app.kubernetes.io/name: cilium-agent + kubernetes.io/cluster-service: "true" spec: selector: matchLabels: @@ -818,6 +940,8 @@ spec: {{- end }} labels: k8s-app: cilium + app.kubernetes.io/name: cilium-agent + app.kubernetes.io/part-of: cilium kubernetes.io/cluster-service: "true" spec: affinity: @@ -830,10 +954,13 @@ spec: values: - linux containers: - - args: - - --config-dir=/tmp/cilium/config-map + - name: cilium-agent + image: "{{ or .Registry "quay.io" }}/cilium/cilium:{{ .Version }}" + imagePullPolicy: IfNotPresent command: - cilium-agent + args: + - --config-dir=/tmp/cilium/config-map startupProbe: httpGet: host: '{{- if IsIPv6Only -}}::1{{- else -}}127.0.0.1{{- end -}}' @@ -845,7 +972,7 @@ spec: value: "true" failureThreshold: 105 periodSeconds: 2 - successThreshold: + successThreshold: 1 livenessProbe: httpGet: host: '{{- if IsIPv6Only -}}::1{{- else -}}127.0.0.1{{- end -}}' @@ -855,9 +982,9 @@ spec: httpHeaders: - name: "brief" value: "true" - failureThreshold: 10 periodSeconds: 30 successThreshold: 1 + failureThreshold: 10 timeoutSeconds: 5 resources: requests: @@ -872,10 +999,9 @@ spec: 
httpHeaders: - name: "brief" value: "true" - failureThreshold: 3 - initialDelaySeconds: 5 periodSeconds: 30 successThreshold: 1 + failureThreshold: 3 timeoutSeconds: 5 env: - name: K8S_NODE_NAME @@ -910,87 +1036,259 @@ spec: - name: CILIUM_ENABLE_POLICY value: {{ . }} {{ end }} - image: "{{ or .Registry "quay.io" }}/cilium/cilium:{{ .Version }}" - imagePullPolicy: IfNotPresent lifecycle: + {{ if eq .IPAM "eni" }} postStart: exec: command: - - /cni-install.sh - - --cni-exclusive=true + - "bash" + - "-c" + - | + set -o errexit + set -o pipefail + set -o nounset + + # When running in AWS ENI mode, it's likely that 'aws-node' has + # had a chance to install SNAT iptables rules. These can result + # in dropped traffic, so we should attempt to remove them. + # We do it using a 'postStart' hook since this may need to run + # for nodes which might have already been init'ed but may still + # have dangling rules. This is safe because there are no + # dependencies on anything that is part of the startup script + # itself, and can be safely run multiple times per node (e.g. in + # case of a restart). + if [[ "$(iptables-save | grep -c AWS-SNAT-CHAIN)" != "0" ]]; + then + echo 'Deleting iptables rules created by the AWS CNI VPC plugin' + iptables-save | grep -v AWS-SNAT-CHAIN | iptables-restore + fi + echo 'Done!' + {{- end }} preStop: exec: command: - /cni-uninstall.sh - name: cilium-agent - {{ if or .EnablePrometheusMetrics .Hubble.Metrics }} ports: - {{ if .EnablePrometheusMetrics }} - - containerPort: {{ .AgentPrometheusPort }} - name: prometheus + {{- if WithDefaultBool .Hubble.Enabled false }} + - name: peer-service + containerPort: 4244 + hostPort: 4244 protocol: TCP - {{ end }} {{- if .Hubble.Metrics }} - containerPort: 9091 hostPort: 9091 name: hubble-metrics protocol: TCP {{- end }} - {{ end }} + {{- end }} + {{ if .EnablePrometheusMetrics }} + - containerPort: {{ .AgentPrometheusPort }} + name: prometheus + protocol: TCP + {{- end }} terminationMessagePolicy: FallbackToLogsOnError securityContext: privileged: true volumeMounts: - - mountPath: /sys/fs/bpf - name: bpf-maps + - name: bpf-maps + mountPath: /sys/fs/bpf {{- if semverCompare ">=1.10.4 || ~1.9.10" $semver }} mountPropagation: Bidirectional {{- end }} - - mountPath: /var/run/cilium - name: cilium-run + - name: cilium-cgroup + mountPath: /run/cilium/cgroupv2 + - name: cilium-run + mountPath: /var/run/cilium {{- if not (semverCompare "~1.11.15 || ~1.12.8 || >=1.13.1" $semver) }} - - mountPath: /host/opt/cni/bin - name: cni-path + - name: cni-path + mountPath: /host/opt/cni/bin {{- end }} - - mountPath: /host/etc/cni/net.d - name: etc-cni-netd + - name: etc-cni-netd + mountPath: /host/etc/cni/net.d {{ if .EtcdManaged }} - - mountPath: /var/lib/etcd-config - name: etcd-config-path + - name: etcd-config-path + mountPath: /var/lib/etcd-config readOnly: true - - mountPath: /var/lib/etcd-secrets - name: etcd-secrets + - name: etcd-secrets + mountPath: /var/lib/etcd-secrets readOnly: true {{ end }} - - mountPath: /var/lib/cilium/clustermesh - name: clustermesh-secrets + - name: clustermesh-secrets + mountPath: /var/lib/cilium/clustermesh readOnly: true - - mountPath: /tmp/cilium/config-map - name: cilium-config-path + - name: cilium-config-path + mountPath: /tmp/cilium/config-map readOnly: true # Needed to be able to load kernel modules - - mountPath: /lib/modules - name: lib-modules + - name: lib-modules + mountPath: /lib/modules readOnly: true - - mountPath: /run/xtables.lock - name: xtables-lock + - name: xtables-lock + mountPath: 
/run/xtables.lock + - name: tmp + mountPath: /tmp {{ if WithDefaultBool .Hubble.Enabled false }} - - mountPath: /var/lib/cilium/tls/hubble - name: hubble-tls + - name: hubble-tls + mountPath: /var/lib/cilium/tls/hubble readOnly: true {{ end }} {{ if CiliumSecret }} - mountPath: /etc/ipsec name: cilium-ipsec-secrets {{ end }} - hostNetwork: true - initContainers: - {{- if semverCompare "~1.11.15 || ~1.12.8 || >=1.13.1" $semver }} - - command: - - /install-plugin.sh +{{ if .Debug }} + - name: cilium-monitor image: "{{ or .Registry "quay.io" }}/cilium/cilium:{{ .Version }}" imagePullPolicy: IfNotPresent - name: install-cni-binaries + command: + - /bin/bash + - -c + - -- + args: + - |- + for i in {1..5}; do \ + [ -S /var/run/cilium/monitor1_2.sock ] && break || sleep 10;\ + done; \ + cilium monitor --type=agent + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - name: cilium-run + mountPath: /var/run/cilium +{{ end }} + initContainers: + - name: config + image: "{{ or .Registry "quay.io" }}/cilium/cilium:{{ .Version }}" + imagePullPolicy: IfNotPresent + command: + - cilium + - build-config + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: KUBERNETES_SERVICE_HOST + value: "{{ APIInternalName }}" + - name: KUBERNETES_SERVICE_PORT + value: "443" + volumeMounts: + - name: tmp + mountPath: /tmp + terminationMessagePolicy: FallbackToLogsOnError + # Required to mount cgroup2 filesystem on the underlying Kubernetes node. + # We use nsenter command with host's cgroup and mount namespaces enabled. + - name: mount-cgroup + image: "{{ or .Registry "quay.io" }}/cilium/cilium:{{ .Version }}" + imagePullPolicy: IfNotPresent + env: + - name: CGROUP_ROOT + value: /run/cilium/cgroupv2 + - name: BIN_PATH + value: /opt/cni/bin + command: + - sh + - -ec + # The statically linked Go program binary is invoked to avoid any + # dependency on utilities like sh and mount that can be missing on certain + # distros installed on the underlying host. Copy the binary to the + # same directory where we install cilium cni plugin so that exec permissions + # are available. + - | + cp /usr/bin/cilium-mount /hostbin/cilium-mount; + nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT; + rm /hostbin/cilium-mount + volumeMounts: + - name: hostproc + mountPath: /hostproc + - name: cni-path + mountPath: /hostbin + terminationMessagePolicy: FallbackToLogsOnError + securityContext: + privileged: true + - name: apply-sysctl-overwrites + image: "{{ or .Registry "quay.io" }}/cilium/cilium:{{ .Version }}" + imagePullPolicy: IfNotPresent + env: + - name: BIN_PATH + value: /opt/cni/bin + command: + - sh + - -ec + # The statically linked Go program binary is invoked to avoid any + # dependency on utilities like sh that can be missing on certain + # distros installed on the underlying host. Copy the binary to the + # same directory where we install cilium cni plugin so that exec permissions + # are available. 
+ - | + cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix; + nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix"; + rm /hostbin/cilium-sysctlfix + volumeMounts: + - name: hostproc + mountPath: /hostproc + - name: cni-path + mountPath: /hostbin + terminationMessagePolicy: FallbackToLogsOnError + securityContext: + privileged: true + - name: clean-cilium-state + image: "{{ or .Registry "quay.io" }}/cilium/cilium:{{ .Version }}" + imagePullPolicy: IfNotPresent + command: + - /init-container.sh + env: + - name: CILIUM_ALL_STATE + valueFrom: + configMapKeyRef: + name: cilium-config + key: clean-cilium-state + optional: true + - name: CILIUM_BPF_STATE + valueFrom: + configMapKeyRef: + name: cilium-config + key: clean-cilium-bpf-state + optional: true + - name: KUBERNETES_SERVICE_HOST + value: "{{ APIInternalName }}" + - name: KUBERNETES_SERVICE_PORT + value: "443" + {{- if not (semverCompare ">=1.10.4 || ~1.9.10" $semver) }} + - name: CILIUM_WAIT_BPF_MOUNT + valueFrom: + configMapKeyRef: + key: wait-bpf-mount + name: cilium-config + optional: true + {{- end }} + terminationMessagePolicy: FallbackToLogsOnError + securityContext: + privileged: true + volumeMounts: + - name: bpf-maps + mountPath: /sys/fs/bpf + {{- if semverCompare ">=1.10.4 || ~1.9.10" $semver }} + mountPropagation: HostToContainer + {{- end }} + # Required to mount cgroup filesystem from the host to cilium agent pod + - name: cilium-cgroup + mountPath: /run/cilium/cgroupv2 + mountPropagation: HostToContainer + - name: cilium-run + mountPath: /var/run/cilium + {{- if semverCompare "~1.11.15 || ~1.12.8 || >=1.13.1" $semver }} + # Install the CNI binaries in an InitContainer so we don't have a writable host mount in the agent + - name: install-cni-binaries + image: "{{ or .Registry "quay.io" }}/cilium/cilium:{{ .Version }}" + imagePullPolicy: IfNotPresent + command: + - /install-plugin.sh resources: requests: cpu: 100m @@ -1002,56 +1300,9 @@ spec: terminationMessagePath: /dev/termination-log terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - - mountPath: /host/opt/cni/bin - name: cni-path + - name: cni-path + mountPath: /host/opt/cni/bin {{- end }} - - command: - - /init-container.sh - env: - - name: CILIUM_ALL_STATE - valueFrom: - configMapKeyRef: - key: clean-cilium-state - name: cilium-config - optional: true - - name: CILIUM_BPF_STATE - valueFrom: - configMapKeyRef: - key: clean-cilium-bpf-state - name: cilium-config - optional: true - {{- if not (semverCompare ">=1.10.4 || ~1.9.10" $semver) }} - - name: CILIUM_WAIT_BPF_MOUNT - valueFrom: - configMapKeyRef: - key: wait-bpf-mount - name: cilium-config - optional: true - {{- end }} - image: "{{ or .Registry "quay.io" }}/cilium/cilium:{{ .Version }}" - imagePullPolicy: IfNotPresent - name: clean-cilium-state - terminationMessagePolicy: FallbackToLogsOnError - securityContext: - privileged: true - volumeMounts: - - mountPath: /sys/fs/bpf - name: bpf-maps - {{- if not (semverCompare ">=1.10.4 || ~1.9.10" $semver) }} - mountPropagation: HostToContainer - {{- end }} - # Required to mount cgroup filesystem from the host to cilium agent pod - - mountPath: /run/cilium/cgroupv2 - name: cilium-cgroup - mountPropagation: HostToContainer - - mountPath: /var/run/cilium - name: cilium-run - resources: - requests: - cpu: 100m - memory: 100Mi - limits: - memory: 100Mi restartPolicy: Always priorityClassName: system-node-critical {{ if ContainerdSELinuxEnabled }} @@ -1063,53 +1314,62 @@ spec: serviceAccount: cilium serviceAccountName: cilium 
terminationGracePeriodSeconds: 1 + hostNetwork: true tolerations: - operator: Exists volumes: + # For sharing configuration between the "config" initContainer and the agent + - name: tmp + emptyDir: {} # To keep state between restarts / upgrades - - hostPath: + - name: cilium-run + hostPath: path: /var/run/cilium type: DirectoryOrCreate - name: cilium-run - # To keep state between restarts / upgrades for bpf maps - - hostPath: + # To keep state between restarts / upgrades for bpf maps + - name: bpf-maps + hostPath: path: /sys/fs/bpf type: DirectoryOrCreate - name: bpf-maps + # To mount cgroup2 filesystem on the host + - name: hostproc + hostPath: + path: /proc + type: Directory + # To keep state between restarts / upgrades for cgroup2 filesystem + - name: cilium-cgroup + hostPath: + path: /run/cilium/cgroupv2 + type: DirectoryOrCreate # To install cilium cni plugin in the host - - hostPath: + - name: cni-path + hostPath: path: /opt/cni/bin type: DirectoryOrCreate - name: cni-path - # To keep state between restarts / upgrades for cgroup2 filesystem - - hostPath: - path: /run/cilium/cgroupv2 - type: Directory - name: cilium-cgroup # To install cilium cni configuration in the host - - hostPath: + - name: etc-cni-netd + hostPath: path: /etc/cni/net.d type: DirectoryOrCreate - name: etc-cni-netd # To be able to load kernel modules - - hostPath: + - name: lib-modules + hostPath: path: /lib/modules - name: lib-modules # To access iptables concurrently with other processes (e.g. kube-proxy) - - hostPath: + - name: xtables-lock + hostPath: path: /run/xtables.lock type: FileOrCreate - name: xtables-lock - # To read the clustermesh configuration {{- if .EtcdManaged }} # To read the etcd config stored in config maps - - configMap: - defaultMode: 420 + - name: etcd-config-path + configMap: + name: cilium-config + # note: the leading zero means this number is in octal representation: do not remove it + defaultMode: 0400 items: - key: etcd-config path: etcd.config - name: cilium-config - name: etcd-config-path # To read the Cilium etcd secrets in case the user might want to use TLS - name: etcd-secrets hostPath: @@ -1117,11 +1377,27 @@ spec: type: Directory {{- end }} - name: clustermesh-secrets - secret: - defaultMode: 420 - optional: true - secretName: cilium-clustermesh - # To read the configuration from the config map + projected: + # note: the leading zero means this number is in octal representation: do not remove it + defaultMode: 0400 + sources: + - secret: + name: cilium-clustermesh + optional: true + # note: items are not explicitly listed here, since the entries of this secret + # depend on the peers configured, and that would cause a restart of all agents + # at every addition/removal. Leaving the field empty makes each secret entry + # to be automatically projected into the volume as a file whose name is the key. 
+ - secret: + name: clustermesh-apiserver-remote-cert + optional: true + items: + - key: tls.key + path: common-etcd-client.key + - key: tls.crt + path: common-etcd-client.crt + - key: ca.crt + path: common-etcd-client-ca.crt - configMap: name: cilium-config name: cilium-config-path @@ -1132,9 +1408,13 @@ spec: {{ end }} {{ if WithDefaultBool .Hubble.Enabled false }} - name: hubble-tls - secret: - secretName: hubble-server-certs - optional: true + projected: + # note: the leading zero means this number is in octal representation: do not remove it + defaultMode: 0400 + sources: + - secret: + name: hubble-server-certs + optional: true {{ end }} --- apiVersion: apps/v1 @@ -1143,6 +1423,8 @@ metadata: labels: io.cilium/app: operator name: cilium-operator + app.kubernetes.io/part-of: cilium + app.kubernetes.io/name: cilium-operator name: cilium-operator namespace: kube-system spec: @@ -1165,6 +1447,8 @@ spec: labels: io.cilium/app: operator name: cilium-operator + app.kubernetes.io/part-of: cilium + app.kubernetes.io/name: cilium-operator spec: nodeSelector: null affinity: @@ -1178,12 +1462,15 @@ spec: - key: node-role.kubernetes.io/master operator: Exists containers: - - args: + - name: cilium-operator + image: "{{ or .Registry "quay.io" }}/cilium/operator:{{ .Version }}" + imagePullPolicy: IfNotPresent + command: + - cilium-operator + args: - "--config-dir=/tmp/cilium/config-map" - "--debug=$(CILIUM_DEBUG)" - "--eni-tags={{ CloudLabels }}" - command: - - cilium-operator env: - name: K8S_NODE_NAME valueFrom: @@ -1205,9 +1492,6 @@ spec: value: "{{ APIInternalName }}" - name: KUBERNETES_SERVICE_PORT value: "443" - image: "{{ or .Registry "quay.io" }}/cilium/operator:{{ .Version }}" - imagePullPolicy: IfNotPresent - name: cilium-operator {{ if .EnablePrometheusMetrics }} ports: - containerPort: 6942 @@ -1221,9 +1505,9 @@ spec: memory: {{ or .MemoryRequest "128Mi" }} livenessProbe: httpGet: - host: '127.0.0.1' + host: '{{- if IsIPv6Only -}}::1{{- else -}}127.0.0.1{{- end -}}' path: /healthz - port: 9234 + port: {{ $operatorHealthPort }} scheme: HTTP initialDelaySeconds: 60 periodSeconds: 10 @@ -1296,9 +1580,11 @@ apiVersion: apps/v1 kind: Deployment metadata: name: hubble-relay + namespace: kube-system labels: k8s-app: hubble-relay - namespace: kube-system + app.kubernetes.io/name: hubble-relay + app.kubernetes.io/part-of: cilium spec: replicas: 2 selector: @@ -1312,21 +1598,29 @@ spec: metadata: labels: k8s-app: hubble-relay + app.kubernetes.io/name: hubble-relay + app.kubernetes.io/part-of: cilium spec: + securityContext: + fsGroup: 65532 containers: - name: hubble-relay image: "{{ or .Registry "quay.io" }}/cilium/hubble-relay:{{ .Version }}" imagePullPolicy: IfNotPresent + securityContext: + capabilities: + drop: + - ALL + runAsGroup: 65532 + runAsNonRoot: true + runAsUser: 65532 command: - hubble-relay args: - - "serve" - - "--peer-service=unix:///var/run/cilium/hubble.sock" - - "--listen-address=:4245" - env: - # unfortunately, the addon CAs use only CN - - name: GODEBUG - value: x509ignoreCN=0 + - serve + {{- if .Debug }} + - '--debug' + {{- end }} ports: - name: grpc containerPort: 4245 @@ -1338,15 +1632,19 @@ spec: port: grpc terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - - mountPath: /var/run/cilium - name: hubble-sock-dir + - name: config + mountPath: /etc/hubble-relay readOnly: true - - mountPath: /etc/hubble-relay - name: config - readOnly: true - - mountPath: /var/lib/hubble-relay/tls - name: tls + - name: tls + mountPath: /var/lib/hubble-relay/tls readOnly: true + 
affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + k8s-app: cilium + topologyKey: kubernetes.io/hostname restartPolicy: Always serviceAccount: hubble-relay serviceAccountName: hubble-relay @@ -1365,17 +1663,16 @@ spec: matchLabels: k8s-app: hubble-relay volumes: - - hostPath: - path: /var/run/cilium - type: Directory - name: hubble-sock-dir - - configMap: + - name: config + configMap: name: hubble-relay-config items: - key: config.yaml path: config.yaml - name: config - - projected: + - name: tls + projected: + # note: the leading zero means this number is in octal representation: do not remove it + defaultMode: 0400 sources: - secret: name: hubble-relay-client-certs @@ -1386,7 +1683,6 @@ spec: path: client.key - key: ca.crt path: hubble-server-ca.crt - name: tls --- apiVersion: cert-manager.io/v1 kind: Certificate @@ -1408,6 +1704,7 @@ kind: Certificate metadata: labels: k8s-app: cilium + app.kubernetes.io/part-of: cilium name: hubble-relay-client-certs namespace: kube-system spec: diff --git a/upup/pkg/fi/cloudup/bootstrapchannelbuilder/cilium.go b/upup/pkg/fi/cloudup/bootstrapchannelbuilder/cilium.go index 4bc5fdb8a1..7ba66ee6c9 100644 --- a/upup/pkg/fi/cloudup/bootstrapchannelbuilder/cilium.go +++ b/upup/pkg/fi/cloudup/bootstrapchannelbuilder/cilium.go @@ -35,7 +35,7 @@ func addCiliumAddon(b *BootstrapChannelBuilder, addons *AddonList) error { klog.Infof("found cilium (%q) in addons; won't use builtin", key) } else { id := "k8s-1.16" - location := key + "/" + id + "-v1.13.yaml" + location := key + "/" + id + "-v1.14.yaml" addon := &api.AddonSpec{ Name: fi.PtrTo(key), diff --git a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/cilium/manifest.yaml b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/cilium/manifest.yaml index dd9d29f14e..5e6b6fb335 100644 --- a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/cilium/manifest.yaml +++ b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/cilium/manifest.yaml @@ -98,8 +98,8 @@ spec: k8s-addon: storage-aws.addons.k8s.io version: 9.99.0 - id: k8s-1.16 - manifest: networking.cilium.io/k8s-1.16-v1.13.yaml - manifestHash: 166325a914768c7916145fb5569a8673c50e90e74661391e63854fcf6a28daab + manifest: networking.cilium.io/k8s-1.16-v1.14.yaml + manifestHash: 2f32492b13ce87032e506c9b7977b78214ee645513c92b6fa7668df8022fd183 name: networking.cilium.io needsRollingUpdate: all selector: diff --git a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/metrics-server/insecure-1.19/manifest.yaml b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/metrics-server/insecure-1.19/manifest.yaml index 6f27e94b8c..40e0b93316 100644 --- a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/metrics-server/insecure-1.19/manifest.yaml +++ b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/metrics-server/insecure-1.19/manifest.yaml @@ -112,8 +112,8 @@ spec: k8s-addon: storage-aws.addons.k8s.io version: 9.99.0 - id: k8s-1.16 - manifest: networking.cilium.io/k8s-1.16-v1.13.yaml - manifestHash: 166325a914768c7916145fb5569a8673c50e90e74661391e63854fcf6a28daab + manifest: networking.cilium.io/k8s-1.16-v1.14.yaml + manifestHash: 2f32492b13ce87032e506c9b7977b78214ee645513c92b6fa7668df8022fd183 name: networking.cilium.io needsRollingUpdate: all selector: diff --git a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/metrics-server/secure-1.19/manifest.yaml b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/metrics-server/secure-1.19/manifest.yaml index 94f1083de3..021076650f 100644 --- 
a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/metrics-server/secure-1.19/manifest.yaml +++ b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/metrics-server/secure-1.19/manifest.yaml @@ -169,8 +169,8 @@ spec: k8s-addon: storage-aws.addons.k8s.io version: 9.99.0 - id: k8s-1.16 - manifest: networking.cilium.io/k8s-1.16-v1.13.yaml - manifestHash: 166325a914768c7916145fb5569a8673c50e90e74661391e63854fcf6a28daab + manifest: networking.cilium.io/k8s-1.16-v1.14.yaml + manifestHash: 2f32492b13ce87032e506c9b7977b78214ee645513c92b6fa7668df8022fd183 name: networking.cilium.io needsRollingUpdate: all selector: