From a6dce2e8205cbeb6c611ef61827d9a07dbf4763f Mon Sep 17 00:00:00 2001 From: Ole Markus With Date: Sat, 30 Jan 2021 18:13:23 +0100 Subject: [PATCH] Add support for cilium 1.9 Apply suggestions from code review Co-authored-by: Ciprian Hacman --- pkg/apis/kops/validation/validation.go | 4 +- pkg/model/components/cilium.go | 2 +- upup/models/bindata.go | 996 ++++++++++++++++++ .../k8s-1.12-v1.9.yaml.template | 977 +++++++++++++++++ .../cloudup/bootstrapchannelbuilder/cilium.go | 22 +- .../cilium/manifest.yaml | 6 +- 6 files changed, 1996 insertions(+), 11 deletions(-) create mode 100644 upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.12-v1.9.yaml.template diff --git a/pkg/apis/kops/validation/validation.go b/pkg/apis/kops/validation/validation.go index 42bd3f1323..fbe98e70a9 100644 --- a/pkg/apis/kops/validation/validation.go +++ b/pkg/apis/kops/validation/validation.go @@ -691,8 +691,8 @@ func validateNetworkingCilium(cluster *kops.Cluster, v *kops.CiliumNetworkingSpe allErrs = append(allErrs, field.Invalid(versionFld, v.Version, "Could not parse as semantic version")) } - if !(version.Minor >= 6 && version.Minor <= 8) { - allErrs = append(allErrs, field.Invalid(versionFld, v.Version, "Only versions 1.6 through 1.8 are supported")) + if !(version.Minor >= 6 && version.Minor <= 9) { + allErrs = append(allErrs, field.Invalid(versionFld, v.Version, "Only versions 1.6 through 1.9 are supported")) } if version.Minor == 6 && cluster.IsKubernetesGTE("1.16") { diff --git a/pkg/model/components/cilium.go b/pkg/model/components/cilium.go index d20147161d..db2c749618 100644 --- a/pkg/model/components/cilium.go +++ b/pkg/model/components/cilium.go @@ -39,7 +39,7 @@ func (b *CiliumOptionsBuilder) BuildOptions(o interface{}) error { } if c.Version == "" { - c.Version = "v1.8.4" + c.Version = "v1.9.4" } version, _ := semver.ParseTolerant(c.Version) diff --git a/upup/models/bindata.go b/upup/models/bindata.go index 01e9887dda..8ff72be3a8 100644 --- a/upup/models/bindata.go +++ b/upup/models/bindata.go @@ -28,6 +28,7 @@ // upup/models/cloudup/resources/addons/networking.amazon-vpc-routed-eni/k8s-1.12.yaml.template // upup/models/cloudup/resources/addons/networking.amazon-vpc-routed-eni/k8s-1.16.yaml.template // upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.12-v1.8.yaml.template +// upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.12-v1.9.yaml.template // upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.12.yaml.template // upup/models/cloudup/resources/addons/networking.flannel/k8s-1.12.yaml.template // upup/models/cloudup/resources/addons/networking.kope.io/k8s-1.12.yaml @@ -31302,6 +31303,999 @@ func cloudupResourcesAddonsNetworkingCiliumIoK8s112V18YamlTemplate() (*asset, er return a, nil } +var _cloudupResourcesAddonsNetworkingCiliumIoK8s112V19YamlTemplate = []byte(`{{ with .Networking.Cilium }} +{{- if CiliumSecret }} +apiVersion: v1 +kind: Secret +metadata: + name: cilium-ipsec-keys + namespace: kube-system +stringData: + {{ CiliumSecret }} +--- +{{- end }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cilium + namespace: kube-system + labels: + role.kubernetes.io/networking: "1" +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cilium-operator + namespace: kube-system + labels: + role.kubernetes.io/networking: "1" +{{ if WithDefaultBool .Hubble.Enabled false }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + role.kubernetes.io/networking: "1" + name: hubble-relay + namespace: kube-system +{{ end 
}} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cilium-config + namespace: kube-system + labels: + role.kubernetes.io/networking: "1" +data: + +{{- if .EtcdManaged }} + kvstore: etcd + kvstore-opt: '{"etcd.config": "/var/lib/etcd-config/etcd.config"}' + + etcd-config: |- + --- + endpoints: + - https://{{ $.MasterInternalName }}:4003 + + trusted-ca-file: '/var/lib/etcd-secrets/etcd-ca.crt' + key-file: '/var/lib/etcd-secrets/etcd-client-cilium.key' + cert-file: '/var/lib/etcd-secrets/etcd-client-cilium.crt' +{{ end }} + + # Identity allocation mode selects how identities are shared between cilium + # nodes by setting how they are stored. The options are "crd" or "kvstore". + # - "crd" stores identities in kubernetes as CRDs (custom resource definition). + # These can be queried with: + # kubectl get ciliumid + # - "kvstore" stores identities in a kvstore, etcd or consul, that is + # configured below. Cilium versions before 1.6 supported only the kvstore + # backend. Upgrades from these older cilium versions should continue using + # the kvstore by commenting out the identity-allocation-mode below, or + # setting it to "kvstore". + identity-allocation-mode: crd + # If you want to run cilium in debug mode change this value to true + debug: "{{ .Debug }}" + {{ if .EnablePrometheusMetrics }} + # If you want metrics enabled in all of your Cilium agents, set the port for + # which the Cilium agents will have their metrics exposed. + # This option deprecates the "prometheus-serve-addr" in the + # "cilium-metrics-config" ConfigMap + # NOTE that this will open the port on ALL nodes where Cilium pods are + # scheduled. + prometheus-serve-addr: ":{{ .AgentPrometheusPort }}" + operator-prometheus-serve-addr: ":6942" + enable-metrics: "true" + {{ end }} + {{ if .EnableEncryption }} + enable-ipsec: "true" + ipsec-key-file: /etc/ipsec/keys + {{ end }} + # Enable IPv4 addressing. If enabled, all endpoints are allocated an IPv4 + # address. + enable-ipv4: "true" + # Enable IPv6 addressing. If enabled, all endpoints are allocated an IPv6 + # address. + enable-ipv6: "false" + # If you want cilium monitor to aggregate tracing for packets, set this level + # to "low", "medium", or "maximum". The higher the level, the less packets + # that will be seen in monitor output. + monitor-aggregation: "{{ .MonitorAggregation }}" + # ct-global-max-entries-* specifies the maximum number of connections + # supported across all endpoints, split by protocol: tcp or other. One pair + # of maps uses these values for IPv4 connections, and another pair of maps + # use these values for IPv6 connections. + # + # If these values are modified, then during the next Cilium startup the + # tracking of ongoing connections may be disrupted. This may lead to brief + # policy drops or a change in loadbalancing decisions for a connection. + # + # For users upgrading from Cilium 1.2 or earlier, to minimize disruption + # during the upgrade process, comment out these options. + bpf-ct-global-tcp-max: "{{ .BPFCTGlobalTCPMax }}" + bpf-ct-global-any-max: "{{ .BPFCTGlobalAnyMax }}" + + # Pre-allocation of map entries allows per-packet latency to be reduced, at + # the expense of up-front memory allocation for the entries in the maps. The + # default value below will minimize memory usage in the default installation; + # users who are sensitive to latency may consider setting this to "true". + # + # This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore + # this option and behave as though it is set to "true". 
+ # + # If this value is modified, then during the next Cilium startup the restore + # of existing endpoints and tracking of ongoing connections may be disrupted. + # This may lead to policy drops or a change in loadbalancing decisions for a + # connection for some time. Endpoints may need to be recreated to restore + # connectivity. + # + # If this option is set to "false" during an upgrade from 1.3 or earlier to + # 1.4 or later, then it may cause one-time disruptions during the upgrade. + preallocate-bpf-maps: "{{- if .PreallocateBPFMaps -}}true{{- else -}}false{{- end -}}" + # Regular expression matching compatible Istio sidecar istio-proxy + # container image names + sidecar-istio-proxy-image: "{{ .SidecarIstioProxyImage }}" + # Encapsulation mode for communication between nodes + # Possible values: + # - disabled + # - vxlan (default) + # - geneve + tunnel: "{{ .Tunnel }}" + + # Name of the cluster. Only relevant when building a mesh of clusters. + cluster-name: "{{ .ClusterName }}" + + # DNS response code for rejecting DNS requests, + # available options are "nameError" and "refused" + tofqdns-dns-reject-response-code: "{{ .ToFqdnsDNSRejectResponseCode }}" + # This option is disabled by default starting from version 1.4.x in favor + # of a more powerful DNS proxy-based implementation, see [0] for details. + # Enable this option if you want to use FQDN policies but do not want to use + # the DNS proxy. + # + # To ease upgrade, users may opt to set this option to "true". + # Otherwise please refer to the Upgrade Guide [1] which explains how to + # prepare policy rules for upgrade. + # + # [0] http://docs.cilium.io/en/stable/policy/language/#dns-based + # [1] http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action + tofqdns-enable-poller: "{{- if .ToFqdnsEnablePoller -}}true{{- else -}}false{{- end -}}" + # wait-bpf-mount makes init container wait until bpf filesystem is mounted + wait-bpf-mount: "false" + # Enable fetching of container-runtime specific metadata + # + # By default, the Kubernetes pod and namespace labels are retrieved and + # associated with endpoints for identification purposes. By integrating + # with the container runtime, container runtime specific labels can be + # retrieved, such labels will be prefixed with container: + # + # CAUTION: The container runtime labels can include information such as pod + # annotations which may result in each pod being associated a unique set of + # labels which can result in excessive security identities being allocated. + # Please review the labels filter when enabling container runtime labels. + # + # Supported values: + # - containerd + # - crio + # - docker + # - none + # - auto (automatically detect the container runtime) + # + container-runtime: "{{ .ContainerRuntimeLabels }}" + masquerade: "{{- if .DisableMasquerade -}}false{{- else -}}true{{- end -}}" + install-iptables-rules: "{{- if .IPTablesRulesNoinstall -}}false{{- else -}}true{{- end -}}" + auto-direct-node-routes: "{{ .AutoDirectNodeRoutes }}" + enable-node-port: "{{ .EnableNodePort }}" + kube-proxy-replacement: "{{- if .EnableNodePort -}}strict{{- else -}}partial{{- end -}}" + enable-remote-node-identity: "{{ .EnableRemoteNodeIdentity -}}" + {{ with .Ipam }} + ipam: {{ . }} + {{ if eq . "eni" }} + enable-endpoint-routes: "true" + auto-create-cilium-node-resource: "true" + blacklist-conflicting-routes: "false" + {{ end }} + {{ end }} + + {{ if WithDefaultBool .Hubble.Enabled false }} + # Enable Hubble gRPC service. 
+ enable-hubble: "true" + # UNIX domain socket for Hubble server to listen to. + hubble-socket-path: "/var/run/cilium/hubble.sock" + {{ if .Hubble.Metrics }} + hubble-metrics-server: ":9091" + hubble-metrics: + {{- range .Hubble.Metrics }} + {{ . }} + {{- end }} + {{ end }} + {{ end }} + +{{ if WithDefaultBool .Hubble.Enabled false }} +--- +# Source: cilium/templates/hubble-relay-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: hubble-relay-config + namespace: kube-system +data: + config.yaml: | + peer-service: unix:///var/run/cilium/hubble.sock + listen-address: :4245 + dial-timeout: + retry-timeout: + sort-buffer-len-max: + sort-buffer-drain-timeout: + disable-client-tls: true + disable-server-tls: true +{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cilium + labels: + role.kubernetes.io/networking: "1" +rules: +- apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - get + - list + - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - namespaces + - services + - nodes + - endpoints + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - pods + - pods/finalizers + verbs: + - get + - list + - watch + - update + - delete +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - update +- apiGroups: + - "" + resources: + - nodes + - nodes/status + verbs: + - patch +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - create + - list + - watch + - update + - get +- apiGroups: + - cilium.io + resources: + - ciliumnetworkpolicies + - ciliumnetworkpolicies/status + - ciliumnetworkpolicies/finalizers + - ciliumclusterwidenetworkpolicies + - ciliumclusterwidenetworkpolicies/status + - ciliumclusterwidenetworkpolicies/finalizers + - ciliumendpoints + - ciliumendpoints/status + - ciliumendpoints/finalizers + - ciliumnodes + - ciliumnodes/status + - ciliumnodes/finalizers + - ciliumidentities + - ciliumidentities/finalizers + - ciliumlocalredirectpolicies + - ciliumlocalredirectpolicies/status + - ciliumlocalredirectpolicies/finalizers + verbs: + - '*' +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cilium-operator + labels: + role.kubernetes.io/networking: "1" +rules: +- apiGroups: + - "" + resources: + # to automatically delete [core|kube]dns pods so that are starting to being + # managed by Cilium + - pods + verbs: + - get + - list + - watch + - delete +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + # to perform the translation of a CNP that contains ` + "`" + `ToGroup` + "`" + ` to its endpoints + - services + - endpoints + # to check apiserver connectivity + - namespaces + verbs: + - get + - list + - watch +- apiGroups: + - cilium.io + resources: + - ciliumnetworkpolicies + - ciliumnetworkpolicies/status + - ciliumnetworkpolicies/finalizers + - ciliumclusterwidenetworkpolicies + - ciliumclusterwidenetworkpolicies/status + - ciliumclusterwidenetworkpolicies/finalizers + - ciliumendpoints + - ciliumendpoints/status + - ciliumendpoints/finalizers + - ciliumnodes + - ciliumnodes/status + - ciliumnodes/finalizers + - ciliumidentities + - ciliumidentities/status + - ciliumidentities/finalizers + - ciliumlocalredirectpolicies + - ciliumlocalredirectpolicies/status + - ciliumlocalredirectpolicies/finalizers + 
verbs: + - '*' +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - create + - get + - list + - update + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update +{{ if WithDefaultBool .Hubble.Enabled false }} +--- +# Source: cilium/templates/hubble-relay-clusterrole.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hubble-relay +rules: + - apiGroups: + - "" + resources: + - componentstatuses + - endpoints + - namespaces + - nodes + - pods + - services + verbs: + - get + - list + - watch +{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cilium + labels: + role.kubernetes.io/networking: "1" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cilium +subjects: +- kind: ServiceAccount + name: cilium + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cilium-operator + labels: + role.kubernetes.io/networking: "1" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cilium-operator +subjects: +- kind: ServiceAccount + name: cilium-operator + namespace: kube-system +{{ if WithDefaultBool .Hubble.Enabled false }} +--- +# Source: cilium/templates/hubble-relay-clusterrolebinding.yaml +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hubble-relay +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: hubble-relay +subjects: +- kind: ServiceAccount + namespace: kube-system + name: hubble-relay +--- +# Source: cilium/templates/hubble-relay-service.yaml +kind: Service +apiVersion: v1 +metadata: + name: hubble-relay + namespace: kube-system + labels: + k8s-app: hubble-relay +spec: + type: ClusterIP + selector: + k8s-app: hubble-relay + ports: + - protocol: TCP + port: 80 + targetPort: 4245 +{{ end }} +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: cilium + kubernetes.io/cluster-service: "true" + role.kubernetes.io/networking: "1" + name: cilium + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: cilium + kubernetes.io/cluster-service: "true" + updateStrategy: + type: OnDelete + template: + metadata: + annotations: + # This annotation plus the CriticalAddonsOnly toleration makes + # cilium to be a critical pod in the cluster, which ensures cilium + # gets priority scheduling. + # https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/ + scheduler.alpha.kubernetes.io/critical-pod: "" + labels: + k8s-app: cilium + kubernetes.io/cluster-service: "true" + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: k8s-app + operator: In + values: + - cilium + topologyKey: kubernetes.io/hostname + containers: + - args: + - --config-dir=/tmp/cilium/config-map + command: + - cilium-agent + livenessProbe: + httpGet: + host: '127.0.0.1' + path: /healthz + port: 9876 + scheme: HTTP + httpHeaders: + - name: "brief" + value: "true" + failureThreshold: 10 + # The initial delay for the liveness probe is intentionally large to + # avoid an endless kill & restart cycle if in the event that the initial + # bootstrapping takes longer than expected. 
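+          # With failureThreshold: 10 and periodSeconds: 30, the agent is
+          # allowed roughly five more minutes of failed probes after the
+          # initial delay before the kubelet restarts it.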
+ initialDelaySeconds: 120 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + httpGet: + host: '127.0.0.1' + path: /healthz + port: 9876 + scheme: HTTP + httpHeaders: + - name: "brief" + value: "true" + failureThreshold: 3 + initialDelaySeconds: 5 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 5 + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CILIUM_FLANNEL_MASTER_DEVICE + valueFrom: + configMapKeyRef: + key: flannel-master-device + name: cilium-config + optional: true + - name: CILIUM_FLANNEL_UNINSTALL_ON_EXIT + valueFrom: + configMapKeyRef: + key: flannel-uninstall-on-exit + name: cilium-config + optional: true + - name: CILIUM_CLUSTERMESH_CONFIG + value: /var/lib/cilium/clustermesh/ + - name: CILIUM_CNI_CHAINING_MODE + valueFrom: + configMapKeyRef: + key: cni-chaining-mode + name: cilium-config + optional: true + - name: CILIUM_CUSTOM_CNI_CONF + valueFrom: + configMapKeyRef: + key: custom-cni-conf + name: cilium-config + optional: true + - name: KUBERNETES_SERVICE_HOST + value: "{{ $.MasterInternalName }}" + - name: KUBERNETES_SERVICE_PORT + value: "443" + {{ with .EnablePolicy }} + - name: CILIUM_ENABLE_POLICY + value: {{ . }} + {{ end }} + image: "docker.io/cilium/cilium:{{ .Version }}" + imagePullPolicy: IfNotPresent + lifecycle: + postStart: + exec: + command: + - /cni-install.sh + preStop: + exec: + command: + - /cni-uninstall.sh + name: cilium-agent + {{ if or .EnablePrometheusMetrics .Hubble.Metrics }} + ports: + {{ if .EnablePrometheusMetrics }} + - containerPort: {{ .AgentPrometheusPort }} + name: prometheus + protocol: TCP + {{ end }} + {{- if .Hubble.Metrics }} + - containerPort: 9091 + hostPort: 9091 + name: hubble-metrics + protocol: TCP + {{- end }} + {{ end }} + + securityContext: + capabilities: + add: + - NET_ADMIN + - SYS_MODULE + privileged: true + volumeMounts: + - mountPath: /sys/fs/bpf + name: bpf-maps + - mountPath: /var/run/cilium + name: cilium-run + - mountPath: /host/opt/cni/bin + name: cni-path + - mountPath: /host/etc/cni/net.d + name: etc-cni-netd +{{ if .EtcdManaged }} + - mountPath: /var/lib/etcd-config + name: etcd-config-path + readOnly: true + - mountPath: /var/lib/etcd-secrets + name: etcd-secrets + readOnly: true +{{ end }} + - mountPath: /var/lib/cilium/clustermesh + name: clustermesh-secrets + readOnly: true + - mountPath: /tmp/cilium/config-map + name: cilium-config-path + readOnly: true + # Needed to be able to load kernel modules + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock +{{ if CiliumSecret }} + - mountPath: /etc/ipsec + name: cilium-ipsec-secrets +{{ end }} + hostNetwork: true + initContainers: + - command: + - /init-container.sh + env: + - name: CILIUM_ALL_STATE + valueFrom: + configMapKeyRef: + key: clean-cilium-state + name: cilium-config + optional: true + - name: CILIUM_BPF_STATE + valueFrom: + configMapKeyRef: + key: clean-cilium-bpf-state + name: cilium-config + optional: true + - name: CILIUM_WAIT_BPF_MOUNT + valueFrom: + configMapKeyRef: + key: wait-bpf-mount + name: cilium-config + optional: true + image: "docker.io/cilium/cilium:{{ .Version }}" + imagePullPolicy: IfNotPresent + name: clean-cilium-state + securityContext: + capabilities: + add: + - NET_ADMIN + privileged: true + volumeMounts: + - mountPath: /sys/fs/bpf + name: bpf-maps + 
mountPropagation: HostToContainer + - mountPath: /var/run/cilium + name: cilium-run + resources: + requests: + cpu: 100m + memory: 100Mi + limits: + memory: 100Mi + restartPolicy: Always + priorityClassName: system-node-critical + serviceAccount: cilium + serviceAccountName: cilium + terminationGracePeriodSeconds: 1 + tolerations: + - operator: Exists + volumes: + # To keep state between restarts / upgrades + - hostPath: + path: /var/run/cilium + type: DirectoryOrCreate + name: cilium-run + # To keep state between restarts / upgrades for bpf maps + - hostPath: + path: /sys/fs/bpf + type: DirectoryOrCreate + name: bpf-maps + # To install cilium cni plugin in the host + - hostPath: + path: /opt/cni/bin + type: DirectoryOrCreate + name: cni-path + # To install cilium cni configuration in the host + - hostPath: + path: /etc/cni/net.d + type: DirectoryOrCreate + name: etc-cni-netd + # To be able to load kernel modules + - hostPath: + path: /lib/modules + name: lib-modules + # To access iptables concurrently with other processes (e.g. kube-proxy) + - hostPath: + path: /run/xtables.lock + type: FileOrCreate + name: xtables-lock + # To read the clustermesh configuration +{{- if .EtcdManaged }} + # To read the etcd config stored in config maps + - configMap: + defaultMode: 420 + items: + - key: etcd-config + path: etcd.config + name: cilium-config + name: etcd-config-path + # To read the Cilium etcd secrets in case the user might want to use TLS + - name: etcd-secrets + hostPath: + path: /etc/kubernetes/pki/cilium + type: Directory +{{- end }} + - name: clustermesh-secrets + secret: + defaultMode: 420 + optional: true + secretName: cilium-clustermesh + # To read the configuration from the config map + - configMap: + name: cilium-config + name: cilium-config-path +{{ if CiliumSecret }} + - name: cilium-ipsec-secrets + secret: + secretName: cilium-ipsec-keys +{{ end }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + io.cilium/app: operator + name: cilium-operator + role.kubernetes.io/networking: "1" + name: cilium-operator + namespace: kube-system +spec: + replicas: 1 + selector: + matchLabels: + io.cilium/app: operator + name: cilium-operator + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + io.cilium/app: operator + name: cilium-operator + spec: + containers: + - args: + - --config-dir=/tmp/cilium/config-map + - --debug=$(CILIUM_DEBUG) + command: + - cilium-operator-generic + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CILIUM_DEBUG + valueFrom: + configMapKeyRef: + key: debug + name: cilium-config + optional: true + - name: KUBERNETES_SERVICE_HOST + value: "{{ $.MasterInternalName }}" + - name: KUBERNETES_SERVICE_PORT + value: "443" + image: "docker.io/cilium/operator-generic:{{ .Version }}" + imagePullPolicy: IfNotPresent + name: cilium-operator + {{ if .EnablePrometheusMetrics }} + ports: + - containerPort: 6942 + hostPort: 6942 + name: prometheus + protocol: TCP + {{ end }} + livenessProbe: + httpGet: + host: '127.0.0.1' + path: /healthz + port: 9234 + scheme: HTTP + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 3 + volumeMounts: + - mountPath: /tmp/cilium/config-map + name: cilium-config-path + readOnly: true +{{- if .EtcdManaged }} + - mountPath: /var/lib/etcd-config + name: etcd-config-path + readOnly: true + - mountPath: 
/var/lib/etcd-secrets + name: etcd-secrets + readOnly: true +{{- end }} + hostNetwork: true + restartPolicy: Always + priorityClassName: system-cluster-critical + serviceAccount: cilium-operator + serviceAccountName: cilium-operator + tolerations: + - operator: Exists + volumes: + # To read the configuration from the config map + - configMap: + name: cilium-config + name: cilium-config-path +{{- if .EtcdManaged }} + # To read the etcd config stored in config maps + - configMap: + defaultMode: 420 + items: + - key: etcd-config + path: etcd.config + name: cilium-config + name: etcd-config-path + # To read the k8s etcd secrets in case the user might want to use TLS + - name: etcd-secrets + hostPath: + path: /etc/kubernetes/pki/cilium + type: Directory +{{- end }} + nodeSelector: + node-role.kubernetes.io/master: "" +{{ if WithDefaultBool .Hubble.Enabled false }} +--- +# Source: cilium/charts/hubble-relay/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: hubble-relay + labels: + role.kubernetes.io/networking: "1" + k8s-app: hubble-relay + namespace: kube-system +spec: + replicas: 1 + selector: + matchLabels: + k8s-app: hubble-relay + strategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + k8s-app: hubble-relay + spec: + affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: "k8s-app" + operator: In + values: + - cilium + topologyKey: "kubernetes.io/hostname" + containers: + - name: hubble-relay + image: "docker.io/cilium/hubble-relay:{{ .Networking.Cilium.Version }}" + imagePullPolicy: IfNotPresent + command: + - hubble-relay + args: + - "serve" + - "--peer-service=unix:///var/run/cilium/hubble.sock" + - "--listen-address=:4245" + ports: + - name: grpc + containerPort: 4245 + readinessProbe: + tcpSocket: + port: grpc + livenessProbe: + tcpSocket: + port: grpc + volumeMounts: + - mountPath: /var/run/cilium + name: hubble-sock-dir + readOnly: true + - mountPath: /etc/hubble-relay + name: config + readOnly: true + restartPolicy: Always + serviceAccount: hubble-relay + serviceAccountName: hubble-relay + terminationGracePeriodSeconds: 0 + tolerations: + - operator: Exists + volumes: + - hostPath: + path: /var/run/cilium + type: Directory + name: hubble-sock-dir + - configMap: + name: hubble-relay-config + items: + - key: config.yaml + path: config.yaml + name: config +{{ end }} +{{ end }}`) + +func cloudupResourcesAddonsNetworkingCiliumIoK8s112V19YamlTemplateBytes() ([]byte, error) { + return _cloudupResourcesAddonsNetworkingCiliumIoK8s112V19YamlTemplate, nil +} + +func cloudupResourcesAddonsNetworkingCiliumIoK8s112V19YamlTemplate() (*asset, error) { + bytes, err := cloudupResourcesAddonsNetworkingCiliumIoK8s112V19YamlTemplateBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "cloudup/resources/addons/networking.cilium.io/k8s-1.12-v1.9.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + var _cloudupResourcesAddonsNetworkingCiliumIoK8s112YamlTemplate = []byte(`{{- if CiliumSecret }} apiVersion: v1 kind: Secret @@ -42984,6 +43978,7 @@ var _bindata = map[string]func() (*asset, error){ "cloudup/resources/addons/networking.amazon-vpc-routed-eni/k8s-1.12.yaml.template": cloudupResourcesAddonsNetworkingAmazonVpcRoutedEniK8s112YamlTemplate, "cloudup/resources/addons/networking.amazon-vpc-routed-eni/k8s-1.16.yaml.template": 
cloudupResourcesAddonsNetworkingAmazonVpcRoutedEniK8s116YamlTemplate, "cloudup/resources/addons/networking.cilium.io/k8s-1.12-v1.8.yaml.template": cloudupResourcesAddonsNetworkingCiliumIoK8s112V18YamlTemplate, + "cloudup/resources/addons/networking.cilium.io/k8s-1.12-v1.9.yaml.template": cloudupResourcesAddonsNetworkingCiliumIoK8s112V19YamlTemplate, "cloudup/resources/addons/networking.cilium.io/k8s-1.12.yaml.template": cloudupResourcesAddonsNetworkingCiliumIoK8s112YamlTemplate, "cloudup/resources/addons/networking.flannel/k8s-1.12.yaml.template": cloudupResourcesAddonsNetworkingFlannelK8s112YamlTemplate, "cloudup/resources/addons/networking.kope.io/k8s-1.12.yaml": cloudupResourcesAddonsNetworkingKopeIoK8s112Yaml, @@ -43118,6 +44113,7 @@ var _bintree = &bintree{nil, map[string]*bintree{ }}, "networking.cilium.io": {nil, map[string]*bintree{ "k8s-1.12-v1.8.yaml.template": {cloudupResourcesAddonsNetworkingCiliumIoK8s112V18YamlTemplate, map[string]*bintree{}}, + "k8s-1.12-v1.9.yaml.template": {cloudupResourcesAddonsNetworkingCiliumIoK8s112V19YamlTemplate, map[string]*bintree{}}, "k8s-1.12.yaml.template": {cloudupResourcesAddonsNetworkingCiliumIoK8s112YamlTemplate, map[string]*bintree{}}, }}, "networking.flannel": {nil, map[string]*bintree{ diff --git a/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.12-v1.9.yaml.template b/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.12-v1.9.yaml.template new file mode 100644 index 0000000000..da3aa4c9a1 --- /dev/null +++ b/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.12-v1.9.yaml.template @@ -0,0 +1,977 @@ +{{ with .Networking.Cilium }} +{{- if CiliumSecret }} +apiVersion: v1 +kind: Secret +metadata: + name: cilium-ipsec-keys + namespace: kube-system +stringData: + {{ CiliumSecret }} +--- +{{- end }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cilium + namespace: kube-system + labels: + role.kubernetes.io/networking: "1" +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cilium-operator + namespace: kube-system + labels: + role.kubernetes.io/networking: "1" +{{ if WithDefaultBool .Hubble.Enabled false }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + role.kubernetes.io/networking: "1" + name: hubble-relay + namespace: kube-system +{{ end }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cilium-config + namespace: kube-system + labels: + role.kubernetes.io/networking: "1" +data: + +{{- if .EtcdManaged }} + kvstore: etcd + kvstore-opt: '{"etcd.config": "/var/lib/etcd-config/etcd.config"}' + + etcd-config: |- + --- + endpoints: + - https://{{ $.MasterInternalName }}:4003 + + trusted-ca-file: '/var/lib/etcd-secrets/etcd-ca.crt' + key-file: '/var/lib/etcd-secrets/etcd-client-cilium.key' + cert-file: '/var/lib/etcd-secrets/etcd-client-cilium.crt' +{{ end }} + + # Identity allocation mode selects how identities are shared between cilium + # nodes by setting how they are stored. The options are "crd" or "kvstore". + # - "crd" stores identities in kubernetes as CRDs (custom resource definition). + # These can be queried with: + # kubectl get ciliumid + # - "kvstore" stores identities in a kvstore, etcd or consul, that is + # configured below. Cilium versions before 1.6 supported only the kvstore + # backend. Upgrades from these older cilium versions should continue using + # the kvstore by commenting out the identity-allocation-mode below, or + # setting it to "kvstore". 
+ identity-allocation-mode: crd + # If you want to run cilium in debug mode change this value to true + debug: "{{ .Debug }}" + {{ if .EnablePrometheusMetrics }} + # If you want metrics enabled in all of your Cilium agents, set the port for + # which the Cilium agents will have their metrics exposed. + # This option deprecates the "prometheus-serve-addr" in the + # "cilium-metrics-config" ConfigMap + # NOTE that this will open the port on ALL nodes where Cilium pods are + # scheduled. + prometheus-serve-addr: ":{{ .AgentPrometheusPort }}" + operator-prometheus-serve-addr: ":6942" + enable-metrics: "true" + {{ end }} + {{ if .EnableEncryption }} + enable-ipsec: "true" + ipsec-key-file: /etc/ipsec/keys + {{ end }} + # Enable IPv4 addressing. If enabled, all endpoints are allocated an IPv4 + # address. + enable-ipv4: "true" + # Enable IPv6 addressing. If enabled, all endpoints are allocated an IPv6 + # address. + enable-ipv6: "false" + # If you want cilium monitor to aggregate tracing for packets, set this level + # to "low", "medium", or "maximum". The higher the level, the less packets + # that will be seen in monitor output. + monitor-aggregation: "{{ .MonitorAggregation }}" + # ct-global-max-entries-* specifies the maximum number of connections + # supported across all endpoints, split by protocol: tcp or other. One pair + # of maps uses these values for IPv4 connections, and another pair of maps + # use these values for IPv6 connections. + # + # If these values are modified, then during the next Cilium startup the + # tracking of ongoing connections may be disrupted. This may lead to brief + # policy drops or a change in loadbalancing decisions for a connection. + # + # For users upgrading from Cilium 1.2 or earlier, to minimize disruption + # during the upgrade process, comment out these options. + bpf-ct-global-tcp-max: "{{ .BPFCTGlobalTCPMax }}" + bpf-ct-global-any-max: "{{ .BPFCTGlobalAnyMax }}" + + # Pre-allocation of map entries allows per-packet latency to be reduced, at + # the expense of up-front memory allocation for the entries in the maps. The + # default value below will minimize memory usage in the default installation; + # users who are sensitive to latency may consider setting this to "true". + # + # This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore + # this option and behave as though it is set to "true". + # + # If this value is modified, then during the next Cilium startup the restore + # of existing endpoints and tracking of ongoing connections may be disrupted. + # This may lead to policy drops or a change in loadbalancing decisions for a + # connection for some time. Endpoints may need to be recreated to restore + # connectivity. + # + # If this option is set to "false" during an upgrade from 1.3 or earlier to + # 1.4 or later, then it may cause one-time disruptions during the upgrade. + preallocate-bpf-maps: "{{- if .PreallocateBPFMaps -}}true{{- else -}}false{{- end -}}" + # Regular expression matching compatible Istio sidecar istio-proxy + # container image names + sidecar-istio-proxy-image: "{{ .SidecarIstioProxyImage }}" + # Encapsulation mode for communication between nodes + # Possible values: + # - disabled + # - vxlan (default) + # - geneve + tunnel: "{{ .Tunnel }}" + + # Name of the cluster. Only relevant when building a mesh of clusters. 
+ cluster-name: "{{ .ClusterName }}" + + # DNS response code for rejecting DNS requests, + # available options are "nameError" and "refused" + tofqdns-dns-reject-response-code: "{{ .ToFqdnsDNSRejectResponseCode }}" + # This option is disabled by default starting from version 1.4.x in favor + # of a more powerful DNS proxy-based implementation, see [0] for details. + # Enable this option if you want to use FQDN policies but do not want to use + # the DNS proxy. + # + # To ease upgrade, users may opt to set this option to "true". + # Otherwise please refer to the Upgrade Guide [1] which explains how to + # prepare policy rules for upgrade. + # + # [0] http://docs.cilium.io/en/stable/policy/language/#dns-based + # [1] http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action + tofqdns-enable-poller: "{{- if .ToFqdnsEnablePoller -}}true{{- else -}}false{{- end -}}" + # wait-bpf-mount makes init container wait until bpf filesystem is mounted + wait-bpf-mount: "false" + # Enable fetching of container-runtime specific metadata + # + # By default, the Kubernetes pod and namespace labels are retrieved and + # associated with endpoints for identification purposes. By integrating + # with the container runtime, container runtime specific labels can be + # retrieved, such labels will be prefixed with container: + # + # CAUTION: The container runtime labels can include information such as pod + # annotations which may result in each pod being associated a unique set of + # labels which can result in excessive security identities being allocated. + # Please review the labels filter when enabling container runtime labels. + # + # Supported values: + # - containerd + # - crio + # - docker + # - none + # - auto (automatically detect the container runtime) + # + container-runtime: "{{ .ContainerRuntimeLabels }}" + masquerade: "{{- if .DisableMasquerade -}}false{{- else -}}true{{- end -}}" + install-iptables-rules: "{{- if .IPTablesRulesNoinstall -}}false{{- else -}}true{{- end -}}" + auto-direct-node-routes: "{{ .AutoDirectNodeRoutes }}" + enable-node-port: "{{ .EnableNodePort }}" + kube-proxy-replacement: "{{- if .EnableNodePort -}}strict{{- else -}}partial{{- end -}}" + enable-remote-node-identity: "{{ .EnableRemoteNodeIdentity -}}" + {{ with .Ipam }} + ipam: {{ . }} + {{ if eq . "eni" }} + enable-endpoint-routes: "true" + auto-create-cilium-node-resource: "true" + blacklist-conflicting-routes: "false" + {{ end }} + {{ end }} + + {{ if WithDefaultBool .Hubble.Enabled false }} + # Enable Hubble gRPC service. + enable-hubble: "true" + # UNIX domain socket for Hubble server to listen to. + hubble-socket-path: "/var/run/cilium/hubble.sock" + {{ if .Hubble.Metrics }} + hubble-metrics-server: ":9091" + hubble-metrics: + {{- range .Hubble.Metrics }} + {{ . 
}} + {{- end }} + {{ end }} + {{ end }} + +{{ if WithDefaultBool .Hubble.Enabled false }} +--- +# Source: cilium/templates/hubble-relay-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: hubble-relay-config + namespace: kube-system +data: + config.yaml: | + peer-service: unix:///var/run/cilium/hubble.sock + listen-address: :4245 + dial-timeout: + retry-timeout: + sort-buffer-len-max: + sort-buffer-drain-timeout: + disable-client-tls: true + disable-server-tls: true +{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cilium + labels: + role.kubernetes.io/networking: "1" +rules: +- apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - get + - list + - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - namespaces + - services + - nodes + - endpoints + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - pods + - pods/finalizers + verbs: + - get + - list + - watch + - update + - delete +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - update +- apiGroups: + - "" + resources: + - nodes + - nodes/status + verbs: + - patch +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - create + - list + - watch + - update + - get +- apiGroups: + - cilium.io + resources: + - ciliumnetworkpolicies + - ciliumnetworkpolicies/status + - ciliumnetworkpolicies/finalizers + - ciliumclusterwidenetworkpolicies + - ciliumclusterwidenetworkpolicies/status + - ciliumclusterwidenetworkpolicies/finalizers + - ciliumendpoints + - ciliumendpoints/status + - ciliumendpoints/finalizers + - ciliumnodes + - ciliumnodes/status + - ciliumnodes/finalizers + - ciliumidentities + - ciliumidentities/finalizers + - ciliumlocalredirectpolicies + - ciliumlocalredirectpolicies/status + - ciliumlocalredirectpolicies/finalizers + verbs: + - '*' +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cilium-operator + labels: + role.kubernetes.io/networking: "1" +rules: +- apiGroups: + - "" + resources: + # to automatically delete [core|kube]dns pods so that are starting to being + # managed by Cilium + - pods + verbs: + - get + - list + - watch + - delete +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + # to perform the translation of a CNP that contains `ToGroup` to its endpoints + - services + - endpoints + # to check apiserver connectivity + - namespaces + verbs: + - get + - list + - watch +- apiGroups: + - cilium.io + resources: + - ciliumnetworkpolicies + - ciliumnetworkpolicies/status + - ciliumnetworkpolicies/finalizers + - ciliumclusterwidenetworkpolicies + - ciliumclusterwidenetworkpolicies/status + - ciliumclusterwidenetworkpolicies/finalizers + - ciliumendpoints + - ciliumendpoints/status + - ciliumendpoints/finalizers + - ciliumnodes + - ciliumnodes/status + - ciliumnodes/finalizers + - ciliumidentities + - ciliumidentities/status + - ciliumidentities/finalizers + - ciliumlocalredirectpolicies + - ciliumlocalredirectpolicies/status + - ciliumlocalredirectpolicies/finalizers + verbs: + - '*' +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - create + - get + - list + - update + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update +{{ if 
WithDefaultBool .Hubble.Enabled false }} +--- +# Source: cilium/templates/hubble-relay-clusterrole.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hubble-relay +rules: + - apiGroups: + - "" + resources: + - componentstatuses + - endpoints + - namespaces + - nodes + - pods + - services + verbs: + - get + - list + - watch +{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cilium + labels: + role.kubernetes.io/networking: "1" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cilium +subjects: +- kind: ServiceAccount + name: cilium + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cilium-operator + labels: + role.kubernetes.io/networking: "1" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cilium-operator +subjects: +- kind: ServiceAccount + name: cilium-operator + namespace: kube-system +{{ if WithDefaultBool .Hubble.Enabled false }} +--- +# Source: cilium/templates/hubble-relay-clusterrolebinding.yaml +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hubble-relay +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: hubble-relay +subjects: +- kind: ServiceAccount + namespace: kube-system + name: hubble-relay +--- +# Source: cilium/templates/hubble-relay-service.yaml +kind: Service +apiVersion: v1 +metadata: + name: hubble-relay + namespace: kube-system + labels: + k8s-app: hubble-relay +spec: + type: ClusterIP + selector: + k8s-app: hubble-relay + ports: + - protocol: TCP + port: 80 + targetPort: 4245 +{{ end }} +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: cilium + kubernetes.io/cluster-service: "true" + role.kubernetes.io/networking: "1" + name: cilium + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: cilium + kubernetes.io/cluster-service: "true" + updateStrategy: + type: OnDelete + template: + metadata: + annotations: + # This annotation plus the CriticalAddonsOnly toleration makes + # cilium to be a critical pod in the cluster, which ensures cilium + # gets priority scheduling. + # https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/ + scheduler.alpha.kubernetes.io/critical-pod: "" + labels: + k8s-app: cilium + kubernetes.io/cluster-service: "true" + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: k8s-app + operator: In + values: + - cilium + topologyKey: kubernetes.io/hostname + containers: + - args: + - --config-dir=/tmp/cilium/config-map + command: + - cilium-agent + livenessProbe: + httpGet: + host: '127.0.0.1' + path: /healthz + port: 9876 + scheme: HTTP + httpHeaders: + - name: "brief" + value: "true" + failureThreshold: 10 + # The initial delay for the liveness probe is intentionally large to + # avoid an endless kill & restart cycle if in the event that the initial + # bootstrapping takes longer than expected. 
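+          # With failureThreshold: 10 and periodSeconds: 30, the agent is
+          # allowed roughly five more minutes of failed probes after the
+          # initial delay before the kubelet restarts it.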
+ initialDelaySeconds: 120 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + httpGet: + host: '127.0.0.1' + path: /healthz + port: 9876 + scheme: HTTP + httpHeaders: + - name: "brief" + value: "true" + failureThreshold: 3 + initialDelaySeconds: 5 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 5 + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CILIUM_FLANNEL_MASTER_DEVICE + valueFrom: + configMapKeyRef: + key: flannel-master-device + name: cilium-config + optional: true + - name: CILIUM_FLANNEL_UNINSTALL_ON_EXIT + valueFrom: + configMapKeyRef: + key: flannel-uninstall-on-exit + name: cilium-config + optional: true + - name: CILIUM_CLUSTERMESH_CONFIG + value: /var/lib/cilium/clustermesh/ + - name: CILIUM_CNI_CHAINING_MODE + valueFrom: + configMapKeyRef: + key: cni-chaining-mode + name: cilium-config + optional: true + - name: CILIUM_CUSTOM_CNI_CONF + valueFrom: + configMapKeyRef: + key: custom-cni-conf + name: cilium-config + optional: true + - name: KUBERNETES_SERVICE_HOST + value: "{{ $.MasterInternalName }}" + - name: KUBERNETES_SERVICE_PORT + value: "443" + {{ with .EnablePolicy }} + - name: CILIUM_ENABLE_POLICY + value: {{ . }} + {{ end }} + image: "docker.io/cilium/cilium:{{ .Version }}" + imagePullPolicy: IfNotPresent + lifecycle: + postStart: + exec: + command: + - /cni-install.sh + preStop: + exec: + command: + - /cni-uninstall.sh + name: cilium-agent + {{ if or .EnablePrometheusMetrics .Hubble.Metrics }} + ports: + {{ if .EnablePrometheusMetrics }} + - containerPort: {{ .AgentPrometheusPort }} + name: prometheus + protocol: TCP + {{ end }} + {{- if .Hubble.Metrics }} + - containerPort: 9091 + hostPort: 9091 + name: hubble-metrics + protocol: TCP + {{- end }} + {{ end }} + + securityContext: + capabilities: + add: + - NET_ADMIN + - SYS_MODULE + privileged: true + volumeMounts: + - mountPath: /sys/fs/bpf + name: bpf-maps + - mountPath: /var/run/cilium + name: cilium-run + - mountPath: /host/opt/cni/bin + name: cni-path + - mountPath: /host/etc/cni/net.d + name: etc-cni-netd +{{ if .EtcdManaged }} + - mountPath: /var/lib/etcd-config + name: etcd-config-path + readOnly: true + - mountPath: /var/lib/etcd-secrets + name: etcd-secrets + readOnly: true +{{ end }} + - mountPath: /var/lib/cilium/clustermesh + name: clustermesh-secrets + readOnly: true + - mountPath: /tmp/cilium/config-map + name: cilium-config-path + readOnly: true + # Needed to be able to load kernel modules + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock +{{ if CiliumSecret }} + - mountPath: /etc/ipsec + name: cilium-ipsec-secrets +{{ end }} + hostNetwork: true + initContainers: + - command: + - /init-container.sh + env: + - name: CILIUM_ALL_STATE + valueFrom: + configMapKeyRef: + key: clean-cilium-state + name: cilium-config + optional: true + - name: CILIUM_BPF_STATE + valueFrom: + configMapKeyRef: + key: clean-cilium-bpf-state + name: cilium-config + optional: true + - name: CILIUM_WAIT_BPF_MOUNT + valueFrom: + configMapKeyRef: + key: wait-bpf-mount + name: cilium-config + optional: true + image: "docker.io/cilium/cilium:{{ .Version }}" + imagePullPolicy: IfNotPresent + name: clean-cilium-state + securityContext: + capabilities: + add: + - NET_ADMIN + privileged: true + volumeMounts: + - mountPath: /sys/fs/bpf + name: bpf-maps + 
mountPropagation: HostToContainer + - mountPath: /var/run/cilium + name: cilium-run + resources: + requests: + cpu: 100m + memory: 100Mi + limits: + memory: 100Mi + restartPolicy: Always + priorityClassName: system-node-critical + serviceAccount: cilium + serviceAccountName: cilium + terminationGracePeriodSeconds: 1 + tolerations: + - operator: Exists + volumes: + # To keep state between restarts / upgrades + - hostPath: + path: /var/run/cilium + type: DirectoryOrCreate + name: cilium-run + # To keep state between restarts / upgrades for bpf maps + - hostPath: + path: /sys/fs/bpf + type: DirectoryOrCreate + name: bpf-maps + # To install cilium cni plugin in the host + - hostPath: + path: /opt/cni/bin + type: DirectoryOrCreate + name: cni-path + # To install cilium cni configuration in the host + - hostPath: + path: /etc/cni/net.d + type: DirectoryOrCreate + name: etc-cni-netd + # To be able to load kernel modules + - hostPath: + path: /lib/modules + name: lib-modules + # To access iptables concurrently with other processes (e.g. kube-proxy) + - hostPath: + path: /run/xtables.lock + type: FileOrCreate + name: xtables-lock + # To read the clustermesh configuration +{{- if .EtcdManaged }} + # To read the etcd config stored in config maps + - configMap: + defaultMode: 420 + items: + - key: etcd-config + path: etcd.config + name: cilium-config + name: etcd-config-path + # To read the Cilium etcd secrets in case the user might want to use TLS + - name: etcd-secrets + hostPath: + path: /etc/kubernetes/pki/cilium + type: Directory +{{- end }} + - name: clustermesh-secrets + secret: + defaultMode: 420 + optional: true + secretName: cilium-clustermesh + # To read the configuration from the config map + - configMap: + name: cilium-config + name: cilium-config-path +{{ if CiliumSecret }} + - name: cilium-ipsec-secrets + secret: + secretName: cilium-ipsec-keys +{{ end }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + io.cilium/app: operator + name: cilium-operator + role.kubernetes.io/networking: "1" + name: cilium-operator + namespace: kube-system +spec: + replicas: 1 + selector: + matchLabels: + io.cilium/app: operator + name: cilium-operator + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + io.cilium/app: operator + name: cilium-operator + spec: + containers: + - args: + - --config-dir=/tmp/cilium/config-map + - --debug=$(CILIUM_DEBUG) + command: + - cilium-operator-generic + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CILIUM_DEBUG + valueFrom: + configMapKeyRef: + key: debug + name: cilium-config + optional: true + - name: KUBERNETES_SERVICE_HOST + value: "{{ $.MasterInternalName }}" + - name: KUBERNETES_SERVICE_PORT + value: "443" + image: "docker.io/cilium/operator-generic:{{ .Version }}" + imagePullPolicy: IfNotPresent + name: cilium-operator + {{ if .EnablePrometheusMetrics }} + ports: + - containerPort: 6942 + hostPort: 6942 + name: prometheus + protocol: TCP + {{ end }} + livenessProbe: + httpGet: + host: '127.0.0.1' + path: /healthz + port: 9234 + scheme: HTTP + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 3 + volumeMounts: + - mountPath: /tmp/cilium/config-map + name: cilium-config-path + readOnly: true +{{- if .EtcdManaged }} + - mountPath: /var/lib/etcd-config + name: etcd-config-path + readOnly: true + - mountPath: 
/var/lib/etcd-secrets + name: etcd-secrets + readOnly: true +{{- end }} + hostNetwork: true + restartPolicy: Always + priorityClassName: system-cluster-critical + serviceAccount: cilium-operator + serviceAccountName: cilium-operator + tolerations: + - operator: Exists + volumes: + # To read the configuration from the config map + - configMap: + name: cilium-config + name: cilium-config-path +{{- if .EtcdManaged }} + # To read the etcd config stored in config maps + - configMap: + defaultMode: 420 + items: + - key: etcd-config + path: etcd.config + name: cilium-config + name: etcd-config-path + # To read the k8s etcd secrets in case the user might want to use TLS + - name: etcd-secrets + hostPath: + path: /etc/kubernetes/pki/cilium + type: Directory +{{- end }} + nodeSelector: + node-role.kubernetes.io/master: "" +{{ if WithDefaultBool .Hubble.Enabled false }} +--- +# Source: cilium/charts/hubble-relay/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: hubble-relay + labels: + role.kubernetes.io/networking: "1" + k8s-app: hubble-relay + namespace: kube-system +spec: + replicas: 1 + selector: + matchLabels: + k8s-app: hubble-relay + strategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + k8s-app: hubble-relay + spec: + affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: "k8s-app" + operator: In + values: + - cilium + topologyKey: "kubernetes.io/hostname" + containers: + - name: hubble-relay + image: "docker.io/cilium/hubble-relay:{{ .Networking.Cilium.Version }}" + imagePullPolicy: IfNotPresent + command: + - hubble-relay + args: + - "serve" + - "--peer-service=unix:///var/run/cilium/hubble.sock" + - "--listen-address=:4245" + ports: + - name: grpc + containerPort: 4245 + readinessProbe: + tcpSocket: + port: grpc + livenessProbe: + tcpSocket: + port: grpc + volumeMounts: + - mountPath: /var/run/cilium + name: hubble-sock-dir + readOnly: true + - mountPath: /etc/hubble-relay + name: config + readOnly: true + restartPolicy: Always + serviceAccount: hubble-relay + serviceAccountName: hubble-relay + terminationGracePeriodSeconds: 0 + tolerations: + - operator: Exists + volumes: + - hostPath: + path: /var/run/cilium + type: Directory + name: hubble-sock-dir + - configMap: + name: hubble-relay-config + items: + - key: config.yaml + path: config.yaml + name: config +{{ end }} +{{ end }} \ No newline at end of file diff --git a/upup/pkg/fi/cloudup/bootstrapchannelbuilder/cilium.go b/upup/pkg/fi/cloudup/bootstrapchannelbuilder/cilium.go index a6ef80a300..73a4bea9e9 100644 --- a/upup/pkg/fi/cloudup/bootstrapchannelbuilder/cilium.go +++ b/upup/pkg/fi/cloudup/bootstrapchannelbuilder/cilium.go @@ -27,11 +27,8 @@ func addCiliumAddon(b *BootstrapChannelBuilder, addons *api.Addons) { cilium := b.Cluster.Spec.Networking.Cilium if cilium != nil { ver, _ := semver.ParseTolerant(cilium.Version) - ver.Build = nil - ver.Pre = nil - v8, _ := semver.Parse("1.8.0") key := "networking.cilium.io" - if ver.LT(v8) { + if ver.Minor < 8 { version := "1.7.3-kops.1" { @@ -46,12 +43,27 @@ func addCiliumAddon(b *BootstrapChannelBuilder, addons *api.Addons) { Id: id, }) } - } else { + } else if ver.Minor == 8 { version := "1.8.0-kops.1" { id := "k8s-1.12" location := key + "/" + id + "-v1.8.yaml" + addons.Spec.Addons = append(addons.Spec.Addons, &api.AddonSpec{ + Name: fi.String(key), + Version: fi.String(version), + Selector: networkingSelector(), + Manifest: 
fi.String(location), + Id: id, + NeedsRollingUpdate: "all", + }) + } + } else if ver.Minor == 9 { + version := "1.9.0-kops.1" + { + id := "k8s-1.12" + location := key + "/" + id + "-v1.9.yaml" + addons.Spec.Addons = append(addons.Spec.Addons, &api.AddonSpec{ Name: fi.String(key), Version: fi.String(version), diff --git a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/cilium/manifest.yaml b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/cilium/manifest.yaml index 714121d13f..f7b11e660c 100644 --- a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/cilium/manifest.yaml +++ b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/cilium/manifest.yaml @@ -69,10 +69,10 @@ spec: k8s-addon: storage-aws.addons.k8s.io version: 1.17.0 - id: k8s-1.12 - manifest: networking.cilium.io/k8s-1.12-v1.8.yaml - manifestHash: 67853b52a456f1b701ada61ecab28c8c1f615183 + manifest: networking.cilium.io/k8s-1.12-v1.9.yaml + manifestHash: a0358ab8f634e6ce76b1c767c5da573e3645f3cc name: networking.cilium.io needsRollingUpdate: all selector: role.kubernetes.io/networking: "1" - version: 1.8.0-kops.1 + version: 1.9.0-kops.1
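--
Reviewer note (not part of the patch): the new gating in addCiliumAddon switches on the
parsed minor version instead of comparing against a parsed "1.8.0" semver. Below is a
self-contained sketch of the resulting manifest selection. manifestFor is a hypothetical
helper written only for illustration, and the location used by the pre-1.8 branch is
assumed from unchanged context that the hunk elides.

package main

import (
	"fmt"

	"github.com/blang/semver"
)

// manifestFor mirrors the branch structure this patch adds to
// upup/pkg/fi/cloudup/bootstrapchannelbuilder/cilium.go.
func manifestFor(version string) string {
	// ParseTolerant accepts a leading "v", matching how the version is
	// stored in the cluster spec (e.g. "v1.9.4").
	ver, _ := semver.ParseTolerant(version)
	key := "networking.cilium.io"
	id := "k8s-1.12"
	switch {
	case ver.Minor < 8:
		return key + "/" + id + ".yaml" // assumed legacy location (elided context)
	case ver.Minor == 8:
		return key + "/" + id + "-v1.8.yaml"
	case ver.Minor == 9:
		return key + "/" + id + "-v1.9.yaml"
	}
	// Unreachable for accepted specs: validateNetworkingCilium now rejects
	// anything outside 1.6 through 1.9.
	return ""
}

func main() {
	for _, v := range []string{"v1.7.5", "v1.8.4", "v1.9.4"} {
		fmt.Printf("%s -> %s\n", v, manifestFor(v))
	}
}

With the default bumped to v1.9.4 in pkg/model/components/cilium.go, new clusters take the
ver.Minor == 9 branch and receive the k8s-1.12-v1.9.yaml manifest registered above, which is
why the expected test manifest now pins version 1.9.0-kops.1 with a new manifestHash.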