diff --git a/pkg/apis/kops/validation/validation.go b/pkg/apis/kops/validation/validation.go
index ea0bc0b7d9..fdf3873f9d 100644
--- a/pkg/apis/kops/validation/validation.go
+++ b/pkg/apis/kops/validation/validation.go
@@ -942,12 +942,8 @@ func validateNetworkingCilium(cluster *kops.Cluster, v *kops.CiliumNetworkingSpe
 		allErrs = append(allErrs, field.Invalid(versionFld, v.Version, "Could not parse as semantic version"))
 	}
 
-	if !(version.Minor >= 8 && version.Minor <= 11) {
-		allErrs = append(allErrs, field.Invalid(versionFld, v.Version, "Only versions 1.8 through 1.11 are supported"))
-	}
-
-	if version.Minor < 10 && c.IsIPv6Only() {
-		allErrs = append(allErrs, field.Invalid(versionFld, v.Version, "kOps only supports IPv6 on version 1.10 or later"))
+	if version.Minor != 11 || version.Patch < 5 {
+		allErrs = append(allErrs, field.Invalid(versionFld, v.Version, "Only version 1.11 with patch version 5 or higher is supported"))
 	}
 
 	if v.Hubble != nil && fi.BoolValue(v.Hubble.Enabled) {
@@ -955,16 +951,6 @@ func validateNetworkingCilium(cluster *kops.Cluster, v *kops.CiliumNetworkingSpe
 			allErrs = append(allErrs, field.Forbidden(fldPath.Child("hubble", "enabled"), "Hubble requires that cert manager is enabled"))
 		}
 	}
-
-	if version.Minor < 10 && v.EncryptionType == kops.CiliumEncryptionTypeWireguard {
-		allErrs = append(allErrs, field.Forbidden(fldPath.Child("encryptionType"), "Cilium EncryptionType=WireGuard is not available for Cilium version < 1.10.0."))
-	}
-
-	if version.Minor < 11 {
-		if v.EnableServiceTopology {
-			allErrs = append(allErrs, field.Forbidden(fldPath.Child("enableServiceTopology"), "Service topology requires Cilium 1.11"))
-		}
-	}
 }
 
 	if v.EnableNodePort && c.KubeProxy != nil && (c.KubeProxy.Enabled == nil || *c.KubeProxy.Enabled) {
diff --git a/pkg/apis/kops/validation/validation_test.go b/pkg/apis/kops/validation/validation_test.go
index 90049f3384..1dec88a898 100644
--- a/pkg/apis/kops/validation/validation_test.go
+++ b/pkg/apis/kops/validation/validation_test.go
@@ -921,7 +921,7 @@ func Test_Validate_Cilium(t *testing.T) {
 		},
 		{
 			Cilium: kops.CiliumNetworkingSpec{
-				Version: "v1.8.0",
+				Version: "v1.11.5",
 				Hubble: &kops.HubbleSpec{
 					Enabled: fi.Bool(true),
 				},
diff --git a/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.12-v1.8.yaml.template b/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.12-v1.8.yaml.template
deleted file mode 100644
index e569283f7e..0000000000
--- a/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.12-v1.8.yaml.template
+++ /dev/null
@@ -1,933 +0,0 @@
-{{- if CiliumSecret }}
-apiVersion: v1
-kind: Secret
-metadata:
-  name: cilium-ipsec-keys
-  namespace: kube-system
-stringData:
-  {{ CiliumSecret }}
----
-{{- end }}
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: cilium-config
-  namespace: kube-system
-data:
-{{ with .Networking.Cilium }}
-
-{{- if .EtcdManaged }}
-  kvstore: etcd
-  kvstore-opt: '{"etcd.config": "/var/lib/etcd-config/etcd.config"}'
-
-  etcd-config: |-
-    ---
-    endpoints:
-      - https://{{ $.MasterInternalName }}:4003
-
-    trusted-ca-file: '/var/lib/etcd-secrets/etcd-ca.crt'
-    key-file: '/var/lib/etcd-secrets/etcd-client-cilium.key'
-    cert-file: '/var/lib/etcd-secrets/etcd-client-cilium.crt'
-{{ end }}
-
-  # Identity allocation mode selects how identities are shared between cilium
-  # nodes by setting how they are stored. The options are "crd" or "kvstore".
-  #   - "crd" stores identities in kubernetes as CRDs (custom resource definition).
- # These can be queried with: - # kubectl get ciliumid - # - "kvstore" stores identities in a kvstore, etcd or consul, that is - # configured below. Cilium versions before 1.6 supported only the kvstore - # backend. Upgrades from these older cilium versions should continue using - # the kvstore by commenting out the identity-allocation-mode below, or - # setting it to "kvstore". - identity-allocation-mode: crd - # If you want to run cilium in debug mode change this value to true - debug: "{{ .Debug }}" - {{ if .EnablePrometheusMetrics }} - # If you want metrics enabled in all of your Cilium agents, set the port for - # which the Cilium agents will have their metrics exposed. - # This option deprecates the "prometheus-serve-addr" in the - # "cilium-metrics-config" ConfigMap - # NOTE that this will open the port on ALL nodes where Cilium pods are - # scheduled. - prometheus-serve-addr: ":{{ .AgentPrometheusPort }}" - operator-prometheus-serve-addr: ":6942" - enable-metrics: "true" - {{ end }} - {{ if .EnableEncryption }} - enable-ipsec: "true" - ipsec-key-file: /etc/ipsec/keys - {{ end }} - # Enable IPv4 addressing. If enabled, all endpoints are allocated an IPv4 - # address. - enable-ipv4: "true" - # Enable IPv6 addressing. If enabled, all endpoints are allocated an IPv6 - # address. - enable-ipv6: "false" - # If you want cilium monitor to aggregate tracing for packets, set this level - # to "low", "medium", or "maximum". The higher the level, the less packets - # that will be seen in monitor output. - monitor-aggregation: "{{ .MonitorAggregation }}" - # ct-global-max-entries-* specifies the maximum number of connections - # supported across all endpoints, split by protocol: tcp or other. One pair - # of maps uses these values for IPv4 connections, and another pair of maps - # use these values for IPv6 connections. - # - # If these values are modified, then during the next Cilium startup the - # tracking of ongoing connections may be disrupted. This may lead to brief - # policy drops or a change in loadbalancing decisions for a connection. - # - # For users upgrading from Cilium 1.2 or earlier, to minimize disruption - # during the upgrade process, comment out these options. - bpf-ct-global-tcp-max: "{{ .BPFCTGlobalTCPMax }}" - bpf-ct-global-any-max: "{{ .BPFCTGlobalAnyMax }}" - - # Pre-allocation of map entries allows per-packet latency to be reduced, at - # the expense of up-front memory allocation for the entries in the maps. The - # default value below will minimize memory usage in the default installation; - # users who are sensitive to latency may consider setting this to "true". - # - # This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore - # this option and behave as though it is set to "true". - # - # If this value is modified, then during the next Cilium startup the restore - # of existing endpoints and tracking of ongoing connections may be disrupted. - # This may lead to policy drops or a change in loadbalancing decisions for a - # connection for some time. Endpoints may need to be recreated to restore - # connectivity. - # - # If this option is set to "false" during an upgrade from 1.3 or earlier to - # 1.4 or later, then it may cause one-time disruptions during the upgrade. 
- preallocate-bpf-maps: "{{- if .PreallocateBPFMaps -}}true{{- else -}}false{{- end -}}" - # Regular expression matching compatible Istio sidecar istio-proxy - # container image names - sidecar-istio-proxy-image: "{{ .SidecarIstioProxyImage }}" - # Encapsulation mode for communication between nodes - # Possible values: - # - disabled - # - vxlan (default) - # - geneve - tunnel: "{{ .Tunnel }}" - - # Name of the cluster. Only relevant when building a mesh of clusters. - cluster-name: "{{ .ClusterName }}" - - # DNS response code for rejecting DNS requests, - # available options are "nameError" and "refused" - tofqdns-dns-reject-response-code: "{{ .ToFQDNsDNSRejectResponseCode }}" - # This option is disabled by default starting from version 1.4.x in favor - # of a more powerful DNS proxy-based implementation, see [0] for details. - # Enable this option if you want to use FQDN policies but do not want to use - # the DNS proxy. - # - # To ease upgrade, users may opt to set this option to "true". - # Otherwise please refer to the Upgrade Guide [1] which explains how to - # prepare policy rules for upgrade. - # - # [0] http://docs.cilium.io/en/stable/policy/language/#dns-based - # [1] http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action - tofqdns-enable-poller: "{{- if .ToFQDNsEnablePoller -}}true{{- else -}}false{{- end -}}" - # wait-bpf-mount makes init container wait until bpf filesystem is mounted - wait-bpf-mount: "false" - # Enable fetching of container-runtime specific metadata - # - # By default, the Kubernetes pod and namespace labels are retrieved and - # associated with endpoints for identification purposes. By integrating - # with the container runtime, container runtime specific labels can be - # retrieved, such labels will be prefixed with container: - # - # CAUTION: The container runtime labels can include information such as pod - # annotations which may result in each pod being associated a unique set of - # labels which can result in excessive security identities being allocated. - # Please review the labels filter when enabling container runtime labels. - # - # Supported values: - # - containerd - # - crio - # - docker - # - none - # - auto (automatically detect the container runtime) - # - masquerade: "{{ .Masquerade }}" - install-iptables-rules: "{{ WithDefaultBool .InstallIptablesRules true }}" - auto-direct-node-routes: "{{ .AutoDirectNodeRoutes }}" - {{ if .EnableHostReachableServices }} - enable-host-reachable-services: "{{ .EnableHostReachableServices }}" - {{ end }} - enable-node-port: "{{ .EnableNodePort }}" - kube-proxy-replacement: "{{- if .EnableNodePort -}}strict{{- else -}}partial{{- end -}}" - enable-remote-node-identity: "{{ .EnableRemoteNodeIdentity -}}" - {{ with .IPAM }} - ipam: {{ . }} - {{ if eq . "eni" }} - enable-endpoint-routes: "true" - auto-create-cilium-node-resource: "true" - blacklist-conflicting-routes: "false" - {{ end }} - {{ end }} - - {{ if WithDefaultBool .Hubble.Enabled false }} - # Enable Hubble gRPC service. - enable-hubble: "true" - # UNIX domain socket for Hubble server to listen to. - hubble-socket-path: "/var/run/cilium/hubble.sock" - {{ if .Hubble.Metrics }} - hubble-metrics-server: ":9091" - hubble-metrics: - {{- range .Hubble.Metrics }} - {{ . 
}} - {{- end }} - {{ end }} - {{ end }} - -{{ end }} # With .Networking.Cilium end ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: cilium - namespace: kube-system ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: cilium-operator - namespace: kube-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: cilium -rules: -- apiGroups: - - networking.k8s.io - resources: - - networkpolicies - verbs: - - get - - list - - watch -- apiGroups: - - discovery.k8s.io - resources: - - endpointslices - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - namespaces - - services - - nodes - - endpoints - - componentstatuses - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - pods - - pods/finalizers - verbs: - - get - - list - - watch - - update - - delete -- apiGroups: - - "" - resources: - - nodes - verbs: - - get - - list - - watch - - update -- apiGroups: - - "" - resources: - - nodes - - nodes/status - verbs: - - patch -- apiGroups: - - extensions - resources: - - ingresses - verbs: - - create - - get - - list - - watch -- apiGroups: - - apiextensions.k8s.io - resources: - - customresourcedefinitions - verbs: - - create - - get - - list - - watch - - update -- apiGroups: - - cilium.io - resources: - - ciliumnetworkpolicies - - ciliumnetworkpolicies/finalizers - - ciliumnetworkpolicies/status - - ciliumclusterwidenetworkpolicies - - ciliumclusterwidenetworkpolicies/finalizers - - ciliumclusterwidenetworkpolicies/status - - ciliumendpoints - - ciliumendpoints/finalizers - - ciliumendpoints/status - - ciliumnodes - - ciliumnodes/finalizers - - ciliumnodes/status - - ciliumidentities - verbs: - - '*' ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: cilium-operator -rules: -- apiGroups: - - "" - resources: - # to automatically delete [core|kube]dns pods so that are starting to being - # managed by Cilium - - pods - verbs: - - get - - list - - watch - - delete -- apiGroups: - - discovery.k8s.io - resources: - - endpointslices - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - # to automatically read from k8s and import the node's pod CIDR to cilium's - # etcd so all nodes know how to reach another pod running in a different - # node. 
- - nodes - # to perform the translation of a CNP that contains `ToGroup` to its endpoints - - services - - endpoints - # to check apiserver connectivity - - namespaces - verbs: - - get - - list - - watch -- apiGroups: - - cilium.io - resources: - - ciliumnetworkpolicies - - ciliumnetworkpolicies/finalizers - - ciliumnetworkpolicies/status - - ciliumclusterwidenetworkpolicies - - ciliumclusterwidenetworkpolicies/finalizers - - ciliumclusterwidenetworkpolicies/status - - ciliumendpoints - - ciliumendpoints/finalizers - - ciliumendpoints/status - - ciliumnodes - - ciliumnodes/finalizers - - ciliumnodes/status - - ciliumidentities - - ciliumidentities/finalizers - - ciliumidentities/status - verbs: - - '*' -- apiGroups: - - apiextensions.k8s.io - resources: - - customresourcedefinitions - verbs: - - get - - list - - watch -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - create - - get - - update ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: cilium -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cilium -subjects: -- kind: ServiceAccount - name: cilium - namespace: kube-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: cilium-operator -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cilium-operator -subjects: -- kind: ServiceAccount - name: cilium-operator - namespace: kube-system ---- -apiVersion: apps/v1 -kind: DaemonSet -metadata: - labels: - k8s-app: cilium - kubernetes.io/cluster-service: "true" - name: cilium - namespace: kube-system -spec: - selector: - matchLabels: - k8s-app: cilium - kubernetes.io/cluster-service: "true" - template: - metadata: - annotations: - # This annotation plus the CriticalAddonsOnly toleration makes - # cilium to be a critical pod in the cluster, which ensures cilium - # gets priority scheduling. 
- # https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/ - scheduler.alpha.kubernetes.io/critical-pod: "" - {{ if .Networking.Cilium.EnablePrometheusMetrics }} - # Annotation required for prometheus auto-discovery scraping - # https://docs.cilium.io/en/v1.9/operations/metrics/#installation - prometheus.io/scrape: "true" - prometheus.io/port: "{{ .Networking.Cilium.AgentPrometheusPort }}" - {{ end }} - labels: - k8s-app: cilium - kubernetes.io/cluster-service: "true" - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: k8s-app - operator: In - values: - - cilium - topologyKey: kubernetes.io/hostname - containers: - - args: - - --config-dir=/tmp/cilium/config-map - command: - - cilium-agent - env: - - name: K8S_NODE_NAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: spec.nodeName - - name: CILIUM_K8S_NAMESPACE - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: metadata.namespace - - name: CILIUM_FLANNEL_MASTER_DEVICE - valueFrom: - configMapKeyRef: - key: flannel-master-device - name: cilium-config - optional: true - - name: CILIUM_FLANNEL_UNINSTALL_ON_EXIT - valueFrom: - configMapKeyRef: - key: flannel-uninstall-on-exit - name: cilium-config - optional: true - - name: CILIUM_CLUSTERMESH_CONFIG - value: /var/lib/cilium/clustermesh/ - - name: CILIUM_CNI_CHAINING_MODE - valueFrom: - configMapKeyRef: - key: cni-chaining-mode - name: cilium-config - optional: true - - name: CILIUM_CUSTOM_CNI_CONF - valueFrom: - configMapKeyRef: - key: custom-cni-conf - name: cilium-config - optional: true - - name: KUBERNETES_SERVICE_HOST - value: "{{.MasterInternalName}}" - - name: KUBERNETES_SERVICE_PORT - value: "443" - {{ with .Networking.Cilium.EnablePolicy }} - - name: CILIUM_ENABLE_POLICY - value: {{ . }} - {{ end }} -{{ with .Networking.Cilium }} - image: "quay.io/cilium/cilium:{{ .Version }}" - imagePullPolicy: IfNotPresent - lifecycle: - postStart: - exec: - command: - - /cni-install.sh - preStop: - exec: - command: - - /cni-uninstall.sh - livenessProbe: - httpGet: - host: '127.0.0.1' - path: /healthz - port: 9876 - scheme: HTTP - httpHeaders: - - name: "brief" - value: "true" - failureThreshold: 10 - # The initial delay for the liveness probe is intentionally large to - # avoid an endless kill & restart cycle if in the event that the initial - # bootstrapping takes longer than expected. 
- initialDelaySeconds: 120 - periodSeconds: 30 - successThreshold: 1 - timeoutSeconds: 5 - name: cilium-agent - {{ if or .EnablePrometheusMetrics .Hubble.Metrics }} - ports: - {{ if .EnablePrometheusMetrics }} - - containerPort: {{ .AgentPrometheusPort }} - name: prometheus - protocol: TCP - {{ end }} - {{- if .Hubble.Metrics }} - - containerPort: 9091 - hostPort: 9091 - name: hubble-metrics - protocol: TCP - {{- end }} - {{ end }} - resources: - requests: - cpu: {{ or .CPURequest "25m" }} - memory: {{ or .MemoryRequest "128Mi" }} - readinessProbe: - httpGet: - host: '127.0.0.1' - path: /healthz - port: 9876 - scheme: HTTP - httpHeaders: - - name: "brief" - value: "true" - failureThreshold: 3 - initialDelaySeconds: 5 - periodSeconds: 30 - successThreshold: 1 - timeoutSeconds: 5 - securityContext: - capabilities: - add: - - NET_ADMIN - - SYS_MODULE - privileged: true - volumeMounts: - - mountPath: /sys/fs/bpf - name: bpf-maps - mountPropagation: HostToContainer - - mountPath: /var/run/cilium - name: cilium-run - - mountPath: /host/opt/cni/bin - name: cni-path - - mountPath: /host/etc/cni/net.d - name: etc-cni-netd -{{ if .EtcdManaged }} - - mountPath: /var/lib/etcd-config - name: etcd-config-path - readOnly: true - - mountPath: /var/lib/etcd-secrets - name: etcd-secrets - readOnly: true -{{ end }} - - mountPath: /var/lib/cilium/clustermesh - name: clustermesh-secrets - readOnly: true - - mountPath: /tmp/cilium/config-map - name: cilium-config-path - readOnly: true - # Needed to be able to load kernel modules - - mountPath: /lib/modules - name: lib-modules - readOnly: true - - mountPath: /run/xtables.lock - name: xtables-lock -{{ if CiliumSecret }} - - mountPath: /etc/ipsec - name: cilium-ipsec-secrets -{{ end }} - hostNetwork: true - initContainers: - - command: - - /init-container.sh - env: - - name: CILIUM_ALL_STATE - valueFrom: - configMapKeyRef: - key: clean-cilium-state - name: cilium-config - optional: true - - name: CILIUM_BPF_STATE - valueFrom: - configMapKeyRef: - key: clean-cilium-bpf-state - name: cilium-config - optional: true - - name: CILIUM_WAIT_BPF_MOUNT - valueFrom: - configMapKeyRef: - key: wait-bpf-mount - name: cilium-config - optional: true - image: "quay.io/cilium/cilium:{{ .Version }}" -## end of `with .Networking.Cilium` -#{{ end }} - imagePullPolicy: IfNotPresent - name: clean-cilium-state - resources: - requests: - cpu: 100m - memory: 100Mi - limits: - memory: 100Mi - securityContext: - capabilities: - add: - - NET_ADMIN - privileged: true - volumeMounts: - - mountPath: /sys/fs/bpf - name: bpf-maps - - mountPath: /var/run/cilium - name: cilium-run - priorityClassName: system-node-critical - restartPolicy: Always - serviceAccount: cilium - serviceAccountName: cilium - terminationGracePeriodSeconds: 1 - tolerations: - - operator: Exists - volumes: - # To keep state between restarts / upgrades - - hostPath: - path: /var/run/cilium - type: DirectoryOrCreate - name: cilium-run - # To keep state between restarts / upgrades for bpf maps - - hostPath: - path: /sys/fs/bpf - type: DirectoryOrCreate - name: bpf-maps - # To install cilium cni plugin in the host - - hostPath: - path: /opt/cni/bin - type: DirectoryOrCreate - name: cni-path - # To install cilium cni configuration in the host - - hostPath: - path: /etc/cni/net.d - type: DirectoryOrCreate - name: etc-cni-netd - # To be able to load kernel modules - - hostPath: - path: /lib/modules - name: lib-modules - # To access iptables concurrently with other processes (e.g. 
kube-proxy) - - hostPath: - path: /run/xtables.lock - type: FileOrCreate - name: xtables-lock - # To read the clustermesh configuration -{{- if .Networking.Cilium.EtcdManaged }} - # To read the etcd config stored in config maps - - configMap: - defaultMode: 420 - items: - - key: etcd-config - path: etcd.config - name: cilium-config - name: etcd-config-path - # To read the Cilium etcd secrets in case the user might want to use TLS - - name: etcd-secrets - hostPath: - path: /etc/kubernetes/pki/cilium - type: Directory -{{- end }} - - name: clustermesh-secrets - secret: - defaultMode: 420 - optional: true - secretName: cilium-clustermesh - # To read the configuration from the config map - - configMap: - name: cilium-config - name: cilium-config-path -{{ if CiliumSecret }} - - name: cilium-ipsec-secrets - secret: - secretName: cilium-ipsec-keys -{{ end }} - updateStrategy: - type: OnDelete ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - io.cilium/app: operator - name: cilium-operator - name: cilium-operator - namespace: kube-system -spec: - replicas: 1 - selector: - matchLabels: - io.cilium/app: operator - name: cilium-operator - strategy: - rollingUpdate: - maxSurge: 1 - maxUnavailable: 1 - type: RollingUpdate - template: - metadata: - labels: - io.cilium/app: operator - name: cilium-operator - spec: - containers: - - args: - - --config-dir=/tmp/cilium/config-map - - --debug=$(CILIUM_DEBUG) - command: - - cilium-operator - env: - - name: CILIUM_K8S_NAMESPACE - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: metadata.namespace - - name: K8S_NODE_NAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: spec.nodeName - - name: CILIUM_DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: cilium-config - optional: true - - name: AWS_ACCESS_KEY_ID - valueFrom: - secretKeyRef: - key: AWS_ACCESS_KEY_ID - name: cilium-aws - optional: true - - name: AWS_SECRET_ACCESS_KEY - valueFrom: - secretKeyRef: - key: AWS_SECRET_ACCESS_KEY - name: cilium-aws - optional: true - - name: AWS_DEFAULT_REGION - valueFrom: - secretKeyRef: - key: AWS_DEFAULT_REGION - name: cilium-aws - optional: true - - name: KUBERNETES_SERVICE_HOST - value: "{{.MasterInternalName}}" - - name: KUBERNETES_SERVICE_PORT - value: "443" -{{ with .Networking.Cilium }} - image: "quay.io/cilium/operator:{{ .Version }}" - imagePullPolicy: IfNotPresent - name: cilium-operator - {{ if .EnablePrometheusMetrics }} - ports: - - containerPort: 6942 - hostPort: 6942 - name: prometheus - protocol: TCP - {{ end }} - resources: - requests: - cpu: {{ or .CPURequest "25m" }} - memory: {{ or .MemoryRequest "128Mi" }} - livenessProbe: - httpGet: - host: "127.0.0.1" - path: /healthz - port: 9234 - scheme: HTTP - initialDelaySeconds: 60 - periodSeconds: 10 - timeoutSeconds: 3 - volumeMounts: - - mountPath: /tmp/cilium/config-map - name: cilium-config-path - readOnly: true -{{- if .EtcdManaged }} - - mountPath: /var/lib/etcd-config - name: etcd-config-path - readOnly: true - - mountPath: /var/lib/etcd-secrets - name: etcd-secrets - readOnly: true -{{- end }} - hostNetwork: true - priorityClassName: system-cluster-critical - restartPolicy: Always - serviceAccount: cilium-operator - serviceAccountName: cilium-operator - volumes: - # To read the configuration from the config map - - configMap: - name: cilium-config - name: cilium-config-path -{{- if .EtcdManaged }} - # To read the etcd config stored in config maps - - configMap: - defaultMode: 420 - items: - - key: etcd-config - path: etcd.config - name: cilium-config - name: 
etcd-config-path - # To read the k8s etcd secrets in case the user might want to use TLS - - name: etcd-secrets - hostPath: - path: /etc/kubernetes/pki/cilium - type: Directory -{{- end }} - - {{ if eq .IPAM "eni" }} - nodeSelector: - node-role.kubernetes.io/master: "" - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/master - - effect: NoExecute - key: node.kubernetes.io/not-ready - operator: Exists - tolerationSeconds: 300 - - effect: NoExecute - key: node.kubernetes.io/unreachable - operator: Exists - tolerationSeconds: 300 - {{ end }} -{{ end }} - -{{ if WithDefaultBool .Networking.Cilium.Hubble.Enabled false }} ---- -# Source: cilium/charts/hubble-relay/templates/service.yaml -kind: Service -apiVersion: v1 -metadata: - name: hubble-relay - namespace: kube-system - labels: - k8s-app: hubble-relay -spec: - type: ClusterIP - selector: - k8s-app: hubble-relay - ports: - - protocol: TCP - port: 80 - targetPort: 4245 ---- -# Source: cilium/charts/hubble-relay/templates/deployment.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: hubble-relay - namespace: kube-system - labels: - k8s-app: hubble-relay -spec: - replicas: 1 - selector: - matchLabels: - k8s-app: hubble-relay - template: - metadata: - labels: - k8s-app: hubble-relay - spec: - affinity: - podAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: "k8s-app" - operator: In - values: - - cilium - topologyKey: "kubernetes.io/hostname" - containers: - - name: hubble-relay - image: "quay.io/cilium/hubble-relay:{{ .Version }}" - imagePullPolicy: IfNotPresent - command: - - "hubble-relay" - args: - - "serve" - - "--peer-service=unix:///var/run/cilium/hubble.sock" - - "--listen-address=:4245" - ports: - - name: grpc - containerPort: 4245 - readinessProbe: - tcpSocket: - port: grpc - livenessProbe: - tcpSocket: - port: grpc - volumeMounts: - - mountPath: /var/run/cilium - name: hubble-sock-dir - readOnly: true - restartPolicy: Always - terminationGracePeriodSeconds: 0 - tolerations: - - operator: Exists - volumes: - - hostPath: - path: /var/run/cilium - type: Directory - name: hubble-sock-dir -{{ end }} diff --git a/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.12-v1.9.yaml.template b/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.12-v1.9.yaml.template deleted file mode 100644 index 6e81505bba..0000000000 --- a/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.12-v1.9.yaml.template +++ /dev/null @@ -1,1102 +0,0 @@ -{{ with .Networking.Cilium }} -{{- if CiliumSecret }} -apiVersion: v1 -kind: Secret -metadata: - name: cilium-ipsec-keys - namespace: kube-system -stringData: - {{ CiliumSecret }} ---- -{{- end }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: cilium - namespace: kube-system ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: cilium-operator - namespace: kube-system -{{ if WithDefaultBool .Hubble.Enabled false }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: hubble-relay - namespace: kube-system -{{ end }} ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cilium-config - namespace: kube-system -data: - -{{- if .EtcdManaged }} - kvstore: etcd - kvstore-opt: '{"etcd.config": "/var/lib/etcd-config/etcd.config"}' - - etcd-config: |- - --- - endpoints: - - https://{{ $.MasterInternalName }}:4003 - - trusted-ca-file: '/var/lib/etcd-secrets/etcd-ca.crt' - key-file: '/var/lib/etcd-secrets/etcd-client-cilium.key' - cert-file: 
'/var/lib/etcd-secrets/etcd-client-cilium.crt' -{{ end }} - - # Identity allocation mode selects how identities are shared between cilium - # nodes by setting how they are stored. The options are "crd" or "kvstore". - # - "crd" stores identities in kubernetes as CRDs (custom resource definition). - # These can be queried with: - # kubectl get ciliumid - # - "kvstore" stores identities in a kvstore, etcd or consul, that is - # configured below. Cilium versions before 1.6 supported only the kvstore - # backend. Upgrades from these older cilium versions should continue using - # the kvstore by commenting out the identity-allocation-mode below, or - # setting it to "kvstore". - # (default crd) - identity-allocation-mode: "{{ .IdentityAllocationMode }}" - - # If you want to run cilium in debug mode change this value to true - debug: "{{ .Debug }}" - {{ if .EnablePrometheusMetrics }} - # If you want metrics enabled in all of your Cilium agents, set the port for - # which the Cilium agents will have their metrics exposed. - # This option deprecates the "prometheus-serve-addr" in the - # "cilium-metrics-config" ConfigMap - # NOTE that this will open the port on ALL nodes where Cilium pods are - # scheduled. - prometheus-serve-addr: ":{{ .AgentPrometheusPort }}" - operator-prometheus-serve-addr: ":6942" - enable-metrics: "true" - {{ end }} - {{ if .EnableEncryption }} - enable-ipsec: "true" - ipsec-key-file: /etc/ipsec/keys - {{ end }} - # Enable IPv4 addressing. If enabled, all endpoints are allocated an IPv4 - # address. - enable-ipv4: "true" - # Enable IPv6 addressing. If enabled, all endpoints are allocated an IPv6 - # address. - enable-ipv6: "false" - # If you want cilium monitor to aggregate tracing for packets, set this level - # to "low", "medium", or "maximum". The higher the level, the less packets - # that will be seen in monitor output. - monitor-aggregation: "{{ .MonitorAggregation }}" - # ct-global-max-entries-* specifies the maximum number of connections - # supported across all endpoints, split by protocol: tcp or other. One pair - # of maps uses these values for IPv4 connections, and another pair of maps - # use these values for IPv6 connections. - # - # If these values are modified, then during the next Cilium startup the - # tracking of ongoing connections may be disrupted. This may lead to brief - # policy drops or a change in loadbalancing decisions for a connection. - # - # For users upgrading from Cilium 1.2 or earlier, to minimize disruption - # during the upgrade process, comment out these options. - bpf-ct-global-tcp-max: "{{ .BPFCTGlobalTCPMax }}" - bpf-ct-global-any-max: "{{ .BPFCTGlobalAnyMax }}" - - # BPF load balancing algorithm ("random", "maglev") (default "random") - bpf-lb-algorithm: "{{ .BPFLBAlgorithm }}" - - # Maglev per service backend table size (parameter M) (default 16381) - bpf-lb-maglev-table-size: "{{ .BPFLBMaglevTableSize }}" - - # bpf-nat-global-max specified the maximum number of entries in the - # BPF NAT table. (default 524288) - bpf-nat-global-max: "{{ .BPFNATGlobalMax }}" - - # bpf-neigh-global-max specified the maximum number of entries in the - # BPF neighbor table. (default 524288) - bpf-neigh-global-max: "{{ .BPFNeighGlobalMax }}" - - # bpf-policy-map-max specifies the maximum number of entries in endpoint - # policy map (per endpoint) (default 16384) - bpf-policy-map-max: "{{ .BPFPolicyMapMax }}" - - # bpf-lb-map-max specifies the maximum number of entries in bpf lb service, - # backend and affinity maps. 
(default 65536) - bpf-lb-map-max: "{{ .BPFLBMapMax }}" - - {{ if .ChainingMode }} - cni-chaining-mode: "{{ .ChainingMode }}" - {{ end }} - - # enable-bpf-masquerade enables masquerading packets from endpoints leaving - # the host with BPF instead of iptables. (default false) - enable-bpf-masquerade: "{{ .EnableBPFMasquerade }}" - - # Pre-allocation of map entries allows per-packet latency to be reduced, at - # the expense of up-front memory allocation for the entries in the maps. The - # default value below will minimize memory usage in the default installation; - # users who are sensitive to latency may consider setting this to "true". - # - # This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore - # this option and behave as though it is set to "true". - # - # If this value is modified, then during the next Cilium startup the restore - # of existing endpoints and tracking of ongoing connections may be disrupted. - # This may lead to policy drops or a change in loadbalancing decisions for a - # connection for some time. Endpoints may need to be recreated to restore - # connectivity. - # - # If this option is set to "false" during an upgrade from 1.3 or earlier to - # 1.4 or later, then it may cause one-time disruptions during the upgrade. - preallocate-bpf-maps: "{{- if .PreallocateBPFMaps -}}true{{- else -}}false{{- end -}}" - # Regular expression matching compatible Istio sidecar istio-proxy - # container image names - sidecar-istio-proxy-image: "{{ .SidecarIstioProxyImage }}" - # Encapsulation mode for communication between nodes - # Possible values: - # - disabled - # - vxlan (default) - # - geneve - tunnel: "{{ .Tunnel }}" - - # Name of the cluster. Only relevant when building a mesh of clusters. - cluster-name: "{{ .ClusterName }}" - - # DNS response code for rejecting DNS requests, - # available options are "nameError" and "refused" - tofqdns-dns-reject-response-code: "{{ .ToFQDNsDNSRejectResponseCode }}" - # This option is disabled by default starting from version 1.4.x in favor - # of a more powerful DNS proxy-based implementation, see [0] for details. - # Enable this option if you want to use FQDN policies but do not want to use - # the DNS proxy. - # - # To ease upgrade, users may opt to set this option to "true". - # Otherwise please refer to the Upgrade Guide [1] which explains how to - # prepare policy rules for upgrade. - # - # [0] http://docs.cilium.io/en/stable/policy/language/#dns-based - # [1] http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action - tofqdns-enable-poller: "{{- if .ToFQDNsEnablePoller -}}true{{- else -}}false{{- end -}}" - # wait-bpf-mount makes init container wait until bpf filesystem is mounted - wait-bpf-mount: "false" - # Enable fetching of container-runtime specific metadata - # - # By default, the Kubernetes pod and namespace labels are retrieved and - # associated with endpoints for identification purposes. By integrating - # with the container runtime, container runtime specific labels can be - # retrieved, such labels will be prefixed with container: - # - # CAUTION: The container runtime labels can include information such as pod - # annotations which may result in each pod being associated a unique set of - # labels which can result in excessive security identities being allocated. - # Please review the labels filter when enabling container runtime labels. 
- # - # Supported values: - # - containerd - # - crio - # - docker - # - none - # - auto (automatically detect the container runtime) - # - masquerade: "{{ .Masquerade }}" - install-iptables-rules: "{{ WithDefaultBool .InstallIptablesRules true }}" - auto-direct-node-routes: "{{ .AutoDirectNodeRoutes }}" - {{ if .EnableHostReachableServices }} - enable-host-reachable-services: "{{ .EnableHostReachableServices }}" - {{ end }} - enable-node-port: "{{ .EnableNodePort }}" - kube-proxy-replacement: "{{- if .EnableNodePort -}}strict{{- else -}}partial{{- end -}}" - - {{ with .IPAM }} - ipam: {{ . }} - {{ if eq . "eni" }} - enable-endpoint-routes: "true" - auto-create-cilium-node-resource: "true" - blacklist-conflicting-routes: "false" - {{ end }} - {{ end }} - - # Disables usage of CiliumEndpoint CRD - disable-endpoint-crd: "{{ .DisableEndpointCRD }}" - - # Enable connectivity health checking between virtual endpoints (default true) - enable-endpoint-health-checking: "{{ .EnableEndpointHealthChecking }}" - - # Enable use of remote node identity (default false) - enable-remote-node-identity: "{{ .EnableRemoteNodeIdentity }}" - - # enable-l7-proxy enables L7 proxy for L7 policy enforcement. (default true) - enable-l7-proxy: "{{ .EnableL7Proxy }}" - - cgroup-root: /run/cilium/cgroupv2 - - {{ if WithDefaultBool .Hubble.Enabled false }} - # Enable Hubble gRPC service. - enable-hubble: "true" - # UNIX domain socket for Hubble server to listen to. - hubble-socket-path: "/var/run/cilium/hubble.sock" - # An additional address for Hubble server to listen to (e.g. ":4244"). - hubble-listen-address: ":4244" - hubble-disable-tls: "false" - hubble-tls-cert-file: /var/lib/cilium/tls/hubble/tls.crt - hubble-tls-key-file: /var/lib/cilium/tls/hubble/tls.key - hubble-tls-client-ca-files: /var/lib/cilium/tls/hubble/ca.crt - {{ if .Hubble.Metrics }} - hubble-metrics-server: ":9091" - hubble-metrics: - {{- range .Hubble.Metrics }} - {{ . 
}} - {{- end }} - {{ end }} - {{ end }} - -{{ if WithDefaultBool .Hubble.Enabled false }} ---- -# Source: cilium/templates/hubble-relay-configmap.yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: hubble-relay-config - namespace: kube-system -data: - config.yaml: | - peer-service: unix:///var/run/cilium/hubble.sock - listen-address: :4245 - - disable-server-tls: true - - tls-client-cert-file: /var/lib/hubble-relay/tls/client.crt - tls-client-key-file: /var/lib/hubble-relay/tls/client.key - tls-hubble-server-ca-files: /var/lib/hubble-relay/tls/hubble-server-ca.crt - -{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: cilium -rules: -- apiGroups: - - networking.k8s.io - resources: - - networkpolicies - verbs: - - get - - list - - watch -- apiGroups: - - discovery.k8s.io - resources: - - endpointslices - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - namespaces - - services - - nodes - - endpoints - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - pods - - pods/finalizers - verbs: - - get - - list - - watch - - update - - delete -- apiGroups: - - "" - resources: - - nodes - verbs: - - get - - list - - watch - - update -- apiGroups: - - "" - resources: - - nodes - - nodes/status - verbs: - - patch -- apiGroups: - - apiextensions.k8s.io - resources: - - customresourcedefinitions - verbs: - - create - - list - - watch - - update - - get -- apiGroups: - - cilium.io - resources: - - ciliumnetworkpolicies - - ciliumnetworkpolicies/status - - ciliumnetworkpolicies/finalizers - - ciliumclusterwidenetworkpolicies - - ciliumclusterwidenetworkpolicies/status - - ciliumclusterwidenetworkpolicies/finalizers - - ciliumendpoints - - ciliumendpoints/status - - ciliumendpoints/finalizers - - ciliumnodes - - ciliumnodes/status - - ciliumnodes/finalizers - - ciliumidentities - - ciliumidentities/finalizers - - ciliumlocalredirectpolicies - - ciliumlocalredirectpolicies/status - - ciliumlocalredirectpolicies/finalizers - verbs: - - '*' ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: cilium-operator -rules: -- apiGroups: - - "" - resources: - # to automatically delete [core|kube]dns pods so that are starting to being - # managed by Cilium - - pods - verbs: - - get - - list - - watch - - delete -- apiGroups: - - discovery.k8s.io - resources: - - endpointslices - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - # to perform the translation of a CNP that contains `ToGroup` to its endpoints - - services - - endpoints - # to check apiserver connectivity - - namespaces - verbs: - - get - - list - - watch -- apiGroups: - - cilium.io - resources: - - ciliumnetworkpolicies - - ciliumnetworkpolicies/status - - ciliumnetworkpolicies/finalizers - - ciliumclusterwidenetworkpolicies - - ciliumclusterwidenetworkpolicies/status - - ciliumclusterwidenetworkpolicies/finalizers - - ciliumendpoints - - ciliumendpoints/status - - ciliumendpoints/finalizers - - ciliumnodes - - ciliumnodes/status - - ciliumnodes/finalizers - - ciliumidentities - - ciliumidentities/status - - ciliumidentities/finalizers - - ciliumlocalredirectpolicies - - ciliumlocalredirectpolicies/status - - ciliumlocalredirectpolicies/finalizers - verbs: - - '*' -- apiGroups: - - apiextensions.k8s.io - resources: - - customresourcedefinitions - verbs: - - create - - get - - list - - update - - watch -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - create - - get - - update -{{ if 
WithDefaultBool .Hubble.Enabled false }} ---- -# Source: cilium/templates/hubble-relay-clusterrole.yaml -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: hubble-relay -rules: - - apiGroups: - - "" - resources: - - componentstatuses - - endpoints - - namespaces - - nodes - - pods - - services - verbs: - - get - - list - - watch -{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: cilium -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cilium -subjects: -- kind: ServiceAccount - name: cilium - namespace: kube-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: cilium-operator -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cilium-operator -subjects: -- kind: ServiceAccount - name: cilium-operator - namespace: kube-system -{{ if WithDefaultBool .Hubble.Enabled false }} ---- -# Source: cilium/templates/hubble-relay-clusterrolebinding.yaml -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: hubble-relay -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: hubble-relay -subjects: -- kind: ServiceAccount - namespace: kube-system - name: hubble-relay ---- -# Source: cilium/templates/hubble-relay-service.yaml -kind: Service -apiVersion: v1 -metadata: - name: hubble-relay - namespace: kube-system - labels: - k8s-app: hubble-relay -spec: - type: ClusterIP - selector: - k8s-app: hubble-relay - ports: - - protocol: TCP - port: 80 - targetPort: 4245 -{{ end }} ---- -apiVersion: apps/v1 -kind: DaemonSet -metadata: - labels: - k8s-app: cilium - kubernetes.io/cluster-service: "true" - name: cilium - namespace: kube-system -spec: - selector: - matchLabels: - k8s-app: cilium - kubernetes.io/cluster-service: "true" - updateStrategy: - type: OnDelete - template: - metadata: - annotations: - # This annotation plus the CriticalAddonsOnly toleration makes - # cilium to be a critical pod in the cluster, which ensures cilium - # gets priority scheduling. - # https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/ - scheduler.alpha.kubernetes.io/critical-pod: "" - {{ if .EnablePrometheusMetrics }} - # Annotation required for prometheus auto-discovery scraping - # https://docs.cilium.io/en/v1.9/operations/metrics/#installation - prometheus.io/scrape: "true" - prometheus.io/port: "{{ .AgentPrometheusPort }}" - {{ end }} -{{- with .AgentPodAnnotations }} - {{- . | nindent 8 }} -{{- end }} - labels: - k8s-app: cilium - kubernetes.io/cluster-service: "true" - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: k8s-app - operator: In - values: - - cilium - topologyKey: kubernetes.io/hostname - containers: - - args: - - --config-dir=/tmp/cilium/config-map - command: - - cilium-agent - livenessProbe: - httpGet: - host: '127.0.0.1' - path: /healthz - port: 9876 - scheme: HTTP - httpHeaders: - - name: "brief" - value: "true" - failureThreshold: 10 - # The initial delay for the liveness probe is intentionally large to - # avoid an endless kill & restart cycle if in the event that the initial - # bootstrapping takes longer than expected. 
- initialDelaySeconds: 120 - periodSeconds: 30 - successThreshold: 1 - timeoutSeconds: 5 - resources: - requests: - cpu: {{ or .CPURequest "25m" }} - memory: {{ or .MemoryRequest "128Mi" }} - readinessProbe: - httpGet: - host: '127.0.0.1' - path: /healthz - port: 9876 - scheme: HTTP - httpHeaders: - - name: "brief" - value: "true" - failureThreshold: 3 - initialDelaySeconds: 5 - periodSeconds: 30 - successThreshold: 1 - timeoutSeconds: 5 - env: - - name: K8S_NODE_NAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: spec.nodeName - - name: CILIUM_K8S_NAMESPACE - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: metadata.namespace - - name: CILIUM_FLANNEL_MASTER_DEVICE - valueFrom: - configMapKeyRef: - key: flannel-master-device - name: cilium-config - optional: true - - name: CILIUM_FLANNEL_UNINSTALL_ON_EXIT - valueFrom: - configMapKeyRef: - key: flannel-uninstall-on-exit - name: cilium-config - optional: true - - name: CILIUM_CLUSTERMESH_CONFIG - value: /var/lib/cilium/clustermesh/ - - name: CILIUM_CNI_CHAINING_MODE - valueFrom: - configMapKeyRef: - key: cni-chaining-mode - name: cilium-config - optional: true - - name: CILIUM_CUSTOM_CNI_CONF - valueFrom: - configMapKeyRef: - key: custom-cni-conf - name: cilium-config - optional: true - - name: KUBERNETES_SERVICE_HOST - value: "{{ $.MasterInternalName }}" - - name: KUBERNETES_SERVICE_PORT - value: "443" - {{ with .EnablePolicy }} - - name: CILIUM_ENABLE_POLICY - value: {{ . }} - {{ end }} - image: "quay.io/cilium/cilium:{{ .Version }}" - imagePullPolicy: IfNotPresent - lifecycle: - postStart: - exec: - command: - - /cni-install.sh - preStop: - exec: - command: - - /cni-uninstall.sh - name: cilium-agent - {{ if or .EnablePrometheusMetrics .Hubble.Metrics }} - ports: - {{ if .EnablePrometheusMetrics }} - - containerPort: {{ .AgentPrometheusPort }} - name: prometheus - protocol: TCP - {{ end }} - {{- if .Hubble.Metrics }} - - containerPort: 9091 - hostPort: 9091 - name: hubble-metrics - protocol: TCP - {{- end }} - {{ end }} - - securityContext: - capabilities: - add: - - NET_ADMIN - - SYS_MODULE - privileged: true - volumeMounts: - - mountPath: /sys/fs/bpf - name: bpf-maps - - mountPath: /var/run/cilium - name: cilium-run - - mountPath: /host/opt/cni/bin - name: cni-path - - mountPath: /host/etc/cni/net.d - name: etc-cni-netd -{{ if .EtcdManaged }} - - mountPath: /var/lib/etcd-config - name: etcd-config-path - readOnly: true - - mountPath: /var/lib/etcd-secrets - name: etcd-secrets - readOnly: true -{{ end }} - - mountPath: /var/lib/cilium/clustermesh - name: clustermesh-secrets - readOnly: true - - mountPath: /tmp/cilium/config-map - name: cilium-config-path - readOnly: true - # Needed to be able to load kernel modules - - mountPath: /lib/modules - name: lib-modules - readOnly: true - - mountPath: /run/xtables.lock - name: xtables-lock -{{ if WithDefaultBool .Hubble.Enabled false }} - - mountPath: /var/lib/cilium/tls/hubble - name: hubble-tls - readOnly: true -{{ end }} -{{ if CiliumSecret }} - - mountPath: /etc/ipsec - name: cilium-ipsec-secrets -{{ end }} - hostNetwork: true - initContainers: - - command: - - /init-container.sh - env: - - name: CILIUM_ALL_STATE - valueFrom: - configMapKeyRef: - key: clean-cilium-state - name: cilium-config - optional: true - - name: CILIUM_BPF_STATE - valueFrom: - configMapKeyRef: - key: clean-cilium-bpf-state - name: cilium-config - optional: true - - name: CILIUM_WAIT_BPF_MOUNT - valueFrom: - configMapKeyRef: - key: wait-bpf-mount - name: cilium-config - optional: true - image: 
"quay.io/cilium/cilium:{{ .Version }}" - imagePullPolicy: IfNotPresent - name: clean-cilium-state - securityContext: - capabilities: - add: - - NET_ADMIN - privileged: true - volumeMounts: - - mountPath: /sys/fs/bpf - name: bpf-maps - mountPropagation: HostToContainer - # Required to mount cgroup filesystem from the host to cilium agent pod - - mountPath: /run/cilium/cgroupv2 - name: cilium-cgroup - mountPropagation: HostToContainer - - mountPath: /var/run/cilium - name: cilium-run - resources: - requests: - cpu: 100m - memory: 100Mi - limits: - memory: 100Mi - restartPolicy: Always - priorityClassName: system-node-critical - serviceAccount: cilium - serviceAccountName: cilium - terminationGracePeriodSeconds: 1 - tolerations: - - operator: Exists - volumes: - # To keep state between restarts / upgrades - - hostPath: - path: /var/run/cilium - type: DirectoryOrCreate - name: cilium-run - # To keep state between restarts / upgrades for bpf maps - - hostPath: - path: /sys/fs/bpf - type: DirectoryOrCreate - name: bpf-maps - # To install cilium cni plugin in the host - - hostPath: - path: /opt/cni/bin - type: DirectoryOrCreate - name: cni-path - # To keep state between restarts / upgrades for cgroup2 filesystem - - hostPath: - path: /run/cilium/cgroupv2 - type: Directory - name: cilium-cgroup - # To install cilium cni configuration in the host - - hostPath: - path: /etc/cni/net.d - type: DirectoryOrCreate - name: etc-cni-netd - # To be able to load kernel modules - - hostPath: - path: /lib/modules - name: lib-modules - # To access iptables concurrently with other processes (e.g. kube-proxy) - - hostPath: - path: /run/xtables.lock - type: FileOrCreate - name: xtables-lock - # To read the clustermesh configuration -{{- if .EtcdManaged }} - # To read the etcd config stored in config maps - - configMap: - defaultMode: 420 - items: - - key: etcd-config - path: etcd.config - name: cilium-config - name: etcd-config-path - # To read the Cilium etcd secrets in case the user might want to use TLS - - name: etcd-secrets - hostPath: - path: /etc/kubernetes/pki/cilium - type: Directory -{{- end }} - - name: clustermesh-secrets - secret: - defaultMode: 420 - optional: true - secretName: cilium-clustermesh - # To read the configuration from the config map - - configMap: - name: cilium-config - name: cilium-config-path -{{ if CiliumSecret }} - - name: cilium-ipsec-secrets - secret: - secretName: cilium-ipsec-keys -{{ end }} -{{ if WithDefaultBool .Hubble.Enabled false }} - - name: hubble-tls - secret: - secretName: hubble-server-certs - optional: true -{{ end }} ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - io.cilium/app: operator - name: cilium-operator - name: cilium-operator - namespace: kube-system -spec: - replicas: 1 - selector: - matchLabels: - io.cilium/app: operator - name: cilium-operator - strategy: - rollingUpdate: - maxSurge: 1 - maxUnavailable: 1 - type: RollingUpdate - template: - metadata: - labels: - io.cilium/app: operator - name: cilium-operator - spec: - containers: - - args: - - "--config-dir=/tmp/cilium/config-map" - - "--debug=$(CILIUM_DEBUG)" - - "--eni-tags={{ CloudLabels }}" - command: - - cilium-operator - env: - - name: K8S_NODE_NAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: spec.nodeName - - name: CILIUM_K8S_NAMESPACE - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: metadata.namespace - - name: CILIUM_DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: cilium-config - optional: true - - name: KUBERNETES_SERVICE_HOST - value: "{{ 
$.MasterInternalName }}" - - name: KUBERNETES_SERVICE_PORT - value: "443" - image: "quay.io/cilium/operator:{{ .Version }}" - imagePullPolicy: IfNotPresent - name: cilium-operator - {{ if .EnablePrometheusMetrics }} - ports: - - containerPort: 6942 - hostPort: 6942 - name: prometheus - protocol: TCP - {{ end }} - resources: - requests: - cpu: {{ or .CPURequest "25m" }} - memory: {{ or .MemoryRequest "128Mi" }} - livenessProbe: - httpGet: - host: '127.0.0.1' - path: /healthz - port: 9234 - scheme: HTTP - initialDelaySeconds: 60 - periodSeconds: 10 - timeoutSeconds: 3 - volumeMounts: - - mountPath: /tmp/cilium/config-map - name: cilium-config-path - readOnly: true -{{- if .EtcdManaged }} - - mountPath: /var/lib/etcd-config - name: etcd-config-path - readOnly: true - - mountPath: /var/lib/etcd-secrets - name: etcd-secrets - readOnly: true -{{- end }} - hostNetwork: true - restartPolicy: Always - priorityClassName: system-cluster-critical - serviceAccount: cilium-operator - serviceAccountName: cilium-operator - tolerations: - - operator: Exists - volumes: - # To read the configuration from the config map - - configMap: - name: cilium-config - name: cilium-config-path -{{- if .EtcdManaged }} - # To read the etcd config stored in config maps - - configMap: - defaultMode: 420 - items: - - key: etcd-config - path: etcd.config - name: cilium-config - name: etcd-config-path - # To read the k8s etcd secrets in case the user might want to use TLS - - name: etcd-secrets - hostPath: - path: /etc/kubernetes/pki/cilium - type: Directory -{{- end }} - nodeSelector: - node-role.kubernetes.io/master: "" -{{ if WithDefaultBool .Hubble.Enabled false }} ---- -# Source: cilium/charts/hubble-relay/templates/deployment.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: hubble-relay - labels: - k8s-app: hubble-relay - namespace: kube-system -spec: - replicas: 1 - selector: - matchLabels: - k8s-app: hubble-relay - strategy: - rollingUpdate: - maxUnavailable: 1 - type: RollingUpdate - template: - metadata: - labels: - k8s-app: hubble-relay - spec: - affinity: - podAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: "k8s-app" - operator: In - values: - - cilium - topologyKey: "kubernetes.io/hostname" - containers: - - name: hubble-relay - image: "quay.io/cilium/hubble-relay:{{ .Version }}" - imagePullPolicy: IfNotPresent - command: - - hubble-relay - args: - - "serve" - - "--peer-service=unix:///var/run/cilium/hubble.sock" - - "--listen-address=:4245" - env: - # unfortunately, the addon CAs use only CN - - name: GODEBUG - value: x509ignoreCN=0 - ports: - - name: grpc - containerPort: 4245 - readinessProbe: - tcpSocket: - port: grpc - livenessProbe: - tcpSocket: - port: grpc - volumeMounts: - - mountPath: /var/run/cilium - name: hubble-sock-dir - readOnly: true - - mountPath: /etc/hubble-relay - name: config - readOnly: true - - mountPath: /var/lib/hubble-relay/tls - name: tls - readOnly: true - restartPolicy: Always - serviceAccount: hubble-relay - serviceAccountName: hubble-relay - terminationGracePeriodSeconds: 0 - tolerations: - - operator: Exists - volumes: - - hostPath: - path: /var/run/cilium - type: Directory - name: hubble-sock-dir - - configMap: - name: hubble-relay-config - items: - - key: config.yaml - path: config.yaml - name: config - - projected: - sources: - - secret: - name: hubble-relay-client-certs - items: - - key: tls.crt - path: client.crt - - key: tls.key - path: client.key - - key: ca.crt - path: hubble-server-ca.crt - name: tls 
---- -apiVersion: cert-manager.io/v1 -kind: Certificate -metadata: - labels: - k8s-app: cilium - name: hubble-server-certs - namespace: kube-system -spec: - dnsNames: - - "*.default.hubble-grpc.cilium.io" - issuerRef: - kind: Issuer - name: networking.cilium.io - secretName: hubble-server-certs ---- -apiVersion: cert-manager.io/v1 -kind: Certificate -metadata: - labels: - k8s-app: cilium - name: hubble-relay-client-certs - namespace: kube-system -spec: - dnsNames: - - "hubble-relay-client" - issuerRef: - kind: Issuer - name: networking.cilium.io - usages: - - client auth - secretName: hubble-relay-client-certs -{{ end }} -{{ end }} diff --git a/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.10.yaml.template b/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.10.yaml.template deleted file mode 100644 index fa79d5e131..0000000000 --- a/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.10.yaml.template +++ /dev/null @@ -1,1179 +0,0 @@ -{{ with .Networking.Cilium }} -{{ $semver := (trimPrefix "v" .Version) }} -{{- if CiliumSecret }} -apiVersion: v1 -kind: Secret -metadata: - name: cilium-ipsec-keys - namespace: kube-system -stringData: - {{ CiliumSecret }} ---- -{{- end }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: cilium - namespace: kube-system ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: cilium-operator - namespace: kube-system -{{ if WithDefaultBool .Hubble.Enabled false }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: hubble-relay - namespace: kube-system -{{ end }} ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cilium-config - namespace: kube-system -data: - -{{- if .EtcdManaged }} - kvstore: etcd - kvstore-opt: '{"etcd.config": "/var/lib/etcd-config/etcd.config"}' - - etcd-config: |- - --- - endpoints: - - https://{{ $.MasterInternalName }}:4003 - - trusted-ca-file: '/var/lib/etcd-secrets/etcd-ca.crt' - key-file: '/var/lib/etcd-secrets/etcd-client-cilium.key' - cert-file: '/var/lib/etcd-secrets/etcd-client-cilium.crt' - - enable-k8s-event-handover: "true" -{{ end }} - - # Identity allocation mode selects how identities are shared between cilium - # nodes by setting how they are stored. The options are "crd" or "kvstore". - # - "crd" stores identities in kubernetes as CRDs (custom resource definition). - # These can be queried with: - # kubectl get ciliumid - # - "kvstore" stores identities in a kvstore, etcd or consul, that is - # configured below. Cilium versions before 1.6 supported only the kvstore - # backend. Upgrades from these older cilium versions should continue using - # the kvstore by commenting out the identity-allocation-mode below, or - # setting it to "kvstore". - # (default crd) - identity-allocation-mode: "{{ .IdentityAllocationMode }}" - - # Time to wait before using new identity on endpoint identity change (default 5s) - identity-change-grace-period: "{{ .IdentityChangeGracePeriod }}" - - # If you want to run cilium in debug mode change this value to true - debug: "{{ .Debug }}" - - {{ if .EnablePrometheusMetrics }} - # If you want metrics enabled in all of your Cilium agents, set the port for - # which the Cilium agents will have their metrics exposed. - # This option deprecates the "prometheus-serve-addr" in the - # "cilium-metrics-config" ConfigMap - # NOTE that this will open the port on ALL nodes where Cilium pods are - # scheduled. 
- prometheus-serve-addr: ":{{ .AgentPrometheusPort }}" - operator-prometheus-serve-addr: ":6942" - enable-metrics: "true" - {{ end }} - - {{ if .EnableEncryption }} - {{ if eq .EncryptionType "ipsec" }} - enable-ipsec: "true" - ipsec-key-file: /etc/ipsec/keys - {{ else if eq .EncryptionType "wireguard" }} - enable-wireguard: "true" - {{ end }} - {{ end }} - - # Enable IPv4 addressing. If enabled, all endpoints are allocated an IPv4 - # address. - enable-ipv4: "{{ not IsIPv6Only }}" - # Enable IPv6 addressing. If enabled, all endpoints are allocated an IPv6 - # address. - enable-ipv6: "{{ IsIPv6Only }}" - # If you want cilium monitor to aggregate tracing for packets, set this level - # to "low", "medium", or "maximum". The higher the level, the less packets - # that will be seen in monitor output. - monitor-aggregation: "{{ .MonitorAggregation }}" - # ct-global-max-entries-* specifies the maximum number of connections - # supported across all endpoints, split by protocol: tcp or other. One pair - # of maps uses these values for IPv4 connections, and another pair of maps - # use these values for IPv6 connections. - # - # If these values are modified, then during the next Cilium startup the - # tracking of ongoing connections may be disrupted. This may lead to brief - # policy drops or a change in loadbalancing decisions for a connection. - # - # For users upgrading from Cilium 1.2 or earlier, to minimize disruption - # during the upgrade process, comment out these options. - bpf-ct-global-tcp-max: "{{ .BPFCTGlobalTCPMax }}" - bpf-ct-global-any-max: "{{ .BPFCTGlobalAnyMax }}" - - # BPF load balancing algorithm ("random", "maglev") (default "random") - bpf-lb-algorithm: "{{ .BPFLBAlgorithm }}" - - # Maglev per service backend table size (parameter M) (default 16381) - bpf-lb-maglev-table-size: "{{ .BPFLBMaglevTableSize }}" - - # bpf-nat-global-max specified the maximum number of entries in the - # BPF NAT table. (default 524288) - bpf-nat-global-max: "{{ .BPFNATGlobalMax }}" - - # bpf-neigh-global-max specified the maximum number of entries in the - # BPF neighbor table. (default 524288) - bpf-neigh-global-max: "{{ .BPFNeighGlobalMax }}" - - # bpf-policy-map-max specifies the maximum number of entries in endpoint - # policy map (per endpoint) (default 16384) - bpf-policy-map-max: "{{ .BPFPolicyMapMax }}" - - # bpf-lb-map-max specifies the maximum number of entries in bpf lb service, - # backend and affinity maps. (default 65536) - bpf-lb-map-max: "{{ .BPFLBMapMax }}" - - # bpf-lb-sock-hostns-only enables skipping socket LB for services when inside a pod namespace, - # in favor of service LB at the pod interface. Socket LB is still used when in the host namespace. - # Required by service mesh (e.g., Istio, Linkerd). (default false) - bpf-lb-sock-hostns-only: "{{ .BPFLBSockHostNSOnly }}" - - {{ if .ChainingMode }} - cni-chaining-mode: "{{ .ChainingMode }}" - {{ end }} - - # enable-bpf-masquerade enables masquerading packets from endpoints leaving - # the host with BPF instead of iptables. (default false) - enable-bpf-masquerade: "{{ and (WithDefaultBool .EnableBPFMasquerade false) (not IsIPv6Only) }}" - - # Pre-allocation of map entries allows per-packet latency to be reduced, at - # the expense of up-front memory allocation for the entries in the maps. The - # default value below will minimize memory usage in the default installation; - # users who are sensitive to latency may consider setting this to "true". - # - # This option was introduced in Cilium 1.4. 
Cilium 1.3 and earlier ignore - # this option and behave as though it is set to "true". - # - # If this value is modified, then during the next Cilium startup the restore - # of existing endpoints and tracking of ongoing connections may be disrupted. - # This may lead to policy drops or a change in loadbalancing decisions for a - # connection for some time. Endpoints may need to be recreated to restore - # connectivity. - # - # If this option is set to "false" during an upgrade from 1.3 or earlier to - # 1.4 or later, then it may cause one-time disruptions during the upgrade. - preallocate-bpf-maps: "{{- if .PreallocateBPFMaps -}}true{{- else -}}false{{- end -}}" - # Regular expression matching compatible Istio sidecar istio-proxy - # container image names - sidecar-istio-proxy-image: "{{ .SidecarIstioProxyImage }}" - # Encapsulation mode for communication between nodes - # Possible values: - # - disabled - # - vxlan (default) - # - geneve - tunnel: "{{ .Tunnel }}" - - # Name of the cluster. Only relevant when building a mesh of clusters. - cluster-name: "{{ .ClusterName }}" - - # DNS response code for rejecting DNS requests, - # available options are "nameError" and "refused" - tofqdns-dns-reject-response-code: "{{ .ToFQDNsDNSRejectResponseCode }}" - # This option is disabled by default starting from version 1.4.x in favor - # of a more powerful DNS proxy-based implementation, see [0] for details. - # Enable this option if you want to use FQDN policies but do not want to use - # the DNS proxy. - # - # To ease upgrade, users may opt to set this option to "true". - # Otherwise please refer to the Upgrade Guide [1] which explains how to - # prepare policy rules for upgrade. - # - # [0] http://docs.cilium.io/en/stable/policy/language/#dns-based - # [1] http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action - tofqdns-enable-poller: "{{- if .ToFQDNsEnablePoller -}}true{{- else -}}false{{- end -}}" - {{- if not (semverCompare ">=1.10.4 || ~1.9.10" $semver) }} - # wait-bpf-mount makes init container wait until bpf filesystem is mounted - wait-bpf-mount: "false" - {{- end }} - # Enable fetching of container-runtime specific metadata - # - # By default, the Kubernetes pod and namespace labels are retrieved and - # associated with endpoints for identification purposes. By integrating - # with the container runtime, container runtime specific labels can be - # retrieved, such labels will be prefixed with container: - # - # CAUTION: The container runtime labels can include information such as pod - # annotations which may result in each pod being associated a unique set of - # labels which can result in excessive security identities being allocated. - # Please review the labels filter when enabling container runtime labels. - # - # Supported values: - # - containerd - # - crio - # - docker - # - none - # - auto (automatically detect the container runtime) - # - masquerade: "{{ .Masquerade }}" - enable-ipv6-masquerade: "false" - install-iptables-rules: "{{ WithDefaultBool .InstallIptablesRules true }}" - auto-direct-node-routes: "{{ .AutoDirectNodeRoutes }}" - {{ if .EnableHostReachableServices }} - enable-host-reachable-services: "{{ .EnableHostReachableServices }}" - {{ end }} - enable-node-port: "{{ .EnableNodePort }}" - kube-proxy-replacement: "{{- if .EnableNodePort -}}strict{{- else -}}partial{{- end -}}" - - {{ with .IPAM }} - ipam: {{ . }} - {{ if eq . 
"eni" }} - enable-endpoint-routes: "true" - auto-create-cilium-node-resource: "true" - blacklist-conflicting-routes: "false" - {{ end }} - {{ end }} - - # Disables usage of CiliumEndpoint CRD - disable-endpoint-crd: "{{ .DisableEndpointCRD }}" - - # Enable connectivity health checking between virtual endpoints (default true) - enable-endpoint-health-checking: "{{ .EnableEndpointHealthChecking }}" - - # Enable use of remote node identity (default false) - enable-remote-node-identity: "{{ .EnableRemoteNodeIdentity }}" - - # enable-l7-proxy enables L7 proxy for L7 policy enforcement. (default true) - enable-l7-proxy: "{{ .EnableL7Proxy }}" - - cgroup-root: /run/cilium/cgroupv2 - - disable-cnp-status-updates: "{{ .DisableCNPStatusUpdates }}" - - enable-service-topology: "{{ .EnableServiceTopology }}" - - {{ if WithDefaultBool .Hubble.Enabled false }} - # Enable Hubble gRPC service. - enable-hubble: "true" - # UNIX domain socket for Hubble server to listen to. - hubble-socket-path: "/var/run/cilium/hubble.sock" - # An additional address for Hubble server to listen to (e.g. ":4244"). - hubble-listen-address: ":4244" - hubble-disable-tls: "false" - hubble-tls-cert-file: /var/lib/cilium/tls/hubble/tls.crt - hubble-tls-key-file: /var/lib/cilium/tls/hubble/tls.key - hubble-tls-client-ca-files: /var/lib/cilium/tls/hubble/ca.crt - {{ if .Hubble.Metrics }} - hubble-metrics-server: ":9091" - hubble-metrics: - {{- range .Hubble.Metrics }} - {{ . }} - {{- end }} - {{ end }} - {{ end }} - -{{ if WithDefaultBool .Hubble.Enabled false }} ---- -# Source: cilium/templates/hubble-relay-configmap.yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: hubble-relay-config - namespace: kube-system -data: - config.yaml: | - peer-service: unix:///var/run/cilium/hubble.sock - listen-address: :4245 - - disable-server-tls: true - - tls-client-cert-file: /var/lib/hubble-relay/tls/client.crt - tls-client-key-file: /var/lib/hubble-relay/tls/client.key - tls-hubble-server-ca-files: /var/lib/hubble-relay/tls/hubble-server-ca.crt - -{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: cilium -rules: -- apiGroups: - - networking.k8s.io - resources: - - networkpolicies - verbs: - - get - - list - - watch -- apiGroups: - - discovery.k8s.io - resources: - - endpointslices - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - namespaces - - services - - nodes - - endpoints - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - pods - - pods/finalizers - verbs: - - get - - list - - watch - - update - - delete -- apiGroups: - - "" - resources: - - nodes - verbs: - - get - - list - - watch - - update -- apiGroups: - - "" - resources: - - nodes - - nodes/status - verbs: - - patch -- apiGroups: - - apiextensions.k8s.io - resources: - - customresourcedefinitions - verbs: - - create - - list - - watch - - update - - get -- apiGroups: - - cilium.io - resources: - - ciliumnetworkpolicies - - ciliumnetworkpolicies/status - - ciliumnetworkpolicies/finalizers - - ciliumclusterwidenetworkpolicies - - ciliumclusterwidenetworkpolicies/status - - ciliumclusterwidenetworkpolicies/finalizers - - ciliumendpoints - - ciliumendpoints/status - - ciliumendpoints/finalizers - - ciliumnodes - - ciliumnodes/status - - ciliumnodes/finalizers - - ciliumidentities - - ciliumidentities/finalizers - - ciliumlocalredirectpolicies - - ciliumlocalredirectpolicies/status - - ciliumlocalredirectpolicies/finalizers - - ciliumegressnatpolicies - verbs: - - '*' ---- -apiVersion: 
rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: cilium-operator -rules: -- apiGroups: - - "" - resources: - # to automatically delete [core|kube]dns pods so that are starting to being - # managed by Cilium - - pods - verbs: - - get - - list - - watch - - delete -- apiGroups: - - discovery.k8s.io - resources: - - endpointslices - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - services - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - # to perform LB IP allocation for BGP - - services/status - verbs: - - update -- apiGroups: - - "" - resources: - # to perform the translation of a CNP that contains `ToGroup` to its endpoints - - services - - endpoints - # to check apiserver connectivity - - namespaces - verbs: - - get - - list - - watch -- apiGroups: - - cilium.io - resources: - - ciliumnetworkpolicies - - ciliumnetworkpolicies/status - - ciliumnetworkpolicies/finalizers - - ciliumclusterwidenetworkpolicies - - ciliumclusterwidenetworkpolicies/status - - ciliumclusterwidenetworkpolicies/finalizers - - ciliumendpoints - - ciliumendpoints/status - - ciliumendpoints/finalizers - - ciliumnodes - - ciliumnodes/status - - ciliumnodes/finalizers - - ciliumidentities - - ciliumidentities/status - - ciliumidentities/finalizers - - ciliumlocalredirectpolicies - - ciliumlocalredirectpolicies/status - - ciliumlocalredirectpolicies/finalizers - verbs: - - '*' -- apiGroups: - - apiextensions.k8s.io - resources: - - customresourcedefinitions - verbs: - - create - - get - - list - - update - - watch -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - create - - get - - update -{{ if WithDefaultBool .Hubble.Enabled false }} ---- -# Source: cilium/templates/hubble-relay-clusterrole.yaml -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: hubble-relay -rules: - - apiGroups: - - "" - resources: - - componentstatuses - - endpoints - - namespaces - - nodes - - pods - - services - verbs: - - get - - list - - watch -{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: cilium -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cilium -subjects: -- kind: ServiceAccount - name: cilium - namespace: kube-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: cilium-operator -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cilium-operator -subjects: -- kind: ServiceAccount - name: cilium-operator - namespace: kube-system -{{ if WithDefaultBool .Hubble.Enabled false }} ---- -# Source: cilium/templates/hubble-relay-clusterrolebinding.yaml -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: hubble-relay -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: hubble-relay -subjects: -- kind: ServiceAccount - namespace: kube-system - name: hubble-relay ---- -# Source: cilium/templates/hubble-relay-service.yaml -kind: Service -apiVersion: v1 -metadata: - name: hubble-relay - namespace: kube-system - labels: - k8s-app: hubble-relay -spec: - type: ClusterIP - selector: - k8s-app: hubble-relay - ports: - - protocol: TCP - port: 80 - targetPort: 4245 -{{ end }} ---- -apiVersion: apps/v1 -kind: DaemonSet -metadata: - labels: - k8s-app: cilium - kubernetes.io/cluster-service: "true" - name: cilium - namespace: kube-system -spec: - selector: - matchLabels: - k8s-app: cilium - kubernetes.io/cluster-service: "true" - 
updateStrategy: - type: OnDelete - template: - metadata: - annotations: - # This annotation plus the CriticalAddonsOnly toleration makes - # cilium to be a critical pod in the cluster, which ensures cilium - # gets priority scheduling. - # https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/ - scheduler.alpha.kubernetes.io/critical-pod: "" - {{ if .EnablePrometheusMetrics }} - # Annotation required for prometheus auto-discovery scraping - # https://docs.cilium.io/en/v1.9/operations/metrics/#installation - prometheus.io/scrape: "true" - prometheus.io/port: "{{ .AgentPrometheusPort }}" - {{ end }} -{{- with .AgentPodAnnotations }} - {{- . | nindent 8 }} -{{- end }} - labels: - k8s-app: cilium - kubernetes.io/cluster-service: "true" - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/os - operator: In - values: - - linux - containers: - - args: - - --config-dir=/tmp/cilium/config-map - command: - - cilium-agent - startupProbe: - httpGet: - host: '{{- if IsIPv6Only -}}::1{{- else -}}127.0.0.1{{- end -}}' - path: /healthz - port: 9876 - scheme: HTTP - httpHeaders: - - name: "brief" - value: "true" - failureThreshold: 105 - periodSeconds: 2 - successThreshold: - livenessProbe: - httpGet: - host: '{{- if IsIPv6Only -}}::1{{- else -}}127.0.0.1{{- end -}}' - path: /healthz - port: 9876 - scheme: HTTP - httpHeaders: - - name: "brief" - value: "true" - failureThreshold: 10 - periodSeconds: 30 - successThreshold: 1 - timeoutSeconds: 5 - resources: - requests: - cpu: {{ or .CPURequest "25m" }} - memory: {{ or .MemoryRequest "128Mi" }} - readinessProbe: - httpGet: - host: '{{- if IsIPv6Only -}}::1{{- else -}}127.0.0.1{{- end -}}' - path: /healthz - port: 9876 - scheme: HTTP - httpHeaders: - - name: "brief" - value: "true" - failureThreshold: 3 - initialDelaySeconds: 5 - periodSeconds: 30 - successThreshold: 1 - timeoutSeconds: 5 - env: - - name: K8S_NODE_NAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: spec.nodeName - - name: CILIUM_K8S_NAMESPACE - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: metadata.namespace - - name: CILIUM_CLUSTERMESH_CONFIG - value: /var/lib/cilium/clustermesh/ - - name: CILIUM_CNI_CHAINING_MODE - valueFrom: - configMapKeyRef: - key: cni-chaining-mode - name: cilium-config - optional: true - - name: CILIUM_CUSTOM_CNI_CONF - valueFrom: - configMapKeyRef: - key: custom-cni-conf - name: cilium-config - optional: true - - name: KUBERNETES_SERVICE_HOST - value: "{{ $.MasterInternalName }}" - - name: KUBERNETES_SERVICE_PORT - value: "443" - {{ with .EnablePolicy }} - - name: CILIUM_ENABLE_POLICY - value: {{ . 
}} - {{ end }} - image: "quay.io/cilium/cilium:{{ .Version }}" - imagePullPolicy: IfNotPresent - lifecycle: - postStart: - exec: - command: - - /cni-install.sh - - --cni-exclusive=true - preStop: - exec: - command: - - /cni-uninstall.sh - name: cilium-agent - {{ if or .EnablePrometheusMetrics .Hubble.Metrics }} - ports: - {{ if .EnablePrometheusMetrics }} - - containerPort: {{ .AgentPrometheusPort }} - name: prometheus - protocol: TCP - {{ end }} - {{- if .Hubble.Metrics }} - - containerPort: 9091 - hostPort: 9091 - name: hubble-metrics - protocol: TCP - {{- end }} - {{ end }} - - securityContext: - privileged: true - volumeMounts: - - mountPath: /sys/fs/bpf - name: bpf-maps - {{- if semverCompare ">=1.10.4 || ~1.9.10" $semver }} - mountPropagation: Bidirectional - {{- end }} - - mountPath: /var/run/cilium - name: cilium-run - - mountPath: /host/opt/cni/bin - name: cni-path - - mountPath: /host/etc/cni/net.d - name: etc-cni-netd -{{ if .EtcdManaged }} - - mountPath: /var/lib/etcd-config - name: etcd-config-path - readOnly: true - - mountPath: /var/lib/etcd-secrets - name: etcd-secrets - readOnly: true -{{ end }} - - mountPath: /var/lib/cilium/clustermesh - name: clustermesh-secrets - readOnly: true - - mountPath: /tmp/cilium/config-map - name: cilium-config-path - readOnly: true - # Needed to be able to load kernel modules - - mountPath: /lib/modules - name: lib-modules - readOnly: true - - mountPath: /run/xtables.lock - name: xtables-lock -{{ if WithDefaultBool .Hubble.Enabled false }} - - mountPath: /var/lib/cilium/tls/hubble - name: hubble-tls - readOnly: true -{{ end }} -{{ if CiliumSecret }} - - mountPath: /etc/ipsec - name: cilium-ipsec-secrets -{{ end }} - hostNetwork: true - initContainers: - - command: - - /init-container.sh - env: - - name: CILIUM_ALL_STATE - valueFrom: - configMapKeyRef: - key: clean-cilium-state - name: cilium-config - optional: true - - name: CILIUM_BPF_STATE - valueFrom: - configMapKeyRef: - key: clean-cilium-bpf-state - name: cilium-config - optional: true - {{- if not (semverCompare ">=1.10.4 || ~1.9.10" $semver) }} - - name: CILIUM_WAIT_BPF_MOUNT - valueFrom: - configMapKeyRef: - key: wait-bpf-mount - name: cilium-config - optional: true - {{- end }} - image: "quay.io/cilium/cilium:{{ .Version }}" - imagePullPolicy: IfNotPresent - name: clean-cilium-state - securityContext: - privileged: true - volumeMounts: - - mountPath: /sys/fs/bpf - name: bpf-maps - {{- if not (semverCompare ">=1.10.4 || ~1.9.10" $semver) }} - mountPropagation: HostToContainer - {{- end }} - # Required to mount cgroup filesystem from the host to cilium agent pod - - mountPath: /run/cilium/cgroupv2 - name: cilium-cgroup - mountPropagation: HostToContainer - - mountPath: /var/run/cilium - name: cilium-run - resources: - requests: - cpu: 100m - memory: 100Mi - limits: - memory: 100Mi - restartPolicy: Always - priorityClassName: system-node-critical - serviceAccount: cilium - serviceAccountName: cilium - terminationGracePeriodSeconds: 1 - tolerations: - - operator: Exists - volumes: - # To keep state between restarts / upgrades - - hostPath: - path: /var/run/cilium - type: DirectoryOrCreate - name: cilium-run - # To keep state between restarts / upgrades for bpf maps - - hostPath: - path: /sys/fs/bpf - type: DirectoryOrCreate - name: bpf-maps - # To install cilium cni plugin in the host - - hostPath: - path: /opt/cni/bin - type: DirectoryOrCreate - name: cni-path - # To keep state between restarts / upgrades for cgroup2 filesystem - - hostPath: - path: /run/cilium/cgroupv2 - type: 
Directory - name: cilium-cgroup - # To install cilium cni configuration in the host - - hostPath: - path: /etc/cni/net.d - type: DirectoryOrCreate - name: etc-cni-netd - # To be able to load kernel modules - - hostPath: - path: /lib/modules - name: lib-modules - # To access iptables concurrently with other processes (e.g. kube-proxy) - - hostPath: - path: /run/xtables.lock - type: FileOrCreate - name: xtables-lock - # To read the clustermesh configuration -{{- if .EtcdManaged }} - # To read the etcd config stored in config maps - - configMap: - defaultMode: 420 - items: - - key: etcd-config - path: etcd.config - name: cilium-config - name: etcd-config-path - # To read the Cilium etcd secrets in case the user might want to use TLS - - name: etcd-secrets - hostPath: - path: /etc/kubernetes/pki/cilium - type: Directory -{{- end }} - - name: clustermesh-secrets - secret: - defaultMode: 420 - optional: true - secretName: cilium-clustermesh - # To read the configuration from the config map - - configMap: - name: cilium-config - name: cilium-config-path -{{ if CiliumSecret }} - - name: cilium-ipsec-secrets - secret: - secretName: cilium-ipsec-keys -{{ end }} -{{ if WithDefaultBool .Hubble.Enabled false }} - - name: hubble-tls - secret: - secretName: hubble-server-certs - optional: true -{{ end }} ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - io.cilium/app: operator - name: cilium-operator - name: cilium-operator - namespace: kube-system -spec: - replicas: {{ ControlPlaneControllerReplicas false }} - selector: - matchLabels: - io.cilium/app: operator - name: cilium-operator - strategy: - rollingUpdate: - maxSurge: 1 - maxUnavailable: 1 - type: RollingUpdate - template: - metadata: - labels: - io.cilium/app: operator - name: cilium-operator - spec: - nodeSelector: null - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: node-role.kubernetes.io/control-plane - operator: Exists - - matchExpressions: - - key: node-role.kubernetes.io/master - operator: Exists - containers: - - args: - - "--config-dir=/tmp/cilium/config-map" - - "--debug=$(CILIUM_DEBUG)" - - "--eni-tags={{ CloudLabels }}" - command: - - cilium-operator - env: - - name: K8S_NODE_NAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: spec.nodeName - - name: CILIUM_K8S_NAMESPACE - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: metadata.namespace - - name: CILIUM_DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: cilium-config - optional: true - - name: KUBERNETES_SERVICE_HOST - value: "{{ $.MasterInternalName }}" - - name: KUBERNETES_SERVICE_PORT - value: "443" - image: "quay.io/cilium/operator:{{ .Version }}" - imagePullPolicy: IfNotPresent - name: cilium-operator - {{ if .EnablePrometheusMetrics }} - ports: - - containerPort: 6942 - hostPort: 6942 - name: prometheus - protocol: TCP - {{ end }} - resources: - requests: - cpu: {{ or .CPURequest "25m" }} - memory: {{ or .MemoryRequest "128Mi" }} - livenessProbe: - httpGet: - host: '127.0.0.1' - path: /healthz - port: 9234 - scheme: HTTP - initialDelaySeconds: 60 - periodSeconds: 10 - timeoutSeconds: 3 - volumeMounts: - - mountPath: /tmp/cilium/config-map - name: cilium-config-path - readOnly: true -{{- if .EtcdManaged }} - - mountPath: /var/lib/etcd-config - name: etcd-config-path - readOnly: true - - mountPath: /var/lib/etcd-secrets - name: etcd-secrets - readOnly: true -{{- end }} - hostNetwork: true - restartPolicy: Always - priorityClassName: system-cluster-critical - 
serviceAccount: cilium-operator - serviceAccountName: cilium-operator - tolerations: - - operator: Exists - topologySpreadConstraints: - - maxSkew: 1 - topologyKey: "topology.kubernetes.io/zone" - whenUnsatisfiable: ScheduleAnyway - labelSelector: - matchLabels: - io.cilium/app: operator - name: cilium-operator - - maxSkew: 1 - topologyKey: "kubernetes.io/hostname" - whenUnsatisfiable: DoNotSchedule - labelSelector: - matchLabels: - io.cilium/app: operator - name: cilium-operator - volumes: - # To read the configuration from the config map - - configMap: - name: cilium-config - name: cilium-config-path -{{- if .EtcdManaged }} - # To read the etcd config stored in config maps - - configMap: - defaultMode: 420 - items: - - key: etcd-config - path: etcd.config - name: cilium-config - name: etcd-config-path - # To read the k8s etcd secrets in case the user might want to use TLS - - name: etcd-secrets - hostPath: - path: /etc/kubernetes/pki/cilium - type: Directory -{{- end }} -{{ if WithDefaultBool .Hubble.Enabled false }} ---- -# Source: cilium/charts/hubble-relay/templates/deployment.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: hubble-relay - labels: - k8s-app: hubble-relay - namespace: kube-system -spec: - replicas: 2 - selector: - matchLabels: - k8s-app: hubble-relay - strategy: - rollingUpdate: - maxUnavailable: 1 - type: RollingUpdate - template: - metadata: - labels: - k8s-app: hubble-relay - spec: - containers: - - name: hubble-relay - image: "quay.io/cilium/hubble-relay:{{ .Version }}" - imagePullPolicy: IfNotPresent - command: - - hubble-relay - args: - - "serve" - - "--peer-service=unix:///var/run/cilium/hubble.sock" - - "--listen-address=:4245" - env: - # unfortunately, the addon CAs use only CN - - name: GODEBUG - value: x509ignoreCN=0 - ports: - - name: grpc - containerPort: 4245 - readinessProbe: - tcpSocket: - port: grpc - livenessProbe: - tcpSocket: - port: grpc - volumeMounts: - - mountPath: /var/run/cilium - name: hubble-sock-dir - readOnly: true - - mountPath: /etc/hubble-relay - name: config - readOnly: true - - mountPath: /var/lib/hubble-relay/tls - name: tls - readOnly: true - restartPolicy: Always - serviceAccount: hubble-relay - serviceAccountName: hubble-relay - terminationGracePeriodSeconds: 0 - topologySpreadConstraints: - - maxSkew: 1 - topologyKey: "topology.kubernetes.io/zone" - whenUnsatisfiable: ScheduleAnyway - labelSelector: - matchLabels: - k8s-app: hubble-relay - - maxSkew: 1 - topologyKey: "kubernetes.io/hostname" - whenUnsatisfiable: DoNotSchedule - labelSelector: - matchLabels: - k8s-app: hubble-relay - volumes: - - hostPath: - path: /var/run/cilium - type: Directory - name: hubble-sock-dir - - configMap: - name: hubble-relay-config - items: - - key: config.yaml - path: config.yaml - name: config - - projected: - sources: - - secret: - name: hubble-relay-client-certs - items: - - key: tls.crt - path: client.crt - - key: tls.key - path: client.key - - key: ca.crt - path: hubble-server-ca.crt - name: tls ---- -apiVersion: cert-manager.io/v1 -kind: Certificate -metadata: - labels: - k8s-app: cilium - name: hubble-server-certs - namespace: kube-system -spec: - dnsNames: - - "*.default.hubble-grpc.cilium.io" - issuerRef: - kind: Issuer - name: networking.cilium.io - secretName: hubble-server-certs ---- -apiVersion: cert-manager.io/v1 -kind: Certificate -metadata: - labels: - k8s-app: cilium - name: hubble-relay-client-certs - namespace: kube-system -spec: - dnsNames: - - "hubble-relay-client" - issuerRef: - kind: Issuer - name: 
networking.cilium.io
-  usages:
-  - client auth
-  secretName: hubble-relay-client-certs
-{{ end }}
-{{ end }}
----
-apiVersion: policy/v1beta1
-kind: PodDisruptionBudget
-metadata:
-  name: cilium-operator
-  namespace: kube-system
-  labels:
-    io.cilium/app: operator
-    name: cilium-operator
-spec:
-  selector:
-    matchLabels:
-      io.cilium/app: operator
-      name: cilium-operator
-  maxUnavailable: 1
diff --git a/upup/pkg/fi/cloudup/bootstrapchannelbuilder/cilium.go b/upup/pkg/fi/cloudup/bootstrapchannelbuilder/cilium.go
index f36e9fc2f5..940c170e01 100644
--- a/upup/pkg/fi/cloudup/bootstrapchannelbuilder/cilium.go
+++ b/upup/pkg/fi/cloudup/bootstrapchannelbuilder/cilium.go
@@ -17,10 +17,6 @@ limitations under the License.
 package bootstrapchannelbuilder
 
 import (
-	"fmt"
-
-	"github.com/blang/semver/v4"
-
 	"k8s.io/kops/channels/pkg/api"
 	"k8s.io/kops/upup/pkg/fi"
 )
@@ -28,78 +24,23 @@ import (
 func addCiliumAddon(b *BootstrapChannelBuilder, addons *AddonList) error {
 	cilium := b.Cluster.Spec.Networking.Cilium
 	if cilium != nil {
-		ver, err := semver.ParseTolerant(cilium.Version)
-		if err != nil {
-			return fmt.Errorf("Failed to parse cilium version: %w", err)
-		}
-
 		key := "networking.cilium.io"
-		if ver.Minor < 9 {
-			{
-				id := "k8s-1.12"
-				location := key + "/" + id + "-v1.8.yaml"
 
-				addons.Add(&api.AddonSpec{
-					Name:               fi.String(key),
-					Selector:           networkingSelector(),
-					Manifest:           fi.String(location),
-					Id:                 id,
-					NeedsRollingUpdate: "all",
-				})
-			}
-		} else if ver.Minor == 9 {
-			{
-				id := "k8s-1.12"
-				location := key + "/" + id + "-v1.9.yaml"
+		{
+			id := "k8s-1.16"
+			location := key + "/" + id + "-v1.11.yaml"
 
-				addon := &api.AddonSpec{
-					Name:               fi.String(key),
-					Selector:           networkingSelector(),
-					Manifest:           fi.String(location),
-					Id:                 id,
-					NeedsRollingUpdate: "all",
-				}
-				if cilium.Hubble != nil && fi.BoolValue(cilium.Hubble.Enabled) {
-					addon.NeedsPKI = true
-				}
-				addons.Add(addon)
+			addon := &api.AddonSpec{
+				Name:               fi.String(key),
+				Selector:           networkingSelector(),
+				Manifest:           fi.String(location),
+				Id:                 id,
+				NeedsRollingUpdate: "all",
 			}
-		} else if ver.Minor == 10 || (ver.Minor == 11 && ver.Patch < 5) {
-			{
-				id := "k8s-1.16"
-				location := key + "/" + id + "-v1.10.yaml"
-
-				addon := &api.AddonSpec{
-					Name:               fi.String(key),
-					Selector:           networkingSelector(),
-					Manifest:           fi.String(location),
-					Id:                 id,
-					NeedsRollingUpdate: "all",
-				}
-				if cilium.Hubble != nil && fi.BoolValue(cilium.Hubble.Enabled) {
-					addon.NeedsPKI = true
-				}
-				addons.Add(addon)
+			if cilium.Hubble != nil && fi.BoolValue(cilium.Hubble.Enabled) {
+				addon.NeedsPKI = true
 			}
-		} else if ver.Minor == 11 {
-			{
-				id := "k8s-1.16"
-				location := key + "/" + id + "-v1.11.yaml"
-
-				addon := &api.AddonSpec{
-					Name:               fi.String(key),
-					Selector:           networkingSelector(),
-					Manifest:           fi.String(location),
-					Id:                 id,
-					NeedsRollingUpdate: "all",
-				}
-				if cilium.Hubble != nil && fi.BoolValue(cilium.Hubble.Enabled) {
-					addon.NeedsPKI = true
-				}
-				addons.Add(addon)
-			}
-		} else {
-			return fmt.Errorf("unknown cilium version: %q", cilium.Version)
+			addons.Add(addon)
 		}
 	}
 	return nil
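After this change addCiliumAddon no longer branches on the parsed Cilium version: the k8s-1.16 / v1.11 manifest is selected unconditionally, and rejecting unsupported versions is left to cluster validation. Below is a minimal standalone sketch of the resulting selection behavior; addonSpec and ciliumAddon are illustrative stand-ins, not the real kops api.AddonSpec or builder types.

package main

import "fmt"

// addonSpec is a reduced stand-in for kops' api.AddonSpec, keeping only the
// fields that addCiliumAddon still populates after the simplification.
type addonSpec struct {
	Name               string
	Manifest           string
	Id                 string
	NeedsRollingUpdate string
	NeedsPKI           bool
}

// ciliumAddon mirrors the post-change logic: a single manifest with no semver
// branching; PKI is only needed when Hubble (and thus cert-manager) is enabled.
func ciliumAddon(hubbleEnabled bool) addonSpec {
	key := "networking.cilium.io"
	id := "k8s-1.16"
	return addonSpec{
		Name:               key,
		Manifest:           key + "/" + id + "-v1.11.yaml",
		Id:                 id,
		NeedsRollingUpdate: "all",
		NeedsPKI:           hubbleEnabled,
	}
}

func main() {
	// Both configurations resolve to the same manifest; only NeedsPKI differs.
	for _, hubble := range []bool{false, true} {
		fmt.Printf("hubble=%v -> %+v\n", hubble, ciliumAddon(hubble))
	}
}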