# Pulled and modified from: https://docs.projectcalico.org/v3.9/manifests/calico-typha.yaml --- # Source: calico/templates/calico-config.yaml # This ConfigMap is used to configure a self-hosted Calico installation. kind: ConfigMap apiVersion: v1 metadata: name: calico-config namespace: kube-system labels: role.kubernetes.io/networking: "1" data: # You must set a non-zero value for Typha replicas below. typha_service_name: "{{- if .Networking.Calico.TyphaReplicas -}}calico-typha{{- else -}}none{{- end -}}" # Configure the backend to use. calico_backend: "bird" # Configure the MTU to use {{- if .Networking.Calico.MTU }} veth_mtu: "{{ .Networking.Calico.MTU }}" {{- else }} veth_mtu: "{{- if eq .CloudProvider "openstack" -}}1430{{- else -}}1440{{- end -}}" {{- end }} # The CNI network configuration to install on each node. The special # values in this config will be automatically populated. cni_network_config: |- { "name": "k8s-pod-network", "cniVersion": "0.3.1", "plugins": [ { "type": "calico", "log_level": "info", "datastore_type": "kubernetes", "nodename": "__KUBERNETES_NODE_NAME__", "mtu": __CNI_MTU__, "ipam": { "type": "calico-ipam" }, "policy": { "type": "k8s" }, "kubernetes": { "kubeconfig": "__KUBECONFIG_FILEPATH__" } }, { "type": "portmap", "snat": true, "capabilities": {"portMappings": true} } ] } --- # Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: felixconfigurations.crd.projectcalico.org labels: role.kubernetes.io/networking: "1" spec: scope: Cluster group: crd.projectcalico.org version: v1 names: kind: FelixConfiguration plural: felixconfigurations singular: felixconfiguration --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: ipamblocks.crd.projectcalico.org labels: role.kubernetes.io/networking: "1" spec: scope: Cluster group: crd.projectcalico.org version: v1 names: kind: IPAMBlock plural: ipamblocks singular: ipamblock --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: blockaffinities.crd.projectcalico.org labels: role.kubernetes.io/networking: "1" spec: scope: Cluster group: crd.projectcalico.org version: v1 names: kind: BlockAffinity plural: blockaffinities singular: blockaffinity --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: ipamhandles.crd.projectcalico.org labels: role.kubernetes.io/networking: "1" spec: scope: Cluster group: crd.projectcalico.org version: v1 names: kind: IPAMHandle plural: ipamhandles singular: ipamhandle --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: ipamconfigs.crd.projectcalico.org labels: role.kubernetes.io/networking: "1" spec: scope: Cluster group: crd.projectcalico.org version: v1 names: kind: IPAMConfig plural: ipamconfigs singular: ipamconfig --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: bgppeers.crd.projectcalico.org labels: role.kubernetes.io/networking: "1" spec: scope: Cluster group: crd.projectcalico.org version: v1 names: kind: BGPPeer plural: bgppeers singular: bgppeer --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: bgpconfigurations.crd.projectcalico.org labels: role.kubernetes.io/networking: "1" spec: scope: Cluster group: crd.projectcalico.org version: v1 names: kind: BGPConfiguration plural: bgpconfigurations singular: bgpconfiguration --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: ippools.crd.projectcalico.org labels: role.kubernetes.io/networking: "1" spec: scope: Cluster group: crd.projectcalico.org version: v1 names: kind: IPPool plural: ippools singular: ippool --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: hostendpoints.crd.projectcalico.org labels: role.kubernetes.io/networking: "1" spec: scope: Cluster group: crd.projectcalico.org version: v1 names: kind: HostEndpoint plural: hostendpoints singular: hostendpoint --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: clusterinformations.crd.projectcalico.org labels: role.kubernetes.io/networking: "1" spec: scope: Cluster group: crd.projectcalico.org version: v1 names: kind: ClusterInformation plural: clusterinformations singular: clusterinformation --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: globalnetworkpolicies.crd.projectcalico.org labels: role.kubernetes.io/networking: "1" spec: scope: Cluster group: crd.projectcalico.org version: v1 names: kind: GlobalNetworkPolicy plural: globalnetworkpolicies singular: globalnetworkpolicy --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: globalnetworksets.crd.projectcalico.org labels: role.kubernetes.io/networking: "1" spec: scope: Cluster group: crd.projectcalico.org version: v1 names: kind: GlobalNetworkSet plural: globalnetworksets singular: globalnetworkset --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: networkpolicies.crd.projectcalico.org labels: role.kubernetes.io/networking: "1" spec: scope: Namespaced group: crd.projectcalico.org version: v1 names: kind: NetworkPolicy plural: networkpolicies singular: networkpolicy --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: networksets.crd.projectcalico.org labels: role.kubernetes.io/networking: "1" spec: scope: Namespaced group: crd.projectcalico.org version: v1 names: kind: NetworkSet plural: networksets singular: networkset --- # Source: calico/templates/rbac.yaml # Include a clusterrole for the kube-controllers component, # and bind it to the calico-kube-controllers serviceaccount. kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: name: calico-kube-controllers labels: role.kubernetes.io/networking: "1" rules: # Nodes are watched to monitor for deletions. - apiGroups: [""] resources: - nodes verbs: - watch - list - get # Pods are queried to check for existence. - apiGroups: [""] resources: - pods verbs: - get # IPAM resources are manipulated when nodes are deleted. - apiGroups: ["crd.projectcalico.org"] resources: - ippools verbs: - list - apiGroups: ["crd.projectcalico.org"] resources: - blockaffinities - ipamblocks - ipamhandles verbs: - get - list - create - update - delete # Needs access to update clusterinformations. - apiGroups: ["crd.projectcalico.org"] resources: - clusterinformations verbs: - get - create - update --- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: name: calico-kube-controllers labels: role.kubernetes.io/networking: "1" roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: calico-kube-controllers subjects: - kind: ServiceAccount name: calico-kube-controllers namespace: kube-system --- # Include a clusterrole for the calico-node DaemonSet, # and bind it to the calico-node serviceaccount. kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: name: calico-node labels: role.kubernetes.io/networking: "1" rules: # The CNI plugin needs to get pods, nodes, and namespaces. - apiGroups: [""] resources: - pods - nodes - namespaces verbs: - get - apiGroups: [""] resources: - endpoints - services verbs: # Used to discover service IPs for advertisement. - watch - list # Used to discover Typhas. - get - apiGroups: [""] resources: - nodes/status verbs: # Needed for clearing NodeNetworkUnavailable flag. - patch # Calico stores some configuration information in node annotations. - update # Watch for changes to Kubernetes NetworkPolicies. - apiGroups: ["networking.k8s.io"] resources: - networkpolicies verbs: - watch - list # Used by Calico for policy information. - apiGroups: [""] resources: - pods - namespaces - serviceaccounts verbs: - list - watch # The CNI plugin patches pods/status. - apiGroups: [""] resources: - pods/status verbs: - patch # Calico monitors various CRDs for config. - apiGroups: ["crd.projectcalico.org"] resources: - globalfelixconfigs - felixconfigurations - bgppeers - globalbgpconfigs - bgpconfigurations - ippools - ipamblocks - globalnetworkpolicies - globalnetworksets - networkpolicies - networksets - clusterinformations - hostendpoints - blockaffinities verbs: - get - list - watch # Calico must create and update some CRDs on startup. - apiGroups: ["crd.projectcalico.org"] resources: - ippools - felixconfigurations - clusterinformations verbs: - create - update # Calico stores some configuration information on the node. - apiGroups: [""] resources: - nodes verbs: - get - list - watch # These permissions are only required for upgrade from v2.6, and can # be removed after upgrade or on fresh installations. - apiGroups: ["crd.projectcalico.org"] resources: - bgpconfigurations - bgppeers verbs: - create - update # These permissions are required for Calico CNI to perform IPAM allocations. - apiGroups: ["crd.projectcalico.org"] resources: - blockaffinities - ipamblocks - ipamhandles verbs: - get - list - create - update - delete - apiGroups: ["crd.projectcalico.org"] resources: - ipamconfigs verbs: - get # Block affinities must also be watchable by confd for route aggregation. - apiGroups: ["crd.projectcalico.org"] resources: - blockaffinities verbs: - watch # The Calico IPAM migration needs to get daemonsets. These permissions can be # removed if not upgrading from an installation using host-local IPAM. - apiGroups: ["apps"] resources: - daemonsets verbs: - get --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: calico-node labels: role.kubernetes.io/networking: "1" roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: calico-node subjects: - kind: ServiceAccount name: calico-node namespace: kube-system {{ if .Networking.Calico.TyphaReplicas -}} --- # Source: calico/templates/calico-typha.yaml # This manifest creates a Service, which will be backed by Calico's Typha daemon. # Typha sits in between Felix and the API server, reducing Calico's load on the API server. apiVersion: v1 kind: Service metadata: name: calico-typha namespace: kube-system labels: k8s-app: calico-typha role.kubernetes.io/networking: "1" spec: ports: - port: 5473 protocol: TCP targetPort: calico-typha name: calico-typha selector: k8s-app: calico-typha --- # This manifest creates a Deployment of Typha to back the above service. apiVersion: apps/v1 kind: Deployment metadata: name: calico-typha namespace: kube-system labels: k8s-app: calico-typha role.kubernetes.io/networking: "1" spec: # Number of Typha replicas. To enable Typha, set this to a non-zero value *and* set the # typha_service_name variable in the calico-config ConfigMap above. # # We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is essential # (when using the Kubernetes datastore). Use one replica for every 100-200 nodes. In # production, we recommend running at least 3 replicas to reduce the impact of rolling upgrade. replicas: {{ or .Networking.Calico.TyphaReplicas "0" }} revisionHistoryLimit: 2 selector: matchLabels: k8s-app: calico-typha template: metadata: labels: k8s-app: calico-typha role.kubernetes.io/networking: "1" annotations: # This, along with the CriticalAddonsOnly toleration below, marks the pod as a critical # add-on, ensuring it gets priority scheduling and that its resources are reserved # if it ever gets evicted. scheduler.alpha.kubernetes.io/critical-pod: '' cluster-autoscaler.kubernetes.io/safe-to-evict: 'true' spec: nodeSelector: beta.kubernetes.io/os: linux hostNetwork: true tolerations: # Mark the pod as a critical add-on for rescheduling. - key: CriticalAddonsOnly operator: Exists # Since Calico can't network a pod until Typha is up, we need to run Typha itself # as a host-networked pod. serviceAccountName: calico-node priorityClassName: system-cluster-critical containers: - image: calico/typha:v3.9.6 name: calico-typha ports: - containerPort: 5473 name: calico-typha protocol: TCP env: # Enable "info" logging by default. Can be set to "debug" to increase verbosity. - name: TYPHA_LOGSEVERITYSCREEN value: "info" # Disable logging to file and syslog since those don't make sense in Kubernetes. - name: TYPHA_LOGFILEPATH value: "none" - name: TYPHA_LOGSEVERITYSYS value: "none" # Monitor the Kubernetes API to find the number of running instances and rebalance # connections. - name: TYPHA_CONNECTIONREBALANCINGMODE value: "kubernetes" - name: TYPHA_DATASTORETYPE value: "kubernetes" - name: TYPHA_HEALTHENABLED value: "true" # Uncomment these lines to enable prometheus metrics. Since Typha is host-networked, # this opens a port on the host, which may need to be secured. - name: TYPHA_PROMETHEUSMETRICSENABLED value: "{{- or .Networking.Calico.TyphaPrometheusMetricsEnabled "false" }}" - name: TYPHA_PROMETHEUSMETRICSPORT value: "{{- or .Networking.Calico.TyphaPrometheusMetricsPort "9093" }}" livenessProbe: httpGet: path: /liveness port: 9098 host: localhost periodSeconds: 30 initialDelaySeconds: 30 readinessProbe: httpGet: path: /readiness port: 9098 host: localhost periodSeconds: 10 --- # This manifest creates a Pod Disruption Budget for Typha to allow K8s Cluster Autoscaler to evict apiVersion: policy/v1beta1 kind: PodDisruptionBudget metadata: name: calico-typha namespace: kube-system labels: k8s-app: calico-typha role.kubernetes.io/networking: "1" spec: maxUnavailable: 1 selector: matchLabels: k8s-app: calico-typha {{- end -}} --- # Source: calico/templates/calico-node.yaml # This manifest installs the calico-node container, as well # as the CNI plugins and network config on # each master and worker node in a Kubernetes cluster. kind: DaemonSet apiVersion: apps/v1 metadata: name: calico-node namespace: kube-system labels: k8s-app: calico-node role.kubernetes.io/networking: "1" spec: selector: matchLabels: k8s-app: calico-node updateStrategy: type: RollingUpdate rollingUpdate: maxUnavailable: 1 template: metadata: labels: k8s-app: calico-node role.kubernetes.io/networking: "1" annotations: # This, along with the CriticalAddonsOnly toleration below, # marks the pod as a critical add-on, ensuring it gets # priority scheduling and that its resources are reserved # if it ever gets evicted. scheduler.alpha.kubernetes.io/critical-pod: '' spec: nodeSelector: beta.kubernetes.io/os: linux hostNetwork: true tolerations: # Make sure calico-node gets scheduled on all nodes. - effect: NoSchedule operator: Exists # Mark the pod as a critical add-on for rescheduling. - key: CriticalAddonsOnly operator: Exists - effect: NoExecute operator: Exists serviceAccountName: calico-node # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. terminationGracePeriodSeconds: 0 priorityClassName: system-node-critical initContainers: # This container performs upgrade from host-local IPAM to calico-ipam. # It can be deleted if this is a fresh installation, or if you have already # upgraded to use calico-ipam. - name: upgrade-ipam image: calico/cni:v3.9.6 command: ["/opt/cni/bin/calico-ipam", "-upgrade"] env: - name: KUBERNETES_NODE_NAME valueFrom: fieldRef: fieldPath: spec.nodeName - name: CALICO_NETWORKING_BACKEND valueFrom: configMapKeyRef: name: calico-config key: calico_backend volumeMounts: - mountPath: /var/lib/cni/networks name: host-local-net-dir - mountPath: /host/opt/cni/bin name: cni-bin-dir # This container installs the CNI binaries # and CNI network config file on each node. - name: install-cni image: calico/cni:v3.9.6 command: ["/install-cni.sh"] env: # Name of the CNI config file to create. - name: CNI_CONF_NAME value: "10-calico.conflist" # The CNI network config to install on each node. - name: CNI_NETWORK_CONFIG valueFrom: configMapKeyRef: name: calico-config key: cni_network_config # Set the hostname based on the k8s node name. - name: KUBERNETES_NODE_NAME valueFrom: fieldRef: fieldPath: spec.nodeName # CNI MTU Config variable - name: CNI_MTU valueFrom: configMapKeyRef: name: calico-config key: veth_mtu # Prevents the container from sleeping forever. - name: SLEEP value: "false" volumeMounts: - mountPath: /host/opt/cni/bin name: cni-bin-dir - mountPath: /host/etc/cni/net.d name: cni-net-dir # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes # to communicate with Felix over the Policy Sync API. - name: flexvol-driver image: calico/pod2daemon-flexvol:v3.9.6 volumeMounts: - name: flexvol-driver-host mountPath: /host/driver containers: # Runs calico-node container on each Kubernetes node. This # container programs network policy and routes on each # host. - name: calico-node image: calico/node:v3.9.6 env: # Use Kubernetes API as the backing datastore. - name: DATASTORE_TYPE value: "kubernetes" # Typha support: controlled by the ConfigMap. - name: FELIX_TYPHAK8SSERVICENAME valueFrom: configMapKeyRef: name: calico-config key: typha_service_name # Wait for the datastore. - name: WAIT_FOR_DATASTORE value: "true" # Set based on the k8s node name. - name: NODENAME valueFrom: fieldRef: fieldPath: spec.nodeName # Choose the backend to use. - name: CALICO_NETWORKING_BACKEND valueFrom: configMapKeyRef: name: calico-config key: calico_backend # Cluster type to identify the deployment type - name: CLUSTER_TYPE # was value: "k8s,bgp" value: "kops,bgp" # Auto-detect the BGP IP address. - name: IP value: "autodetect" - name: IP_AUTODETECTION_METHOD value: "{{- or .Networking.Calico.IPv4AutoDetectionMethod "first-found" }}" - name: IP6_AUTODETECTION_METHOD value: "{{- or .Networking.Calico.IPv6AutoDetectionMethod "first-found" }}" # Enable IPIP - name: CALICO_IPV4POOL_IPIP value: "{{- if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) -}}CrossSubnet{{- else -}} {{- or .Networking.Calico.IPIPMode "Always" -}} {{- end -}}" # Set MTU for tunnel device used if ipip is enabled - name: FELIX_IPINIPMTU valueFrom: configMapKeyRef: name: calico-config key: veth_mtu # The default IPv4 pool to create on startup if none exists. Pod IPs will be # chosen from this range. Changing this value after installation will have # no effect. This should fall within `--cluster-cidr`. - name: CALICO_IPV4POOL_CIDR value: "{{ .KubeControllerManager.ClusterCIDR }}" # Disable file logging so `kubectl logs` works. - name: CALICO_DISABLE_FILE_LOGGING value: "true" # Set Felix endpoint to host default action to ACCEPT. - name: FELIX_DEFAULTENDPOINTTOHOSTACTION value: "ACCEPT" # Disable IPv6 on Kubernetes. - name: FELIX_IPV6SUPPORT value: "false" # Set Felix logging to the desired level - name: FELIX_LOGSEVERITYSCREEN value: "{{- or .Networking.Calico.LogSeverityScreen "info" }}" - name: FELIX_HEALTHENABLED value: "true" # kops additions # Set Felix iptables binary variant, Legacy or NFT - name: FELIX_IPTABLESBACKEND value: "{{- or .Networking.Calico.IptablesBackend "Legacy" }}" # Set to enable the experimental Prometheus metrics server - name: FELIX_PROMETHEUSMETRICSENABLED value: "{{- or .Networking.Calico.PrometheusMetricsEnabled "false" }}" # TCP port that the Prometheus metrics server should bind to - name: FELIX_PROMETHEUSMETRICSPORT value: "{{- or .Networking.Calico.PrometheusMetricsPort "9091" }}" # Enable Prometheus Go runtime metrics collection - name: FELIX_PROMETHEUSGOMETRICSENABLED value: "{{- or .Networking.Calico.PrometheusGoMetricsEnabled "true" }}" # Enable Prometheus process metrics collection - name: FELIX_PROMETHEUSPROCESSMETRICSENABLED value: "{{- or .Networking.Calico.PrometheusProcessMetricsEnabled "true" }}" securityContext: privileged: true resources: requests: cpu: {{ or .Networking.Calico.CPURequest "100m" }} livenessProbe: exec: command: - /bin/calico-node - -felix-live - -bird-live periodSeconds: 10 initialDelaySeconds: 10 failureThreshold: 6 readinessProbe: exec: command: - /bin/calico-node - -felix-ready - -bird-ready periodSeconds: 10 volumeMounts: - mountPath: /lib/modules name: lib-modules readOnly: true - mountPath: /run/xtables.lock name: xtables-lock readOnly: false - mountPath: /var/run/calico name: var-run-calico readOnly: false - mountPath: /var/lib/calico name: var-lib-calico readOnly: false - name: policysync mountPath: /var/run/nodeagent volumes: # Used by calico-node. - name: lib-modules hostPath: path: /lib/modules - name: var-run-calico hostPath: path: /var/run/calico - name: var-lib-calico hostPath: path: /var/lib/calico - name: xtables-lock hostPath: path: /run/xtables.lock type: FileOrCreate # Used to install CNI. - name: cni-bin-dir hostPath: path: /opt/cni/bin - name: cni-net-dir hostPath: path: /etc/cni/net.d # Mount in the directory for host-local IPAM allocations. This is # used when upgrading from host-local to calico-ipam, and can be removed # if not using the upgrade-ipam init container. - name: host-local-net-dir hostPath: path: /var/lib/cni/networks # Used to create per-pod Unix Domain Sockets - name: policysync hostPath: type: DirectoryOrCreate path: /var/run/nodeagent # Used to install Flex Volume Driver - name: flexvol-driver-host hostPath: type: DirectoryOrCreate path: "{{- or .Kubelet.VolumePluginDirectory "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/" }}nodeagent~uds" --- apiVersion: v1 kind: ServiceAccount metadata: name: calico-node namespace: kube-system labels: role.kubernetes.io/networking: "1" --- # Source: calico/templates/calico-kube-controllers.yaml # See https://github.com/projectcalico/kube-controllers apiVersion: apps/v1 kind: Deployment metadata: name: calico-kube-controllers namespace: kube-system labels: k8s-app: calico-kube-controllers role.kubernetes.io/networking: "1" spec: # The controllers can only have a single active instance. replicas: 1 selector: matchLabels: k8s-app: calico-kube-controllers strategy: type: Recreate template: metadata: name: calico-kube-controllers namespace: kube-system labels: k8s-app: calico-kube-controllers role.kubernetes.io/networking: "1" annotations: scheduler.alpha.kubernetes.io/critical-pod: '' spec: nodeSelector: beta.kubernetes.io/os: linux tolerations: # Mark the pod as a critical add-on for rescheduling. - key: CriticalAddonsOnly operator: Exists - key: node-role.kubernetes.io/master effect: NoSchedule serviceAccountName: calico-kube-controllers priorityClassName: system-cluster-critical containers: - name: calico-kube-controllers image: calico/kube-controllers:v3.9.6 env: # Choose which controllers to run. - name: ENABLED_CONTROLLERS value: node - name: DATASTORE_TYPE value: kubernetes readinessProbe: exec: command: - /usr/bin/check-status - -r --- apiVersion: v1 kind: ServiceAccount metadata: name: calico-kube-controllers namespace: kube-system labels: role.kubernetes.io/networking: "1" {{ if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) -}} # This manifest installs the k8s-ec2-srcdst container, which disables # src/dst ip checks to allow BGP to function for calico for hosts within subnets # This only applies for AWS environments. --- kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: name: k8s-ec2-srcdst labels: role.kubernetes.io/networking: "1" rules: - apiGroups: - "" resources: - nodes verbs: - get - list - watch - update - patch --- apiVersion: v1 kind: ServiceAccount metadata: name: k8s-ec2-srcdst namespace: kube-system labels: role.kubernetes.io/networking: "1" --- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: name: k8s-ec2-srcdst labels: role.kubernetes.io/networking: "1" roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: k8s-ec2-srcdst subjects: - kind: ServiceAccount name: k8s-ec2-srcdst namespace: kube-system --- apiVersion: apps/v1 kind: Deployment metadata: name: k8s-ec2-srcdst namespace: kube-system labels: k8s-app: k8s-ec2-srcdst role.kubernetes.io/networking: "1" spec: replicas: 1 selector: matchLabels: k8s-app: k8s-ec2-srcdst template: metadata: labels: k8s-app: k8s-ec2-srcdst role.kubernetes.io/networking: "1" annotations: scheduler.alpha.kubernetes.io/critical-pod: '' spec: hostNetwork: true tolerations: - key: node-role.kubernetes.io/master effect: NoSchedule - key: CriticalAddonsOnly operator: Exists serviceAccountName: k8s-ec2-srcdst containers: - image: ottoyiu/k8s-ec2-srcdst:v0.2.2 name: k8s-ec2-srcdst resources: requests: cpu: 10m memory: 64Mi env: - name: AWS_REGION value: {{ Region }} volumeMounts: - name: ssl-certs mountPath: "/etc/ssl/certs/ca-certificates.crt" readOnly: true imagePullPolicy: "Always" volumes: - name: ssl-certs hostPath: path: "/etc/ssl/certs/ca-certificates.crt" nodeSelector: node-role.kubernetes.io/master: "" {{- end -}}