From 2e4830423e8f84713796399e3df9f02d15db91cb Mon Sep 17 00:00:00 2001
From: Justin Santa Barbara
Date: Mon, 10 Apr 2017 11:30:31 -0400
Subject: [PATCH] Split calico configuration into 1.5 and 1.6

---
 .../k8s-1.6.yaml.template                     | 265 ++++++++++++++++++
 ...aml.template => pre-k8s-1.6.yaml.template} |   0
 .../pkg/fi/cloudup/bootstrapchannelbuilder.go |  35 ++-
 3 files changed, 292 insertions(+), 8 deletions(-)
 create mode 100644 upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.6.yaml.template
 rename upup/models/cloudup/resources/addons/networking.projectcalico.org/{v2.1.1.yaml.template => pre-k8s-1.6.yaml.template} (100%)

diff --git a/upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.6.yaml.template b/upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.6.yaml.template
new file mode 100644
index 0000000000..de5780d753
--- /dev/null
+++ b/upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.6.yaml.template
@@ -0,0 +1,265 @@
+# This ConfigMap is used to configure a self-hosted Calico installation.
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: calico-config
+  namespace: kube-system
+data:
+  # The calico-etcd PetSet service IP:port
+  etcd_endpoints: "{{ $cluster := index .EtcdClusters 0 -}}
+                    {{- range $j, $member := $cluster.Members -}}
+                        {{- if $j }},{{ end -}}
+                        http://etcd-{{ $member.Name }}.internal.{{ ClusterName }}:4001
+                    {{- end }}"
+
+  # True enables BGP networking, false tells Calico to enforce
+  # policy only, using native networking.
+  enable_bgp: "true"
+
+  # The CNI network configuration to install on each node.
+  cni_network_config: |-
+    {
+        "name": "k8s-pod-network",
+        "type": "calico",
+        "etcd_endpoints": "__ETCD_ENDPOINTS__",
+        "log_level": "info",
+        "ipam": {
+            "type": "calico-ipam"
+        },
+        "policy": {
+            "type": "k8s",
+            "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
+            "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
+        },
+        "kubernetes": {
+            "kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__"
+        }
+    }
+
+  # The default IP Pool to be created for the cluster.
+  # Pod IP addresses will be assigned from this pool.
+  ippool.yaml: |
+      apiVersion: v1
+      kind: ipPool
+      metadata:
+        cidr: {{ .NonMasqueradeCIDR }}
+      spec:
+        ipip:
+          enabled: true
+        nat-outgoing: true
+
+---
+
+# This manifest installs the calico/node container, as well
+# as the Calico CNI plugins and network config on
+# each master and worker node in a Kubernetes cluster.
+kind: DaemonSet
+apiVersion: extensions/v1beta1
+metadata:
+  name: calico-node
+  namespace: kube-system
+  labels:
+    k8s-app: calico-node
+    role.kubernetes.io/networking: "1"
+spec:
+  selector:
+    matchLabels:
+      k8s-app: calico-node
+  template:
+    metadata:
+      labels:
+        k8s-app: calico-node
+        role.kubernetes.io/networking: "1"
+    spec:
+      hostNetwork: true
+      containers:
+        # Runs calico/node container on each Kubernetes node. This
+        # container programs network policy and routes on each
+        # host.
+        - name: calico-node
+          image: calico/node:v1.1.1
+          resources:
+            requests:
+              cpu: 10m
+          env:
+            # The location of the Calico etcd cluster.
+            - name: ETCD_ENDPOINTS
+              valueFrom:
+                configMapKeyRef:
+                  name: calico-config
+                  key: etcd_endpoints
+            # Enable BGP. Disable to enforce policy only.
+            - name: CALICO_NETWORKING
+              valueFrom:
+                configMapKeyRef:
+                  name: calico-config
+                  key: enable_bgp
+            # Disable file logging so `kubectl logs` works.
+            - name: CALICO_DISABLE_FILE_LOGGING
+              value: "true"
+            # Don't configure a default pool. This is done by the Job
+            # below.
+            - name: NO_DEFAULT_POOLS
+              value: "true"
+            # Auto-detect the BGP IP address.
+            - name: IP
+              value: ""
+          securityContext:
+            privileged: true
+          volumeMounts:
+            - mountPath: /lib/modules
+              name: lib-modules
+              readOnly: true
+            - mountPath: /var/run/calico
+              name: var-run-calico
+              readOnly: false
+        # This container installs the Calico CNI binaries
+        # and CNI network config file on each node.
+        - name: install-cni
+          image: calico/cni:v1.6.1
+          resources:
+            requests:
+              cpu: 10m
+          imagePullPolicy: Always
+          command: ["/install-cni.sh"]
+          env:
+            # The location of the Calico etcd cluster.
+            - name: ETCD_ENDPOINTS
+              valueFrom:
+                configMapKeyRef:
+                  name: calico-config
+                  key: etcd_endpoints
+            # The CNI network config to install on each node.
+            - name: CNI_NETWORK_CONFIG
+              valueFrom:
+                configMapKeyRef:
+                  name: calico-config
+                  key: cni_network_config
+          volumeMounts:
+            - mountPath: /host/opt/cni/bin
+              name: cni-bin-dir
+            - mountPath: /host/etc/cni/net.d
+              name: cni-net-dir
+      volumes:
+        # Used by calico/node.
+        - name: lib-modules
+          hostPath:
+            path: /lib/modules
+        - name: var-run-calico
+          hostPath:
+            path: /var/run/calico
+        # Used to install CNI.
+        - name: cni-bin-dir
+          hostPath:
+            path: /opt/cni/bin
+        - name: cni-net-dir
+          hostPath:
+            path: /etc/cni/net.d
+
+---
+
+# This manifest deploys the Calico policy controller on Kubernetes.
+# See https://github.com/projectcalico/k8s-policy
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: calico-policy-controller
+  namespace: kube-system
+  labels:
+    k8s-app: calico-policy
+    role.kubernetes.io/networking: "1"
+spec:
+  # The policy controller can only have a single active instance.
+  replicas: 1
+  template:
+    metadata:
+      name: calico-policy-controller
+      namespace: kube-system
+      labels:
+        k8s-app: calico-policy-controller
+        role.kubernetes.io/networking: "1"
+      annotations:
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      # The policy controller must run in the host network namespace so that
+      # it isn't governed by policy that would prevent it from working.
+      hostNetwork: true
+      tolerations:
+        - key: node-role.kubernetes.io/master
+          effect: NoSchedule
+        - key: CriticalAddonsOnly
+          operator: Exists
+      containers:
+        - name: calico-policy-controller
+          image: calico/kube-policy-controller:v0.5.4
+          resources:
+            requests:
+              cpu: 10m
+          env:
+            # The location of the Calico etcd cluster.
+            - name: ETCD_ENDPOINTS
+              valueFrom:
+                configMapKeyRef:
+                  name: calico-config
+                  key: etcd_endpoints
+            # The location of the Kubernetes API. Use the default Kubernetes
+            # service for API access.
+            - name: K8S_API
+              value: "https://kubernetes.default:443"
+            # Since we're running in the host namespace and might not have KubeDNS
+            # access, configure the container's /etc/hosts to resolve
+            # kubernetes.default to the correct service clusterIP.
+            - name: CONFIGURE_ETC_HOSTS
+              value: "true"
+
+---
+
+## This manifest deploys a Job which performs one time
+# configuration of Calico
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: configure-calico
+  namespace: kube-system
+  labels:
+    k8s-app: calico
+    role.kubernetes.io/networking: "1"
+spec:
+  template:
+    metadata:
+      name: configure-calico
+      annotations:
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      hostNetwork: true
+      tolerations:
+        - key: node-role.kubernetes.io/master
+          effect: NoSchedule
+        - key: CriticalAddonsOnly
+          operator: Exists
+      restartPolicy: OnFailure
+      containers:
+        # Writes basic configuration to datastore.
+        - name: configure-calico
+          image: calico/ctl:v1.1.1
+          args:
+            - apply
+            - -f
+            - /etc/config/calico/ippool.yaml
+          volumeMounts:
+            - name: config-volume
+              mountPath: /etc/config
+          env:
+            # The location of the etcd cluster.
+            - name: ETCD_ENDPOINTS
+              valueFrom:
+                configMapKeyRef:
+                  name: calico-config
+                  key: etcd_endpoints
+      volumes:
+        - name: config-volume
+          configMap:
+            name: calico-config
+            items:
+              - key: ippool.yaml
+                path: calico/ippool.yaml
diff --git a/upup/models/cloudup/resources/addons/networking.projectcalico.org/v2.1.1.yaml.template b/upup/models/cloudup/resources/addons/networking.projectcalico.org/pre-k8s-1.6.yaml.template
similarity index 100%
rename from upup/models/cloudup/resources/addons/networking.projectcalico.org/v2.1.1.yaml.template
rename to upup/models/cloudup/resources/addons/networking.projectcalico.org/pre-k8s-1.6.yaml.template
diff --git a/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go b/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go
index 143984ca46..fef0a28b2c 100644
--- a/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go
+++ b/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go
@@ -275,16 +275,35 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri
         key := "networking.projectcalico.org"
         version := "2.1.1"
 
-        location := key + "/v" + version + ".yaml"
+        {
+            location := key + "/pre-k8s-1.6.yaml"
+            id := "pre-k8s-1.6"
 
-        addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
-            Name:     fi.String(key),
-            Version:  fi.String(version),
-            Selector: map[string]string{"role.kubernetes.io/networking": "1"},
-            Manifest: fi.String(location),
-        })
+            addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
+                Name:              fi.String(key),
+                Version:           fi.String(version),
+                Selector:          networkingSelector,
+                Manifest:          fi.String(location),
+                KubernetesVersion: "<1.6.0",
+                Id:                id,
+            })
+            manifests[key+"-"+id] = "addons/" + location
+        }
 
-        manifests[key] = "addons/" + location
+        {
+            location := key + "/k8s-1.6.yaml"
+            id := "k8s-1.6"
+
+            addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
+                Name:              fi.String(key),
+                Version:           fi.String(version),
+                Selector:          networkingSelector,
+                Manifest:          fi.String(location),
+                KubernetesVersion: ">=1.6.0",
+                Id:                id,
+            })
+            manifests[key+"-"+id] = "addons/" + location
+        }
     }
 
     if b.cluster.Spec.Networking.Canal != nil {
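
Note: the hunk above registers the same Calico addon twice, once per template, distinguished by id and by non-overlapping kubernetesVersion ranges, so the channels tool applies exactly one of the two manifests to a given cluster. As a rough sketch (not part of the patch; field names are inferred from the AddonSpec literals above, and networkingSelector is assumed to carry the same role.kubernetes.io/networking="1" label selector the old code used inline), the resulting entries in the generated addons manifest would look something like:

  - name: networking.projectcalico.org
    version: "2.1.1"
    id: pre-k8s-1.6
    kubernetesVersion: "<1.6.0"
    selector:
      role.kubernetes.io/networking: "1"
    manifest: networking.projectcalico.org/pre-k8s-1.6.yaml
  - name: networking.projectcalico.org
    version: "2.1.1"
    id: k8s-1.6
    kubernetesVersion: ">=1.6.0"
    selector:
      role.kubernetes.io/networking: "1"
    manifest: networking.projectcalico.org/k8s-1.6.yaml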