diff --git a/upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.6.yaml.template b/upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.6.yaml.template
index cb2abb6844..ded7ceb3c3 100644
--- a/upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.6.yaml.template
+++ b/upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.6.yaml.template
@@ -5,7 +5,7 @@ metadata:
   name: calico-config
   namespace: kube-system
 data:
-  # The calico-etcd PetSet service IP:port
+  # etcd servers
   etcd_endpoints: "{{ $cluster := index .EtcdClusters 0 -}}
                     {{- range $j, $member := $cluster.Members -}}
                         {{- if $j }},{{ end -}}
@@ -18,33 +18,22 @@ data:
   # The CNI network configuration to install on each node.
   cni_network_config: |-
     {
-      "name": "k8s-pod-network",
-      "cniVersion": "0.3.0",
-      "plugins": [
-        {
-          "type": "calico",
-          "etcd_endpoints": "__ETCD_ENDPOINTS__",
-          "log_level": "info",
-          "ipam": {
+        "name": "k8s-pod-network",
+        "type": "calico",
+        "etcd_endpoints": "__ETCD_ENDPOINTS__",
+        "log_level": "info",
+        "ipam": {
             "type": "calico-ipam"
-          },
-          "policy": {
-            "type": "k8s",
-            "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
-            "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
-          },
-          "kubernetes": {
-            "kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__"
-          }
         },
-        {
-          "type": "portmap",
-          "snat": true,
-          "capabilities": {"portMappings": true}
+        "policy": {
+            "type": "k8s",
+            "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
+            "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
+        },
+        "kubernetes": {
+            "kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__"
         }
-      ]
     }
-
 ---
 
 kind: ClusterRole
@@ -133,12 +122,15 @@ spec:
           operator: Exists
         - effect: NoSchedule
           operator: Exists
+      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
+      # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
+      terminationGracePeriodSeconds: 0
       containers:
         # Runs calico/node container on each Kubernetes node. This
         # container programs network policy and routes on each
         # host.
         - name: calico-node
-          image: quay.io/calico/node:v2.4.1
+          image: quay.io/calico/node:v2.6.2
          resources:
            requests:
              cpu: 10m
@@ -169,6 +161,14 @@ spec:
             # Auto-detect the BGP IP address.
             - name: IP
               value: ""
+            # Disable IPv6 on Kubernetes.
+            - name: FELIX_IPV6SUPPORT
+              value: "false"
+            # Set Felix logging to "info"
+            - name: FELIX_LOGSEVERITYSCREEN
+              value: "info"
+            - name: FELIX_HEALTHENABLED
+              value: "true"
           securityContext:
             privileged: true
           volumeMounts:
@@ -185,7 +185,7 @@ spec:
         # This container installs the Calico CNI binaries
         # and CNI network config file on each node.
         - name: install-cni
-          image: quay.io/calico/cni:v1.10.0
+          image: quay.io/calico/cni:v1.11.0
          resources:
            requests:
              cpu: 10m
@@ -194,7 +194,7 @@ spec:
           env:
             # The name of calico config file
             - name: CNI_CONF_NAME
-              value: 10-calico.conflist
+              value: 10-calico.conf
             # The location of the Calico etcd cluster.
             - name: ETCD_ENDPOINTS
               valueFrom:
@@ -237,8 +237,8 @@ spec:
 
 ---
 
-# This manifest deploys the Calico policy controller on Kubernetes.
-# See https://github.com/projectcalico/k8s-policy
+# This deployment turns off the old "policy-controller". It should remain at 0 replicas, and then
+# be removed entirely once the new kube-controllers deployment has been deployed above.
 apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
@@ -246,35 +246,23 @@ metadata:
   namespace: kube-system
   labels:
     k8s-app: calico-policy
-    role.kubernetes.io/networking: "1"
 spec:
-  # The policy controller can only have a single active instance.
-  replicas: 1
+  # Turn this deployment off in favor of the kube-controllers deployment above.
+  replicas: 0
+  strategy:
+    type: Recreate
   template:
     metadata:
       name: calico-policy-controller
       namespace: kube-system
       labels:
-        k8s-app: calico-policy-controller
-        role.kubernetes.io/networking: "1"
-      annotations:
-        scheduler.alpha.kubernetes.io/critical-pod: ''
+        k8s-app: calico-policy
     spec:
-      # The policy controller must run in the host network namespace so that
-      # it isn't governed by policy that would prevent it from working.
       hostNetwork: true
       serviceAccountName: calico
-      tolerations:
-        - key: node-role.kubernetes.io/master
-          effect: NoSchedule
-        - key: CriticalAddonsOnly
-          operator: Exists
       containers:
         - name: calico-policy-controller
-          image: quay.io/calico/kube-policy-controller:v0.7.0
-          resources:
-            requests:
-              cpu: 10m
+          image: quay.io/calico/kube-controllers:v1.0.0
           env:
             # The location of the Calico etcd cluster.
             - name: ETCD_ENDPOINTS
@@ -282,15 +270,6 @@ spec:
               configMapKeyRef:
                 name: calico-config
                 key: etcd_endpoints
-            # The location of the Kubernetes API. Use the default Kubernetes
-            # service for API access.
-            - name: K8S_API
-              value: "https://kubernetes.default:443"
-            # Since we're running in the host namespace and might not have KubeDNS
-            # access, configure the container's /etc/hosts to resolve
-            # kubernetes.default to the correct service clusterIP.
-            - name: CONFIGURE_ETC_HOSTS
-              value: "true"
           volumeMounts:
             # Necessary for gossip based DNS
             - mountPath: /etc/hosts
@@ -301,6 +280,55 @@ spec:
         - name: etc-hosts
           hostPath:
             path: /etc/hosts
+---
+
+# This manifest deploys the Calico Kubernetes controllers.
+# See https://github.com/projectcalico/kube-controllers
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: calico-kube-controllers
+  namespace: kube-system
+  labels:
+    k8s-app: calico-kube-controllers
+    role.kubernetes.io/networking: "1"
+spec:
+  # The controllers can only have a single active instance.
+  replicas: 1
+  template:
+    metadata:
+      name: calico-kube-controllers
+      namespace: kube-system
+      labels:
+        k8s-app: calico-kube-controllers
+        role.kubernetes.io/networking: "1"
+      annotations:
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      # The controllers must run in the host network namespace so that
+      # it isn't governed by policy that would prevent it from working.
+      hostNetwork: true
+      serviceAccountName: calico
+      tolerations:
+        - key: node-role.kubernetes.io/master
+          effect: NoSchedule
+        - key: CriticalAddonsOnly
+          operator: Exists
+      containers:
+        - name: calico-kube-controllers
+          image: quay.io/calico/kube-controllers:v1.0.0
+          resources:
+            requests:
+              cpu: 10m
+          env:
+            # The location of the Calico etcd cluster.
+            - name: ETCD_ENDPOINTS
+              valueFrom:
+                configMapKeyRef:
+                  name: calico-config
+                  key: etcd_endpoints
+
+
 {{ if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) -}}
 # This manifest installs the k8s-ec2-srcdst container, which disables
diff --git a/upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.8.yaml.template b/upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.7.yaml.template
similarity index 99%
rename from upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.8.yaml.template
rename to upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.7.yaml.template
index 048eb13616..5990da897c 100644
--- a/upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.8.yaml.template
+++ b/upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.7.yaml.template
@@ -16,11 +16,10 @@ data:
   calico_backend: "bird"
 
   # The CNI network configuration to install on each node.
-  # cniVersion should be 0.1.0 on k8s: https://github.com/projectcalico/calico/issues/742
   cni_network_config: |-
     {
       "name": "k8s-pod-network",
-      "cniVersion": "0.1.0",
+      "cniVersion": "0.3.0",
       "plugins": [
         {
           "type": "calico",
diff --git a/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go b/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go
index 2255c2162e..3cc56a996f 100644
--- a/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go
+++ b/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go
@@ -468,11 +468,10 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri
 	if b.cluster.Spec.Networking.Calico != nil {
 		key := "networking.projectcalico.org"
 
-		// 2.6.3-kops.1 = 2.6.2 with kops manifest tweaks. This should go away with the next version bump.
 		versions := map[string]string{
 			"pre-k8s-1.6": "2.4.1",
-			"k8s-1.6":     "2.4.2-kops.1",
-			"k8s-1.8":     "2.6.3-kops.1",
+			"k8s-1.6":     "2.6.2",
+			"k8s-1.7":     "2.6.2",
 		}
 
 		{
@@ -499,14 +498,14 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri
 				Version:           fi.String(versions[id]),
 				Selector:          networkingSelector,
 				Manifest:          fi.String(location),
-				KubernetesVersion: ">=1.6.0 <1.8.0",
+				KubernetesVersion: ">=1.6.0 <1.7.0",
 				Id:                id,
 			})
 			manifests[key+"-"+id] = "addons/" + location
 		}
 
 		{
-			id := "k8s-1.8"
+			id := "k8s-1.7"
 			location := key + "/" + id + ".yaml"
 
 			addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
@@ -514,7 +513,7 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri
 				Version:           fi.String(versions[id]),
 				Selector:          networkingSelector,
 				Manifest:          fi.String(location),
-				KubernetesVersion: ">=1.8.0",
+				KubernetesVersion: ">=1.7.0",
 				Id:                id,
 			})
 			manifests[key+"-"+id] = "addons/" + location