Merge pull request #8104 from hakman/calico-3.10.2

Update Calico to v3.10.2
Kubernetes Prow Robot 2019-12-18 08:37:57 -08:00 committed by GitHub
commit 1d40aab81c
3 changed files with 1222 additions and 65 deletions

@@ -1,12 +1,6 @@
# Canal Version v3.10.1
# https://docs.projectcalico.org/v3.10/release-notes/#v3101
# This manifest includes the following component versions:
# calico/cni:v3.10.1
# calico/node:v3.10.1
# calico/pod2daemon-flexvol:v3.10.1
# calico/typha:v3.10.1
# quay.io/coreos/flannel:v0.11.0
{{- /* Pulled and modified from: https://docs.projectcalico.org/v3.10/manifests/canal.yaml */ -}}
---
# Source: calico/templates/calico-config.yaml
# This ConfigMap is used to configure a self-hosted Canal installation.
kind: ConfigMap
@@ -14,7 +8,10 @@ apiVersion: v1
metadata:
name: canal-config
namespace: kube-system
labels:
role.kubernetes.io/networking: "1"
data:
# Typha is disabled.
typha_service_name: "{{ if .Networking.Canal.TyphaReplicas }}calico-typha{{ else }}none{{ end }}"
# The interface used by canal for host <-> host communication.
# If left blank, then the interface is chosen using the node's
@@ -25,8 +22,12 @@ data:
# the pod network.
masquerade: "true"
# MTU default is 1500, can be overridden
veth_mtu: "{{- or .Networking.Canal.MTU "1500" }}"
# Configure the MTU to use
{{- if .Networking.Canal.MTU }}
veth_mtu: "{{ .Networking.Canal.MTU }}"
{{- else }}
veth_mtu: "{{- if eq .CloudProvider "openstack" -}}1430{{- else -}}1440{{- end -}}"
{{- end }}
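To make the default selection concrete, here is a minimal rendering sketch of the hunk above, assuming a stripped-down stand-in for the kops template context (the real model carries many more fields): an explicit .Networking.Canal.MTU always wins; otherwise OpenStack clusters get 1430 and everything else gets 1440.

package main

import (
	"os"
	"text/template"
)

// Stripped-down stand-in for the kops template context; only the
// fields the veth_mtu snippet reads are modeled here (an assumption).
type model struct {
	CloudProvider string
	Networking    struct {
		Canal struct{ MTU int }
	}
}

const mtuTmpl = `veth_mtu: "{{- if .Networking.Canal.MTU }}{{ .Networking.Canal.MTU }}{{- else }}{{- if eq .CloudProvider "openstack" -}}1430{{- else -}}1440{{- end -}}{{- end }}"
`

func main() {
	t := template.Must(template.New("mtu").Parse(mtuTmpl))

	aws := model{CloudProvider: "aws"}
	openstack := model{CloudProvider: "openstack"}
	custom := aws
	custom.Networking.Canal.MTU = 9001

	t.Execute(os.Stdout, aws)       // veth_mtu: "1440"
	t.Execute(os.Stdout, openstack) // veth_mtu: "1430"
	t.Execute(os.Stdout, custom)    // veth_mtu: "9001"
}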
# The CNI network configuration to install on each node. The special
# values in this config will be automatically populated.
@@ -39,8 +40,8 @@ data:
"type": "calico",
"log_level": "info",
"datastore_type": "kubernetes",
"mtu": __CNI_MTU__,
"nodename": "__KUBERNETES_NODE_NAME__",
"mtu": __CNI_MTU__,
"ipam": {
"type": "host-local",
"subnet": "usePodCidr"
@@ -70,12 +71,13 @@ data:
}
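For orientation: the __CNI_MTU__ and __KUBERNETES_NODE_NAME__ tokens above are substituted by the install-cni init container (defined further down) before the rendered config is written out. A rough Go sketch of that substitution, under the assumption that it is plain token replacement driven by the container's environment (the real implementation is the install script inside the calico/cni image):

package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	// These variables are populated by the env section of the
	// install-cni init container shown later in this manifest.
	conf := os.Getenv("CNI_NETWORK_CONFIG")
	conf = strings.ReplaceAll(conf, "__CNI_MTU__", os.Getenv("CNI_MTU"))
	conf = strings.ReplaceAll(conf, "__KUBERNETES_NODE_NAME__", os.Getenv("KUBERNETES_NODE_NAME"))

	// With CNI_MTU=1440 and KUBERNETES_NODE_NAME=node-1 this yields
	// "mtu": 1440 and "nodename": "node-1" in the final conflist.
	fmt.Println(conf)
}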
---
# Source: calico/templates/kdd-crds.yaml
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: felixconfigurations.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
@@ -90,6 +92,8 @@ apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ipamblocks.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
@@ -105,6 +109,8 @@ apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: blockaffinities.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
@@ -120,6 +126,8 @@ apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ipamhandles.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
@@ -135,6 +143,8 @@ apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ipamconfigs.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
@@ -150,6 +160,8 @@ apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: bgppeers.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
@@ -165,6 +177,8 @@ apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: bgpconfigurations.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
@@ -180,6 +194,8 @@ apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ippools.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
@@ -195,6 +211,8 @@ apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: hostendpoints.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
@@ -210,6 +228,8 @@ apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clusterinformations.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
@@ -225,6 +245,8 @@ apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: globalnetworkpolicies.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
@@ -240,6 +262,8 @@ apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: globalnetworksets.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
@@ -255,6 +279,8 @@ apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: networkpolicies.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Namespaced
group: crd.projectcalico.org
@@ -270,6 +296,8 @@ apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: networksets.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Namespaced
group: crd.projectcalico.org
@@ -278,8 +306,8 @@ spec:
kind: NetworkSet
plural: networksets
singular: networkset
---
# Source: calico/templates/rbac.yaml
# Include a clusterrole for the calico-node DaemonSet,
# and bind it to the calico-node serviceaccount.
@@ -287,6 +315,8 @@ kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico
labels:
role.kubernetes.io/networking: "1"
rules:
# The CNI plugin needs to get pods, nodes, and namespaces.
- apiGroups: [""]
@@ -390,6 +420,8 @@ kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
labels:
role.kubernetes.io/networking: "1"
rules:
- apiGroups: [""]
resources:
@@ -413,6 +445,8 @@ kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: canal-flannel
labels:
role.kubernetes.io/networking: "1"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
@@ -422,11 +456,12 @@ subjects:
name: canal
namespace: kube-system
---
# Bind the Calico ClusterRole to the canal ServiceAccount.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: canal-calico
labels:
role.kubernetes.io/networking: "1"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
@@ -436,17 +471,9 @@ subjects:
name: canal
namespace: kube-system
{{ if .Networking.Canal.TyphaReplicas -}}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: canal
namespace: kube-system
---
{{- if .Networking.Canal.TyphaReplicas }}
# Source: calico/templates/calico-typha.yaml
# This manifest creates a Service, which will be backed by Calico's Typha daemon.
# Typha sits in between Felix and the API server, reducing Calico's load on the API server.
@@ -457,6 +484,7 @@ metadata:
namespace: kube-system
labels:
k8s-app: calico-typha
role.kubernetes.io/networking: "1"
spec:
ports:
- port: 5473
@@ -477,6 +505,7 @@ metadata:
namespace: kube-system
labels:
k8s-app: calico-typha
role.kubernetes.io/networking: "1"
spec:
# Number of Typha replicas. To enable Typha, set this to a non-zero value *and* set the
# typha_service_name variable in the canal-config ConfigMap above.
@@ -493,6 +522,7 @@ spec:
metadata:
labels:
k8s-app: calico-typha
role.kubernetes.io/networking: "1"
annotations:
# This, along with the CriticalAddonsOnly toleration below, marks the pod as a critical
# add-on, ensuring it gets priority scheduling and that its resources are reserved
@@ -500,27 +530,25 @@ spec:
scheduler.alpha.kubernetes.io/critical-pod: ''
cluster-autoscaler.kubernetes.io/safe-to-evict: 'true'
spec:
tolerations:
- key: CriticalAddonsOnly
operator: Exists
# Since Calico can't network a pod until Typha is up, we need to run Typha itself
# as a host-networked pod.
hostNetwork: true
nodeSelector:
beta.kubernetes.io/os: linux
kubernetes.io/role: master
hostNetwork: true
tolerations:
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- key: "node-role.kubernetes.io/master"
effect: NoSchedule
# Since Calico can't network a pod until Typha is up, we need to run Typha itself
# as a host-networked pod.
serviceAccountName: canal
priorityClassName: system-cluster-critical
# fsGroup allows using projected serviceaccount tokens as described here kubernetes/kubernetes#82573
securityContext:
fsGroup: 65534
containers:
- image: calico/typha:v3.10.1
- image: calico/typha:v3.10.2
name: calico-typha
ports:
- containerPort: 5473
@@ -575,17 +603,18 @@ metadata:
namespace: kube-system
labels:
k8s-app: calico-typha
role.kubernetes.io/networking: "1"
spec:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: calico-typha
{{- end }}
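Note that everything from the Typha Service above through this PodDisruptionBudget sits inside the .Networking.Canal.TyphaReplicas gate, so leaving the replica count at zero renders none of the Typha objects and sets typha_service_name to "none". A toy sketch of that gate, using a simplified context with the same field names (the real template model is much larger):

package main

import (
	"os"
	"text/template"
)

const typhaTmpl = `typha_service_name: "{{ if .Networking.Canal.TyphaReplicas }}calico-typha{{ else }}none{{ end }}"
{{ if .Networking.Canal.TyphaReplicas -}}
# ... Service, Deployment ({{ .Networking.Canal.TyphaReplicas }} replicas) and PDB rendered here ...
{{ end -}}
`

// Simplified context; only the one field the gate reads is modeled.
type spec struct {
	Networking struct {
		Canal struct{ TyphaReplicas int }
	}
}

func main() {
	t := template.Must(template.New("typha").Parse(typhaTmpl))

	var disabled, enabled spec
	enabled.Networking.Canal.TyphaReplicas = 3

	t.Execute(os.Stdout, disabled) // only: typha_service_name: "none"
	t.Execute(os.Stdout, enabled)  // typha_service_name: "calico-typha" plus the gated block
}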
---
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
---
# Source: calico/templates/calico-node.yaml
# This manifest installs the canal container, as well
# as the CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
@@ -594,6 +623,7 @@ metadata:
namespace: kube-system
labels:
k8s-app: canal
role.kubernetes.io/networking: "1"
spec:
selector:
matchLabels:
@@ -606,6 +636,7 @@ spec:
metadata:
labels:
k8s-app: canal
role.kubernetes.io/networking: "1"
annotations:
# This, along with the CriticalAddonsOnly toleration below,
# marks the pod as a critical add-on, ensuring it gets
@@ -613,7 +644,6 @@ spec:
# if it ever gets evicted.
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-node-critical
nodeSelector:
beta.kubernetes.io/os: linux
hostNetwork: true
@@ -630,22 +660,17 @@ spec:
# Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
# deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
terminationGracePeriodSeconds: 0
priorityClassName: system-node-critical
initContainers:
# This container installs the Calico CNI binaries
# This container installs the CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: calico/cni:v3.10.1
image: calico/cni:v3.10.2
command: ["/install-cni.sh"]
env:
# Name of the CNI config file to create.
- name: CNI_CONF_NAME
value: "10-canal.conflist"
# CNI MTU Config variable
- name: CNI_MTU
valueFrom:
configMapKeyRef:
name: canal-config
key: veth_mtu
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
@@ -657,6 +682,12 @@ spec:
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# CNI MTU Config variable
- name: CNI_MTU
valueFrom:
configMapKeyRef:
name: canal-config
key: veth_mtu
# Prevents the container from sleeping forever.
- name: SLEEP
value: "false"
@@ -668,16 +699,16 @@ spec:
# Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
# to communicate with Felix over the Policy Sync API.
- name: flexvol-driver
image: calico/pod2daemon-flexvol:v3.10.1
image: calico/pod2daemon-flexvol:v3.10.2
volumeMounts:
- name: flexvol-driver-host
mountPath: /host/driver
containers:
# Runs calico/node container on each Kubernetes node. This
# Runs canal container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: calico/node:v3.10.1
image: calico/node:v3.10.2
env:
# Use Kubernetes API as the backing datastore.
- name: DATASTORE_TYPE
@@ -706,36 +737,42 @@ spec:
value: "none"
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
# was value: "k8s,bgp"
value: "k8s,canal"
# Period, in seconds, at which felix re-applies all iptables state
- name: FELIX_IPTABLESREFRESHINTERVAL
value: "60"
# No IP address needed.
- name: IP
value: ""
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Set MTU for tunnel device used if ipip is enabled
- name: FELIX_IPINIPMTU
valueFrom:
configMapKeyRef:
name: canal-config
key: veth_mtu
# Set Felix iptables binary variant, Legacy or NFT
- name: FELIX_IPTABLESBACKEND
value: "{{- or .Networking.Canal.IptablesBackend "Legacy" }}"
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "{{- or .Networking.Canal.DefaultEndpointToHostAction "ACCEPT" }}"
# Disable IPv6 on Kubernetes.
- name: FELIX_IPV6SUPPORT
value: "false"
# Set Felix logging to "INFO"
- name: FELIX_LOGSEVERITYSCREEN
value: "{{- or .Networking.Canal.LogSeveritySys "INFO" }}"
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "{{- or .Networking.Canal.DefaultEndpointToHostAction "ACCEPT" }}"
- name: FELIX_HEALTHENABLED
value: "true"
# kops additions
# Controls whether Felix inserts rules to the top of iptables chains, or appends to the bottom
- name: FELIX_CHAININSERTMODE
value: "{{- or .Networking.Canal.ChainInsertMode "insert" }}"
# Set Felix iptables binary variant, Legacy or NFT
- name: FELIX_IPTABLESBACKEND
value: "{{- or .Networking.Canal.IptablesBackend "Legacy" }}"
# Period, in seconds, at which felix re-applies all iptables state
- name: FELIX_IPTABLESREFRESHINTERVAL
value: "60"
# Set to enable the experimental Prometheus metrics server
- name: FELIX_PROMETHEUSMETRICSENABLED
value: "{{- or .Networking.Canal.PrometheusMetricsEnabled "false" }}"
@@ -748,8 +785,6 @@ spec:
# Enable Prometheus process metrics collection
- name: FELIX_PROMETHEUSPROCESSMETRICSENABLED
value: "{{- or .Networking.Canal.PrometheusProcessMetricsEnabled "true" }}"
- name: FELIX_HEALTHENABLED
value: "true"
securityContext:
privileged: true
resources:
@@ -856,3 +891,12 @@ spec:
hostPath:
type: DirectoryOrCreate
path: "{{- or .Kubelet.VolumePluginDirectory "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/" }}nodeagent~uds"
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: canal
namespace: kube-system
labels:
role.kubernetes.io/networking: "1"

@@ -829,6 +829,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
"k8s-1.7": "2.6.12-kops.1",
"k8s-1.7-v3": "3.8.0-kops.2",
"k8s-1.12": "3.9.3-kops.2",
"k8s-1.16": "3.10.2-kops.1",
}
{
@@ -840,7 +841,21 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.12.0",
KubernetesVersion: ">=1.12.0 <1.16.0",
Id: id,
})
}
{
id := "k8s-1.16"
location := key + "/" + id + ".yaml"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.16.0",
Id: id,
})
}
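The two KubernetesVersion ranges partition clusters cleanly: anything from 1.12 up to (but excluding) 1.16 keeps getting the k8s-1.12 manifest, while 1.16+ clusters pick up the new k8s-1.16 one. A hand-rolled sketch of that selection (kops's real channel matching parses full semver ranges; this only compares major.minor):

package main

import "fmt"

// Minimal major.minor version; the real matching uses full semver.
type kubeVersion struct{ major, minor int }

func atLeast(v, min kubeVersion) bool {
	return v.major > min.major || (v.major == min.major && v.minor >= min.minor)
}

// pickManifest mirrors the two version ranges added in the hunk above.
func pickManifest(v kubeVersion) string {
	switch {
	case atLeast(v, kubeVersion{1, 16}): // ">=1.16.0"
		return "k8s-1.16.yaml"
	case atLeast(v, kubeVersion{1, 12}): // ">=1.12.0 <1.16.0"
		return "k8s-1.12.yaml"
	default:
		return "an older channel entry"
	}
}

func main() {
	fmt.Println(pickManifest(kubeVersion{1, 15})) // k8s-1.12.yaml
	fmt.Println(pickManifest(kubeVersion{1, 16})) // k8s-1.16.yaml
}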
@@ -911,8 +926,8 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
"k8s-1.6": "2.4.2-kops.2",
"k8s-1.8": "2.6.7-kops.3",
"k8s-1.9": "3.2.3-kops.1",
"k8s-1.12": "3.7.4",
"k8s-1.15": "3.10.1-kops.2",
"k8s-1.12": "3.7.4-kops.1",
"k8s-1.15": "3.10.2-kops.1",
}
{
id := "pre-k8s-1.6"