Merge pull request #7528 from mikesplain/calico_update_and_typha

Calico update and typha
Kubernetes Prow Robot 2019-09-09 21:32:54 -07:00 committed by GitHub
commit a7eb9a202d
11 changed files with 464 additions and 122 deletions


@ -113,7 +113,7 @@ spec:
### Configuring Weave Net EXTRA_ARGS
Weave allows you to pass command-line arguments to weave by adding them to the EXTRA_ARGS environment variable.
This can be used for debugging or for customizing the logging level of weave net.
```
spec:
@ -220,10 +220,22 @@ For help with Calico or to report any issues:
#### Calico Backend
In kops 1.12.0 and later Calico uses the k8s APIServer as its datastore. The current setup does not make use of [Typha](https://github.com/projectcalico/typha) - a component intended to lower the impact of Calico on the k8s APIServer which is recommended in [clusters over 50 nodes](https://docs.projectcalico.org/latest/getting-started/kubernetes/installation/calico#installing-with-the-kubernetes-api-datastoremore-than-50-nodes) and is strongly recommended in clusters of 100+ nodes.
In kops 1.12.0 and later Calico uses the k8s APIServer as its datastore.
In versions of kops before 1.12.0, Calico uses etcd as the backend for storing information about workloads and policies. Calico does not interfere with normal etcd operations and does not require special handling when upgrading etcd. For more information, please visit the [etcd Docs](https://coreos.com/etcd/docs/latest/).
#### Configuring Calico to use Typha
As of kops 1.12, Calico uses the kube-apiserver as its datastore. The default setup does not make use of [Typha](https://github.com/projectcalico/typha), a component intended to lower the impact of Calico on the k8s APIServer. Typha is recommended in [clusters over 50 nodes](https://docs.projectcalico.org/latest/getting-started/kubernetes/installation/calico#installing-with-the-kubernetes-api-datastoremore-than-50-nodes) and is strongly recommended in clusters of 100+ nodes.
It is possible to configure Calico to use Typha by editing a cluster and adding a
`typhaReplicas` option to the Calico spec:
```
networking:
calico:
typhaReplicas: 3
```
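Typha's Prometheus metrics can also be enabled through the Calico spec using the `typhaPrometheusMetricsEnabled` and `typhaPrometheusMetricsPort` fields added here. A minimal sketch (values are illustrative; the metrics port defaults to 9093 when unset):
```
networking:
  calico:
    typhaReplicas: 3
    typhaPrometheusMetricsEnabled: true
    typhaPrometheusMetricsPort: 9093
```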
#### Calico troubleshooting
##### New nodes are taking minutes for syncing ip routes and new pods on them can't reach kubedns


@ -2052,6 +2052,23 @@ spec:
description: PrometheusProcessMetricsEnabled enables Prometheus
process metrics collection
type: boolean
typhaPrometheusMetricsEnabled:
description: 'TyphaPrometheusMetricsEnabled enables Prometheus metrics
collection from Typha'
type: boolean
typhaPrometheusMetricsPort:
description: 'PrometheusMetricsPort is the TCP port that the
Typha Prometheus metrics server should bind to (default:
9093)'
format: int32
type: integer
typhaReplicas:
description: 'TyphaReplicas is the number of replicas of Typha to
deploy. If set >0 Calico is configured to connect to the Typha service
rather than directly to the kube-apiserver. Intended to reduce the
load on the APIServer (default: 0)'
format: int32
type: integer
type: object
canal:
properties:


@ -96,6 +96,14 @@ type CalicoNetworkingSpec struct {
MajorVersion string `json:"majorVersion,omitempty"`
// IPIPMode is mode for CALICO_IPV4POOL_IPIP
IPIPMode string `json:"ipipMode,omitempty"`
// TyphaPrometheusMetricsEnabled enables Prometheus metrics collection from Typha
// (default: false)
TyphaPrometheusMetricsEnabled bool `json:"typhaPrometheusMetricsEnabled,omitempty"`
// TyphaPrometheusMetricsPort is the TCP port the typha Prometheus metrics server
// should bind to (default: 9093)
TyphaPrometheusMetricsPort int32 `json:"typhaPrometheusMetricsPort,omitempty"`
// TyphaReplicas is the number of replicas of Typha to deploy
TyphaReplicas int32 `json:"typhaReplicas,omitempty"`
}
// CanalNetworkingSpec declares that we want Canal networking


@ -96,6 +96,14 @@ type CalicoNetworkingSpec struct {
MajorVersion string `json:"majorVersion,omitempty"`
// IPIPMode is mode for CALICO_IPV4POOL_IPIP
IPIPMode string `json:"ipipMode,omitempty"`
// TyphaPrometheusMetricsEnabled enables Prometheus metrics collection from Typha
// (default: false)
TyphaPrometheusMetricsEnabled bool `json:"typhaPrometheusMetricsEnabled,omitempty"`
// TyphaPrometheusMetricsPort is the TCP port the typha Prometheus metrics server
// should bind to (default: 9093)
TyphaPrometheusMetricsPort int32 `json:"typhaPrometheusMetricsPort,omitempty"`
// TyphaReplicas is the number of replicas of Typha to deploy
TyphaReplicas int32 `json:"typhaReplicas,omitempty"`
}
// CanalNetworkingSpec declares that we want Canal networking


@ -1118,6 +1118,9 @@ func autoConvert_v1alpha1_CalicoNetworkingSpec_To_kops_CalicoNetworkingSpec(in *
out.PrometheusProcessMetricsEnabled = in.PrometheusProcessMetricsEnabled
out.MajorVersion = in.MajorVersion
out.IPIPMode = in.IPIPMode
out.TyphaPrometheusMetricsEnabled = in.TyphaPrometheusMetricsEnabled
out.TyphaPrometheusMetricsPort = in.TyphaPrometheusMetricsPort
out.TyphaReplicas = in.TyphaReplicas
return nil
}
@ -1136,6 +1139,9 @@ func autoConvert_kops_CalicoNetworkingSpec_To_v1alpha1_CalicoNetworkingSpec(in *
out.PrometheusProcessMetricsEnabled = in.PrometheusProcessMetricsEnabled
out.MajorVersion = in.MajorVersion
out.IPIPMode = in.IPIPMode
out.TyphaPrometheusMetricsEnabled = in.TyphaPrometheusMetricsEnabled
out.TyphaPrometheusMetricsPort = in.TyphaPrometheusMetricsPort
out.TyphaReplicas = in.TyphaReplicas
return nil
}


@ -96,6 +96,14 @@ type CalicoNetworkingSpec struct {
MajorVersion string `json:"majorVersion,omitempty"`
// IPIPMode is mode for CALICO_IPV4POOL_IPIP
IPIPMode string `json:"ipipMode,omitempty"`
// TyphaPrometheusMetricsEnabled enables Prometheus metrics collection from Typha
// (default: false)
TyphaPrometheusMetricsEnabled bool `json:"typhaPrometheusMetricsEnabled,omitempty"`
// TyphaPrometheusMetricsPort is the TCP port the typha Prometheus metrics server
// should bind to (default: 9093)
TyphaPrometheusMetricsPort int32 `json:"typhaPrometheusMetricsPort,omitempty"`
// TyphaReplicas is the number of replicas of Typha to deploy
TyphaReplicas int32 `json:"typhaReplicas,omitempty"`
}
// CanalNetworkingSpec declares that we want Canal networking


@ -1160,6 +1160,9 @@ func autoConvert_v1alpha2_CalicoNetworkingSpec_To_kops_CalicoNetworkingSpec(in *
out.PrometheusProcessMetricsEnabled = in.PrometheusProcessMetricsEnabled
out.MajorVersion = in.MajorVersion
out.IPIPMode = in.IPIPMode
out.TyphaPrometheusMetricsEnabled = in.TyphaPrometheusMetricsEnabled
out.TyphaPrometheusMetricsPort = in.TyphaPrometheusMetricsPort
out.TyphaReplicas = in.TyphaReplicas
return nil
}
@ -1178,6 +1181,9 @@ func autoConvert_kops_CalicoNetworkingSpec_To_v1alpha2_CalicoNetworkingSpec(in *
out.PrometheusProcessMetricsEnabled = in.PrometheusProcessMetricsEnabled
out.MajorVersion = in.MajorVersion
out.IPIPMode = in.IPIPMode
out.TyphaPrometheusMetricsEnabled = in.TyphaPrometheusMetricsEnabled
out.TyphaPrometheusMetricsPort = in.TyphaPrometheusMetricsPort
out.TyphaReplicas = in.TyphaReplicas
return nil
}


@ -399,6 +399,13 @@ func ValidateEtcdVersionForCalicoV3(e *kops.EtcdClusterSpec, majorVersion string
func validateNetworkingCalico(v *kops.CalicoNetworkingSpec, e *kops.EtcdClusterSpec, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if v.TyphaReplicas < 0 {
    allErrs = append(allErrs,
        field.Invalid(fldPath.Child("TyphaReplicas"), v.TyphaReplicas,
            fmt.Sprintf("Unable to set number of Typha replicas to less than 0, you've specified %d", v.TyphaReplicas)))
}
switch v.MajorVersion {
case "":
// OK:


@ -340,6 +340,23 @@ func Test_Validate_Calico(t *testing.T) {
Etcd: &kops.EtcdClusterSpec{},
},
},
{
Input: caliInput{
Calico: &kops.CalicoNetworkingSpec{
TyphaReplicas: 3,
},
Etcd: &kops.EtcdClusterSpec{},
},
},
{
Input: caliInput{
Calico: &kops.CalicoNetworkingSpec{
TyphaReplicas: -1,
},
Etcd: &kops.EtcdClusterSpec{},
},
ExpectedErrors: []string{"Invalid value::Calico.TyphaReplicas"},
},
{
Input: caliInput{
Calico: &kops.CalicoNetworkingSpec{


@ -1,7 +1,61 @@
---
# Create all the CustomResourceDefinitions needed for
# Calico policy and networking mode.
# Pulled and modified from: https://docs.projectcalico.org/v3.8/manifests/calico-typha.yaml
# Source: calico/templates/calico-config.yaml
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: calico-config
namespace: kube-system
labels:
role.kubernetes.io/networking: "1"
data:
# You must set a non-zero value for Typha replicas below.
typha_service_name: "{{- if .Networking.Calico.TyphaReplicas -}}calico-typha{{- else -}}none{{- end -}}"
# Configure the backend to use.
calico_backend: "bird"
# Configure the MTU to use
{{- if .Networking.Calico.MTU }}
veth_mtu: "{{ .Networking.Calico.MTU }}"
{{- else }}
veth_mtu: "{{- if eq .CloudProvider "openstack" -}}1430{{- else -}}1440{{- end -}}"
{{- end }}
# The CNI network configuration to install on each node. The special
# values in this config will be automatically populated.
cni_network_config: |-
{
"name": "k8s-pod-network",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "calico",
"log_level": "info",
"datastore_type": "kubernetes",
"nodename": "__KUBERNETES_NODE_NAME__",
"mtu": __CNI_MTU__,
"ipam": {
"type": "calico-ipam"
},
"policy": {
"type": "k8s"
},
"kubernetes": {
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
},
{
"type": "portmap",
"snat": true,
"capabilities": {"portMappings": true}
}
]
}
---
# Source: calico/templates/kdd-crds.yaml
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
@ -22,6 +76,8 @@ apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ipamblocks.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
@ -37,6 +93,8 @@ apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: blockaffinities.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
@ -52,6 +110,8 @@ apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ipamhandles.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
@ -67,6 +127,8 @@ apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ipamconfigs.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Cluster
group: crd.projectcalico.org
@ -77,6 +139,7 @@ spec:
singular: ipamconfig
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
@ -210,12 +273,15 @@ spec:
kind: NetworkPolicy
plural: networkpolicies
singular: networkpolicy
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: networksets.crd.projectcalico.org
labels:
role.kubernetes.io/networking: "1"
spec:
scope: Namespaced
group: crd.projectcalico.org
@ -225,73 +291,72 @@ spec:
plural: networksets
singular: networkset
---
# Source: calico/templates/rbac.yaml
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
# Include a clusterrole for the kube-controllers component,
# and bind it to the calico-kube-controllers serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico-config
namespace: kube-system
name: calico-kube-controllers
labels:
role.kubernetes.io/networking: "1"
data:
# To enable Typha, set this to "calico-typha" *and* set a non-zero value for Typha replicas
# below. We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is
# essential.
typha_service_name: "none"
# Configure the MTU to use
{{- if .Networking.Calico.MTU }}
veth_mtu: "{{ .Networking.Calico.MTU }}"
{{- else }}
veth_mtu: "{{- if eq .CloudProvider "openstack" -}}1430{{- else -}}1440{{- end -}}"
{{- end }}
# Configure the Calico backend to use.
calico_backend: "bird"
# The CNI network configuration to install on each node.
cni_network_config: |-
{
"name": "k8s-pod-network",
"cniVersion": "0.3.0",
"plugins": [
{
"type": "calico",
"log_level": "info",
"datastore_type": "kubernetes",
"nodename": "__KUBERNETES_NODE_NAME__",
"mtu": __CNI_MTU__,
"ipam": {
"type": "host-local",
"subnet": "usePodCidr"
},
"policy": {
"type": "k8s"
},
"kubernetes": {
"kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__"
}
},
{
"type": "portmap",
"snat": true,
"capabilities": {"portMappings": true}
}
]
}
rules:
# Nodes are watched to monitor for deletions.
- apiGroups: [""]
resources:
- nodes
verbs:
- watch
- list
- get
# Pods are queried to check for existence.
- apiGroups: [""]
resources:
- pods
verbs:
- get
# IPAM resources are manipulated when nodes are deleted.
- apiGroups: ["crd.projectcalico.org"]
resources:
- ippools
verbs:
- list
- apiGroups: ["crd.projectcalico.org"]
resources:
- blockaffinities
- ipamblocks
- ipamhandles
verbs:
- get
- list
- create
- update
- delete
# Needs access to update clusterinformations.
- apiGroups: ["crd.projectcalico.org"]
resources:
- clusterinformations
verbs:
- get
- create
- update
---
apiVersion: v1
kind: ServiceAccount
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico-node
namespace: kube-system
name: calico-kube-controllers
labels:
role.kubernetes.io/networking: "1"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-kube-controllers
subjects:
- kind: ServiceAccount
name: calico-kube-controllers
namespace: kube-system
---
# Include a clusterrole for the calico-node DaemonSet,
# and bind it to the calico-node serviceaccount.
kind: ClusterRole
@ -440,48 +505,143 @@ subjects:
- kind: ServiceAccount
name: calico-node
namespace: kube-system
{{ if .Networking.Calico.TyphaReplicas -}}
---
# Source: calico/templates/calico-typha.yaml
# This manifest creates a Service, which will be backed by Calico's Typha daemon.
# Typha sits in between Felix and the API server, reducing Calico's load on the API server.
apiVersion: v1
kind: Service
metadata:
name: calico-typha
namespace: kube-system
labels:
k8s-app: calico-typha
role.kubernetes.io/networking: "1"
spec:
ports:
- port: 5473
protocol: TCP
targetPort: calico-typha
name: calico-typha
selector:
k8s-app: calico-typha
---
# This manifest creates a Deployment of Typha to back the above service.
apiVersion: apps/v1
kind: Deployment
metadata:
name: calico-kube-controllers
name: calico-typha
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
k8s-app: calico-typha
role.kubernetes.io/networking: "1"
spec:
# Number of Typha replicas. To enable Typha, set this to a non-zero value *and* set the
# typha_service_name variable in the calico-config ConfigMap above.
#
# We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is essential
# (when using the Kubernetes datastore). Use one replica for every 100-200 nodes. In
# production, we recommend running at least 3 replicas to reduce the impact of rolling upgrade.
replicas: {{ or .Networking.Calico.TyphaReplicas "0" }}
revisionHistoryLimit: 2
selector:
matchLabels:
k8s-app: calico-kube-controllers
# The controllers can only have a single active instance.
replicas: 1
strategy:
type: Recreate
k8s-app: calico-typha
template:
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
k8s-app: calico-typha
role.kubernetes.io/networking: "1"
annotations:
# This, along with the CriticalAddonsOnly toleration below, marks the pod as a critical
# add-on, ensuring it gets priority scheduling and that its resources are reserved
# if it ever gets evicted.
scheduler.alpha.kubernetes.io/critical-pod: ''
cluster-autoscaler.kubernetes.io/safe-to-evict: 'true'
spec:
nodeSelector:
beta.kubernetes.io/os: linux
hostNetwork: true
serviceAccount: calico-node
tolerations:
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
# Since Calico can't network a pod until Typha is up, we need to run Typha itself
# as a host-networked pod.
serviceAccountName: calico-node
priorityClassName: system-cluster-critical
containers:
- name: calico-kube-controllers
image: calico/kube-controllers:v3.8.0
env:
- name: DATASTORE_TYPE
value: kubernetes
- name: ENABLED_CONTROLLERS
value: policy,namespace,serviceaccount,workloadendpoint,node
- image: calico/typha:v3.8.2
name: calico-typha
ports:
- containerPort: 5473
name: calico-typha
protocol: TCP
env:
# Enable "info" logging by default. Can be set to "debug" to increase verbosity.
- name: TYPHA_LOGSEVERITYSCREEN
value: "info"
# Disable logging to file and syslog since those don't make sense in Kubernetes.
- name: TYPHA_LOGFILEPATH
value: "none"
- name: TYPHA_LOGSEVERITYSYS
value: "none"
# Monitor the Kubernetes API to find the number of running instances and rebalance
# connections.
- name: TYPHA_CONNECTIONREBALANCINGMODE
value: "kubernetes"
- name: TYPHA_DATASTORETYPE
value: "kubernetes"
- name: TYPHA_HEALTHENABLED
value: "true"
# Expose Prometheus metrics when enabled. Since Typha is host-networked,
# this opens a port on the host, which may need to be secured.
- name: TYPHA_PROMETHEUSMETRICSENABLED
value: "{{- or .Networking.Calico.TyphaPrometheusMetricsEnabled "false" }}"
- name: TYPHA_PROMETHEUSMETRICSPORT
value: "{{- or .Networking.Calico.TyphaPrometheusMetricsPort "9093" }}"
livenessProbe:
httpGet:
path: /liveness
port: 9098
host: localhost
periodSeconds: 30
initialDelaySeconds: 30
readinessProbe:
httpGet:
path: /readiness
port: 9098
host: localhost
periodSeconds: 10
---
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# This manifest creates a Pod Disruption Budget for Typha to allow K8s Cluster Autoscaler to evict
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: calico-typha
namespace: kube-system
labels:
k8s-app: calico-typha
role.kubernetes.io/networking: "1"
spec:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: calico-typha
{{- end -}}
---
# Source: calico/templates/calico-node.yaml
# This manifest installs the calico-node container, as well
# as the CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
@ -505,11 +665,17 @@ spec:
k8s-app: calico-node
role.kubernetes.io/networking: "1"
annotations:
# This, along with the CriticalAddonsOnly toleration below,
# marks the pod as a critical add-on, ensuring it gets
# priority scheduling and that its resources are reserved
# if it ever gets evicted.
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
nodeSelector:
beta.kubernetes.io/os: linux
hostNetwork: true
tolerations:
# Make sure calico/node gets scheduled on all nodes.
# Make sure calico-node gets scheduled on all nodes.
- effect: NoSchedule
operator: Exists
# Mark the pod as a critical add-on for rescheduling.
@ -521,11 +687,33 @@ spec:
# Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
# deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
terminationGracePeriodSeconds: 0
priorityClassName: system-node-critical
initContainers:
# This container installs the Calico CNI binaries
# This container performs upgrade from host-local IPAM to calico-ipam.
# It can be deleted if this is a fresh installation, or if you have already
# upgraded to use calico-ipam.
- name: upgrade-ipam
image: calico/cni:v3.8.2
command: ["/opt/cni/bin/calico-ipam", "-upgrade"]
env:
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
volumeMounts:
- mountPath: /var/lib/cni/networks
name: host-local-net-dir
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
# This container installs the CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: calico/cni:v3.8.0
image: calico/cni:v3.8.2
command: ["/install-cni.sh"]
env:
# Name of the CNI config file to create.
@ -556,12 +744,19 @@ spec:
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
# Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
# to communicate with Felix over the Policy Sync API.
- name: flexvol-driver
image: calico/pod2daemon-flexvol:v3.8.2
volumeMounts:
- name: flexvol-driver-host
mountPath: /host/driver
containers:
# Runs calico/node container on each Kubernetes node. This
# Runs calico-node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: calico/node:v3.8.0
image: calico/node:v3.8.2
env:
# Use Kubernetes API as the backing datastore.
- name: DATASTORE_TYPE
@ -621,32 +816,11 @@ spec:
value: "{{- or .Networking.Calico.LogSeverityScreen "info" }}"
- name: FELIX_HEALTHENABLED
value: "true"
# kops additions
# Set to enable the experimental Prometheus metrics server
- name: FELIX_PROMETHEUSMETRICSENABLED
value: "{{- or .Networking.Calico.PrometheusMetricsEnabled "false" }}"
# TCP port that the Prometheus metrics server should bind to
- name: FELIX_PROMETHEUSMETRICSPORT
value: "{{- or .Networking.Calico.PrometheusMetricsPort "9091" }}"
# Enable Prometheus Go runtime metrics collection
- name: FELIX_PROMETHEUSGOMETRICSENABLED
value: "{{- or .Networking.Calico.PrometheusGoMetricsEnabled "true" }}"
# Enable Prometheus process metrics collection
- name: FELIX_PROMETHEUSPROCESSMETRICSENABLED
value: "{{- or .Networking.Calico.PrometheusProcessMetricsEnabled "true" }}"
# TODO: In previous versions, no longer needed?
# Set noderef for node controller.
#- name: CALICO_K8S_NODE_REF
# valueFrom:
# fieldRef:
# fieldPath: spec.nodeName
securityContext:
privileged: true
resources:
requests:
cpu: 10m
cpu: 250m
livenessProbe:
httpGet:
path: /liveness
@ -675,13 +849,10 @@ spec:
- mountPath: /var/lib/calico
name: var-lib-calico
readOnly: false
# TODO: Still needed?
# Necessary for gossip based DNS
- mountPath: /etc/hosts
name: etc-hosts
readOnly: true
- name: policysync
mountPath: /var/run/nodeagent
volumes:
# Used by calico/node.
# Used by calico-node.
- name: lib-modules
hostPath:
path: /lib/modules
@ -702,13 +873,96 @@ spec:
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
# TODO: Still needed?
# Necessary for gossip based DNS
- name: etc-hosts
# Mount in the directory for host-local IPAM allocations. This is
# used when upgrading from host-local to calico-ipam, and can be removed
# if not using the upgrade-ipam init container.
- name: host-local-net-dir
hostPath:
path: /etc/hosts
path: /var/lib/cni/networks
# Used to create per-pod Unix Domain Sockets
- name: policysync
hostPath:
type: DirectoryOrCreate
path: /var/run/nodeagent
# Used to install Flex Volume Driver
- name: flexvol-driver-host
hostPath:
type: DirectoryOrCreate
path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-node
namespace: kube-system
labels:
role.kubernetes.io/networking: "1"
---
# Source: calico/templates/calico-kube-controllers.yaml
# See https://github.com/projectcalico/kube-controllers
apiVersion: apps/v1
kind: Deployment
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
role.kubernetes.io/networking: "1"
spec:
# The controllers can only have a single active instance.
replicas: 1
selector:
matchLabels:
k8s-app: calico-kube-controllers
strategy:
type: Recreate
template:
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
role.kubernetes.io/networking: "1"
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
nodeSelector:
beta.kubernetes.io/os: linux
tolerations:
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/master
effect: NoSchedule
serviceAccountName: calico-kube-controllers
priorityClassName: system-cluster-critical
containers:
- name: calico-kube-controllers
image: calico/kube-controllers:v3.8.2
env:
# Choose which controllers to run.
- name: ENABLED_CONTROLLERS
value: node
- name: DATASTORE_TYPE
value: kubernetes
readinessProbe:
exec:
command:
- /usr/bin/check-status
- -r
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
role.kubernetes.io/networking: "1"
{{ if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) -}}
# This manifest installs the k8s-ec2-srcdst container, which disables
@ -812,4 +1066,3 @@ spec:
nodeSelector:
node-role.kubernetes.io/master: ""
{{- end -}}


@ -608,7 +608,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
// This means whenever we create or update a networking plugin, we should be sure that:
// 1. the selector is role.kubernetes.io/networking=1
// 2. every object in the manifest is labeleled with role.kubernetes.io/networking=1
// 2. every object in the manifest is labeled with role.kubernetes.io/networking=1
// TODO: Some way to test/enforce this?
@ -807,7 +807,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
"k8s-1.6": "2.6.9-kops.1",
"k8s-1.7": "2.6.12-kops.1",
"k8s-1.7-v3": "3.8.0-kops.1",
"k8s-1.12": "3.8.0-kops.1",
"k8s-1.12": "3.8.2-kops.1",
}
{