Adding Calico V3

- Add Calico configuration field to specify MajorVersion
- Add Calico V3 manifest
- Default new installations to Calico V3
  - Set etcd to V3 when calico is specified for networking
- Validate that etcd is V3 when Calico MajorVersion is v3
- Using Calico v3.2.1
This commit is contained in:
Erik Stidham 2018-05-03 08:49:32 -05:00
parent 8aa61bc983
commit 3da3b415e0
10 changed files with 924 additions and 47 deletions

View File

@ -33,6 +33,7 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kops"
"k8s.io/kops/cmd/kops/util"
api "k8s.io/kops/pkg/apis/kops"
@ -43,6 +44,7 @@ import (
"k8s.io/kops/pkg/commands"
"k8s.io/kops/pkg/dns"
"k8s.io/kops/pkg/featureflag"
"k8s.io/kops/pkg/model/components"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/cloudup"
"k8s.io/kops/upup/pkg/fi/cloudup/aliup"
@ -918,7 +920,17 @@ func RunCreateCluster(f *util.Factory, out io.Writer, c *CreateClusterOptions) e
Backend: "udp",
}
case "calico":
cluster.Spec.Networking.Calico = &api.CalicoNetworkingSpec{}
cluster.Spec.Networking.Calico = &api.CalicoNetworkingSpec{
MajorVersion: "v3",
}
// Validate to check if etcd clusters have an acceptable version
if errList := validation.ValidateEtcdVersionForCalicoV3(cluster.Spec.EtcdClusters[0], cluster.Spec.Networking.Calico.MajorVersion, field.NewPath("Calico")); len(errList) != 0 {
// This is not a special version but simply of the 3 series
for _, etcd := range cluster.Spec.EtcdClusters {
etcd.Version = components.DefaultEtcd3Version_1_11
}
}
case "canal":
cluster.Spec.Networking.Canal = &api.CanalNetworkingSpec{}
case "kube-router":

View File

@ -87,6 +87,8 @@ type CalicoNetworkingSpec struct {
PrometheusGoMetricsEnabled bool `json:"prometheusGoMetricsEnabled,omitempty"`
// PrometheusProcessMetricsEnabled enables Prometheus process metrics collection
PrometheusProcessMetricsEnabled bool `json:"prometheusProcessMetricsEnabled,omitempty"`
// MajorVersion is the version of Calico to use
MajorVersion string `json:"majorVersion,omitempty"`
}
// CanalNetworkingSpec declares that we want Canal networking

View File

@ -87,6 +87,8 @@ type CalicoNetworkingSpec struct {
PrometheusGoMetricsEnabled bool `json:"prometheusGoMetricsEnabled,omitempty"`
// PrometheusProcessMetricsEnabled enables Prometheus process metrics collection
PrometheusProcessMetricsEnabled bool `json:"prometheusProcessMetricsEnabled,omitempty"`
// MajorVersion is the version of Calico to use
MajorVersion string `json:"majorVersion,omitempty"`
}
// CanalNetworkingSpec declares that we want Canal networking

View File

@ -87,6 +87,8 @@ type CalicoNetworkingSpec struct {
PrometheusGoMetricsEnabled bool `json:"prometheusGoMetricsEnabled,omitempty"`
// PrometheusProcessMetricsEnabled enables Prometheus process metrics collection
PrometheusProcessMetricsEnabled bool `json:"prometheusProcessMetricsEnabled,omitempty"`
// MajorVersion is the version of Calico to use
MajorVersion string `json:"majorVersion,omitempty"`
}
// CanalNetworkingSpec declares that we want Canal networking

View File

@ -21,11 +21,14 @@ import (
"net"
"strings"
"github.com/blang/semver"
"k8s.io/apimachinery/pkg/api/validation"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/model/components"
"k8s.io/kops/pkg/model/iam"
)
@ -94,6 +97,9 @@ func validateClusterSpec(spec *kops.ClusterSpec, fieldPath *field.Path) field.Er
if spec.Networking != nil {
allErrs = append(allErrs, validateNetworking(spec.Networking, fieldPath.Child("networking"))...)
if spec.Networking.Calico != nil {
allErrs = append(allErrs, validateNetworkingCalico(spec.Networking.Calico, spec.EtcdClusters[0], fieldPath.Child("networking").Child("Calico"))...)
}
}
// IAM additionalPolicies
@ -341,3 +347,44 @@ func validateEtcdClusterSpec(spec *kops.EtcdClusterSpec, fieldPath *field.Path)
return errs
}
// ValidateEtcdVersionForCalicoV3 verifies that the given etcd cluster runs a
// 3.x release, which Calico's v3 (etcdv3) datastore requires. majorVersion is
// the Calico major version being requested ("v3") and is echoed back in any
// error. An empty etcd version is treated as the kops default
// (components.DefaultEtcd2Version), a 2.x release, and is therefore rejected.
func ValidateEtcdVersionForCalicoV3(e *kops.EtcdClusterSpec, majorVersion string, fldPath *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}
	version := e.Version
	if e.Version == "" {
		version = components.DefaultEtcd2Version
	}
	sem, err := semver.Parse(strings.TrimPrefix(version, "v"))
	if err != nil {
		allErrs = append(allErrs, field.InternalError(fldPath.Child("MajorVersion"), fmt.Errorf("Failed to parse Etcd version to check compatibility: %s", err)))
		// Return now: sem is the zero value here, so falling through to the
		// sem.Major check would add a second, misleading "not v3" error.
		return allErrs
	}
	if sem.Major != 3 {
		if e.Version == "" {
			allErrs = append(allErrs,
				field.Invalid(fldPath.Child("MajorVersion"), majorVersion,
					fmt.Sprintf("Unable to use v3 when ETCD version for %s cluster is default(%s)",
						e.Name, components.DefaultEtcd2Version)))
		} else {
			allErrs = append(allErrs,
				field.Invalid(fldPath.Child("MajorVersion"), majorVersion,
					fmt.Sprintf("Unable to use v3 when ETCD version for %s cluster is %s", e.Name, e.Version)))
		}
	}
	return allErrs
}
// validateNetworkingCalico checks the Calico networking spec against the etcd
// cluster configuration. Selecting MajorVersion "v3" requires an etcd 3.x
// datastore, so that choice triggers the etcd version compatibility check;
// an empty MajorVersion is always accepted, and anything else is unsupported.
func validateNetworkingCalico(v *kops.CalicoNetworkingSpec, e *kops.EtcdClusterSpec, fldPath *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}

	if v.MajorVersion == "" {
		// No explicit major version requested; nothing further to validate.
		return allErrs
	}
	if v.MajorVersion != "v3" {
		return append(allErrs, field.NotSupported(fldPath.Child("MajorVersion"), v.MajorVersion, []string{"v3"}))
	}
	// "v3" requested: ensure the etcd cluster is compatible.
	return append(allErrs, ValidateEtcdVersionForCalicoV3(e, v.MajorVersion, fldPath)...)
}

View File

@ -291,3 +291,47 @@ func Test_Validate_AdditionalPolicies(t *testing.T) {
testErrors(t, g.Input, errs, g.ExpectedErrors)
}
}
// caliInput pairs a Calico networking spec with the etcd cluster spec it is
// validated against.
type caliInput struct {
	Calico *kops.CalicoNetworkingSpec
	Etcd   *kops.EtcdClusterSpec
}

// Test_Validate_Calico exercises validateNetworkingCalico: the default
// (empty) MajorVersion is accepted with any etcd version, "v3" is accepted
// with an etcd 3.x cluster, and "v3" with an etcd 2.x cluster is rejected.
func Test_Validate_Calico(t *testing.T) {
	cases := []struct {
		Input          caliInput
		ExpectedErrors []string
	}{
		// Default Calico spec with default etcd: no errors expected.
		{
			Input: caliInput{
				Calico: &kops.CalicoNetworkingSpec{},
				Etcd:   &kops.EtcdClusterSpec{},
			},
		},
		// Calico v3 with etcd 3.x: compatible, no errors expected.
		{
			Input: caliInput{
				Calico: &kops.CalicoNetworkingSpec{MajorVersion: "v3"},
				Etcd:   &kops.EtcdClusterSpec{Version: "3.2.18"},
			},
		},
		// Calico v3 with etcd 2.x: incompatible, expect a validation error.
		{
			Input: caliInput{
				Calico: &kops.CalicoNetworkingSpec{MajorVersion: "v3"},
				Etcd:   &kops.EtcdClusterSpec{Version: "2.2.18"},
			},
			ExpectedErrors: []string{"Invalid value::Calico.MajorVersion"},
		},
	}
	for _, c := range cases {
		errs := validateNetworkingCalico(c.Input.Calico, c.Input.Etcd, field.NewPath("Calico"))
		testErrors(t, c.Input, errs, c.ExpectedErrors)
	}
}

View File

@ -141,7 +141,7 @@ spec:
# container programs network policy and routes on each
# host.
- name: calico-node
image: quay.io/calico/node:v2.6.7
image: quay.io/calico/node:v2.6.9
resources:
requests:
cpu: 10m
@ -226,7 +226,7 @@ spec:
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: quay.io/calico/cni:v1.11.2
image: quay.io/calico/cni:v1.11.5
resources:
requests:
cpu: 10m
@ -379,7 +379,7 @@ spec:
operator: Exists
containers:
- name: calico-kube-controllers
image: quay.io/calico/kube-controllers:v1.0.3
image: quay.io/calico/kube-controllers:v1.0.4
resources:
requests:
cpu: 10m

View File

@ -0,0 +1,748 @@
{{- $etcd_scheme := EtcdScheme }}
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-config
  namespace: kube-system
data:
  # The calico-etcd PetSet service IP:port
  etcd_endpoints: "{{ $cluster := index .EtcdClusters 0 -}}
                   {{- range $j, $member := $cluster.Members -}}
                       {{- if $j }},{{ end -}}
                       {{ $etcd_scheme }}://etcd-{{ $member.Name }}.internal.{{ ClusterName }}:4001
                   {{- end }}"

  # Configure the Calico backend to use.
  calico_backend: "bird"

  # The CNI network configuration to install on each node.
  # NOTE: this value must be strict JSON once the template conditionals are
  # resolved — no trailing commas (JSON parsers reject them).
  cni_network_config: |-
    {
      "name": "k8s-pod-network",
      "cniVersion": "0.3.0",
      "plugins": [
        {
          "type": "calico",
          "etcd_endpoints": "__ETCD_ENDPOINTS__",
          {{- if eq $etcd_scheme "https" }}
          "etcd_ca_cert_file": "/srv/kubernetes/calico/ca.pem",
          "etcd_cert_file": "/srv/kubernetes/calico/calico-client.pem",
          "etcd_key_file": "/srv/kubernetes/calico/calico-client-key.pem",
          "etcd_scheme": "https",
          {{- end }}
          "log_level": "info",
          "ipam": {
            "type": "calico-ipam"
          },
          "policy": {
            "type": "k8s"
          },
          "kubernetes": {
            "kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__"
          }
        },
        {
          "type": "portmap",
          "snat": true,
          "capabilities": {"portMappings": true}
        }
      ]
    }
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico-node
labels:
role.kubernetes.io/networking: "1"
rules:
- apiGroups: [""]
resources:
- pods
- nodes
verbs:
- get
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-node
namespace: kube-system
labels:
role.kubernetes.io/networking: "1"
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico-node
labels:
role.kubernetes.io/networking: "1"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-node
subjects:
- kind: ServiceAccount
name: calico-node
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
role.kubernetes.io/networking: "1"
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico-kube-controllers
labels:
role.kubernetes.io/networking: "1"
rules:
- apiGroups:
- ""
- extensions
resources:
- pods
- namespaces
- networkpolicies
- nodes
verbs:
- watch
- list
- apiGroups:
- networking.k8s.io
resources:
- networkpolicies
verbs:
- watch
- list
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico-kube-controllers
labels:
role.kubernetes.io/networking: "1"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-kube-controllers
subjects:
- kind: ServiceAccount
name: calico-kube-controllers
namespace: kube-system
---
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: calico-node
namespace: kube-system
labels:
k8s-app: calico-node
role.kubernetes.io/networking: "1"
spec:
selector:
matchLabels:
k8s-app: calico-node
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
template:
metadata:
labels:
k8s-app: calico-node
role.kubernetes.io/networking: "1"
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
hostNetwork: true
tolerations:
# Make sure calico/node gets scheduled on all nodes.
- effect: NoSchedule
operator: Exists
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- effect: NoExecute
operator: Exists
serviceAccountName: calico-node
# Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
# deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
terminationGracePeriodSeconds: 0
containers:
# Runs calico/node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: quay.io/calico/node:v3.2.1
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
{{- if eq $etcd_scheme "https" }}
- name: ETCD_CERT_FILE
value: /certs/calico-client.pem
- name: ETCD_KEY_FILE
value: /certs/calico-client-key.pem
- name: ETCD_CA_CERT_FILE
value: /certs/ca.pem
{{- end }}
# Choose the backend to use.
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
value: "kops,bgp"
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Set noderef for node controller.
- name: CALICO_K8S_NODE_REF
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "ACCEPT"
# The default IPv4 pool to create on startup if none exists. Pod IPs will be
# chosen from this range. Changing this value after installation will have
# no effect. This should fall within `--cluster-cidr`.
# Configure the IP Pool from which Pod IPs will be chosen.
- name: CALICO_IPV4POOL_CIDR
value: "{{ .KubeControllerManager.ClusterCIDR }}"
- name: CALICO_IPV4POOL_IPIP
value: "{{- if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) -}}cross-subnet{{- else -}}always{{- end -}}"
# Disable IPv6 on Kubernetes.
- name: FELIX_IPV6SUPPORT
value: "false"
# Set Felix logging to the desired level
- name: FELIX_LOGSEVERITYSCREEN
value: "{{- or .Networking.Calico.LogSeverityScreen "info" }}"
# Set to enable the experimental Prometheus metrics server
- name: FELIX_PROMETHEUSMETRICSENABLED
value: "{{- or .Networking.Calico.PrometheusMetricsEnabled "false" }}"
# TCP port that the Prometheus metrics server should bind to
- name: FELIX_PROMETHEUSMETRICSPORT
value: "{{- or .Networking.Calico.PrometheusMetricsPort "9091" }}"
# Enable Prometheus Go runtime metrics collection
- name: FELIX_PROMETHEUSGOMETRICSENABLED
value: "{{- or .Networking.Calico.PrometheusGoMetricsEnabled "true" }}"
# Enable Prometheus process metrics collection
- name: FELIX_PROMETHEUSPROCESSMETRICSENABLED
value: "{{- or .Networking.Calico.PrometheusProcessMetricsEnabled "true" }}"
# Auto-detect the BGP IP address.
- name: IP
value: "autodetect"
- name: FELIX_HEALTHENABLED
value: "true"
securityContext:
privileged: true
resources:
requests:
cpu: 10m
livenessProbe:
httpGet:
path: /liveness
port: 9099
host: localhost
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
readinessProbe:
exec:
command:
- /bin/calico-node
- -bird-ready
- -felix-ready
periodSeconds: 10
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
- mountPath: /var/lib/calico
name: var-lib-calico
readOnly: false
# Necessary for gossip based DNS
- mountPath: /etc/hosts
name: etc-hosts
readOnly: true
{{- if eq $etcd_scheme "https" }}
- mountPath: /certs
name: calico
readOnly: true
{{- end }}
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: quay.io/calico/cni:v3.2.1
command: ["/install-cni.sh"]
env:
# Name of the CNI config file to create.
- name: CNI_CONF_NAME
value: "10-calico.conflist"
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: calico-config
key: cni_network_config
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
# Necessary for gossip based DNS
- mountPath: /etc/hosts
name: etc-hosts
readOnly: true
resources:
requests:
cpu: 10m
initContainers:
- name: migrate
image: calico/upgrade:v1.0.5
command: ['/bin/sh', '-c', '/node-init-container.sh']
env:
- name: CALICO_ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
- name: CALICO_APIV1_DATASTORE_TYPE
value: "etcdv2"
- name: CALICO_APIV1_ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
{{- if eq $etcd_scheme "https" }}
- name: CALICO_ETCD_CERT_FILE
value: /certs/calico-client.pem
- name: CALICO_ETCD_KEY_FILE
value: /certs/calico-client-key.pem
- name: CALICO_ETCD_CA_CERT_FILE
value: /certs/ca.pem
- name: CALICO_APIV1_ETCD_CERT_FILE
value: /certs/calico-client.pem
- name: CALICO_APIV1_ETCD_KEY_FILE
value: /certs/calico-client-key.pem
- name: CALICO_APIV1_ETCD_CA_CERT_FILE
value: /certs/ca.pem
{{- end }}
volumeMounts:
# Necessary for gossip based DNS
- mountPath: /etc/hosts
name: etc-hosts
readOnly: true
{{- if eq $etcd_scheme "https" }}
- mountPath: /certs
name: calico
readOnly: true
{{- end }}
volumes:
# Used by calico/node.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
- name: var-lib-calico
hostPath:
path: /var/lib/calico
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
# Necessary for gossip based DNS
- name: etc-hosts
hostPath:
path: /etc/hosts
{{- if eq $etcd_scheme "https" }}
- name: calico
hostPath:
path: /srv/kubernetes/calico
{{- end }}
---
# This manifest deploys the Calico Kubernetes controllers.
# See https://github.com/projectcalico/kube-controllers
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
role.kubernetes.io/networking: "1"
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
# The controllers can only have a single active instance.
replicas: 1
strategy:
type: Recreate
template:
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
role.kubernetes.io/networking: "1"
spec:
# The controllers must run in the host network namespace so that
# it isn't governed by policy that would prevent it from working.
hostNetwork: true
tolerations:
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/master
effect: NoSchedule
serviceAccountName: calico-kube-controllers
containers:
- name: calico-kube-controllers
image: quay.io/calico/kube-controllers:v3.2.1
resources:
requests:
cpu: 10m
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# Choose which controllers to run.
- name: ENABLED_CONTROLLERS
value: policy,profile,workloadendpoint,node
{{- if eq $etcd_scheme "https" }}
- name: ETCD_CERT_FILE
value: /certs/calico-client.pem
- name: ETCD_KEY_FILE
value: /certs/calico-client-key.pem
- name: ETCD_CA_CERT_FILE
value: /certs/ca.pem
volumeMounts:
- mountPath: /certs
name: calico
readOnly: true
{{- end }}
readinessProbe:
exec:
command:
- /usr/bin/check-status
- -r
initContainers:
- name: migrate
image: calico/upgrade:v1.0.5
command: ['/bin/sh', '-c', '/controller-init.sh']
env:
- name: CALICO_ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
- name: CALICO_APIV1_DATASTORE_TYPE
value: "etcdv2"
- name: CALICO_APIV1_ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
{{- if eq $etcd_scheme "https" }}
- name: CALICO_ETCD_CERT_FILE
value: /certs/calico-client.pem
- name: CALICO_ETCD_KEY_FILE
value: /certs/calico-client-key.pem
- name: CALICO_ETCD_CA_CERT_FILE
value: /certs/ca.pem
- name: CALICO_APIV1_ETCD_CERT_FILE
value: /certs/calico-client.pem
- name: CALICO_APIV1_ETCD_KEY_FILE
value: /certs/calico-client-key.pem
- name: CALICO_APIV1_ETCD_CA_CERT_FILE
value: /certs/ca.pem
{{- end }}
volumeMounts:
# Necessary for gossip based DNS
- mountPath: /etc/hosts
name: etc-hosts
readOnly: true
{{- if eq $etcd_scheme "https" }}
- mountPath: /certs
name: calico
readOnly: true
{{- end }}
volumes:
# Necessary for gossip based DNS
- name: etc-hosts
hostPath:
path: /etc/hosts
{{- if eq $etcd_scheme "https" }}
- name: calico
hostPath:
path: /srv/kubernetes/calico
{{- end }}
# This manifest runs the Migration complete container that monitors for the
# completion of the calico-node Daemonset rollout and when it finishes
# successfully rolling out it will mark the migration complete and allow pods
# to be created again.
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-upgrade-job
namespace: kube-system
labels:
role.kubernetes.io/networking: "1"
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico-upgrade-job
labels:
role.kubernetes.io/networking: "1"
rules:
- apiGroups:
- extensions
resources:
- daemonsets
- daemonsets/status
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: calico-upgrade-job
labels:
role.kubernetes.io/networking: "1"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-upgrade-job
subjects:
- kind: ServiceAccount
name: calico-upgrade-job
namespace: kube-system
---
# If anything in this job is changed then the name of the job
# should be changed because Jobs cannot be updated, so changing
# the name would run a different Job if the previous version had been
# created before and it does not hurt to rerun this job.
apiVersion: batch/v1
kind: Job
metadata:
  name: calico-complete-upgrade
  namespace: kube-system
  labels:
    role.kubernetes.io/networking: "1"
spec:
  template:
    metadata:
      labels:
        role.kubernetes.io/networking: "1"
    spec:
      hostNetwork: true
      serviceAccountName: calico-upgrade-job
      restartPolicy: OnFailure
      containers:
        - name: migrate-completion
          image: calico/upgrade:v1.0.5
          command: ['/bin/sh', '-c', '/completion-job.sh']
          env:
            # Must match the calico/node image deployed by the DaemonSet in
            # this manifest (v3.2.1). The completion job waits for every node
            # to run this image before marking the etcdv2 -> etcdv3 migration
            # complete, so a stale tag here would stall the upgrade forever.
            - name: EXPECTED_NODE_IMAGE
              value: quay.io/calico/node:v3.2.1
            # The location of the Calico etcd cluster.
            - name: CALICO_ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            - name: CALICO_APIV1_DATASTORE_TYPE
              value: "etcdv2"
            - name: CALICO_APIV1_ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            {{- if eq $etcd_scheme "https" }}
            - name: CALICO_ETCD_CERT_FILE
              value: /certs/calico-client.pem
            - name: CALICO_ETCD_KEY_FILE
              value: /certs/calico-client-key.pem
            - name: CALICO_ETCD_CA_CERT_FILE
              value: /certs/ca.pem
            - name: CALICO_APIV1_ETCD_CERT_FILE
              value: /certs/calico-client.pem
            - name: CALICO_APIV1_ETCD_KEY_FILE
              value: /certs/calico-client-key.pem
            - name: CALICO_APIV1_ETCD_CA_CERT_FILE
              value: /certs/ca.pem
            {{- end }}
          volumeMounts:
            # Necessary for gossip based DNS
            - mountPath: /etc/hosts
              name: etc-hosts
              readOnly: true
            {{- if eq $etcd_scheme "https" }}
            - mountPath: /certs
              name: calico
              readOnly: true
            {{- end }}
      volumes:
        - name: etc-hosts
          hostPath:
            path: /etc/hosts
        {{- if eq $etcd_scheme "https" }}
        - name: calico
          hostPath:
            path: /srv/kubernetes/calico
        {{- end }}
{{ if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) -}}
# This manifest installs the k8s-ec2-srcdst container, which disables
# src/dst ip checks to allow BGP to function for calico for hosts within subnets
# This only applies for AWS environments.
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: k8s-ec2-srcdst
labels:
role.kubernetes.io/networking: "1"
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- update
- patch
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: k8s-ec2-srcdst
namespace: kube-system
labels:
role.kubernetes.io/networking: "1"
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: k8s-ec2-srcdst
labels:
role.kubernetes.io/networking: "1"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: k8s-ec2-srcdst
subjects:
- kind: ServiceAccount
name: k8s-ec2-srcdst
namespace: kube-system
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: k8s-ec2-srcdst
namespace: kube-system
labels:
k8s-app: k8s-ec2-srcdst
role.kubernetes.io/networking: "1"
spec:
replicas: 1
selector:
matchLabels:
k8s-app: k8s-ec2-srcdst
template:
metadata:
labels:
k8s-app: k8s-ec2-srcdst
role.kubernetes.io/networking: "1"
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
hostNetwork: true
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: CriticalAddonsOnly
operator: Exists
serviceAccountName: k8s-ec2-srcdst
containers:
- image: ottoyiu/k8s-ec2-srcdst:v0.2.1
name: k8s-ec2-srcdst
resources:
requests:
cpu: 10m
memory: 64Mi
env:
- name: AWS_REGION
value: {{ Region }}
volumeMounts:
- name: ssl-certs
mountPath: "/etc/ssl/certs/ca-certificates.crt"
readOnly: true
imagePullPolicy: "Always"
volumes:
- name: ssl-certs
hostPath:
path: "/etc/ssl/certs/ca-certificates.crt"
nodeSelector:
node-role.kubernetes.io/master: ""
{{- end -}}

View File

@ -155,7 +155,7 @@ spec:
# container programs network policy and routes on each
# host.
- name: calico-node
image: quay.io/calico/node:v2.6.7
image: quay.io/calico/node:v2.6.9
resources:
requests:
cpu: 10m
@ -244,7 +244,7 @@ spec:
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: quay.io/calico/cni:v1.11.2
image: quay.io/calico/cni:v1.11.5
resources:
requests:
cpu: 10m
@ -314,6 +314,8 @@ metadata:
spec:
# The controllers can only have a single active instance.
replicas: 1
strategy:
type: Recreate
template:
metadata:
name: calico-kube-controllers
@ -335,7 +337,7 @@ spec:
operator: Exists
containers:
- name: calico-kube-controllers
image: quay.io/calico/kube-controllers:v1.0.3
image: quay.io/calico/kube-controllers:v1.0.4
resources:
requests:
cpu: 10m

View File

@ -643,53 +643,71 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri
key := "networking.projectcalico.org"
versions := map[string]string{
"pre-k8s-1.6": "2.4.2-kops.1",
"k8s-1.6": "2.6.7-kops.2",
"k8s-1.7": "2.6.7-kops.3",
"k8s-1.6": "2.6.9-kops.1",
"k8s-1.7": "2.6.9-kops.1",
"k8s-1.7-v3": "3.2.1-kops.1",
}
{
id := "pre-k8s-1.6"
location := key + "/" + id + ".yaml"
if b.cluster.Spec.Networking.Calico.MajorVersion == "v3" {
{
id := "k8s-1.7-v3"
location := key + "/" + id + ".yaml"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: "<1.6.0",
Id: id,
})
manifests[key+"-"+id] = "addons/" + location
}
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.7.0",
Id: id,
})
manifests[key+"-"+id] = "addons/" + location
}
} else {
{
id := "pre-k8s-1.6"
location := key + "/" + id + ".yaml"
{
id := "k8s-1.6"
location := key + "/" + id + ".yaml"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: "<1.6.0",
Id: id,
})
manifests[key+"-"+id] = "addons/" + location
}
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.6.0 <1.7.0",
Id: id,
})
manifests[key+"-"+id] = "addons/" + location
}
{
id := "k8s-1.6"
location := key + "/" + id + ".yaml"
{
id := "k8s-1.7"
location := key + "/" + id + ".yaml"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.6.0 <1.7.0",
Id: id,
})
manifests[key+"-"+id] = "addons/" + location
}
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.7.0",
Id: id,
})
manifests[key+"-"+id] = "addons/" + location
{
id := "k8s-1.7"
location := key + "/" + id + ".yaml"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.7.0",
Id: id,
})
manifests[key+"-"+id] = "addons/" + location
}
}
}