mirror of https://github.com/kubernetes/kops.git
commit bac89b8de5

@@ -78,6 +78,7 @@ go_library(
"//pkg/instancegroups:go_default_library",
|
||||
"//pkg/kopscodecs:go_default_library",
|
||||
"//pkg/kubeconfig:go_default_library",
|
||||
"//pkg/model/components:go_default_library",
|
||||
"//pkg/pki:go_default_library",
|
||||
"//pkg/pretty:go_default_library",
|
||||
"//pkg/resources:go_default_library",
|
||||
|
|
|
|||
|
|
@@ -33,6 +33,7 @@ import (
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/validation/field"
	"k8s.io/kops"
	"k8s.io/kops/cmd/kops/util"
	api "k8s.io/kops/pkg/apis/kops"
@@ -43,6 +44,7 @@ import (
	"k8s.io/kops/pkg/commands"
	"k8s.io/kops/pkg/dns"
	"k8s.io/kops/pkg/featureflag"
	"k8s.io/kops/pkg/model/components"
	"k8s.io/kops/upup/pkg/fi"
	"k8s.io/kops/upup/pkg/fi/cloudup"
	"k8s.io/kops/upup/pkg/fi/cloudup/aliup"
@@ -919,7 +921,17 @@ func RunCreateCluster(f *util.Factory, out io.Writer, c *CreateClusterOptions) e
			Backend: "udp",
		}
	case "calico":
		cluster.Spec.Networking.Calico = &api.CalicoNetworkingSpec{}
		cluster.Spec.Networking.Calico = &api.CalicoNetworkingSpec{
			MajorVersion: "v3",
		}
		// Validate to check that the etcd clusters have an acceptable version for Calico v3
		if errList := validation.ValidateEtcdVersionForCalicoV3(cluster.Spec.EtcdClusters[0], cluster.Spec.Networking.Calico.MajorVersion, field.NewPath("Calico")); len(errList) != 0 {
			// DefaultEtcd3Version_1_11 is not a special version, simply one from the 3.x series
			for _, etcd := range cluster.Spec.EtcdClusters {
				etcd.Version = components.DefaultEtcd3Version_1_11
			}
		}
	case "canal":
		cluster.Spec.Networking.Canal = &api.CanalNetworkingSpec{}
	case "kube-router":
@@ -0,0 +1,96 @@
# Calico Version 3
In early 2018, version 3 of Calico was released; it included a reworked data
model and, with that, a switch from the etcd v2 API to the etcd v3 API. This
document covers the requirements, upgrade process, and configuration needed to
install Calico Version 3.

## Requirements
- The main requirement for Calico Version 3 is the etcd v3 API, available
  with etcd server version 3.
- Another requirement is a Kubernetes version of at least v1.7.0.

### etcd
Because the etcd v3 API is a requirement of Calico Version 3
(when using etcd as the datastore), not all kops installations will be
upgradable to Calico V3. Installations using etcd v2 (or earlier) will need
to remain on Calico V2 or first update to etcd v3.
## Configuration of a new cluster
To ensure a new cluster will have Calico Version 3 installed, the following
two configuration options should be set:
- `spec.etcdClusters.etcdMembers[0].Version` (Main cluster) should be
  set to an etcd version of 3.x or later, or the default version must
  already be 3.x or later.
- The networking config must have the Calico `majorVersion` set to `v3`, like
  the following:
```
spec:
  networking:
    calico:
      majorVersion: v3
```

Both of the above settings can be applied with `kops edit cluster ...`
before bringing the cluster up for the first time.

With these two settings, your kops-deployed cluster will be running
Calico Version 3.
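
For reference, here is a minimal sketch of the etcd portion of the cluster
spec; the member and instance group names below are illustrative, not
prescriptive:

```
etcdClusters:
- name: main
  version: 3.2.18
  etcdMembers:
  - name: a
    instanceGroup: master-us-east-1a
- name: events
  version: 3.2.18
  etcdMembers:
  - name: a
    instanceGroup: master-us-east-1a
```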
### Create cluster networking flag

When enabling Calico with the `--networking calico` flag, etcd will be set to
a v3 version. Feel free to change to a different v3 version of etcd.
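
For example, a hypothetical invocation; the cluster name and zone below are
placeholders:

```
kops create cluster \
  --name example.k8s.local \
  --zones us-east-1a \
  --networking calico
```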
## Upgrading an existing cluster
Assuming your cluster meets the requirements, it is possible to upgrade
your Calico kops cluster.

A few notes about the upgrade:
- During the first portion of the migration, while the calico-kube-controllers
  pod is running its init container, no new policies will be applied, though
  already applied policy will remain active.
- During the migration no new pods will be scheduled, as adding new workloads
  to Calico is blocked. Once the calico-complete-upgrade job has completed,
  pods will once again be schedulable.
- The upgrade process that has been automated in kops can be found in
  [the Upgrading Calico docs](https://docs.projectcalico.org/v3.1/getting-started/kubernetes/upgrade/upgrade).

Perform the upgrade with the following steps (a sketch of the monitoring
commands follows the list):
1. First, ensure that you are running Calico v2.6.5+. With the
   latest kops (greater than 1.9), you can make sure your cluster is updated
   by running `kops update` on the cluster.
1. Verify that your Calico data will migrate successfully by installing and
   configuring the
   [calico-upgrade command](https://docs.projectcalico.org/v3.1/getting-started/kubernetes/upgrade/setup),
   then run `calico-upgrade dry-run` and verify that it reports that the
   migration can be completed successfully.
1. Set the `majorVersion` field as below by editing
   your cluster configuration with `kops edit cluster`.
   ```
   spec:
     networking:
       calico:
         majorVersion: v3
   ```
1. Update your cluster with `kops update` as you normally would.
1. Monitor the progress of the migration by running
   `kubectl get pods -n kube-system` and checking the status of the following pods:
   - the calico-node pods should restart one at a time, and all should become Running
   - the calico-kube-controllers pod will restart, and it will start running
     once the first calico-node pod is running
   - the calico-complete-upgrade pod will be Completed after all the calico-node
     pods start running
   If any of the above fail by entering a crash loop, investigate
   by checking the logs with `kubectl -n kube-system logs <pod name>`.
1. Once calico-node and calico-kube-controllers are running and the
   calico-complete-upgrade pod has completed, the migration has finished
   successfully.
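
As referenced above, a minimal sketch of the monitoring commands from these
steps; the pod name is a placeholder:

```
# Verify the existing data can be migrated before starting (step 2).
calico-upgrade dry-run

# Watch the migration roll out (step 5).
kubectl get pods -n kube-system

# Inspect any pod that enters a crash loop; <pod name> is a placeholder.
kubectl -n kube-system logs <pod name>
```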
### Recovering from a partial migration

The InitContainer of the first calico-node pod that starts will perform the
datastore migration necessary for upgrading from Calico v2 to Calico v3. If
this InitContainer is killed or restarted while the new datastore is being
populated, it will be necessary to manually remove the Calico data from the
etcd v3 API before the migration can succeed.
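
As a sketch of that cleanup, assuming Calico's default `/calico` key prefix
and an etcdctl binary configured for your cluster's etcd; the endpoint below
is a placeholder:

```
ETCDCTL_API=3 etcdctl \
  --endpoints=https://etcd-a.internal.example.com:4001 \
  del --prefix /calico
```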
@@ -34,7 +34,7 @@ has built in support for CNI networking components.

Several different CNI providers are currently built into kops:

* [Calico](http://docs.projectcalico.org/v2.0/getting-started/kubernetes/installation/hosted/)
* [Calico](https://docs.projectcalico.org/v3.1/getting-started/kubernetes/installation/calico#installing-with-the-etcd-datastore)
* [Canal (Flannel + Calico)](https://github.com/projectcalico/canal)
* [flannel](https://github.com/coreos/flannel) - use `--networking flannel-vxlan` (recommended) or `--networking flannel-udp` (legacy). `--networking flannel` now selects `flannel-vxlan`.
* [kopeio-vxlan](https://github.com/kopeio/networking)
@@ -170,7 +170,8 @@ To enable this mode in a cluster, with Calico as the CNI and Network Policy prov

```
networking:
  calico: {}
  calico:
    majorVersion: v3
```

You will need to change that block, and add an additional field, to look like this:

@@ -178,6 +179,7 @@ You will need to change that block, and add an additional field, to look like th

```
networking:
  calico:
    majorVersion: v3
    crossSubnet: true
```
@@ -194,6 +196,8 @@ Only the masters have the IAM policy (`ec2:*`) to allow k8s-ec2-srcdst to execut

For Calico specific documentation please visit the [Calico Docs](http://docs.projectcalico.org/latest/getting-started/kubernetes/).

For details on upgrading a Calico v2 deployment see [Calico Version 3](calico-v3.md).

#### Getting help with Calico

For help with Calico or to report any issues:
@@ -87,6 +87,8 @@ type CalicoNetworkingSpec struct {
	PrometheusGoMetricsEnabled bool `json:"prometheusGoMetricsEnabled,omitempty"`
	// PrometheusProcessMetricsEnabled enables Prometheus process metrics collection
	PrometheusProcessMetricsEnabled bool `json:"prometheusProcessMetricsEnabled,omitempty"`
	// MajorVersion is the version of Calico to use
	MajorVersion string `json:"majorVersion,omitempty"`
}

// CanalNetworkingSpec declares that we want Canal networking
@@ -87,6 +87,8 @@ type CalicoNetworkingSpec struct {
	PrometheusGoMetricsEnabled bool `json:"prometheusGoMetricsEnabled,omitempty"`
	// PrometheusProcessMetricsEnabled enables Prometheus process metrics collection
	PrometheusProcessMetricsEnabled bool `json:"prometheusProcessMetricsEnabled,omitempty"`
	// MajorVersion is the version of Calico to use
	MajorVersion string `json:"majorVersion,omitempty"`
}

// CanalNetworkingSpec declares that we want Canal networking
@@ -453,6 +453,7 @@ func autoConvert_v1alpha1_CalicoNetworkingSpec_To_kops_CalicoNetworkingSpec(in *
	out.PrometheusMetricsPort = in.PrometheusMetricsPort
	out.PrometheusGoMetricsEnabled = in.PrometheusGoMetricsEnabled
	out.PrometheusProcessMetricsEnabled = in.PrometheusProcessMetricsEnabled
	out.MajorVersion = in.MajorVersion
	return nil
}
@@ -469,6 +470,7 @@ func autoConvert_kops_CalicoNetworkingSpec_To_v1alpha1_CalicoNetworkingSpec(in *
	out.PrometheusMetricsPort = in.PrometheusMetricsPort
	out.PrometheusGoMetricsEnabled = in.PrometheusGoMetricsEnabled
	out.PrometheusProcessMetricsEnabled = in.PrometheusProcessMetricsEnabled
	out.MajorVersion = in.MajorVersion
	return nil
}
@@ -87,6 +87,8 @@ type CalicoNetworkingSpec struct {
	PrometheusGoMetricsEnabled bool `json:"prometheusGoMetricsEnabled,omitempty"`
	// PrometheusProcessMetricsEnabled enables Prometheus process metrics collection
	PrometheusProcessMetricsEnabled bool `json:"prometheusProcessMetricsEnabled,omitempty"`
	// MajorVersion is the version of Calico to use
	MajorVersion string `json:"majorVersion,omitempty"`
}

// CanalNetworkingSpec declares that we want Canal networking
@@ -489,6 +489,7 @@ func autoConvert_v1alpha2_CalicoNetworkingSpec_To_kops_CalicoNetworkingSpec(in *
	out.PrometheusMetricsPort = in.PrometheusMetricsPort
	out.PrometheusGoMetricsEnabled = in.PrometheusGoMetricsEnabled
	out.PrometheusProcessMetricsEnabled = in.PrometheusProcessMetricsEnabled
	out.MajorVersion = in.MajorVersion
	return nil
}
@@ -505,6 +506,7 @@ func autoConvert_kops_CalicoNetworkingSpec_To_v1alpha2_CalicoNetworkingSpec(in *
	out.PrometheusMetricsPort = in.PrometheusMetricsPort
	out.PrometheusGoMetricsEnabled = in.PrometheusGoMetricsEnabled
	out.PrometheusProcessMetricsEnabled = in.PrometheusProcessMetricsEnabled
	out.MajorVersion = in.MajorVersion
	return nil
}
@@ -21,11 +21,14 @@ import (
	"net"
	"strings"

	"github.com/blang/semver"

	"k8s.io/apimachinery/pkg/api/validation"
	utilnet "k8s.io/apimachinery/pkg/util/net"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/validation/field"
	"k8s.io/kops/pkg/apis/kops"
	"k8s.io/kops/pkg/model/components"
	"k8s.io/kops/pkg/model/iam"
)
@@ -94,6 +97,9 @@ func validateClusterSpec(spec *kops.ClusterSpec, fieldPath *field.Path) field.Er

	if spec.Networking != nil {
		allErrs = append(allErrs, validateNetworking(spec.Networking, fieldPath.Child("networking"))...)
		if spec.Networking.Calico != nil {
			allErrs = append(allErrs, validateNetworkingCalico(spec.Networking.Calico, spec.EtcdClusters[0], fieldPath.Child("networking").Child("Calico"))...)
		}
	}

	// IAM additionalPolicies
@@ -341,3 +347,44 @@ func validateEtcdClusterSpec(spec *kops.EtcdClusterSpec, fieldPath *field.Path)

	return errs
}

func ValidateEtcdVersionForCalicoV3(e *kops.EtcdClusterSpec, majorVersion string, fldPath *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}

	version := e.Version
	if e.Version == "" {
		version = components.DefaultEtcd2Version
	}
	sem, err := semver.Parse(strings.TrimPrefix(version, "v"))
	if err != nil {
		allErrs = append(allErrs, field.InternalError(fldPath.Child("MajorVersion"), fmt.Errorf("Failed to parse Etcd version to check compatibility: %s", err)))
		// The parsed version is unusable; return early rather than also reporting a misleading mismatch.
		return allErrs
	}

	if sem.Major != 3 {
		if e.Version == "" {
			allErrs = append(allErrs,
				field.Invalid(fldPath.Child("MajorVersion"), majorVersion,
					fmt.Sprintf("Unable to use v3 when ETCD version for %s cluster is default(%s)",
						e.Name, components.DefaultEtcd2Version)))
		} else {
			allErrs = append(allErrs,
				field.Invalid(fldPath.Child("MajorVersion"), majorVersion,
					fmt.Sprintf("Unable to use v3 when ETCD version for %s cluster is %s", e.Name, e.Version)))
		}
	}
	return allErrs
}

func validateNetworkingCalico(v *kops.CalicoNetworkingSpec, e *kops.EtcdClusterSpec, fldPath *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}
	switch v.MajorVersion {
	case "":
		// OK
	case "v3":
		allErrs = append(allErrs, ValidateEtcdVersionForCalicoV3(e, v.MajorVersion, fldPath)...)
	default:
		allErrs = append(allErrs, field.NotSupported(fldPath.Child("MajorVersion"), v.MajorVersion, []string{"v3"}))
	}

	return allErrs
}
@@ -291,3 +291,47 @@ func Test_Validate_AdditionalPolicies(t *testing.T) {
		testErrors(t, g.Input, errs, g.ExpectedErrors)
	}
}

type caliInput struct {
	Calico *kops.CalicoNetworkingSpec
	Etcd   *kops.EtcdClusterSpec
}

func Test_Validate_Calico(t *testing.T) {
	grid := []struct {
		Input          caliInput
		ExpectedErrors []string
	}{
		{
			Input: caliInput{
				Calico: &kops.CalicoNetworkingSpec{},
				Etcd:   &kops.EtcdClusterSpec{},
			},
		},
		{
			Input: caliInput{
				Calico: &kops.CalicoNetworkingSpec{
					MajorVersion: "v3",
				},
				Etcd: &kops.EtcdClusterSpec{
					Version: "3.2.18",
				},
			},
		},
		{
			Input: caliInput{
				Calico: &kops.CalicoNetworkingSpec{
					MajorVersion: "v3",
				},
				Etcd: &kops.EtcdClusterSpec{
					Version: "2.2.18",
				},
			},
			ExpectedErrors: []string{"Invalid value::Calico.MajorVersion"},
		},
	}
	for _, g := range grid {
		errs := validateNetworkingCalico(g.Input.Calico, g.Input.Etcd, field.NewPath("Calico"))
		testErrors(t, g.Input, errs, g.ExpectedErrors)
	}
}
@@ -141,7 +141,7 @@ spec:
        # container programs network policy and routes on each
        # host.
        - name: calico-node
          image: quay.io/calico/node:v2.6.7
          image: quay.io/calico/node:v2.6.9
          resources:
            requests:
              cpu: 10m
@@ -226,7 +226,7 @@ spec:
        # This container installs the Calico CNI binaries
        # and CNI network config file on each node.
        - name: install-cni
          image: quay.io/calico/cni:v1.11.2
          image: quay.io/calico/cni:v1.11.5
          resources:
            requests:
              cpu: 10m
@@ -379,7 +379,7 @@ spec:
        operator: Exists
      containers:
        - name: calico-kube-controllers
          image: quay.io/calico/kube-controllers:v1.0.3
          image: quay.io/calico/kube-controllers:v1.0.4
          resources:
            requests:
              cpu: 10m
@@ -0,0 +1,748 @@
{{- $etcd_scheme := EtcdScheme }}
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-config
  namespace: kube-system
data:
  # The calico-etcd PetSet service IP:port
  etcd_endpoints: "{{ $cluster := index .EtcdClusters 0 -}}
                   {{- range $j, $member := $cluster.Members -}}
                       {{- if $j }},{{ end -}}
                       {{ $etcd_scheme }}://etcd-{{ $member.Name }}.internal.{{ ClusterName }}:4001
                   {{- end }}"

  # Configure the Calico backend to use.
  calico_backend: "bird"

  # The CNI network configuration to install on each node.
  cni_network_config: |-
    {
      "name": "k8s-pod-network",
      "cniVersion": "0.3.0",
      "plugins": [
        {
          "type": "calico",
          "etcd_endpoints": "__ETCD_ENDPOINTS__",
          {{- if eq $etcd_scheme "https" }}
          "etcd_ca_cert_file": "/srv/kubernetes/calico/ca.pem",
          "etcd_cert_file": "/srv/kubernetes/calico/calico-client.pem",
          "etcd_key_file": "/srv/kubernetes/calico/calico-client-key.pem",
          "etcd_scheme": "https",
          {{- end }}
          "log_level": "info",
          "ipam": {
            "type": "calico-ipam"
          },
          "policy": {
            "type": "k8s"
          },
          "kubernetes": {
            "kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__"
          }
        },
        {
          "type": "portmap",
          "snat": true,
          "capabilities": {"portMappings": true}
        }
      ]
    }
---

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: calico-node
  labels:
    role.kubernetes.io/networking: "1"
rules:
  - apiGroups: [""]
    resources:
      - pods
      - nodes
    verbs:
      - get
---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-node
  namespace: kube-system
  labels:
    role.kubernetes.io/networking: "1"
---

kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: calico-node
  labels:
    role.kubernetes.io/networking: "1"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-node
subjects:
- kind: ServiceAccount
  name: calico-node
  namespace: kube-system
---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-kube-controllers
  namespace: kube-system
  labels:
    role.kubernetes.io/networking: "1"
---

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: calico-kube-controllers
  labels:
    role.kubernetes.io/networking: "1"
rules:
  - apiGroups:
    - ""
    - extensions
    resources:
      - pods
      - namespaces
      - networkpolicies
      - nodes
    verbs:
      - watch
      - list
  - apiGroups:
    - networking.k8s.io
    resources:
      - networkpolicies
    verbs:
      - watch
      - list
---

kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: calico-kube-controllers
  labels:
    role.kubernetes.io/networking: "1"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-kube-controllers
subjects:
- kind: ServiceAccount
  name: calico-kube-controllers
  namespace: kube-system

---

# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
  name: calico-node
  namespace: kube-system
  labels:
    k8s-app: calico-node
    role.kubernetes.io/networking: "1"
spec:
  selector:
    matchLabels:
      k8s-app: calico-node
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  template:
    metadata:
      labels:
        k8s-app: calico-node
        role.kubernetes.io/networking: "1"
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      hostNetwork: true
      tolerations:
        # Make sure calico/node gets scheduled on all nodes.
        - effect: NoSchedule
          operator: Exists
        # Mark the pod as a critical add-on for rescheduling.
        - key: CriticalAddonsOnly
          operator: Exists
        - effect: NoExecute
          operator: Exists
      serviceAccountName: calico-node
      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
      # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
      terminationGracePeriodSeconds: 0
      containers:
        # Runs calico/node container on each Kubernetes node. This
        # container programs network policy and routes on each
        # host.
        - name: calico-node
          image: quay.io/calico/node:v3.2.1
          env:
            # The location of the Calico etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            {{- if eq $etcd_scheme "https" }}
            - name: ETCD_CERT_FILE
              value: /certs/calico-client.pem
            - name: ETCD_KEY_FILE
              value: /certs/calico-client-key.pem
            - name: ETCD_CA_CERT_FILE
              value: /certs/ca.pem
            {{- end }}
            # Choose the backend to use.
            - name: CALICO_NETWORKING_BACKEND
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: calico_backend
            # Cluster type to identify the deployment type
            - name: CLUSTER_TYPE
              value: "kops,bgp"
            # Disable file logging so `kubectl logs` works.
            - name: CALICO_DISABLE_FILE_LOGGING
              value: "true"
            # Set noderef for node controller.
            - name: CALICO_K8S_NODE_REF
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # Set Felix endpoint to host default action to ACCEPT.
            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
              value: "ACCEPT"
            # The default IPv4 pool to create on startup if none exists. Pod IPs will be
            # chosen from this range. Changing this value after installation will have
            # no effect. This should fall within `--cluster-cidr`.
            # Configure the IP Pool from which Pod IPs will be chosen.
            - name: CALICO_IPV4POOL_CIDR
              value: "{{ .KubeControllerManager.ClusterCIDR }}"
            - name: CALICO_IPV4POOL_IPIP
              value: "{{- if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) -}}cross-subnet{{- else -}}always{{- end -}}"
            # Disable IPv6 on Kubernetes.
            - name: FELIX_IPV6SUPPORT
              value: "false"
            # Set Felix logging to the desired level
            - name: FELIX_LOGSEVERITYSCREEN
              value: "{{- or .Networking.Calico.LogSeverityScreen "info" }}"
            # Set to enable the experimental Prometheus metrics server
            - name: FELIX_PROMETHEUSMETRICSENABLED
              value: "{{- or .Networking.Calico.PrometheusMetricsEnabled "false" }}"
            # TCP port that the Prometheus metrics server should bind to
            - name: FELIX_PROMETHEUSMETRICSPORT
              value: "{{- or .Networking.Calico.PrometheusMetricsPort "9091" }}"
            # Enable Prometheus Go runtime metrics collection
            - name: FELIX_PROMETHEUSGOMETRICSENABLED
              value: "{{- or .Networking.Calico.PrometheusGoMetricsEnabled "true" }}"
            # Enable Prometheus process metrics collection
            - name: FELIX_PROMETHEUSPROCESSMETRICSENABLED
              value: "{{- or .Networking.Calico.PrometheusProcessMetricsEnabled "true" }}"
            # Auto-detect the BGP IP address.
            - name: IP
              value: "autodetect"
            - name: FELIX_HEALTHENABLED
              value: "true"
          securityContext:
            privileged: true
          resources:
            requests:
              cpu: 10m
          livenessProbe:
            httpGet:
              path: /liveness
              port: 9099
              host: localhost
            periodSeconds: 10
            initialDelaySeconds: 10
            failureThreshold: 6
          readinessProbe:
            exec:
              command:
              - /bin/calico-node
              - -bird-ready
              - -felix-ready
            periodSeconds: 10
          volumeMounts:
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - mountPath: /var/run/calico
              name: var-run-calico
              readOnly: false
            - mountPath: /var/lib/calico
              name: var-lib-calico
              readOnly: false
            # Necessary for gossip based DNS
            - mountPath: /etc/hosts
              name: etc-hosts
              readOnly: true
            {{- if eq $etcd_scheme "https" }}
            - mountPath: /certs
              name: calico
              readOnly: true
            {{- end }}
        # This container installs the Calico CNI binaries
        # and CNI network config file on each node.
        - name: install-cni
          image: quay.io/calico/cni:v3.2.1
          command: ["/install-cni.sh"]
          env:
            # Name of the CNI config file to create.
            - name: CNI_CONF_NAME
              value: "10-calico.conflist"
            # The location of the Calico etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # The CNI network config to install on each node.
            - name: CNI_NETWORK_CONFIG
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: cni_network_config
          volumeMounts:
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
            - mountPath: /host/etc/cni/net.d
              name: cni-net-dir
            # Necessary for gossip based DNS
            - mountPath: /etc/hosts
              name: etc-hosts
              readOnly: true
          resources:
            requests:
              cpu: 10m
      initContainers:
        - name: migrate
          image: calico/upgrade:v1.0.5
          command: ['/bin/sh', '-c', '/node-init-container.sh']
          env:
            - name: CALICO_ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            - name: CALICO_APIV1_DATASTORE_TYPE
              value: "etcdv2"
            - name: CALICO_APIV1_ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            {{- if eq $etcd_scheme "https" }}
            - name: CALICO_ETCD_CERT_FILE
              value: /certs/calico-client.pem
            - name: CALICO_ETCD_KEY_FILE
              value: /certs/calico-client-key.pem
            - name: CALICO_ETCD_CA_CERT_FILE
              value: /certs/ca.pem
            - name: CALICO_APIV1_ETCD_CERT_FILE
              value: /certs/calico-client.pem
            - name: CALICO_APIV1_ETCD_KEY_FILE
              value: /certs/calico-client-key.pem
            - name: CALICO_APIV1_ETCD_CA_CERT_FILE
              value: /certs/ca.pem
            {{- end }}
          volumeMounts:
            # Necessary for gossip based DNS
            - mountPath: /etc/hosts
              name: etc-hosts
              readOnly: true
            {{- if eq $etcd_scheme "https" }}
            - mountPath: /certs
              name: calico
              readOnly: true
            {{- end }}
      volumes:
        # Used by calico/node.
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: var-run-calico
          hostPath:
            path: /var/run/calico
        - name: var-lib-calico
          hostPath:
            path: /var/lib/calico
        # Used to install CNI.
        - name: cni-bin-dir
          hostPath:
            path: /opt/cni/bin
        - name: cni-net-dir
          hostPath:
            path: /etc/cni/net.d
        # Necessary for gossip based DNS
        - name: etc-hosts
          hostPath:
            path: /etc/hosts
        {{- if eq $etcd_scheme "https" }}
        - name: calico
          hostPath:
            path: /srv/kubernetes/calico
        {{- end }}

---

# This manifest deploys the Calico Kubernetes controllers.
# See https://github.com/projectcalico/kube-controllers
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: calico-kube-controllers
  namespace: kube-system
  labels:
    k8s-app: calico-kube-controllers
    role.kubernetes.io/networking: "1"
  annotations:
    scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
  # The controllers can only have a single active instance.
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      name: calico-kube-controllers
      namespace: kube-system
      labels:
        k8s-app: calico-kube-controllers
        role.kubernetes.io/networking: "1"
    spec:
      # The controllers must run in the host network namespace so that
      # it isn't governed by policy that would prevent it from working.
      hostNetwork: true
      tolerations:
        # Mark the pod as a critical add-on for rescheduling.
        - key: CriticalAddonsOnly
          operator: Exists
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      serviceAccountName: calico-kube-controllers
      containers:
        - name: calico-kube-controllers
          image: quay.io/calico/kube-controllers:v3.2.1
          resources:
            requests:
              cpu: 10m
          env:
            # The location of the Calico etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # Choose which controllers to run.
            - name: ENABLED_CONTROLLERS
              value: policy,profile,workloadendpoint,node
            {{- if eq $etcd_scheme "https" }}
            - name: ETCD_CERT_FILE
              value: /certs/calico-client.pem
            - name: ETCD_KEY_FILE
              value: /certs/calico-client-key.pem
            - name: ETCD_CA_CERT_FILE
              value: /certs/ca.pem
          volumeMounts:
            - mountPath: /certs
              name: calico
              readOnly: true
          {{- end }}
          readinessProbe:
            exec:
              command:
              - /usr/bin/check-status
              - -r
      initContainers:
        - name: migrate
          image: calico/upgrade:v1.0.5
          command: ['/bin/sh', '-c', '/controller-init.sh']
          env:
            - name: CALICO_ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            - name: CALICO_APIV1_DATASTORE_TYPE
              value: "etcdv2"
            - name: CALICO_APIV1_ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            {{- if eq $etcd_scheme "https" }}
            - name: CALICO_ETCD_CERT_FILE
              value: /certs/calico-client.pem
            - name: CALICO_ETCD_KEY_FILE
              value: /certs/calico-client-key.pem
            - name: CALICO_ETCD_CA_CERT_FILE
              value: /certs/ca.pem
            - name: CALICO_APIV1_ETCD_CERT_FILE
              value: /certs/calico-client.pem
            - name: CALICO_APIV1_ETCD_KEY_FILE
              value: /certs/calico-client-key.pem
            - name: CALICO_APIV1_ETCD_CA_CERT_FILE
              value: /certs/ca.pem
            {{- end }}
          volumeMounts:
            # Necessary for gossip based DNS
            - mountPath: /etc/hosts
              name: etc-hosts
              readOnly: true
            {{- if eq $etcd_scheme "https" }}
            - mountPath: /certs
              name: calico
              readOnly: true
            {{- end }}
      volumes:
        # Necessary for gossip based DNS
        - name: etc-hosts
          hostPath:
            path: /etc/hosts
        {{- if eq $etcd_scheme "https" }}
        - name: calico
          hostPath:
            path: /srv/kubernetes/calico
        {{- end }}

# This manifest runs the Migration complete container that monitors for the
# completion of the calico-node Daemonset rollout and when it finishes
# successfully rolling out it will mark the migration complete and allow pods
# to be created again.
---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-upgrade-job
  namespace: kube-system
  labels:
    role.kubernetes.io/networking: "1"
---

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: calico-upgrade-job
  labels:
    role.kubernetes.io/networking: "1"
rules:
  - apiGroups:
    - extensions
    resources:
      - daemonsets
      - daemonsets/status
    verbs:
      - get
      - list
      - watch
---

apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: calico-upgrade-job
  labels:
    role.kubernetes.io/networking: "1"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-upgrade-job
subjects:
- kind: ServiceAccount
  name: calico-upgrade-job
  namespace: kube-system
---
# If anything in this job is changed then the name of the job
# should be changed because Jobs cannot be updated, so changing
# the name would run a different Job if the previous version had been
# created before and it does not hurt to rerun this job.

apiVersion: batch/v1
kind: Job
metadata:
  name: calico-complete-upgrade
  namespace: kube-system
  labels:
    role.kubernetes.io/networking: "1"
spec:
  template:
    metadata:
      labels:
        role.kubernetes.io/networking: "1"
    spec:
      hostNetwork: true
      serviceAccountName: calico-upgrade-job
      restartPolicy: OnFailure
      containers:
        - name: migrate-completion
          image: calico/upgrade:v1.0.5
          command: ['/bin/sh', '-c', '/completion-job.sh']
          env:
            - name: EXPECTED_NODE_IMAGE
              value: quay.io/calico/node:v3.1.1
            # The location of the Calico etcd cluster.
            - name: CALICO_ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            - name: CALICO_APIV1_DATASTORE_TYPE
              value: "etcdv2"
            - name: CALICO_APIV1_ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            {{- if eq $etcd_scheme "https" }}
            - name: CALICO_ETCD_CERT_FILE
              value: /certs/calico-client.pem
            - name: CALICO_ETCD_KEY_FILE
              value: /certs/calico-client-key.pem
            - name: CALICO_ETCD_CA_CERT_FILE
              value: /certs/ca.pem
            - name: CALICO_APIV1_ETCD_CERT_FILE
              value: /certs/calico-client.pem
            - name: CALICO_APIV1_ETCD_KEY_FILE
              value: /certs/calico-client-key.pem
            - name: CALICO_APIV1_ETCD_CA_CERT_FILE
              value: /certs/ca.pem
            {{- end }}
          volumeMounts:
            # Necessary for gossip based DNS
            - mountPath: /etc/hosts
              name: etc-hosts
              readOnly: true
            {{- if eq $etcd_scheme "https" }}
            - mountPath: /certs
              name: calico
              readOnly: true
            {{- end }}
      volumes:
        - name: etc-hosts
          hostPath:
            path: /etc/hosts
        {{- if eq $etcd_scheme "https" }}
        - name: calico
          hostPath:
            path: /srv/kubernetes/calico
        {{- end }}

{{ if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) -}}
# This manifest installs the k8s-ec2-srcdst container, which disables
# src/dst ip checks to allow BGP to function for calico for hosts within subnets
# This only applies for AWS environments.
---

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: k8s-ec2-srcdst
  labels:
    role.kubernetes.io/networking: "1"
rules:
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
  - list
  - watch
  - update
  - patch

---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: k8s-ec2-srcdst
  namespace: kube-system
  labels:
    role.kubernetes.io/networking: "1"
---

kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: k8s-ec2-srcdst
  labels:
    role.kubernetes.io/networking: "1"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: k8s-ec2-srcdst
subjects:
- kind: ServiceAccount
  name: k8s-ec2-srcdst
  namespace: kube-system

---

apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: k8s-ec2-srcdst
  namespace: kube-system
  labels:
    k8s-app: k8s-ec2-srcdst
    role.kubernetes.io/networking: "1"
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: k8s-ec2-srcdst
  template:
    metadata:
      labels:
        k8s-app: k8s-ec2-srcdst
        role.kubernetes.io/networking: "1"
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      hostNetwork: true
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      - key: CriticalAddonsOnly
        operator: Exists
      serviceAccountName: k8s-ec2-srcdst
      containers:
        - image: ottoyiu/k8s-ec2-srcdst:v0.2.1
          name: k8s-ec2-srcdst
          resources:
            requests:
              cpu: 10m
              memory: 64Mi
          env:
            - name: AWS_REGION
              value: {{ Region }}
          volumeMounts:
            - name: ssl-certs
              mountPath: "/etc/ssl/certs/ca-certificates.crt"
              readOnly: true
          imagePullPolicy: "Always"
      volumes:
        - name: ssl-certs
          hostPath:
            path: "/etc/ssl/certs/ca-certificates.crt"
      nodeSelector:
        node-role.kubernetes.io/master: ""
{{- end -}}
@@ -155,7 +155,7 @@ spec:
        # container programs network policy and routes on each
        # host.
        - name: calico-node
          image: quay.io/calico/node:v2.6.7
          image: quay.io/calico/node:v2.6.9
          resources:
            requests:
              cpu: 10m
@@ -244,7 +244,7 @@ spec:
        # This container installs the Calico CNI binaries
        # and CNI network config file on each node.
        - name: install-cni
          image: quay.io/calico/cni:v1.11.2
          image: quay.io/calico/cni:v1.11.5
          resources:
            requests:
              cpu: 10m
@@ -314,6 +314,8 @@ metadata:
spec:
  # The controllers can only have a single active instance.
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      name: calico-kube-controllers
@@ -335,7 +337,7 @@ spec:
        operator: Exists
      containers:
        - name: calico-kube-controllers
          image: quay.io/calico/kube-controllers:v1.0.3
          image: quay.io/calico/kube-controllers:v1.0.4
          resources:
            requests:
              cpu: 10m
@@ -643,53 +643,71 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri
key := "networking.projectcalico.org"
versions := map[string]string{
"pre-k8s-1.6": "2.4.2-kops.1",
"k8s-1.6": "2.6.7-kops.2",
"k8s-1.7": "2.6.7-kops.3",
"k8s-1.6": "2.6.9-kops.1",
"k8s-1.7": "2.6.9-kops.1",
"k8s-1.7-v3": "3.2.1-kops.1",
}

{
id := "pre-k8s-1.6"
location := key + "/" + id + ".yaml"
if b.cluster.Spec.Networking.Calico.MajorVersion == "v3" {
{
id := "k8s-1.7-v3"
location := key + "/" + id + ".yaml"

addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: "<1.6.0",
Id: id,
})
manifests[key+"-"+id] = "addons/" + location
}
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.7.0",
Id: id,
})
manifests[key+"-"+id] = "addons/" + location
}
} else {
{
id := "pre-k8s-1.6"
location := key + "/" + id + ".yaml"

{
id := "k8s-1.6"
location := key + "/" + id + ".yaml"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: "<1.6.0",
Id: id,
})
manifests[key+"-"+id] = "addons/" + location
}

addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.6.0 <1.7.0",
Id: id,
})
manifests[key+"-"+id] = "addons/" + location
}
{
id := "k8s-1.6"
location := key + "/" + id + ".yaml"

{
id := "k8s-1.7"
location := key + "/" + id + ".yaml"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.6.0 <1.7.0",
Id: id,
})
manifests[key+"-"+id] = "addons/" + location
}

addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.7.0",
Id: id,
})
manifests[key+"-"+id] = "addons/" + location
{
id := "k8s-1.7"
location := key + "/" + id + ".yaml"

addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.7.0",
Id: id,
})
manifests[key+"-"+id] = "addons/" + location
}
}
}