mirror of https://github.com/kubernetes/kops.git
Remove code for no-longer-supported k8s versions
This commit is contained in:
parent 261a2e9801
commit d3469d6ec2
@@ -44,12 +44,12 @@ var MagicTimestamp = metav1.Time{Time: time.Date(2017, 1, 1, 0, 0, 0, 0, time.UT
 // TestCreateClusterMinimal runs kops create cluster minimal.example.com --zones us-test-1a
 func TestCreateClusterMinimal(t *testing.T) {
 	runCreateClusterIntegrationTest(t, "../../tests/integration/create_cluster/minimal-1.16", "v1alpha2")
 	runCreateClusterIntegrationTest(t, "../../tests/integration/create_cluster/minimal-1.17", "v1alpha2")
 	runCreateClusterIntegrationTest(t, "../../tests/integration/create_cluster/minimal-1.18", "v1alpha2")
 	runCreateClusterIntegrationTest(t, "../../tests/integration/create_cluster/minimal-1.19", "v1alpha2")
 	runCreateClusterIntegrationTest(t, "../../tests/integration/create_cluster/minimal-1.20", "v1alpha2")
 	runCreateClusterIntegrationTest(t, "../../tests/integration/create_cluster/minimal-1.21", "v1alpha2")
 	runCreateClusterIntegrationTest(t, "../../tests/integration/create_cluster/minimal-1.22", "v1alpha2")
 }

 // TestCreateClusterOverride tests the override flag

@@ -63,7 +63,6 @@ go_library(
         "//upup/pkg/fi/nodeup/nodetasks:go_default_library",
         "//util/pkg/architectures:go_default_library",
         "//util/pkg/distributions:go_default_library",
-        "//util/pkg/exec:go_default_library",
         "//util/pkg/proxy:go_default_library",
         "//util/pkg/vfs:go_default_library",
         "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library",

@@ -31,7 +31,6 @@ import (
 	"k8s.io/kops/upup/pkg/fi"
 	"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
 	"k8s.io/kops/util/pkg/architectures"
-	"k8s.io/kops/util/pkg/exec"
 	"k8s.io/kops/util/pkg/proxy"

 	v1 "k8s.io/api/core/v1"

@@ -498,21 +497,14 @@ func (b *KubeAPIServerBuilder) buildPod() (*v1.Pod, error) {

 	// Log both to docker and to the logfile
 	addHostPathMapping(pod, container, "logfile", "/var/log/kube-apiserver.log").ReadOnly = false
-	if b.IsKubernetesGTE("1.15") {
-		// From k8s 1.15, we use lighter containers that don't include shells
-		// But they have richer logging support via klog
-		container.Command = []string{"/usr/local/bin/kube-apiserver"}
-		container.Args = append(
-			sortedStrings(flags),
-			"--logtostderr=false", //https://github.com/kubernetes/klog/issues/60
-			"--alsologtostderr",
-			"--log-file=/var/log/kube-apiserver.log")
-	} else {
-		container.Command = exec.WithTee(
-			"/usr/local/bin/kube-apiserver",
-			sortedStrings(flags),
-			"/var/log/kube-apiserver.log")
-	}
+	// We use lighter containers that don't include shells
+	// But they have richer logging support via klog
+	container.Command = []string{"/usr/local/bin/kube-apiserver"}
+	container.Args = append(
+		sortedStrings(flags),
+		"--logtostderr=false", //https://github.com/kubernetes/klog/issues/60
+		"--alsologtostderr",
+		"--log-file=/var/log/kube-apiserver.log")

 	for _, path := range b.SSLHostPaths() {
 		name := strings.Replace(path, "/", "", -1)

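The same logging change repeats below for kube-controller-manager, kube-proxy, and kube-scheduler: instead of wrapping the binary in a shell tee pipeline (exec.WithTee), the pod now runs the binary directly and lets klog write the logfile. A minimal standalone Go sketch of the flag pattern; buildCommand is a hypothetical helper for illustration, not the kops implementation:

    package main

    import (
    	"fmt"
    	"sort"
    )

    // buildCommand is a hypothetical helper showing the klog-based pattern the
    // diff settles on: run the binary directly (no shell in the image) and let
    // klog write the logfile via --log-file.
    func buildCommand(binary, logfile string, flags []string) (command, args []string) {
    	sort.Strings(flags) // sorted flags keep the generated manifest deterministic
    	command = []string{binary}
    	args = append(flags,
    		"--logtostderr=false", // https://github.com/kubernetes/klog/issues/60
    		"--alsologtostderr",
    		"--log-file="+logfile)
    	return command, args
    }

    func main() {
    	cmd, args := buildCommand("/usr/local/bin/kube-apiserver", "/var/log/kube-apiserver.log", []string{"--v=2"})
    	fmt.Println(cmd, args)
    }
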
@@ -29,7 +29,6 @@ import (
 	"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
 	"k8s.io/kops/util/pkg/architectures"
 	"k8s.io/kops/util/pkg/distributions"
-	"k8s.io/kops/util/pkg/exec"
 	"k8s.io/kops/util/pkg/proxy"

 	v1 "k8s.io/api/core/v1"

@@ -191,21 +190,14 @@ func (b *KubeControllerManagerBuilder) buildPod() (*v1.Pod, error) {

 	// Log both to docker and to the logfile
 	addHostPathMapping(pod, container, "logfile", "/var/log/kube-controller-manager.log").ReadOnly = false
-	if b.IsKubernetesGTE("1.15") {
-		// From k8s 1.15, we use lighter containers that don't include shells
-		// But they have richer logging support via klog
-		container.Command = []string{"/usr/local/bin/kube-controller-manager"}
-		container.Args = append(
-			sortedStrings(flags),
-			"--logtostderr=false", //https://github.com/kubernetes/klog/issues/60
-			"--alsologtostderr",
-			"--log-file=/var/log/kube-controller-manager.log")
-	} else {
-		container.Command = exec.WithTee(
-			"/usr/local/bin/kube-controller-manager",
-			sortedStrings(flags),
-			"/var/log/kube-controller-manager.log")
-	}
+	// We use lighter containers that don't include shells
+	// But they have richer logging support via klog
+	container.Command = []string{"/usr/local/bin/kube-controller-manager"}
+	container.Args = append(
+		sortedStrings(flags),
+		"--logtostderr=false", //https://github.com/kubernetes/klog/issues/60
+		"--alsologtostderr",
+		"--log-file=/var/log/kube-controller-manager.log")

 	for _, path := range b.SSLHostPaths() {
 		name := strings.Replace(path, "/", "", -1)

@@ -20,6 +20,10 @@ import (
 	"fmt"
 	"strings"

+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/klog/v2"
 	"k8s.io/kops/pkg/dns"
 	"k8s.io/kops/pkg/flagbuilder"
 	"k8s.io/kops/pkg/k8scodecs"
@@ -28,12 +32,6 @@ import (
 	"k8s.io/kops/upup/pkg/fi"
 	"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
 	"k8s.io/kops/util/pkg/architectures"
-	"k8s.io/kops/util/pkg/exec"
-
-	v1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/resource"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/klog/v2"
 )

 // KubeProxyBuilder installs kube-proxy

@@ -182,11 +180,6 @@ func (b *KubeProxyBuilder) buildPod() (*v1.Pod, error) {
 		"--kubeconfig=/var/lib/kube-proxy/kubeconfig",
 		"--oom-score-adj=-998"}...)

-	if !b.IsKubernetesGTE("1.16") {
-		// Removed in 1.16: https://github.com/kubernetes/kubernetes/pull/78294
-		flags = append(flags, `--resource-container=""`)
-	}
-
 	image := kubeProxyImage(b.NodeupModelContext)
 	container := &v1.Container{
 		Name: "kube-proxy",

@@ -217,21 +210,14 @@ func (b *KubeProxyBuilder) buildPod() (*v1.Pod, error) {

 	// Log both to docker and to the logfile
 	addHostPathMapping(pod, container, "logfile", "/var/log/kube-proxy.log").ReadOnly = false
-	if b.IsKubernetesGTE("1.15") {
-		// From k8s 1.15, we use lighter containers that don't include shells
-		// But they have richer logging support via klog
-		container.Command = []string{"/usr/local/bin/kube-proxy"}
-		container.Args = append(
-			sortedStrings(flags),
-			"--logtostderr=false", //https://github.com/kubernetes/klog/issues/60
-			"--alsologtostderr",
-			"--log-file=/var/log/kube-proxy.log")
-	} else {
-		container.Command = exec.WithTee(
-			"/usr/local/bin/kube-proxy",
-			sortedStrings(flags),
-			"/var/log/kube-proxy.log")
-	}
+	// We use lighter containers that don't include shells
+	// But they have richer logging support via klog
+	container.Command = []string{"/usr/local/bin/kube-proxy"}
+	container.Args = append(
+		sortedStrings(flags),
+		"--logtostderr=false", //https://github.com/kubernetes/klog/issues/60
+		"--alsologtostderr",
+		"--log-file=/var/log/kube-proxy.log")

 	{
 		addHostPathMapping(pod, container, "kubeconfig", "/var/lib/kube-proxy/kubeconfig")

@@ -18,7 +18,6 @@ package model

 import (
 	"reflect"
 	"strings"
 	"testing"

 	"k8s.io/kops/pkg/apis/kops"

@@ -63,11 +62,11 @@ func TestKubeProxyBuilder_buildPod(t *testing.T) {
 		wantErr bool
 	}{
 		{
-			"Setup KubeProxy for kubernetes version 1.10",
+			"Setup KubeProxy for kubernetes version 1.20",
 			fields{
 				&NodeupModelContext{
 					Cluster:           cluster,
-					kubernetesVersion: semver.Version{Major: 1, Minor: 10},
+					kubernetesVersion: semver.Version{Major: 1, Minor: 20},
 				},
 			},
 			&v1.Pod{

@@ -146,12 +145,6 @@ func TestKubeProxyBuilder_buildPod(t *testing.T) {
 				t.Errorf("KubeProxyBuilder.buildPod() Resources = %v, want %v", got.Spec.Containers[0].Resources, tt.want.Spec.Containers[0].Resources)
 			}

-			// compare pod spec container command should contain --oom-score-adj=-998
-			gotcommand := got.Spec.Containers[0].Command[2]
-			if !strings.Contains(gotcommand, "--oom-score-adj=-998") {
-				t.Errorf("KubeProxyBuilder.buildPod() Command = %v, want %v", got.Spec.Containers[0].Command, tt.want.Spec.Containers[0].Command)
-			}
-
 		})
 	}
 }

@@ -29,7 +29,6 @@ import (
 	"k8s.io/kops/upup/pkg/fi"
 	"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
 	"k8s.io/kops/util/pkg/architectures"
-	"k8s.io/kops/util/pkg/exec"
 	"k8s.io/kops/util/pkg/proxy"

 	v1 "k8s.io/api/core/v1"

@@ -202,21 +201,14 @@ func (b *KubeSchedulerBuilder) buildPod() (*v1.Pod, error) {

 	// Log both to docker and to the logfile
 	addHostPathMapping(pod, container, "logfile", "/var/log/kube-scheduler.log").ReadOnly = false
-	if b.IsKubernetesGTE("1.15") {
-		// From k8s 1.15, we use lighter containers that don't include shells
-		// But they have richer logging support via klog
-		container.Command = []string{"/usr/local/bin/kube-scheduler"}
-		container.Args = append(
-			sortedStrings(flags),
-			"--logtostderr=false", //https://github.com/kubernetes/klog/issues/60
-			"--alsologtostderr",
-			"--log-file=/var/log/kube-scheduler.log")
-	} else {
-		container.Command = exec.WithTee(
-			"/usr/local/bin/kube-scheduler",
-			sortedStrings(flags),
-			"/var/log/kube-scheduler.log")
-	}
+	// We use lighter containers that don't include shells
+	// But they have richer logging support via klog
+	container.Command = []string{"/usr/local/bin/kube-scheduler"}
+	container.Args = append(
+		sortedStrings(flags),
+		"--logtostderr=false", //https://github.com/kubernetes/klog/issues/60
+		"--alsologtostderr",
+		"--log-file=/var/log/kube-scheduler.log")

 	if c.MaxPersistentVolumes != nil {
 		maxPDV := v1.EnvVar{

@@ -524,9 +524,7 @@ func (b *KubeletBuilder) buildKubeletConfigSpec() (*kops.KubeletConfigSpec, erro
 	// As of 1.16 we can no longer set critical labels.
 	// kops-controller will set these labels.
 	// For bootstrapping reasons, protokube sets the critical labels for kops-controller to run.
-	if b.Cluster.IsKubernetesGTE("1.16") {
-		c.NodeLabels = nil
-	}
+	c.NodeLabels = nil

 	if c.AuthorizationMode == "" && b.Cluster.IsKubernetesGTE("1.19") {
 		c.AuthorizationMode = "Webhook"

@@ -44,14 +44,10 @@ func (b *CommonBuilder) Build(c *fi.ModelBuilderContext) error {
 		"ptp",
 		"tuning",
 		"vlan",
-	}
-
 		// Additions in https://github.com/containernetworking/plugins/releases/tag/v0.8.6
-	if b.IsKubernetesGTE("1.15") {
-		assets = append(assets, "bandwidth")
-		assets = append(assets, "firewall")
-		assets = append(assets, "sbr")
-		assets = append(assets, "static")
+		"bandwidth",
+		"firewall",
+		"sbr",
+		"static",
 	}

 	if err := b.AddCNIBinAssets(c, assets); err != nil {

@@ -20,7 +20,7 @@ spec:
     - instanceGroup: master-us-test-1a
       name: master-us-test-1a
     name: events
-  kubernetesVersion: v1.16.0
+  kubernetesVersion: v1.17.0
   masterInternalName: api.internal.logflags.example.com
   masterPublicName: api.logflags.example.com
   networkCIDR: 172.20.0.0/16

@@ -777,29 +777,14 @@ func validateNetworkingCilium(cluster *kops.Cluster, v *kops.CiliumNetworkingSpe
 		allErrs = append(allErrs, field.Invalid(versionFld, v.Version, "Could not parse as semantic version"))
 	}

-	if !(version.Minor >= 6 && version.Minor <= 10) {
-		allErrs = append(allErrs, field.Invalid(versionFld, v.Version, "Only versions 1.6 through 1.10 are supported"))
-	}
-
-	if version.Minor == 6 && cluster.IsKubernetesGTE("1.16") {
-		allErrs = append(allErrs, field.Forbidden(versionFld, "Version 1.6 requires kubernetesVersion before 1.16"))
-	}
-
-	if version.Minor == 7 && cluster.IsKubernetesGTE("1.17") {
-		allErrs = append(allErrs, field.Forbidden(versionFld, "Version 1.7 requires kubernetesVersion before 1.17"))
-	}
-
-	if version.Minor == 10 && cluster.IsKubernetesLT("1.16") {
-		allErrs = append(allErrs, field.Forbidden(versionFld, "Version 1.10 requires kubernetesVersion 1.16 or newer"))
+	if !(version.Minor >= 8 && version.Minor <= 10) {
+		allErrs = append(allErrs, field.Invalid(versionFld, v.Version, "Only versions 1.8 through 1.10 are supported"))
 	}

 	if v.Hubble != nil && fi.BoolValue(v.Hubble.Enabled) {
 		if !components.IsCertManagerEnabled(cluster) {
 			allErrs = append(allErrs, field.Forbidden(fldPath.Child("hubble", "enabled"), "Hubble requires that cert manager is enabled"))
 		}
-		if version.Minor < 8 {
-			allErrs = append(allErrs, field.Forbidden(fldPath.Child("hubble", "enabled"), "Hubble requires Cilium 1.8 or newer"))
-		}
 	}
 }

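The simplified check above gates only on the Cilium minor version, dropping the per-version Kubernetes cross-checks. A hedged sketch of the same range check using blang/semver (kops uses that parser elsewhere in this diff via semver.Parse; the /v4 module path and the helper's early-return error handling are assumptions, since the real validation accumulates field.Error values):

    package main

    import (
    	"fmt"
    	"strings"

    	"github.com/blang/semver/v4"
    )

    // supportedCiliumVersion mirrors the shape of the simplified check above:
    // parse the version, then require 1.8 <= minor <= 1.10. Sketch only.
    func supportedCiliumVersion(v string) error {
    	version, err := semver.ParseTolerant(strings.TrimPrefix(v, "v"))
    	if err != nil {
    		return fmt.Errorf("could not parse %q as semantic version", v)
    	}
    	if !(version.Minor >= 8 && version.Minor <= 10) {
    		return fmt.Errorf("only versions 1.8 through 1.10 are supported")
    	}
    	return nil
    }

    func main() {
    	for _, v := range []string{"v1.7.0", "v1.8.3", "v1.10.0"} {
    		fmt.Println(v, supportedCiliumVersion(v))
    	}
    }
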
@@ -997,7 +982,7 @@ func validateEtcdVersion(spec kops.EtcdClusterSpec, fieldPath *field.Path, minim

 	version := spec.Version
 	if spec.Version == "" {
-		version = components.DefaultEtcd3Version_1_14
+		version = components.DefaultEtcd3Version_1_17
 	}

 	sem, err := semver.Parse(strings.TrimPrefix(version, "v"))

@@ -261,7 +261,7 @@ func TestValidateKubeAPIServer(t *testing.T) {
 		if g.Cluster == nil {
 			g.Cluster = &kops.Cluster{
 				Spec: kops.ClusterSpec{
-					KubernetesVersion: "1.16.0",
+					KubernetesVersion: "1.20.0",
 				},
 			}
 		}

@@ -807,35 +807,12 @@ func Test_Validate_Cilium(t *testing.T) {
 			},
 			ExpectedErrors: []string{"Invalid value::cilium.version"},
 		},
-		{
-			Cilium: kops.CiliumNetworkingSpec{
-				Version: "v1.7.0",
-			},
-			Spec: kops.ClusterSpec{
-				KubernetesVersion: "1.18.0",
-			},
-			ExpectedErrors: []string{"Forbidden::cilium.version"},
-		},
-		{
-			Cilium: kops.CiliumNetworkingSpec{
-				Version: "v1.7.0",
-			},
-		},
 		{
 			Cilium: kops.CiliumNetworkingSpec{
 				Version: "1.7.0",
 			},
 			ExpectedErrors: []string{"Invalid value::cilium.version"},
 		},
-		{
-			Cilium: kops.CiliumNetworkingSpec{
-				Version: "v1.7.0",
-				Hubble: &kops.HubbleSpec{
-					Enabled: fi.Bool(true),
-				},
-			},
-			ExpectedErrors: []string{"Forbidden::cilium.hubble.enabled"},
-		},
 		{
 			Cilium: kops.CiliumNetworkingSpec{
 				Version: "v1.8.0",

@@ -864,7 +841,7 @@ func Test_Validate_Cilium(t *testing.T) {
 			Cilium: &g.Cilium,
 		}
 		if g.Spec.KubernetesVersion == "" {
-			g.Spec.KubernetesVersion = "1.15.0"
+			g.Spec.KubernetesVersion = "1.17.0"
 		}
 		cluster := &kops.Cluster{
 			Spec: g.Spec,

@@ -158,15 +158,9 @@ func (b *KubeAPIServerOptionsBuilder) BuildOptions(o interface{}) error {
 	// We make sure to disable AnonymousAuth
 	c.AnonymousAuth = fi.Bool(false)

-	if b.IsKubernetesGTE("1.17") {
-		// We query via the kube-apiserver-healthcheck proxy, which listens on port 3990
-		c.InsecureBindAddress = ""
-		c.InsecurePort = 0
-	} else {
-		// Older versions of kubernetes continue to rely on the insecure port: kubernetes issue #43784
-		c.InsecureBindAddress = "127.0.0.1"
-		c.InsecurePort = 8080
-	}
+	// We query via the kube-apiserver-healthcheck proxy, which listens on port 3990
+	c.InsecureBindAddress = ""
+	c.InsecurePort = 0

 	return nil
 }

@@ -139,22 +139,17 @@ func Image(component string, clusterSpec *kops.ClusterSpec, assetsBuilder *asset
 		return image, nil
 	}

-	// The simple name is valid when pulling (before 1.16 it was
-	// only amd64, as of 1.16 it is a manifest list). But if we
+	// The simple name is valid when pulling. But if we
 	// are loading from a tarfile then the image is tagged with
 	// the architecture suffix.
 	//
-	// i.e. k8s.gcr.io/kube-apiserver:v1.16.0 is a manifest list
+	// i.e. k8s.gcr.io/kube-apiserver:v1.20.0 is a manifest list
 	// and we _can_ also pull
-	// k8s.gcr.io/kube-apiserver-amd64:v1.16.0 directly. But if
-	// we load https://.../v1.16.0/amd64/kube-apiserver.tar then
+	// k8s.gcr.io/kube-apiserver-amd64:v1.20.0 directly. But if
+	// we load https://.../v1.20.0/amd64/kube-apiserver.tar then
 	// the image inside that tar file is named
-	// "k8s.gcr.io/kube-apiserver-amd64:v1.16.0"
-	//
-	// But ... this is only the case from 1.16 on...
-	if kubernetesVersion.IsGTE("1.16") {
-		imageName += "-amd64"
-	}
+	// "k8s.gcr.io/kube-apiserver-amd64:v1.20.0"
+	imageName += "-amd64"

 	baseURL := clusterSpec.KubernetesVersion
 	baseURL = strings.TrimSuffix(baseURL, "/")

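The comment above describes the naming rule this hunk hard-codes: the registry tag (for example k8s.gcr.io/kube-apiserver:v1.20.0) is a manifest list, but an image loaded from the release tarball is tagged with the architecture suffix. A tiny sketch of that suffixing, assuming the amd64-only behavior shown in the diff; tarballImageName is a hypothetical helper:

    package main

    import "fmt"

    // tarballImageName shows the naming rule described above: an image loaded
    // from .../bin/linux/amd64/kube-apiserver.tar is tagged with an
    // architecture suffix, e.g. k8s.gcr.io/kube-apiserver-amd64:<tag>.
    func tarballImageName(component, tag string) string {
    	imageName := component + "-amd64"
    	return "k8s.gcr.io/" + imageName + ":" + tag
    }

    func main() {
    	fmt.Println(tarballImageName("kube-apiserver", "1-20-0dockertag"))
    	// k8s.gcr.io/kube-apiserver-amd64:1-20-0dockertag
    }
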
@@ -49,12 +49,8 @@ func (b *DockerOptionsBuilder) BuildOptions(o interface{}) error {
 	if fi.StringValue(clusterSpec.Docker.Version) == "" {
 		if b.IsKubernetesGTE("1.21") {
 			docker.Version = fi.String("20.10.5")
-		} else if b.IsKubernetesGTE("1.17") {
-			docker.Version = fi.String("19.03.15")
-		} else if b.IsKubernetesGTE("1.16") {
-			docker.Version = fi.String("18.09.9")
 		} else {
-			docker.Version = fi.String("18.06.3")
+			docker.Version = fi.String("19.03.15")
 		}
 	}

@@ -34,8 +34,6 @@ type EtcdOptionsBuilder struct {
 var _ loader.OptionsBuilder = &EtcdOptionsBuilder{}

 const (
-	DefaultEtcd3Version_1_14 = "3.3.10"
-
 	DefaultEtcd3Version_1_17 = "3.4.3"

 	DefaultEtcd3Version_1_19 = "3.4.13"

@@ -56,10 +54,8 @@ func (b *EtcdOptionsBuilder) BuildOptions(o interface{}) error {
 		// We run the k8s-recommended versions of etcd
 		if b.IsKubernetesGTE("1.19") {
 			c.Version = DefaultEtcd3Version_1_19
-		} else if b.IsKubernetesGTE("1.17") {
-			c.Version = DefaultEtcd3Version_1_17
 		} else {
-			c.Version = DefaultEtcd3Version_1_14
+			c.Version = DefaultEtcd3Version_1_17
 		}
 	}

@@ -242,18 +242,6 @@ func (b *EtcdManagerBuilder) buildPod(etcdCluster kops.EtcdClusterSpec) (*v1.Pod
 		}
 	}

-	// With etcd-manager the hosts changes are self-contained, so
-	// we don't need to share /etc/hosts. By not sharing we avoid
-	// (1) the temptation to address etcd directly and (2)
-	// problems of concurrent updates to /etc/hosts being hard
-	// from within a container (because locking is very difficult
-	// across bind mounts).
-	//
-	// Introduced with 1.17 to avoid changing existing versions.
-	if b.IsKubernetesLT("1.17") {
-		kubemanifest.MapEtcHosts(pod, container, false)
-	}
-
 	// Remap image via AssetBuilder
 	{
 		remapped, err := b.AssetBuilder.RemapImage(container.Image)

@@ -33,7 +33,6 @@ func Test_RunEtcdManagerBuilder(t *testing.T) {
 		"tests/minimal",
 		"tests/pollinterval",
 		"tests/proxy",
-		"tests/old_versions_mount_hosts",
 		"tests/overwrite_settings",
 	}
 	for _, basedir := range tests {

@@ -1,85 +0,0 @@
-apiVersion: kops.k8s.io/v1alpha2
-kind: Cluster
-metadata:
-  creationTimestamp: "2016-12-10T22:42:27Z"
-  name: minimal.example.com
-spec:
-  kubernetesApiAccess:
-  - 0.0.0.0/0
-  channel: stable
-  cloudProvider: aws
-  configBase: memfs://clusters.example.com/minimal.example.com
-  etcdClusters:
-  - cpuRequest: 200m
-    etcdMembers:
-    - instanceGroup: master-us-test-1a
-      name: us-test-1a
-    memoryRequest: 100Mi
-    name: main
-    provider: Manager
-    backups:
-      backupStore: memfs://clusters.example.com/minimal.example.com/backups/etcd-main
-  - cpuRequest: 100m
-    etcdMembers:
-    - instanceGroup: master-us-test-1a
-      name: us-test-1a
-    memoryRequest: 100Mi
-    name: events
-    provider: Manager
-    backups:
-      backupStore: memfs://clusters.example.com/minimal.example.com/backups/etcd-events
-  kubernetesVersion: v1.12.0
-  masterInternalName: api.internal.minimal.example.com
-  masterPublicName: api.minimal.example.com
-  networkCIDR: 172.20.0.0/16
-  networking:
-    kubenet: {}
-  nonMasqueradeCIDR: 100.64.0.0/10
-  sshAccess:
-  - 0.0.0.0/0
-  topology:
-    masters: public
-    nodes: public
-  subnets:
-  - cidr: 172.20.32.0/19
-    name: us-test-1a
-    type: Public
-    zone: us-test-1a
-
----
-
-apiVersion: kops.k8s.io/v1alpha2
-kind: InstanceGroup
-metadata:
-  creationTimestamp: "2016-12-10T22:42:28Z"
-  name: nodes
-  labels:
-    kops.k8s.io/cluster: minimal.example.com
-spec:
-  associatePublicIp: true
-  image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
-  machineType: t2.medium
-  maxSize: 2
-  minSize: 2
-  role: Node
-  subnets:
-  - us-test-1a
-
----
-
-apiVersion: kops.k8s.io/v1alpha2
-kind: InstanceGroup
-metadata:
-  creationTimestamp: "2016-12-10T22:42:28Z"
-  name: master-us-test-1a
-  labels:
-    kops.k8s.io/cluster: minimal.example.com
-spec:
-  associatePublicIp: true
-  image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
-  machineType: m3.medium
-  maxSize: 1
-  minSize: 1
-  role: Master
-  subnets:
-  - us-test-1a

@@ -1,213 +0,0 @@
-Lifecycle: null
-Name: etcd-clients-ca
-Signer: null
-alternateNames: null
-oldFormat: false
-subject: cn=etcd-clients-ca
-type: ca
----
-Lifecycle: null
-Name: etcd-manager-ca-events
-Signer: null
-alternateNames: null
-oldFormat: false
-subject: cn=etcd-manager-ca-events
-type: ca
----
-Lifecycle: null
-Name: etcd-manager-ca-main
-Signer: null
-alternateNames: null
-oldFormat: false
-subject: cn=etcd-manager-ca-main
-type: ca
----
-Lifecycle: null
-Name: etcd-peers-ca-events
-Signer: null
-alternateNames: null
-oldFormat: false
-subject: cn=etcd-peers-ca-events
-type: ca
----
-Lifecycle: null
-Name: etcd-peers-ca-main
-Signer: null
-alternateNames: null
-oldFormat: false
-subject: cn=etcd-peers-ca-main
-type: ca
----
-Base: memfs://clusters.example.com/minimal.example.com/backups/etcd-events
-Contents: |-
-  {
-    "memberCount": 1
-  }
-Lifecycle: null
-Location: /control/etcd-cluster-spec
-Name: etcd-cluster-spec-events
-Public: null
----
-Base: memfs://clusters.example.com/minimal.example.com/backups/etcd-main
-Contents: |-
-  {
-    "memberCount": 1
-  }
-Lifecycle: null
-Location: /control/etcd-cluster-spec
-Name: etcd-cluster-spec-main
-Public: null
----
-Base: null
-Contents: |
-  apiVersion: v1
-  kind: Pod
-  metadata:
-    annotations:
-      scheduler.alpha.kubernetes.io/critical-pod: ""
-    creationTimestamp: null
-    labels:
-      k8s-app: etcd-manager-events
-    name: etcd-manager-events
-    namespace: kube-system
-  spec:
-    containers:
-    - command:
-      - /bin/sh
-      - -c
-      - mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager
-        --backup-store=memfs://clusters.example.com/minimal.example.com/backups/etcd-events
-        --client-urls=https://__name__:4002 --cluster-name=etcd-events --containerized=true
-        --dns-suffix=.internal.minimal.example.com --etcd-insecure=true --grpc-port=3997
-        --insecure=false --peer-urls=https://__name__:2381 --quarantine-client-urls=https://__name__:3995
-        --v=6 --volume-name-tag=k8s.io/etcd/events --volume-provider=aws --volume-tag=k8s.io/etcd/events
-        --volume-tag=k8s.io/role/master=1 --volume-tag=kubernetes.io/cluster/minimal.example.com=owned
-        > /tmp/pipe 2>&1
-      image: k8s.gcr.io/etcdadm/etcd-manager:3.0.20210430
-      name: etcd-manager
-      resources:
-        requests:
-          cpu: 100m
-          memory: 100Mi
-      securityContext:
-        privileged: true
-      volumeMounts:
-      - mountPath: /rootfs
-        name: rootfs
-      - mountPath: /run
-        name: run
-      - mountPath: /etc/kubernetes/pki/etcd-manager
-        name: pki
-      - mountPath: /etc/hosts
-        name: hosts
-      - mountPath: /var/log/etcd.log
-        name: varlogetcd
-    hostNetwork: true
-    hostPID: true
-    priorityClassName: system-cluster-critical
-    tolerations:
-    - key: CriticalAddonsOnly
-      operator: Exists
-    volumes:
-    - hostPath:
-        path: /
-        type: Directory
-      name: rootfs
-    - hostPath:
-        path: /run
-        type: DirectoryOrCreate
-      name: run
-    - hostPath:
-        path: /etc/kubernetes/pki/etcd-manager-events
-        type: DirectoryOrCreate
-      name: pki
-    - hostPath:
-        path: /etc/hosts
-        type: File
-      name: hosts
-    - hostPath:
-        path: /var/log/etcd-events.log
-        type: FileOrCreate
-      name: varlogetcd
-  status: {}
-Lifecycle: null
-Location: manifests/etcd/events.yaml
-Name: manifests-etcdmanager-events
-Public: null
----
-Base: null
-Contents: |
-  apiVersion: v1
-  kind: Pod
-  metadata:
-    annotations:
-      scheduler.alpha.kubernetes.io/critical-pod: ""
-    creationTimestamp: null
-    labels:
-      k8s-app: etcd-manager-main
-    name: etcd-manager-main
-    namespace: kube-system
-  spec:
-    containers:
-    - command:
-      - /bin/sh
-      - -c
-      - mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager
-        --backup-store=memfs://clusters.example.com/minimal.example.com/backups/etcd-main
-        --client-urls=https://__name__:4001 --cluster-name=etcd --containerized=true
-        --dns-suffix=.internal.minimal.example.com --etcd-insecure=true --grpc-port=3996
-        --insecure=false --peer-urls=https://__name__:2380 --quarantine-client-urls=https://__name__:3994
-        --v=6 --volume-name-tag=k8s.io/etcd/main --volume-provider=aws --volume-tag=k8s.io/etcd/main
-        --volume-tag=k8s.io/role/master=1 --volume-tag=kubernetes.io/cluster/minimal.example.com=owned
-        > /tmp/pipe 2>&1
-      image: k8s.gcr.io/etcdadm/etcd-manager:3.0.20210430
-      name: etcd-manager
-      resources:
-        requests:
-          cpu: 200m
-          memory: 100Mi
-      securityContext:
-        privileged: true
-      volumeMounts:
-      - mountPath: /rootfs
-        name: rootfs
-      - mountPath: /run
-        name: run
-      - mountPath: /etc/kubernetes/pki/etcd-manager
-        name: pki
-      - mountPath: /etc/hosts
-        name: hosts
-      - mountPath: /var/log/etcd.log
-        name: varlogetcd
-    hostNetwork: true
-    hostPID: true
-    priorityClassName: system-cluster-critical
-    tolerations:
-    - key: CriticalAddonsOnly
-      operator: Exists
-    volumes:
-    - hostPath:
-        path: /
-        type: Directory
-      name: rootfs
-    - hostPath:
-        path: /run
-        type: DirectoryOrCreate
-      name: run
-    - hostPath:
-        path: /etc/kubernetes/pki/etcd-manager-main
-        type: DirectoryOrCreate
-      name: pki
-    - hostPath:
-        path: /etc/hosts
-        type: File
-      name: hosts
-    - hostPath:
-        path: /var/log/etcd.log
-        type: FileOrCreate
-      name: varlogetcd
-  status: {}
-Lifecycle: null
-Location: manifests/etcd/main.yaml
-Name: manifests-etcdmanager-main
-Public: null

@@ -32,7 +32,7 @@ spec:
     provider: Manager
     backups:
       backupStore: memfs://clusters.example.com/minimal.example.com/backups/etcd-events
-  kubernetesVersion: v1.12.0
+  kubernetesVersion: v1.17.0
   masterInternalName: api.internal.minimal.example.com
   masterPublicName: api.minimal.example.com
   networkCIDR: 172.20.0.0/16

@@ -39,43 +39,22 @@ func TestImage(t *testing.T) {
-			Component: "kube-apiserver",
-			Cluster: &kops.Cluster{
-				Spec: kops.ClusterSpec{
-					KubernetesVersion: "v1.11.0",
-				},
-			},
-			Expected: "k8s.gcr.io/kube-apiserver:v1.11.0",
-		},
-		{
 			Component: "kube-apiserver",
 			Cluster: &kops.Cluster{
 				Spec: kops.ClusterSpec{
-					KubernetesVersion: "memfs://v1.11.0-download/",
+					KubernetesVersion: "memfs://v1.20.0-download/",
 				},
 			},
 			VFS: map[string]string{
-				"memfs://v1.11.0-download/bin/linux/amd64/kube-apiserver.docker_tag": "1-11-0dockertag",
+				"memfs://v1.20.0-download/bin/linux/amd64/kube-apiserver.docker_tag": "1-20-0dockertag",
 			},
-			Expected: "k8s.gcr.io/kube-apiserver:1-11-0dockertag",
+			Expected: "k8s.gcr.io/kube-apiserver-amd64:1-20-0dockertag",
 		},
 		{
 			Component: "kube-apiserver",
 			Cluster: &kops.Cluster{
 				Spec: kops.ClusterSpec{
-					KubernetesVersion: "memfs://v1.16.0-download/",
+					KubernetesVersion: "1.20.0",
 				},
 			},
-			VFS: map[string]string{
-				"memfs://v1.16.0-download/bin/linux/amd64/kube-apiserver.docker_tag": "1-16-0dockertag",
-			},
-			Expected: "k8s.gcr.io/kube-apiserver-amd64:1-16-0dockertag",
-		},
-		{
-			Component: "kube-apiserver",
-			Cluster: &kops.Cluster{
-				Spec: kops.ClusterSpec{
-					KubernetesVersion: "1.16.0",
-				},
-			},
-			Expected: "k8s.gcr.io/kube-apiserver:v1.16.0",
+			Expected: "k8s.gcr.io/kube-apiserver:v1.20.0",
 		},
 	}

@@ -38,25 +38,9 @@ type KubeApiserverBuilder struct {

 var _ fi.ModelBuilder = &KubeApiserverBuilder{}

-func (b *KubeApiserverBuilder) useHealthCheckSidecar(c *fi.ModelBuilderContext) bool {
-	// Should we use our health-check proxy, which allows us to
-	// query the secure port without enabling anonymous auth?
-	useHealthCheckSidecar := true
-	// We only turn on the proxy in k8s 1.17 and above
-	if b.IsKubernetesLT("1.17") {
-		useHealthCheckSidecar = false
-	}
-
-	return useHealthCheckSidecar
-}
-
 // Build creates the tasks relating to kube-apiserver
 // Currently we only build the kube-apiserver-healthcheck sidecar
 func (b *KubeApiserverBuilder) Build(c *fi.ModelBuilderContext) error {
-	if !b.useHealthCheckSidecar(c) {
-		return nil
-	}
-
 	manifest, err := b.buildManifest()
 	if err != nil {
 		return err

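With the version gate gone, the healthcheck sidecar is always built; per the options-builder comment earlier in the diff, it listens on port 3990 so health checks can reach the secure apiserver without enabling anonymous auth. A hedged sketch of what a probe against that sidecar could look like, using the pre-1.23 k8s.io/api types (v1.Handler was later renamed ProbeHandler); the path and timing values are assumptions, not taken from kops:

    package main

    import (
    	"fmt"

    	v1 "k8s.io/api/core/v1"
    	"k8s.io/apimachinery/pkg/util/intstr"
    )

    // healthCheckProbe sketches a liveness probe aimed at the
    // kube-apiserver-healthcheck sidecar on port 3990 (the port named in the
    // options-builder comment above). Illustrative values only.
    func healthCheckProbe() *v1.Probe {
    	return &v1.Probe{
    		Handler: v1.Handler{
    			HTTPGet: &v1.HTTPGetAction{
    				Host: "127.0.0.1",
    				Path: "/healthz",
    				Port: intstr.FromInt(3990),
    			},
    		},
    		InitialDelaySeconds: 45,
    		TimeoutSeconds:      15,
    	}
    }

    func main() {
    	fmt.Printf("%+v\n", healthCheckProbe())
    }
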
@@ -30,7 +30,7 @@ func buildCluster() *api.Cluster {
 	return &api.Cluster{
 		Spec: api.ClusterSpec{
 			CloudProvider:     "aws",
-			KubernetesVersion: "v1.15.0",
+			KubernetesVersion: "v1.20.0",
 			KubeAPIServer:     &api.KubeAPIServerConfig{},
 		},
 	}

@@ -219,11 +219,6 @@ func (b *KubeletOptionsBuilder) BuildOptions(o interface{}) error {
 			}
 		}
 	}
-	if _, found := clusterSpec.Kubelet.FeatureGates["ExperimentalCriticalPodAnnotation"]; !found {
-		if b.IsKubernetesLT("1.16") {
-			clusterSpec.Kubelet.FeatureGates["ExperimentalCriticalPodAnnotation"] = "true"
-		}
-	}

 	// Set systemd as the default cgroup driver for kubelet from k8s 1.20
 	if b.IsKubernetesGTE("1.20") && clusterSpec.Kubelet.CgroupDriver == "" {

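The deleted block used the defaulting pattern the rest of this builder keeps: set a feature gate only when the user has not specified it, and only for Kubernetes versions that still accept it. The ExperimentalCriticalPodAnnotation default disappears because no supported version (1.16 and later) needs it. A minimal sketch of that pattern with illustrative names only:

    package main

    import "fmt"

    // defaultFeatureGate sets a gate only when the user has not specified it,
    // mirroring the `if _, found := gates[name]; !found` pattern above.
    // Illustrative sketch, not kops code.
    func defaultFeatureGate(gates map[string]string, name, value string) {
    	if _, found := gates[name]; !found {
    		gates[name] = value
    	}
    }

    func main() {
    	gates := map[string]string{"GracefulNodeShutdown": "false"}
    	defaultFeatureGate(gates, "GracefulNodeShutdown", "true") // user's value wins
    	defaultFeatureGate(gates, "SomeNewGate", "true")          // defaulted
    	fmt.Println(gates)
    }
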
@@ -58,22 +58,9 @@ func buildOptions(cluster *kops.Cluster) error {
 	return nil
 }

-func TestFeatureGates(t *testing.T) {
-	cluster := buildKubeletTestCluster()
-	err := buildOptions(cluster)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	gates := cluster.Spec.Kubelet.FeatureGates
-	if gates["ExperimentalCriticalPodAnnotation"] != "true" {
-		t.Errorf("ExperimentalCriticalPodAnnotation feature gate should be enabled by default")
-	}
-}
-
 func TestFeatureGatesKubernetesVersion(t *testing.T) {
 	cluster := buildKubeletTestCluster()
-	cluster.Spec.KubernetesVersion = "1.16.0"
+	cluster.Spec.KubernetesVersion = "1.17.0"
 	err := buildOptions(cluster)
 	if err != nil {
 		t.Fatal(err)

@@ -1,94 +0,0 @@
-apiVersion: kops.k8s.io/v1alpha2
-kind: Cluster
-metadata:
-  creationTimestamp: "2017-01-01T00:00:00Z"
-  name: minimal.example.com
-spec:
-  api:
-    dns: {}
-  authorization:
-    rbac: {}
-  channel: stable
-  cloudProvider: aws
-  configBase: memfs://tests/minimal.example.com
-  containerRuntime: containerd
-  etcdClusters:
-  - cpuRequest: 200m
-    etcdMembers:
-    - encryptedVolume: true
-      instanceGroup: master-us-test-1a
-      name: a
-    memoryRequest: 100Mi
-    name: main
-  - cpuRequest: 100m
-    etcdMembers:
-    - encryptedVolume: true
-      instanceGroup: master-us-test-1a
-      name: a
-    memoryRequest: 100Mi
-    name: events
-  iam:
-    allowContainerRegistry: true
-    legacy: false
-  kubelet:
-    anonymousAuth: false
-  kubernetesApiAccess:
-  - 0.0.0.0/0
-  kubernetesVersion: v1.16.0
-  masterPublicName: api.minimal.example.com
-  networkCIDR: 172.20.0.0/16
-  networking:
-    cni: {}
-  nonMasqueradeCIDR: 100.64.0.0/10
-  sshAccess:
-  - 0.0.0.0/0
-  subnets:
-  - cidr: 172.20.32.0/19
-    name: us-test-1a
-    type: Public
-    zone: us-test-1a
-  topology:
-    dns:
-      type: Public
-    masters: public
-    nodes: public
-
----
-
-apiVersion: kops.k8s.io/v1alpha2
-kind: InstanceGroup
-metadata:
-  creationTimestamp: "2017-01-01T00:00:00Z"
-  labels:
-    kops.k8s.io/cluster: minimal.example.com
-  name: master-us-test-1a
-spec:
-  image: kope.io/k8s-1.16-debian-stretch-amd64-hvm-ebs-2021-02-05
-  machineType: m3.medium
-  maxSize: 1
-  minSize: 1
-  nodeLabels:
-    kops.k8s.io/instancegroup: master-us-test-1a
-  role: Master
-  subnets:
-  - us-test-1a
-
----
-
-apiVersion: kops.k8s.io/v1alpha2
-kind: InstanceGroup
-metadata:
-  creationTimestamp: "2017-01-01T00:00:00Z"
-  labels:
-    kops.k8s.io/cluster: minimal.example.com
-  name: nodes-us-test-1a
-spec:
-  image: kope.io/k8s-1.16-debian-stretch-amd64-hvm-ebs-2021-02-05
-  machineType: t2.medium
-  maxSize: 1
-  minSize: 1
-  nodeLabels:
-    kops.k8s.io/instancegroup: nodes-us-test-1a
-  role: Node
-  subnets:
-  - us-test-1a

@@ -3,4 +3,4 @@ Zones:
 - us-test-1a
 CloudProvider: aws
 Networking: cni
-KubernetesVersion: v1.16.0
+KubernetesVersion: v1.22.0

@@ -28,17 +28,12 @@ go_library(
         "cloudup/resources/addons/metadata-proxy.addons.k8s.io/addon.yaml",
         "cloudup/resources/addons/metadata-proxy.addons.k8s.io/v0.1.12.yaml",
         "cloudup/resources/addons/metrics-server.addons.k8s.io/k8s-1.11.yaml.template",
-        "cloudup/resources/addons/networking.amazon-vpc-routed-eni/k8s-1.12.yaml.template",
         "cloudup/resources/addons/networking.amazon-vpc-routed-eni/k8s-1.16.yaml.template",
         "cloudup/resources/addons/networking.cilium.io/k8s-1.12-v1.8.yaml.template",
-        "cloudup/resources/addons/networking.cilium.io/k8s-1.12.yaml.template",
         "cloudup/resources/addons/networking.flannel/k8s-1.12.yaml.template",
         "cloudup/resources/addons/networking.kope.io/k8s-1.12.yaml",
         "cloudup/resources/addons/networking.kuberouter/k8s-1.12.yaml.template",
-        "cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.12.yaml.template",
-        "cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.15.yaml.template",
         "cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.16.yaml.template",
-        "cloudup/resources/addons/networking.projectcalico.org/k8s-1.12.yaml.template",
         "cloudup/resources/addons/networking.projectcalico.org/k8s-1.16.yaml.template",
         "cloudup/resources/addons/networking.weave/k8s-1.12.yaml.template",
         "cloudup/resources/addons/node-termination-handler.aws/k8s-1.11.yaml.template",
@@ -50,7 +45,6 @@ go_library(
         "cloudup/resources/addons/spotinst-kubernetes-cluster-controller.addons.k8s.io/v1.14.0.yaml.template",
         "cloudup/resources/addons/spotinst-kubernetes-cluster-controller.addons.k8s.io/v1.9.0.yaml.template",
         "cloudup/resources/addons/storage-aws.addons.k8s.io/v1.15.0.yaml.template",
-        "cloudup/resources/addons/storage-aws.addons.k8s.io/v1.7.0.yaml.template",
         "cloudup/resources/addons/storage-gce.addons.k8s.io/v1.7.0.yaml.template",
         "cloudup/resources/addons/storage-openstack.addons.k8s.io/k8s-1.16.yaml.template",
         "cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.10.yaml.template",

@@ -1,146 +0,0 @@
-# Vendored from https://raw.githubusercontent.com/aws/amazon-vpc-cni-k8s/v1.5.5/config/v1.5/aws-k8s-cni.yaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: aws-node
-rules:
-- apiGroups:
-  - crd.k8s.amazonaws.com
-  resources:
-  - "*"
-  - namespaces
-  verbs:
-  - "*"
-- apiGroups: [""]
-  resources:
-  - pods
-  - nodes
-  - namespaces
-  verbs: ["list", "watch", "get"]
-- apiGroups: ["extensions"]
-  resources:
-  - daemonsets
-  verbs: ["list", "watch"]
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: aws-node
-  namespace: kube-system
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: aws-node
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: aws-node
-subjects:
-- kind: ServiceAccount
-  name: aws-node
-  namespace: kube-system
----
-kind: DaemonSet
-apiVersion: apps/v1
-metadata:
-  name: aws-node
-  namespace: kube-system
-  labels:
-    k8s-app: aws-node
-spec:
-  updateStrategy:
-    type: RollingUpdate
-  selector:
-    matchLabels:
-      k8s-app: aws-node
-  template:
-    metadata:
-      labels:
-        k8s-app: aws-node
-    spec:
-      priorityClassName: system-node-critical
-      affinity:
-        nodeAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-            nodeSelectorTerms:
-            - matchExpressions:
-              - key: "beta.kubernetes.io/os"
-                operator: In
-                values:
-                - linux
-              - key: "beta.kubernetes.io/arch"
-                operator: In
-                values:
-                - amd64
-      serviceAccountName: aws-node
-      hostNetwork: true
-      tolerations:
-      - operator: Exists
-      containers:
-      - image: "{{- or .Networking.AmazonVPC.ImageName "602401143452.dkr.ecr.us-west-2.amazonaws.com/amazon-k8s-cni:v1.5.5" }}"
-        imagePullPolicy: Always
-        ports:
-        - containerPort: 61678
-          name: metrics
-        name: aws-node
-        env:
-        - name: CLUSTER_NAME
-          value: {{ ClusterName }}
-        - name: AWS_VPC_K8S_CNI_LOGLEVEL
-          value: DEBUG
-        - name: MY_NODE_NAME
-          valueFrom:
-            fieldRef:
-              fieldPath: spec.nodeName
-        - name: WATCH_NAMESPACE
-          valueFrom:
-            fieldRef:
-              fieldPath: metadata.namespace
-{{- range .Networking.AmazonVPC.Env }}
-        - name: {{ .Name }}
-          value: "{{ .Value }}"
-{{- end }}
-        resources:
-          requests:
-            cpu: 10m
-        securityContext:
-          privileged: true
-        volumeMounts:
-        - mountPath: /host/opt/cni/bin
-          name: cni-bin-dir
-        - mountPath: /host/etc/cni/net.d
-          name: cni-net-dir
-        - mountPath: /host/var/log
-          name: log-dir
-        - mountPath: /var/run/docker.sock
-          name: dockersock
-      volumes:
-      - name: cni-bin-dir
-        hostPath:
-          path: /opt/cni/bin
-      - name: cni-net-dir
-        hostPath:
-          path: /etc/cni/net.d
-      - name: log-dir
-        hostPath:
-          path: /var/log
-      - name: dockersock
-        hostPath:
-          path: /var/run/docker.sock
----
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
-  name: eniconfigs.crd.k8s.amazonaws.com
-spec:
-  scope: Cluster
-  group: crd.k8s.amazonaws.com
-  versions:
-  - name: v1alpha1
-    served: true
-    storage: true
-  names:
-    plural: eniconfigs
-    singular: eniconfig
-    kind: ENIConfig

@@ -1,819 +0,0 @@
-{{- if CiliumSecret }}
-apiVersion: v1
-kind: Secret
-metadata:
-  name: cilium-ipsec-keys
-  namespace: kube-system
-stringData:
-  {{ CiliumSecret }}
----
-{{- end }}
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: cilium-config
-  namespace: kube-system
-data:
-{{ with .Networking.Cilium }}
-
-{{- if .EtcdManaged }}
-  kvstore: etcd
-  kvstore-opt: '{"etcd.config": "/var/lib/etcd-config/etcd.config"}'
-
-  etcd-config: |-
-    ---
-    endpoints:
-      - https://{{ $.MasterInternalName }}:4003
-
-    trusted-ca-file: '/var/lib/etcd-secrets/etcd-ca.crt'
-    key-file: '/var/lib/etcd-secrets/etcd-client-cilium.key'
-    cert-file: '/var/lib/etcd-secrets/etcd-client-cilium.crt'
-{{ end }}
-
-  # Identity allocation mode selects how identities are shared between cilium
-  # nodes by setting how they are stored. The options are "crd" or "kvstore".
-  # - "crd" stores identities in kubernetes as CRDs (custom resource definition).
-  #   These can be queried with:
-  #     kubectl get ciliumid
-  # - "kvstore" stores identities in a kvstore, etcd or consul, that is
-  #   configured below. Cilium versions before 1.6 supported only the kvstore
-  #   backend. Upgrades from these older cilium versions should continue using
-  #   the kvstore by commenting out the identity-allocation-mode below, or
-  #   setting it to "kvstore".
-  identity-allocation-mode: crd
-  # If you want to run cilium in debug mode change this value to true
-  debug: "{{ .Debug }}"
-{{ if .EnablePrometheusMetrics }}
-  # If you want metrics enabled in all of your Cilium agents, set the port for
-  # which the Cilium agents will have their metrics exposed.
-  # This option deprecates the "prometheus-serve-addr" in the
-  # "cilium-metrics-config" ConfigMap
-  # NOTE that this will open the port on ALL nodes where Cilium pods are
-  # scheduled.
-  prometheus-serve-addr: ":{{ .AgentPrometheusPort }}"
-{{ end }}
-{{ if .EnableEncryption }}
-  enable-ipsec: "true"
-  ipsec-key-file: /etc/ipsec/keys
-{{ end }}
-  # Enable IPv4 addressing. If enabled, all endpoints are allocated an IPv4
-  # address.
-  enable-ipv4: "true"
-  # Enable IPv6 addressing. If enabled, all endpoints are allocated an IPv6
-  # address.
-  enable-ipv6: "false"
-  # If you want cilium monitor to aggregate tracing for packets, set this level
-  # to "low", "medium", or "maximum". The higher the level, the less packets
-  # that will be seen in monitor output.
-  monitor-aggregation: "{{ .MonitorAggregation }}"
-  # ct-global-max-entries-* specifies the maximum number of connections
-  # supported across all endpoints, split by protocol: tcp or other. One pair
-  # of maps uses these values for IPv4 connections, and another pair of maps
-  # use these values for IPv6 connections.
-  #
-  # If these values are modified, then during the next Cilium startup the
-  # tracking of ongoing connections may be disrupted. This may lead to brief
-  # policy drops or a change in loadbalancing decisions for a connection.
-  #
-  # For users upgrading from Cilium 1.2 or earlier, to minimize disruption
-  # during the upgrade process, comment out these options.
-  bpf-ct-global-tcp-max: "{{ .BPFCTGlobalTCPMax }}"
-  bpf-ct-global-any-max: "{{ .BPFCTGlobalAnyMax }}"
-
-  # Pre-allocation of map entries allows per-packet latency to be reduced, at
-  # the expense of up-front memory allocation for the entries in the maps. The
-  # default value below will minimize memory usage in the default installation;
-  # users who are sensitive to latency may consider setting this to "true".
-  #
-  # This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore
-  # this option and behave as though it is set to "true".
-  #
-  # If this value is modified, then during the next Cilium startup the restore
-  # of existing endpoints and tracking of ongoing connections may be disrupted.
-  # This may lead to policy drops or a change in loadbalancing decisions for a
-  # connection for some time. Endpoints may need to be recreated to restore
-  # connectivity.
-  #
-  # If this option is set to "false" during an upgrade from 1.3 or earlier to
-  # 1.4 or later, then it may cause one-time disruptions during the upgrade.
-  preallocate-bpf-maps: "{{- if .PreallocateBPFMaps -}}true{{- else -}}false{{- end -}}"
-  # Regular expression matching compatible Istio sidecar istio-proxy
-  # container image names
-  sidecar-istio-proxy-image: "{{ .SidecarIstioProxyImage }}"
-  # Encapsulation mode for communication between nodes
-  # Possible values:
-  #   - disabled
-  #   - vxlan (default)
-  #   - geneve
-  tunnel: "{{ .Tunnel }}"
-
-  # Name of the cluster. Only relevant when building a mesh of clusters.
-  cluster-name: "{{ .ClusterName }}"
-
-  # DNS response code for rejecting DNS requests,
-  # available options are "nameError" and "refused"
-  tofqdns-dns-reject-response-code: "{{ .ToFqdnsDNSRejectResponseCode }}"
-  # This option is disabled by default starting from version 1.4.x in favor
-  # of a more powerful DNS proxy-based implementation, see [0] for details.
-  # Enable this option if you want to use FQDN policies but do not want to use
-  # the DNS proxy.
-  #
-  # To ease upgrade, users may opt to set this option to "true".
-  # Otherwise please refer to the Upgrade Guide [1] which explains how to
-  # prepare policy rules for upgrade.
-  #
-  # [0] http://docs.cilium.io/en/stable/policy/language/#dns-based
-  # [1] http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action
-  tofqdns-enable-poller: "{{- if .ToFqdnsEnablePoller -}}true{{- else -}}false{{- end -}}"
-  # wait-bpf-mount makes init container wait until bpf filesystem is mounted
-  wait-bpf-mount: "false"
-  # Enable fetching of container-runtime specific metadata
-  #
-  # By default, the Kubernetes pod and namespace labels are retrieved and
-  # associated with endpoints for identification purposes. By integrating
-  # with the container runtime, container runtime specific labels can be
-  # retrieved, such labels will be prefixed with container:
-  #
-  # CAUTION: The container runtime labels can include information such as pod
-  # annotations which may result in each pod being associated a unique set of
-  # labels which can result in excessive security identities being allocated.
-  # Please review the labels filter when enabling container runtime labels.
-  #
-  # Supported values:
-  # - containerd
-  # - crio
-  # - docker
-  # - none
-  # - auto (automatically detect the container runtime)
-  #
-  container-runtime: "{{ .ContainerRuntimeLabels }}"
-  masquerade: "{{- if .DisableMasquerade -}}false{{- else -}}true{{- end -}}"
-  install-iptables-rules: "{{- if .IPTablesRulesNoinstall -}}false{{- else -}}true{{- end -}}"
-  auto-direct-node-routes: "{{ .AutoDirectNodeRoutes }}"
-{{ if .EnableHostReachableServices }}
-  enable-host-reachable-services: "{{ .EnableHostReachableServices }}"
-{{ end }}
-  enable-node-port: "{{ .EnableNodePort }}"
-  kube-proxy-replacement: "{{- if .EnableNodePort -}}strict{{- else -}}partial{{- end -}}"
-  enable-remote-node-identity: "{{- if .EnableRemoteNodeIdentity -}}true{{- else -}}false{{- end -}}"
-{{ with .Ipam }}
-  ipam: {{ . }}
-{{ if eq . "eni" }}
-  enable-endpoint-routes: "true"
-  auto-create-cilium-node-resource: "true"
-  blacklist-conflicting-routes: "false"
-{{ else if eq . "hostscope" }}
-  k8s-require-ipv4-pod-cidr: "true"
-{{ end }}
-{{ end }}
-{{ end }} # With .Networking.Cilium end
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: cilium
-  namespace: kube-system
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: cilium-operator
-  namespace: kube-system
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: cilium
-rules:
-- apiGroups:
-  - networking.k8s.io
-  resources:
-  - networkpolicies
-  verbs:
-  - get
-  - list
-  - watch
-- apiGroups:
-  - discovery.k8s.io
-  resources:
-  - endpointslices
-  verbs:
-  - get
-  - list
-  - watch
-- apiGroups:
-  - ""
-  resources:
-  - namespaces
-  - services
-  - nodes
-  - endpoints
-  - componentstatuses
-  verbs:
-  - get
-  - list
-  - watch
-- apiGroups:
-  - ""
-  resources:
-  - pods
-  - nodes
-  verbs:
-  - get
-  - list
-  - watch
-  - update
-- apiGroups:
-  - ""
-  resources:
-  - nodes
-  - nodes/status
-  verbs:
-  - patch
-- apiGroups:
-  - extensions
-  resources:
-  - ingresses
-  verbs:
-  - create
-  - get
-  - list
-  - watch
-- apiGroups:
-  - apiextensions.k8s.io
-  resources:
-  - customresourcedefinitions
-  verbs:
-  - create
-  - get
-  - list
-  - watch
-  - update
-- apiGroups:
-  - cilium.io
-  resources:
-  - ciliumnetworkpolicies
-  - ciliumnetworkpolicies/status
-  - ciliumclusterwidenetworkpolicies
-  - ciliumclusterwidenetworkpolicies/status
-  - ciliumendpoints
-  - ciliumendpoints/status
-  - ciliumnodes
-  - ciliumnodes/status
-  - ciliumidentities
-  - ciliumidentities/status
-  verbs:
-  - '*'
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: cilium-operator
-rules:
-- apiGroups:
-  - ""
-  resources:
-  # to automatically delete [core|kube]dns pods so that are starting to being
-  # managed by Cilium
-  - pods
-  verbs:
-  - get
-  - list
-  - watch
-  - delete
-- apiGroups:
-  - discovery.k8s.io
-  resources:
-  - endpointslices
-  verbs:
-  - get
-  - list
-  - watch
-- apiGroups:
-  - ""
-  resources:
-  # to automatically read from k8s and import the node's pod CIDR to cilium's
-  # etcd so all nodes know how to reach another pod running in a different
-  # node.
-  - nodes
-  # to perform the translation of a CNP that contains `ToGroup` to its endpoints
-  - services
-  - endpoints
-  # to check apiserver connectivity
-  - namespaces
-  verbs:
-  - get
-  - list
-  - watch
-- apiGroups:
-  - cilium.io
-  resources:
-  - ciliumnetworkpolicies
-  - ciliumnetworkpolicies/status
-  - ciliumclusterwidenetworkpolicies
-  - ciliumclusterwidenetworkpolicies/status
-  - ciliumendpoints
-  - ciliumendpoints/status
-  - ciliumnodes
-  - ciliumnodes/status
-  - ciliumidentities
-  - ciliumidentities/status
-  verbs:
-  - '*'
-- apiGroups:
-  - coordination.k8s.io
-  resources:
-  - leases
-  verbs:
-  - create
-  - get
-  - update
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: cilium
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: cilium
-subjects:
-- kind: ServiceAccount
-  name: cilium
-  namespace: kube-system
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: cilium-operator
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: cilium-operator
-subjects:
-- kind: ServiceAccount
-  name: cilium-operator
-  namespace: kube-system
----
-apiVersion: apps/v1
-kind: DaemonSet
-metadata:
-  labels:
-    k8s-app: cilium
-    kubernetes.io/cluster-service: "true"
-  name: cilium
-  namespace: kube-system
-spec:
-  selector:
-    matchLabels:
-      k8s-app: cilium
-      kubernetes.io/cluster-service: "true"
-  template:
-    metadata:
-      annotations:
-        # This annotation plus the CriticalAddonsOnly toleration makes
-        # cilium to be a critical pod in the cluster, which ensures cilium
-        # gets priority scheduling.
-        # https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/
-        scheduler.alpha.kubernetes.io/critical-pod: ""
-      labels:
-        k8s-app: cilium
-        kubernetes.io/cluster-service: "true"
-    spec:
-      containers:
-      - args:
-        - --config-dir=/tmp/cilium/config-map
-        command:
-        - cilium-agent
-        env:
-        - name: K8S_NODE_NAME
-          valueFrom:
-            fieldRef:
-              apiVersion: v1
-              fieldPath: spec.nodeName
-        - name: CILIUM_K8S_NAMESPACE
-          valueFrom:
-            fieldRef:
-              apiVersion: v1
-              fieldPath: metadata.namespace
-        - name: CILIUM_FLANNEL_MASTER_DEVICE
-          valueFrom:
-            configMapKeyRef:
-              key: flannel-master-device
-              name: cilium-config
-              optional: true
-        - name: CILIUM_FLANNEL_UNINSTALL_ON_EXIT
-          valueFrom:
-            configMapKeyRef:
-              key: flannel-uninstall-on-exit
-              name: cilium-config
-              optional: true
-        - name: CILIUM_CLUSTERMESH_CONFIG
-          value: /var/lib/cilium/clustermesh/
-        - name: CILIUM_CNI_CHAINING_MODE
-          valueFrom:
-            configMapKeyRef:
-              key: cni-chaining-mode
-              name: cilium-config
-              optional: true
-        - name: CILIUM_CUSTOM_CNI_CONF
-          valueFrom:
-            configMapKeyRef:
-              key: custom-cni-conf
-              name: cilium-config
-              optional: true
-        - name: KUBERNETES_SERVICE_HOST
-          value: "{{.MasterInternalName}}"
-        - name: KUBERNETES_SERVICE_PORT
-          value: "443"
-{{ with .Networking.Cilium.EnablePolicy }}
-        - name: CILIUM_ENABLE_POLICY
-          value: {{ . }}
-{{ end }}
-{{ with .Networking.Cilium }}
-        image: "docker.io/cilium/cilium:{{ .Version }}"
-        imagePullPolicy: IfNotPresent
-        lifecycle:
-          postStart:
-            exec:
-              command:
-              - /cni-install.sh
-          preStop:
-            exec:
-              command:
-              - /cni-uninstall.sh
-        livenessProbe:
-          exec:
-            command:
-            - cilium
-            - status
-            - --brief
-          failureThreshold: 10
-          # The initial delay for the liveness probe is intentionally large to
-          # avoid an endless kill & restart cycle if in the event that the initial
-          # bootstrapping takes longer than expected.
-          initialDelaySeconds: 120
-          periodSeconds: 30
-          successThreshold: 1
-          timeoutSeconds: 5
-        name: cilium-agent
-{{ if .EnablePrometheusMetrics }}
-        ports:
-        - containerPort: {{ .AgentPrometheusPort }}
-          hostPort: {{ .AgentPrometheusPort }}
-          name: prometheus
-          protocol: TCP
-{{ end }}
-        resources:
-          requests:
-            cpu: {{ or .CPURequest "25m" }}
-            memory: {{ or .MemoryRequest "128Mi" }}
-        readinessProbe:
-          exec:
-            command:
-            - cilium
-            - status
-            - --brief
-          failureThreshold: 3
-          initialDelaySeconds: 5
-          periodSeconds: 30
-          successThreshold: 1
-          timeoutSeconds: 5
-        securityContext:
-          capabilities:
-            add:
-            - NET_ADMIN
-            - SYS_MODULE
-          privileged: true
-        volumeMounts:
-        - mountPath: /sys/fs/bpf
-          name: bpf-maps
-          mountPropagation: HostToContainer
-        - mountPath: /var/run/cilium
-          name: cilium-run
-        - mountPath: /host/opt/cni/bin
-          name: cni-path
-        - mountPath: /host/etc/cni/net.d
-          name: etc-cni-netd
-{{ if .EtcdManaged }}
-        - mountPath: /var/lib/etcd-config
-          name: etcd-config-path
-          readOnly: true
-        - mountPath: /var/lib/etcd-secrets
-          name: etcd-secrets
-          readOnly: true
-{{ end }}
-        - mountPath: /var/lib/cilium/clustermesh
-          name: clustermesh-secrets
-          readOnly: true
-        - mountPath: /tmp/cilium/config-map
-          name: cilium-config-path
-          readOnly: true
-          # Needed to be able to load kernel modules
-        - mountPath: /lib/modules
-          name: lib-modules
-          readOnly: true
-        - mountPath: /run/xtables.lock
-          name: xtables-lock
-{{ if CiliumSecret }}
-        - mountPath: /etc/ipsec
-          name: cilium-ipsec-secrets
-{{ end }}
-      hostNetwork: true
-      initContainers:
-      - command:
-        - /init-container.sh
-        env:
-        - name: CILIUM_ALL_STATE
-          valueFrom:
-            configMapKeyRef:
-              key: clean-cilium-state
-              name: cilium-config
-              optional: true
-        - name: CILIUM_BPF_STATE
-          valueFrom:
-            configMapKeyRef:
-              key: clean-cilium-bpf-state
-              name: cilium-config
-              optional: true
-        - name: CILIUM_WAIT_BPF_MOUNT
-          valueFrom:
-            configMapKeyRef:
-              key: wait-bpf-mount
-              name: cilium-config
-              optional: true
-        image: "docker.io/cilium/cilium:{{ .Version }}"
-        ## end of `with .Networking.Cilium`
-        #{{ end }}
-        imagePullPolicy: IfNotPresent
-        name: clean-cilium-state
-        securityContext:
-          capabilities:
-            add:
-            - NET_ADMIN
-          privileged: true
-        volumeMounts:
-        - mountPath: /sys/fs/bpf
-          name: bpf-maps
-        - mountPath: /var/run/cilium
-          name: cilium-run
-      priorityClassName: system-node-critical
-      restartPolicy: Always
-      serviceAccount: cilium
-      serviceAccountName: cilium
-      terminationGracePeriodSeconds: 1
-      tolerations:
-      - operator: Exists
-      volumes:
-      # To keep state between restarts / upgrades
-      - hostPath:
-          path: /var/run/cilium
-          type: DirectoryOrCreate
-        name: cilium-run
-      # To keep state between restarts / upgrades for bpf maps
-      - hostPath:
-          path: /sys/fs/bpf
-          type: DirectoryOrCreate
-        name: bpf-maps
-      # To install cilium cni plugin in the host
-      - hostPath:
-          path: /opt/cni/bin
|
||||
type: DirectoryOrCreate
|
||||
name: cni-path
|
||||
# To install cilium cni configuration in the host
|
||||
- hostPath:
|
||||
path: /etc/cni/net.d
|
||||
type: DirectoryOrCreate
|
||||
name: etc-cni-netd
|
||||
# To be able to load kernel modules
|
||||
- hostPath:
|
||||
path: /lib/modules
|
||||
name: lib-modules
|
||||
# To access iptables concurrently with other processes (e.g. kube-proxy)
|
||||
- hostPath:
|
||||
path: /run/xtables.lock
|
||||
type: FileOrCreate
|
||||
name: xtables-lock
|
||||
# To read the clustermesh configuration
|
||||
{{- if .Networking.Cilium.EtcdManaged }}
|
||||
# To read the etcd config stored in config maps
|
||||
- configMap:
|
||||
defaultMode: 420
|
||||
items:
|
||||
- key: etcd-config
|
||||
path: etcd.config
|
||||
name: cilium-config
|
||||
name: etcd-config-path
|
||||
# To read the Cilium etcd secrets in case the user might want to use TLS
|
||||
- name: etcd-secrets
|
||||
hostPath:
|
||||
path: /etc/kubernetes/pki/cilium
|
||||
type: Directory
|
||||
{{- end }}
|
||||
- name: clustermesh-secrets
|
||||
secret:
|
||||
defaultMode: 420
|
||||
optional: true
|
||||
secretName: cilium-clustermesh
|
||||
# To read the configuration from the config map
|
||||
- configMap:
|
||||
name: cilium-config
|
||||
name: cilium-config-path
|
||||
{{ if CiliumSecret }}
|
||||
- name: cilium-ipsec-secrets
|
||||
secret:
|
||||
secretName: cilium-ipsec-keys
|
||||
{{ end }}
|
||||
updateStrategy:
|
||||
rollingUpdate:
|
||||
maxUnavailable: 2
|
||||
type: RollingUpdate
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
io.cilium/app: operator
|
||||
name: cilium-operator
|
||||
name: cilium-operator
|
||||
namespace: kube-system
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
io.cilium/app: operator
|
||||
name: cilium-operator
|
||||
strategy:
|
||||
rollingUpdate:
|
||||
maxSurge: 1
|
||||
maxUnavailable: 1
|
||||
type: RollingUpdate
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
io.cilium/app: operator
|
||||
name: cilium-operator
|
||||
spec:
|
||||
containers:
|
||||
- args:
|
||||
- --debug=$(CILIUM_DEBUG)
|
||||
- --identity-allocation-mode=$(CILIUM_IDENTITY_ALLOCATION_MODE)
|
||||
{{ with .Networking.Cilium }}
|
||||
{{ if .EnablePrometheusMetrics }}
|
||||
- --enable-metrics
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
command:
|
||||
- cilium-operator
|
||||
env:
|
||||
- name: CILIUM_IDENTITY_ALLOCATION_MODE
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: identity-allocation-mode
|
||||
name: cilium-config
|
||||
optional: true
|
||||
- name: CILIUM_K8S_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
apiVersion: v1
|
||||
fieldPath: metadata.namespace
|
||||
- name: K8S_NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
apiVersion: v1
|
||||
fieldPath: spec.nodeName
|
||||
- name: CILIUM_DEBUG
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: debug
|
||||
name: cilium-config
|
||||
optional: true
|
||||
- name: CILIUM_CLUSTER_NAME
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: cluster-name
|
||||
name: cilium-config
|
||||
optional: true
|
||||
- name: CILIUM_CLUSTER_ID
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: cluster-id
|
||||
name: cilium-config
|
||||
optional: true
|
||||
- name: CILIUM_IPAM
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: ipam
|
||||
name: cilium-config
|
||||
optional: true
|
||||
- name: CILIUM_DISABLE_ENDPOINT_CRD
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: disable-endpoint-crd
|
||||
name: cilium-config
|
||||
optional: true
|
||||
- name: CILIUM_KVSTORE
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: kvstore
|
||||
name: cilium-config
|
||||
optional: true
|
||||
- name: CILIUM_KVSTORE_OPT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: kvstore-opt
|
||||
name: cilium-config
|
||||
optional: true
|
||||
- name: AWS_ACCESS_KEY_ID
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
key: AWS_ACCESS_KEY_ID
|
||||
name: cilium-aws
|
||||
optional: true
|
||||
- name: AWS_SECRET_ACCESS_KEY
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
key: AWS_SECRET_ACCESS_KEY
|
||||
name: cilium-aws
|
||||
optional: true
|
||||
- name: AWS_DEFAULT_REGION
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
key: AWS_DEFAULT_REGION
|
||||
name: cilium-aws
|
||||
optional: true
|
||||
- name: KUBERNETES_SERVICE_HOST
|
||||
value: "{{.MasterInternalName}}"
|
||||
- name: KUBERNETES_SERVICE_PORT
|
||||
value: "443"
|
||||
{{ with .Networking.Cilium }}
|
||||
image: "docker.io/cilium/operator:{{ .Version }}"
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: cilium-operator
|
||||
{{ if .EnablePrometheusMetrics }}
|
||||
ports:
|
||||
- containerPort: 6942
|
||||
hostPort: 6942
|
||||
name: prometheus
|
||||
protocol: TCP
|
||||
{{ end }}
|
||||
resources:
|
||||
requests:
|
||||
cpu: {{ or .CPURequest "25m" }}
|
||||
memory: {{ or .MemoryRequest "128Mi" }}
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
host: "127.0.0.1"
|
||||
path: /healthz
|
||||
port: 9234
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 60
|
||||
periodSeconds: 10
|
||||
timeoutSeconds: 3
|
||||
{{- if .EtcdManaged }}
|
||||
volumeMounts:
|
||||
- mountPath: /var/lib/etcd-config
|
||||
name: etcd-config-path
|
||||
readOnly: true
|
||||
- mountPath: /var/lib/etcd-secrets
|
||||
name: etcd-secrets
|
||||
readOnly: true
|
||||
{{- end }}
|
||||
hostNetwork: true
|
||||
priorityClassName: system-cluster-critical
|
||||
restartPolicy: Always
|
||||
serviceAccount: cilium-operator
|
||||
serviceAccountName: cilium-operator
|
||||
{{- if .EtcdManaged }}
|
||||
volumes:
|
||||
# To read the etcd config stored in config maps
|
||||
- configMap:
|
||||
defaultMode: 420
|
||||
items:
|
||||
- key: etcd-config
|
||||
path: etcd.config
|
||||
name: cilium-config
|
||||
name: etcd-config-path
|
||||
# To read the k8s etcd secrets in case the user might want to use TLS
|
||||
- name: etcd-secrets
|
||||
hostPath:
|
||||
path: /etc/kubernetes/pki/cilium
|
||||
type: Directory
|
||||
{{- end }}
|
||||
|
||||
{{ if eq .Ipam "eni" }}
|
||||
nodeSelector:
|
||||
node-role.kubernetes.io/master: ""
|
||||
tolerations:
|
||||
- effect: NoSchedule
|
||||
key: node-role.kubernetes.io/master
|
||||
- effect: NoExecute
|
||||
key: node.kubernetes.io/not-ready
|
||||
operator: Exists
|
||||
tolerationSeconds: 300
|
||||
- effect: NoExecute
|
||||
key: node.kubernetes.io/unreachable
|
||||
operator: Exists
|
||||
tolerationSeconds: 300
|
||||
{{ end }}
|
||||
{{ end }}
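A note on the templating these manifests rely on: expressions such as cpu: {{ or .CPURequest "25m" }} use the `or` action built into Go's text/template package, which returns its first non-empty argument, so the literal acts as a default when the field is unset. A minimal sketch of that behavior, assuming a stand-in Spec struct rather than the real kops API types:

package main

import (
	"os"
	"text/template"
)

// Spec is a stand-in for the kops cluster spec fields referenced above;
// the real types live in the kops API packages.
type Spec struct {
	CPURequest    string
	MemoryRequest string
}

func main() {
	// "or" yields the first non-empty argument, so an unset CPURequest
	// falls back to the literal "25m", exactly like the manifest default.
	tmpl := template.Must(template.New("resources").Parse(
		"cpu: {{ or .CPURequest \"25m\" }}\nmemory: {{ or .MemoryRequest \"128Mi\" }}\n"))
	_ = tmpl.Execute(os.Stdout, Spec{MemoryRequest: "256Mi"}) // prints cpu: 25m, memory: 256Mi
}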
@@ -1,608 +0,0 @@
# Pulled and modified from: https://docs.projectcalico.org/v3.7/manifests/canal.yaml

---
# Source: calico/templates/calico-config.yaml
# This ConfigMap is used to configure a self-hosted Canal installation.
kind: ConfigMap
apiVersion: v1
metadata:
  name: canal-config
  namespace: kube-system
data:
  # Typha is disabled.
  typha_service_name: "none"
  # The interface used by canal for host <-> host communication.
  # If left blank, then the interface is chosen using the node's
  # default route.
  canal_iface: ""

  # Whether or not to masquerade traffic to destinations not within
  # the pod network.
  masquerade: "true"

  # MTU default is 1500, can be overridden
  veth_mtu: "{{- or .Networking.Canal.MTU "1500" }}"

  # The CNI network configuration to install on each node. The special
  # values in this config will be automatically populated.
  cni_network_config: |-
    {
      "name": "k8s-pod-network",
      "cniVersion": "0.3.0",
      "plugins": [
        {
          "type": "calico",
          "log_level": "info",
          "datastore_type": "kubernetes",
          "mtu": __CNI_MTU__,
          "nodename": "__KUBERNETES_NODE_NAME__",
          "ipam": {
            "type": "host-local",
            "subnet": "usePodCidr"
          },
          "policy": {
            "type": "k8s"
          },
          "kubernetes": {
            "kubeconfig": "__KUBECONFIG_FILEPATH__"
          }
        },
        {
          "type": "portmap",
          "snat": true,
          "capabilities": {"portMappings": true}
        }
      ]
    }

  # Flannel network configuration. Mounted into the flannel container.
  net-conf.json: |
    {
      "Network": "{{ .NonMasqueradeCIDR }}",
      "Backend": {
        "Type": "vxlan"
      }
    }

---

# Source: calico/templates/kdd-crds.yaml
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: felixconfigurations.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: FelixConfiguration
    plural: felixconfigurations
    singular: felixconfiguration
---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: bgpconfigurations.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: BGPConfiguration
    plural: bgpconfigurations
    singular: bgpconfiguration

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: ippools.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: IPPool
    plural: ippools
    singular: ippool

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: hostendpoints.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: HostEndpoint
    plural: hostendpoints
    singular: hostendpoint

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: clusterinformations.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: ClusterInformation
    plural: clusterinformations
    singular: clusterinformation

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: globalnetworkpolicies.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: GlobalNetworkPolicy
    plural: globalnetworkpolicies
    singular: globalnetworkpolicy

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: globalnetworksets.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: GlobalNetworkSet
    plural: globalnetworksets
    singular: globalnetworkset

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: networkpolicies.crd.projectcalico.org
spec:
  scope: Namespaced
  group: crd.projectcalico.org
  version: v1
  names:
    kind: NetworkPolicy
    plural: networkpolicies
    singular: networkpolicy

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: networksets.crd.projectcalico.org
spec:
  scope: Namespaced
  group: crd.projectcalico.org
  version: v1
  names:
    kind: NetworkSet
    plural: networksets
    singular: networkset

---

# Include a clusterrole for the calico-node DaemonSet,
# and bind it to the canal serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico
rules:
  # The CNI plugin needs to get pods, nodes, and namespaces.
  - apiGroups: [""]
    resources:
      - pods
      - nodes
      - namespaces
    verbs:
      - get
  - apiGroups: [""]
    resources:
      - endpoints
      - services
    verbs:
      # Used to discover service IPs for advertisement.
      - watch
      - list
      # Used to discover Typhas.
      - get
  - apiGroups: [""]
    resources:
      - nodes/status
    verbs:
      # Needed for clearing NodeNetworkUnavailable flag.
      - patch
      # Calico stores some configuration information in node annotations.
      - update
  # Watch for changes to Kubernetes NetworkPolicies.
  - apiGroups: ["networking.k8s.io"]
    resources:
      - networkpolicies
    verbs:
      - watch
      - list
  # Used by Calico for policy information.
  - apiGroups: [""]
    resources:
      - pods
      - namespaces
      - serviceaccounts
    verbs:
      - list
      - watch
  # The CNI plugin patches pods/status.
  - apiGroups: [""]
    resources:
      - pods/status
    verbs:
      - patch
  # Calico monitors various CRDs for config.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - globalfelixconfigs
      - felixconfigurations
      - bgppeers
      - globalbgpconfigs
      - bgpconfigurations
      - ippools
      - ipamblocks
      - globalnetworkpolicies
      - globalnetworksets
      - networkpolicies
      - networksets
      - clusterinformations
      - hostendpoints
    verbs:
      - get
      - list
      - watch
  # Calico must create and update some CRDs on startup.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - ippools
      - felixconfigurations
      - clusterinformations
    verbs:
      - create
      - update
  # Calico stores some configuration information on the node.
  - apiGroups: [""]
    resources:
      - nodes
    verbs:
      - get
      - list
      - watch
  # These permissions are only required for upgrade from v2.6, and can
  # be removed after upgrade or on fresh installations.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - bgpconfigurations
      - bgppeers
    verbs:
      - create
      - update
---
# Flannel ClusterRole
# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/k8s-manifests/kube-flannel-rbac.yml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
  - apiGroups: [""]
    resources:
      - pods
    verbs:
      - get
  - apiGroups: [""]
    resources:
      - nodes
    verbs:
      - list
      - watch
  - apiGroups: [""]
    resources:
      - nodes/status
    verbs:
      - patch
---
# Bind the flannel ClusterRole to the canal ServiceAccount.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: canal-flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: canal
  namespace: kube-system
---
# Bind the Calico ClusterRole to the canal ServiceAccount.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: canal-calico
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico
subjects:
- kind: ServiceAccount
  name: canal
  namespace: kube-system

---

# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: canal
  namespace: kube-system
  labels:
    k8s-app: canal
spec:
  selector:
    matchLabels:
      k8s-app: canal
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  template:
    metadata:
      labels:
        k8s-app: canal
      annotations:
        # This, along with the CriticalAddonsOnly toleration below,
        # marks the pod as a critical add-on, ensuring it gets
        # priority scheduling and that its resources are reserved
        # if it ever gets evicted.
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      priorityClassName: system-node-critical
      nodeSelector:
        beta.kubernetes.io/os: linux
      hostNetwork: true
      tolerations:
        # Make sure canal gets scheduled on all nodes.
        - effect: NoSchedule
          operator: Exists
        # Mark the pod as a critical add-on for rescheduling.
        - key: CriticalAddonsOnly
          operator: Exists
        - effect: NoExecute
          operator: Exists
      serviceAccountName: canal
      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
      # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
      terminationGracePeriodSeconds: 0
      initContainers:
        # This container installs the Calico CNI binaries
        # and CNI network config file on each node.
        - name: install-cni
          image: calico/cni:v3.7.5
          command: ["/install-cni.sh"]
          env:
            # Name of the CNI config file to create.
            - name: CNI_CONF_NAME
              value: "10-canal.conflist"
            # CNI MTU Config variable
            - name: CNI_MTU
              valueFrom:
                configMapKeyRef:
                  name: canal-config
                  key: veth_mtu
            # The CNI network config to install on each node.
            - name: CNI_NETWORK_CONFIG
              valueFrom:
                configMapKeyRef:
                  name: canal-config
                  key: cni_network_config
            # Set the hostname based on the k8s node name.
            - name: KUBERNETES_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # Prevents the container from sleeping forever.
            - name: SLEEP
              value: "false"
          volumeMounts:
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
            - mountPath: /host/etc/cni/net.d
              name: cni-net-dir
      containers:
        # Runs calico/node container on each Kubernetes node. This
        # container programs network policy and routes on each
        # host.
        - name: calico-node
          image: calico/node:v3.7.5
          env:
            # Use Kubernetes API as the backing datastore.
            - name: DATASTORE_TYPE
              value: "kubernetes"
            # Configure route aggregation based on pod CIDR.
            - name: USE_POD_CIDR
              value: "true"
            # Wait for the datastore.
            - name: WAIT_FOR_DATASTORE
              value: "true"
            # Set based on the k8s node name.
            - name: NODENAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # Don't enable BGP.
            - name: CALICO_NETWORKING_BACKEND
              value: "none"
            # Cluster type to identify the deployment type
            - name: CLUSTER_TYPE
              value: "k8s,canal"
            # Period, in seconds, at which felix re-applies all iptables state
            - name: FELIX_IPTABLESREFRESHINTERVAL
              value: "60"
            # No IP address needed.
            - name: IP
              value: ""
            # Disable file logging so `kubectl logs` works.
            - name: CALICO_DISABLE_FILE_LOGGING
              value: "true"
            - name: FELIX_IPINIPMTU
              valueFrom:
                configMapKeyRef:
                  name: canal-config
                  key: veth_mtu
            # Disable IPv6 on Kubernetes.
            - name: FELIX_IPV6SUPPORT
              value: "false"
            # Set Felix logging to "INFO"
            - name: FELIX_LOGSEVERITYSCREEN
              value: "{{- or .Networking.Canal.LogSeveritySys "INFO" }}"
            # Set Felix endpoint to host default action to ACCEPT.
            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
              value: "{{- or .Networking.Canal.DefaultEndpointToHostAction "ACCEPT" }}"
            # Controls whether Felix inserts rules to the top of iptables chains, or appends to the bottom
            - name: FELIX_CHAININSERTMODE
              value: "{{- or .Networking.Canal.ChainInsertMode "insert" }}"
            # Set to enable the experimental Prometheus metrics server
            - name: FELIX_PROMETHEUSMETRICSENABLED
              value: "{{- or .Networking.Canal.PrometheusMetricsEnabled "false" }}"
            # TCP port that the Prometheus metrics server should bind to
            - name: FELIX_PROMETHEUSMETRICSPORT
              value: "{{- or .Networking.Canal.PrometheusMetricsPort "9091" }}"
            # Enable Prometheus Go runtime metrics collection
            - name: FELIX_PROMETHEUSGOMETRICSENABLED
              value: "{{- or .Networking.Canal.PrometheusGoMetricsEnabled "true" }}"
            # Enable Prometheus process metrics collection
            - name: FELIX_PROMETHEUSPROCESSMETRICSENABLED
              value: "{{- or .Networking.Canal.PrometheusProcessMetricsEnabled "true" }}"
            - name: FELIX_HEALTHENABLED
              value: "true"
          securityContext:
            privileged: true
          resources:
            requests:
              cpu: {{ or .Networking.Canal.CPURequest "100m" }}
          livenessProbe:
            httpGet:
              path: /liveness
              port: 9099
              host: localhost
            periodSeconds: 10
            initialDelaySeconds: 10
            failureThreshold: 6
          readinessProbe:
            httpGet:
              path: /readiness
              port: 9099
              host: localhost
            periodSeconds: 10
          volumeMounts:
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - mountPath: /run/xtables.lock
              name: xtables-lock
              readOnly: false
            - mountPath: /var/run/calico
              name: var-run-calico
              readOnly: false
            - mountPath: /var/lib/calico
              name: var-lib-calico
              readOnly: false
        # This container runs flannel using the kube-subnet-mgr backend
        # for allocating subnets.
        - name: kube-flannel
          image: quay.io/coreos/flannel:v0.11.0
          command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
          securityContext:
            privileged: true
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: FLANNELD_IFACE
              valueFrom:
                configMapKeyRef:
                  name: canal-config
                  key: canal_iface
            - name: FLANNELD_IP_MASQ
              valueFrom:
                configMapKeyRef:
                  name: canal-config
                  key: masquerade
{{- if eq .Networking.Canal.DisableFlannelForwardRules true }}
            - name: FLANNELD_IPTABLES_FORWARD_RULES
              value: "false"
{{- end }}
          volumeMounts:
            - mountPath: /run/xtables.lock
              name: xtables-lock
              readOnly: false
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
      volumes:
        # Used by calico/node.
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: var-run-calico
          hostPath:
            path: /var/run/calico
        - name: var-lib-calico
          hostPath:
            path: /var/lib/calico
        - name: xtables-lock
          hostPath:
            path: /run/xtables.lock
            type: FileOrCreate
        # Used by flannel.
        - name: flannel-cfg
          configMap:
            name: canal-config
        # Used to install CNI.
        - name: cni-bin-dir
          hostPath:
            path: /opt/cni/bin
        - name: cni-net-dir
          hostPath:
            path: /etc/cni/net.d
---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: canal
  namespace: kube-system
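For context on the __CNI_MTU__, __KUBERNETES_NODE_NAME__, and __KUBECONFIG_FILEPATH__ placeholders in cni_network_config above: the install-cni.sh init container substitutes them from its environment before writing the conflist into /etc/cni/net.d. The real substitution happens in that shell script; the Go sketch below only illustrates the step, and the kubeconfig path used here is an assumption:

package main

import (
	"fmt"
	"strings"
)

// renderCNIConfig mimics the placeholder substitution that install-cni.sh
// performs on the CNI_NETWORK_CONFIG value it receives from the ConfigMap.
// It is illustrative only; the real logic lives in the calico/cni image.
func renderCNIConfig(tmpl string, env map[string]string) string {
	r := strings.NewReplacer(
		"__CNI_MTU__", env["CNI_MTU"],
		"__KUBERNETES_NODE_NAME__", env["KUBERNETES_NODE_NAME"],
		"__KUBECONFIG_FILEPATH__", "/etc/cni/net.d/calico-kubeconfig", // assumed path
	)
	return r.Replace(tmpl)
}

func main() {
	tmpl := `{"type": "calico", "mtu": __CNI_MTU__, "nodename": "__KUBERNETES_NODE_NAME__"}`
	out := renderCNIConfig(tmpl, map[string]string{
		"CNI_MTU":              "1440",
		"KUBERNETES_NODE_NAME": "node-1",
	})
	fmt.Println(out)
}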
@@ -1,864 +0,0 @@
# Pulled and modified from: https://docs.projectcalico.org/v3.12/manifests/canal.yaml

---
# Source: calico/templates/calico-config.yaml
# This ConfigMap is used to configure a self-hosted Canal installation.
kind: ConfigMap
apiVersion: v1
metadata:
  name: canal-config
  namespace: kube-system
data:
  # Typha is disabled.
  typha_service_name: "{{ if .Networking.Canal.TyphaReplicas }}calico-typha{{ else }}none{{ end }}"
  # The interface used by canal for host <-> host communication.
  # If left blank, then the interface is chosen using the node's
  # default route.
  canal_iface: ""

  # Whether or not to masquerade traffic to destinations not within
  # the pod network.
  masquerade: "true"

  # Configure the MTU to use
{{- if .Networking.Canal.MTU }}
  veth_mtu: "{{ .Networking.Canal.MTU }}"
{{- else }}
  veth_mtu: "{{- if eq .CloudProvider "openstack" -}}1430{{- else -}}1440{{- end -}}"
{{- end }}

  # The CNI network configuration to install on each node. The special
  # values in this config will be automatically populated.
  cni_network_config: |-
    {
      "name": "k8s-pod-network",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "calico",
          "log_level": "info",
          "datastore_type": "kubernetes",
          "nodename": "__KUBERNETES_NODE_NAME__",
          "mtu": __CNI_MTU__,
          "ipam": {
            "type": "host-local",
            "subnet": "usePodCidr"
          },
          "policy": {
            "type": "k8s"
          },
          "kubernetes": {
            "kubeconfig": "__KUBECONFIG_FILEPATH__"
          }
        },
        {
          "type": "portmap",
          "snat": true,
          "capabilities": {"portMappings": true}
        },
        {
          "type": "bandwidth",
          "capabilities": {"bandwidth": true}
        }
      ]
    }

  # Flannel network configuration. Mounted into the flannel container.
  net-conf.json: |
    {
      "Network": "{{ .NonMasqueradeCIDR }}",
      "Backend": {
        "Type": "vxlan"
      }
    }

---
# Source: calico/templates/kdd-crds.yaml
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: felixconfigurations.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: FelixConfiguration
    plural: felixconfigurations
    singular: felixconfiguration
---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: ipamblocks.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: IPAMBlock
    plural: ipamblocks
    singular: ipamblock

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: blockaffinities.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: BlockAffinity
    plural: blockaffinities
    singular: blockaffinity

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: ipamhandles.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: IPAMHandle
    plural: ipamhandles
    singular: ipamhandle

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: ipamconfigs.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: IPAMConfig
    plural: ipamconfigs
    singular: ipamconfig

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: bgppeers.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: BGPPeer
    plural: bgppeers
    singular: bgppeer

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: bgpconfigurations.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: BGPConfiguration
    plural: bgpconfigurations
    singular: bgpconfiguration

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: ippools.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: IPPool
    plural: ippools
    singular: ippool

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: hostendpoints.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: HostEndpoint
    plural: hostendpoints
    singular: hostendpoint

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: clusterinformations.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: ClusterInformation
    plural: clusterinformations
    singular: clusterinformation

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: globalnetworkpolicies.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: GlobalNetworkPolicy
    plural: globalnetworkpolicies
    singular: globalnetworkpolicy

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: globalnetworksets.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: GlobalNetworkSet
    plural: globalnetworksets
    singular: globalnetworkset

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: networkpolicies.crd.projectcalico.org
spec:
  scope: Namespaced
  group: crd.projectcalico.org
  version: v1
  names:
    kind: NetworkPolicy
    plural: networkpolicies
    singular: networkpolicy

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: networksets.crd.projectcalico.org
spec:
  scope: Namespaced
  group: crd.projectcalico.org
  version: v1
  names:
    kind: NetworkSet
    plural: networksets
    singular: networkset
---
# Source: calico/templates/rbac.yaml

# Include a clusterrole for the calico-node DaemonSet,
# and bind it to the calico-node serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico
rules:
  # The CNI plugin needs to get pods, nodes, and namespaces.
  - apiGroups: [""]
    resources:
      - pods
      - nodes
      - namespaces
    verbs:
      - get
  - apiGroups: [""]
    resources:
      - endpoints
      - services
    verbs:
      # Used to discover service IPs for advertisement.
      - watch
      - list
      # Used to discover Typhas.
      - get
  - apiGroups: [""]
    resources:
      - nodes/status
    verbs:
      # Needed for clearing NodeNetworkUnavailable flag.
      - patch
      # Calico stores some configuration information in node annotations.
      - update
  # Watch for changes to Kubernetes NetworkPolicies.
  - apiGroups: ["networking.k8s.io"]
    resources:
      - networkpolicies
    verbs:
      - watch
      - list
  # Used by Calico for policy information.
  - apiGroups: [""]
    resources:
      - pods
      - namespaces
      - serviceaccounts
    verbs:
      - list
      - watch
  # The CNI plugin patches pods/status.
  - apiGroups: [""]
    resources:
      - pods/status
    verbs:
      - patch
  # Calico monitors various CRDs for config.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - globalfelixconfigs
      - felixconfigurations
      - bgppeers
      - globalbgpconfigs
      - bgpconfigurations
      - ippools
      - ipamblocks
      - globalnetworkpolicies
      - globalnetworksets
      - networkpolicies
      - networksets
      - clusterinformations
      - hostendpoints
      - blockaffinities
    verbs:
      - get
      - list
      - watch
  # Calico must create and update some CRDs on startup.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - ippools
      - felixconfigurations
      - clusterinformations
    verbs:
      - create
      - update
  # Calico stores some configuration information on the node.
  - apiGroups: [""]
    resources:
      - nodes
    verbs:
      - get
      - list
      - watch
  # These permissions are only required for upgrade from v2.6, and can
  # be removed after upgrade or on fresh installations.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - bgpconfigurations
      - bgppeers
    verbs:
      - create
      - update
---
# Flannel ClusterRole
# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
  - apiGroups: [""]
    resources:
      - pods
    verbs:
      - get
  - apiGroups: [""]
    resources:
      - nodes
    verbs:
      - list
      - watch
  - apiGroups: [""]
    resources:
      - nodes/status
    verbs:
      - patch
---
# Bind the flannel ClusterRole to the canal ServiceAccount.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: canal-flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: canal
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: canal-calico
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico
subjects:
- kind: ServiceAccount
  name: canal
  namespace: kube-system

{{ if .Networking.Canal.TyphaReplicas -}}
---
# Source: calico/templates/calico-typha.yaml
# This manifest creates a Service, which will be backed by Calico's Typha daemon.
# Typha sits in between Felix and the API server, reducing Calico's load on the API server.

apiVersion: v1
kind: Service
metadata:
  name: calico-typha
  namespace: kube-system
  labels:
    k8s-app: calico-typha
spec:
  ports:
    - port: 5473
      protocol: TCP
      targetPort: calico-typha
      name: calico-typha
  selector:
    k8s-app: calico-typha

---

# This manifest creates a Deployment of Typha to back the above service.

apiVersion: apps/v1
kind: Deployment
metadata:
  name: calico-typha
  namespace: kube-system
  labels:
    k8s-app: calico-typha
spec:
  # Number of Typha replicas. To enable Typha, set this to a non-zero value *and* set the
  # typha_service_name variable in the canal-config ConfigMap above.
  #
  # We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is essential
  # (when using the Kubernetes datastore). Use one replica for every 100-200 nodes. In
  # production, we recommend running at least 3 replicas to reduce the impact of rolling upgrade.
  replicas: {{ or .Networking.Canal.TyphaReplicas 0 }}
  revisionHistoryLimit: 2
  selector:
    matchLabels:
      k8s-app: calico-typha
  template:
    metadata:
      labels:
        k8s-app: calico-typha
      annotations:
        # This, along with the CriticalAddonsOnly toleration below, marks the pod as a critical
        # add-on, ensuring it gets priority scheduling and that its resources are reserved
        # if it ever gets evicted.
        scheduler.alpha.kubernetes.io/critical-pod: ''
        cluster-autoscaler.kubernetes.io/safe-to-evict: 'true'
    spec:
      nodeSelector:
        kubernetes.io/os: linux
        kubernetes.io/role: master
      hostNetwork: true
      tolerations:
        # Mark the pod as a critical add-on for rescheduling.
        - key: CriticalAddonsOnly
          operator: Exists
        - key: "node-role.kubernetes.io/master"
          effect: NoSchedule
      # Since Calico can't network a pod until Typha is up, we need to run Typha itself
      # as a host-networked pod.
      serviceAccountName: canal
      priorityClassName: system-cluster-critical
      # fsGroup allows using projected serviceaccount tokens as described here kubernetes/kubernetes#82573
      securityContext:
        fsGroup: 65534
      containers:
      - image: calico/typha:v3.12.2
        name: calico-typha
        ports:
        - containerPort: 5473
          name: calico-typha
          protocol: TCP
        env:
          # Enable "info" logging by default. Can be set to "debug" to increase verbosity.
          - name: TYPHA_LOGSEVERITYSCREEN
            value: "info"
          # Disable logging to file and syslog since those don't make sense in Kubernetes.
          - name: TYPHA_LOGFILEPATH
            value: "none"
          - name: TYPHA_LOGSEVERITYSYS
            value: "none"
          # Monitor the Kubernetes API to find the number of running instances and rebalance
          # connections.
          - name: TYPHA_CONNECTIONREBALANCINGMODE
            value: "kubernetes"
          - name: TYPHA_DATASTORETYPE
            value: "kubernetes"
          - name: TYPHA_HEALTHENABLED
            value: "true"
          - name: TYPHA_PROMETHEUSMETRICSENABLED
            value: "{{- or .Networking.Canal.TyphaPrometheusMetricsEnabled "false" }}"
          - name: TYPHA_PROMETHEUSMETRICSPORT
            value: "{{- or .Networking.Canal.TyphaPrometheusMetricsPort "9093" }}"
        livenessProbe:
          httpGet:
            path: /liveness
            port: 9098
            host: localhost
          periodSeconds: 30
          initialDelaySeconds: 30
        securityContext:
          runAsNonRoot: true
          allowPrivilegeEscalation: false
        readinessProbe:
          httpGet:
            path: /readiness
            port: 9098
            host: localhost
          periodSeconds: 10

---

# This manifest creates a Pod Disruption Budget for Typha to allow K8s Cluster Autoscaler to evict

apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: calico-typha
  namespace: kube-system
  labels:
    k8s-app: calico-typha
spec:
  maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: calico-typha
{{- end }}

---
# Source: calico/templates/calico-node.yaml
# This manifest installs the canal container, as well
# as the CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: canal
  namespace: kube-system
  labels:
    k8s-app: canal
spec:
  selector:
    matchLabels:
      k8s-app: canal
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  template:
    metadata:
      labels:
        k8s-app: canal
      annotations:
        # This, along with the CriticalAddonsOnly toleration below,
        # marks the pod as a critical add-on, ensuring it gets
        # priority scheduling and that its resources are reserved
        # if it ever gets evicted.
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      nodeSelector:
        kubernetes.io/os: linux
      hostNetwork: true
      tolerations:
        # Make sure canal gets scheduled on all nodes.
        - effect: NoSchedule
          operator: Exists
        # Mark the pod as a critical add-on for rescheduling.
        - key: CriticalAddonsOnly
          operator: Exists
        - effect: NoExecute
          operator: Exists
      serviceAccountName: canal
      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
      # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
      terminationGracePeriodSeconds: 0
      priorityClassName: system-node-critical
      initContainers:
        # This container installs the CNI binaries
        # and CNI network config file on each node.
        - name: install-cni
          image: calico/cni:v3.12.2
          command: ["/install-cni.sh"]
          env:
            # Name of the CNI config file to create.
            - name: CNI_CONF_NAME
              value: "10-canal.conflist"
            # The CNI network config to install on each node.
            - name: CNI_NETWORK_CONFIG
              valueFrom:
                configMapKeyRef:
                  name: canal-config
                  key: cni_network_config
            # Set the hostname based on the k8s node name.
            - name: KUBERNETES_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # CNI MTU Config variable
            - name: CNI_MTU
              valueFrom:
                configMapKeyRef:
                  name: canal-config
                  key: veth_mtu
            # Prevents the container from sleeping forever.
            - name: SLEEP
              value: "false"
          volumeMounts:
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
            - mountPath: /host/etc/cni/net.d
              name: cni-net-dir
          securityContext:
            privileged: true
        # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
        # to communicate with Felix over the Policy Sync API.
        - name: flexvol-driver
          image: calico/pod2daemon-flexvol:v3.12.2
          volumeMounts:
          - name: flexvol-driver-host
            mountPath: /host/driver
          securityContext:
            privileged: true
      containers:
        # Runs canal container on each Kubernetes node. This
        # container programs network policy and routes on each
        # host.
        - name: calico-node
          image: calico/node:v3.12.2
          env:
            # Use Kubernetes API as the backing datastore.
            - name: DATASTORE_TYPE
              value: "kubernetes"
            # Configure route aggregation based on pod CIDR.
            - name: USE_POD_CIDR
              value: "true"
{{- if .Networking.Canal.TyphaReplicas }}
            # Typha support: controlled by the ConfigMap.
            - name: FELIX_TYPHAK8SSERVICENAME
              valueFrom:
                configMapKeyRef:
                  name: canal-config
                  key: typha_service_name
{{- end }}
            # Wait for the datastore.
            - name: WAIT_FOR_DATASTORE
              value: "true"
            # Set based on the k8s node name.
            - name: NODENAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # Don't enable BGP.
            - name: CALICO_NETWORKING_BACKEND
              value: "none"
            # Cluster type to identify the deployment type
            - name: CLUSTER_TYPE
              # was value: "k8s,bgp"
              value: "k8s,canal"
            # Period, in seconds, at which felix re-applies all iptables state
            - name: FELIX_IPTABLESREFRESHINTERVAL
              value: "60"
            # No IP address needed.
            - name: IP
              value: ""
            # Set MTU for tunnel device used if ipip is enabled
            - name: FELIX_IPINIPMTU
              valueFrom:
                configMapKeyRef:
                  name: canal-config
                  key: veth_mtu
            # Disable file logging so `kubectl logs` works.
            - name: CALICO_DISABLE_FILE_LOGGING
              value: "true"
            # Set Felix endpoint to host default action to ACCEPT.
            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
              value: "{{- or .Networking.Canal.DefaultEndpointToHostAction "ACCEPT" }}"
            # Disable IPv6 on Kubernetes.
            - name: FELIX_IPV6SUPPORT
              value: "false"
            # Set Felix logging to "INFO"
            - name: FELIX_LOGSEVERITYSCREEN
              value: "{{- or .Networking.Canal.LogSeveritySys "INFO" }}"
            - name: FELIX_HEALTHENABLED
              value: "true"

            # kops additions
            # Controls whether Felix inserts rules to the top of iptables chains, or appends to the bottom
            - name: FELIX_CHAININSERTMODE
              value: "{{- or .Networking.Canal.ChainInsertMode "insert" }}"
            # Set Felix iptables binary variant, Legacy or NFT
            - name: FELIX_IPTABLESBACKEND
              value: "{{- or .Networking.Canal.IptablesBackend "Auto" }}"
            # Set to enable the experimental Prometheus metrics server
            - name: FELIX_PROMETHEUSMETRICSENABLED
              value: "{{- or .Networking.Canal.PrometheusMetricsEnabled "false" }}"
            # TCP port that the Prometheus metrics server should bind to
            - name: FELIX_PROMETHEUSMETRICSPORT
              value: "{{- or .Networking.Canal.PrometheusMetricsPort "9091" }}"
            # Enable Prometheus Go runtime metrics collection
            - name: FELIX_PROMETHEUSGOMETRICSENABLED
              value: "{{- or .Networking.Canal.PrometheusGoMetricsEnabled "true" }}"
            # Enable Prometheus process metrics collection
            - name: FELIX_PROMETHEUSPROCESSMETRICSENABLED
              value: "{{- or .Networking.Canal.PrometheusProcessMetricsEnabled "true" }}"
          securityContext:
            privileged: true
          resources:
            requests:
              cpu: {{ or .Networking.Canal.CPURequest "100m" }}
          livenessProbe:
            exec:
              command:
              - /bin/calico-node
              - -felix-live
            periodSeconds: 10
            initialDelaySeconds: 10
            failureThreshold: 6
          readinessProbe:
            httpGet:
              path: /readiness
              port: 9099
              host: localhost
            periodSeconds: 10
          volumeMounts:
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - mountPath: /run/xtables.lock
              name: xtables-lock
              readOnly: false
            - mountPath: /var/run/calico
              name: var-run-calico
              readOnly: false
            - mountPath: /var/lib/calico
              name: var-lib-calico
              readOnly: false
            - name: policysync
              mountPath: /var/run/nodeagent
        # This container runs flannel using the kube-subnet-mgr backend
        # for allocating subnets.
        - name: kube-flannel
          image: quay.io/coreos/flannel:v0.11.0
          command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
          securityContext:
            privileged: true
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: FLANNELD_IFACE
              valueFrom:
                configMapKeyRef:
                  name: canal-config
                  key: canal_iface
            - name: FLANNELD_IP_MASQ
              valueFrom:
                configMapKeyRef:
                  name: canal-config
                  key: masquerade
{{- if eq .Networking.Canal.DisableFlannelForwardRules true }}
            - name: FLANNELD_IPTABLES_FORWARD_RULES
              value: "false"
{{- end }}
          volumeMounts:
            - mountPath: /run/xtables.lock
              name: xtables-lock
              readOnly: false
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
      volumes:
        # Used by canal.
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: var-run-calico
          hostPath:
            path: /var/run/calico
        - name: var-lib-calico
          hostPath:
            path: /var/lib/calico
        - name: xtables-lock
          hostPath:
            path: /run/xtables.lock
            type: FileOrCreate
        # Used by flannel.
        - name: flannel-cfg
          configMap:
            name: canal-config
        # Used to install CNI.
        - name: cni-bin-dir
          hostPath:
            path: /opt/cni/bin
        - name: cni-net-dir
          hostPath:
            path: /etc/cni/net.d
        # Used to create per-pod Unix Domain Sockets
        - name: policysync
          hostPath:
            type: DirectoryOrCreate
            path: /var/run/nodeagent
        # Used to install Flex Volume Driver
        - name: flexvol-driver-host
          hostPath:
            type: DirectoryOrCreate
            path: "{{- or .Kubelet.VolumePluginDirectory "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/" }}nodeagent~uds"
---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: canal
  namespace: kube-system
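The Typha sizing comment in the Deployment above (worthwhile past ~50 nodes, one replica per 100-200 nodes, at least 3 in production) can be expressed as a rule of thumb. The helper below is purely illustrative; kops itself simply renders whatever .Networking.Canal.TyphaReplicas the user sets:

package main

import "fmt"

// suggestedTyphaReplicas encodes the guidance from the manifest comment.
// It is a sketch, not part of kops: the operator still has to set
// TyphaReplicas explicitly in the cluster spec.
func suggestedTyphaReplicas(nodes int, production bool) int {
	if nodes <= 50 {
		return 0 // Typha is optional below ~50 nodes
	}
	replicas := (nodes + 199) / 200 // one replica per up-to-200 nodes
	if production && replicas < 3 {
		replicas = 3 // reduce the impact of rolling upgrades
	}
	return replicas
}

func main() {
	for _, n := range []int{30, 120, 600} {
		fmt.Printf("%d nodes -> %d replicas\n", n, suggestedTyphaReplicas(n, true))
	}
}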
File diff suppressed because it is too large
@@ -1,24 +0,0 @@
{{ if WithDefaultBool .CloudConfig.ManageStorageClasses true }}
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: default
  labels:
    k8s-addon: storage-aws.addons.k8s.io
provisioner: kubernetes.io/aws-ebs
parameters:
  type: gp2

---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: gp2
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
  labels:
    k8s-addon: storage-aws.addons.k8s.io
provisioner: kubernetes.io/aws-ebs
parameters:
  type: gp2
{{ end }}
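The WithDefaultBool guard around the storage classes above is a template function that kops supplies when rendering these manifests. The sketch below shows the nil-pointer defaulting semantics the template depends on, wired into text/template via a FuncMap; the implementation details are an approximation, not the actual kops source:

package main

import (
	"os"
	"text/template"
)

// withDefaultBool returns def when the optional *bool field is unset,
// otherwise the field's value. This mirrors how the manifest falls back
// to managing storage classes unless the user opts out.
func withDefaultBool(v *bool, def bool) bool {
	if v == nil {
		return def
	}
	return *v
}

func main() {
	funcs := template.FuncMap{"WithDefaultBool": withDefaultBool}
	t := template.Must(template.New("sc").Funcs(funcs).Parse(
		`{{ if WithDefaultBool .ManageStorageClasses true }}manage{{ else }}skip{{ end }}` + "\n"))
	// ManageStorageClasses left nil, so the default of true applies.
	_ = t.Execute(os.Stdout, struct{ ManageStorageClasses *bool }{}) // prints "manage"
}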
@@ -222,7 +222,6 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann
                Version:            fi.String(version),
                Selector:           map[string]string{"k8s-addon": key},
                Manifest:           fi.String(location),
                KubernetesVersion:  ">=1.16.0-alpha.0",
                NeedsRollingUpdate: "control-plane",
                Id:                 id,
            })
@@ -472,12 +471,11 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann
        id := "k8s-1.15"

        addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
            Name:              fi.String(key),
            Version:           fi.String(version),
            Selector:          map[string]string{"k8s-addon": key},
            Manifest:          fi.String(location),
            KubernetesVersion: ">=1.15.0",
            Id:                id,
            Name:     fi.String(key),
            Version:  fi.String(version),
            Selector: map[string]string{"k8s-addon": key},
            Manifest: fi.String(location),
            Id:       id,
        })
    }
}
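The KubernetesVersion strings being dropped throughout these hunks are semver range constraints; an addon with no constraint applies to every cluster version, which is the point of the commit now that only newer Kubernetes releases are supported. A hedged illustration of the range semantics using the blang/semver library (the channels code does its own matching; this only demonstrates the semantics of removing a constraint):

package main

import (
	"fmt"

	"github.com/blang/semver"
)

// applies reports whether an addon with the given KubernetesVersion
// constraint would be selected for a cluster at the given version.
// An empty constraint matches everything, which is the effect of the
// deletions in this commit.
func applies(constraint, version string) bool {
	if constraint == "" {
		return true
	}
	rng, err := semver.ParseRange(constraint)
	if err != nil {
		return false
	}
	v, err := semver.Parse(version)
	if err != nil {
		return false
	}
	return rng(v)
}

func main() {
	fmt.Println(applies(">=1.15.0 <1.16.0", "1.15.3")) // true
	fmt.Println(applies(">=1.15.0 <1.16.0", "1.21.0")) // false
	fmt.Println(applies("", "1.21.0"))                 // true: no constraint
}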
@@ -591,26 +589,11 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann
        location := key + "/" + id + ".yaml"

        addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
            Name:              fi.String(key),
            Version:           fi.String(version),
            Selector:          map[string]string{"k8s-addon": key},
            Manifest:          fi.String(location),
            KubernetesVersion: ">=1.15.0",
            Id:                id,
        })
    }

    {
        id := "v1.7.0"
        location := key + "/" + id + ".yaml"

        addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
            Name:              fi.String(key),
            Version:           fi.String(version),
            Selector:          map[string]string{"k8s-addon": key},
            Manifest:          fi.String(location),
            KubernetesVersion: "<1.15.0",
            Id:                id,
            Name:     fi.String(key),
            Version:  fi.String(version),
            Selector: map[string]string{"k8s-addon": key},
            Manifest: fi.String(location),
            Id:       id,
        })
    }
}
@@ -674,12 +657,11 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann
        version := "1.0.74"

        addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
            Name:              fi.String(key),
            Version:           fi.String(version),
            Selector:          map[string]string{"k8s-addon": key},
            Manifest:          fi.String(location),
            KubernetesVersion: ">=1.14.0",
            Id:                id,
            Name:     fi.String(key),
            Version:  fi.String(version),
            Selector: map[string]string{"k8s-addon": key},
            Manifest: fi.String(location),
            Id:       id,
        })
    }
}
@@ -769,31 +751,16 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann
"k8s-1.16": "3.19.0-kops.1",
|
||||
}
|
||||
|
||||
{
|
||||
id := "k8s-1.12"
|
||||
location := key + "/" + id + ".yaml"
|
||||
|
||||
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
|
||||
Name: fi.String(key),
|
||||
Version: fi.String(versions[id]),
|
||||
Selector: networkingSelector(),
|
||||
Manifest: fi.String(location),
|
||||
KubernetesVersion: "<1.16.0",
|
||||
Id: id,
|
||||
})
|
||||
}
|
||||
|
||||
{
|
||||
id := "k8s-1.16"
|
||||
location := key + "/" + id + ".yaml"
|
||||
|
||||
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
|
||||
Name: fi.String(key),
|
||||
Version: fi.String(versions[id]),
|
||||
Selector: networkingSelector(),
|
||||
Manifest: fi.String(location),
|
||||
KubernetesVersion: ">=1.16.0",
|
||||
Id: id,
|
||||
Name: fi.String(key),
|
||||
Version: fi.String(versions[id]),
|
||||
Selector: networkingSelector(),
|
||||
Manifest: fi.String(location),
|
||||
Id: id,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
@@ -801,47 +768,18 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann
     if b.Cluster.Spec.Networking.Canal != nil {
         key := "networking.projectcalico.org.canal"
         versions := map[string]string{
-            "k8s-1.12": "3.7.5-kops.2",
-            "k8s-1.15": "3.12.2-kops.1",
             "k8s-1.16": "3.13.4-kops.2",
         }
-        {
-            id := "k8s-1.12"
-            location := key + "/" + id + ".yaml"
-
-            addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
-                Name:              fi.String(key),
-                Version:           fi.String(versions[id]),
-                Selector:          networkingSelector(),
-                Manifest:          fi.String(location),
-                KubernetesVersion: "<1.15.0",
-                Id:                id,
-            })
-        }
-        {
-            id := "k8s-1.15"
-            location := key + "/" + id + ".yaml"
-
-            addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
-                Name:              fi.String(key),
-                Version:           fi.String(versions[id]),
-                Selector:          networkingSelector(),
-                Manifest:          fi.String(location),
-                KubernetesVersion: ">=1.15.0 <1.16.0",
-                Id:                id,
-            })
-        }
         {
             id := "k8s-1.16"
             location := key + "/" + id + ".yaml"

             addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
-                Name:              fi.String(key),
-                Version:           fi.String(versions[id]),
-                Selector:          networkingSelector(),
-                Manifest:          fi.String(location),
-                KubernetesVersion: ">=1.16.0",
-                Id:                id,
+                Name:     fi.String(key),
+                Version:  fi.String(versions[id]),
+                Selector: networkingSelector(),
+                Manifest: fi.String(location),
+                Id:       id,
             })
         }
     }

@@ -870,35 +808,19 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann
         key := "networking.amazon-vpc-routed-eni"

         versions := map[string]string{
-            "k8s-1.12": "1.5.5-kops.1",
             "k8s-1.16": "1.7.8-kops.1",
         }

-        {
-            id := "k8s-1.12"
-            location := key + "/" + id + ".yaml"
-
-            addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
-                Name:              fi.String(key),
-                Version:           fi.String(versions[id]),
-                Selector:          networkingSelector(),
-                Manifest:          fi.String(location),
-                KubernetesVersion: "<1.16.0",
-                Id:                id,
-            })
-        }
-
         {
             id := "k8s-1.16"
             location := key + "/" + id + ".yaml"

             addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
-                Name:              fi.String(key),
-                Version:           fi.String(versions[id]),
-                Selector:          networkingSelector(),
-                Manifest:          fi.String(location),
-                KubernetesVersion: ">=1.16.0",
-                Id:                id,
+                Name:     fi.String(key),
+                Version:  fi.String(versions[id]),
+                Selector: networkingSelector(),
+                Manifest: fi.String(location),
+                Id:       id,
             })
         }
     }

@@ -958,12 +880,11 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann
         location := key + "/" + id + ".yaml"

         addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
-            Name:              fi.String(key),
-            Version:           fi.String(version),
-            Manifest:          fi.String(location),
-            Selector:          map[string]string{"k8s-addon": key},
-            KubernetesVersion: ">=1.15.0",
-            Id:                id,
+            Name:     fi.String(key),
+            Version:  fi.String(version),
+            Manifest: fi.String(location),
+            Selector: map[string]string{"k8s-addon": key},
+            Id:       id,
         })
     }

@@ -34,22 +34,7 @@ func addCiliumAddon(b *BootstrapChannelBuilder, addons *api.Addons) error {
     }

     key := "networking.cilium.io"
-    if ver.Minor < 8 {
-        version := "1.7.3-kops.1"
-
-        {
-            id := "k8s-1.12"
-            location := key + "/" + id + ".yaml"
-
-            addons.Spec.Addons = append(addons.Spec.Addons, &api.AddonSpec{
-                Name:     fi.String(key),
-                Version:  fi.String(version),
-                Selector: networkingSelector(),
-                Manifest: fi.String(location),
-                Id:       id,
-            })
-        }
-    } else if ver.Minor == 8 {
+    if ver.Minor < 9 {
         version := "1.8.0-kops.1"
         {
             id := "k8s-1.12"

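With Cilium 1.7 gone, the whole ver.Minor < 8 branch is deleted and the old == 8 arm widens to < 9. A hedged sketch of the remaining dispatch, assuming blang/semver/v4 for parsing (the helper name and the error fallthrough are illustrative, not kops' actual signature):

package main

import (
    "fmt"

    "github.com/blang/semver/v4"
)

// pickCiliumAddonVersion mirrors the branch structure left in addCiliumAddon:
// choose the addon manifest version from the configured Cilium minor version.
func pickCiliumAddonVersion(ciliumVersion string) (string, error) {
    ver, err := semver.ParseTolerant(ciliumVersion)
    if err != nil {
        return "", fmt.Errorf("invalid cilium version %q: %v", ciliumVersion, err)
    }
    if ver.Minor < 9 {
        // Cilium 1.7 support is removed, so 1.8 is the floor here.
        return "1.8.0-kops.1", nil
    }
    return "", fmt.Errorf("unhandled cilium minor version %d", ver.Minor)
}

func main() {
    v, _ := pickCiliumAddonVersion("v1.8.2")
    fmt.Println(v) // 1.8.0-kops.1
}
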
@@ -44,12 +44,12 @@ func TestBootstrapChannelBuilder_BuildTasks(t *testing.T) {

     h.SetupMockAWS()

-    runChannelBuilderTest(t, "simple", []string{"dns-controller.addons.k8s.io-k8s-1.12", "kops-controller.addons.k8s.io-k8s-1.16"})
+    runChannelBuilderTest(t, "simple", []string{"kops-controller.addons.k8s.io-k8s-1.16"})
     // Use cilium networking, proxy
-    runChannelBuilderTest(t, "cilium", []string{"dns-controller.addons.k8s.io-k8s-1.12", "kops-controller.addons.k8s.io-k8s-1.16"})
+    runChannelBuilderTest(t, "cilium", []string{"kops-controller.addons.k8s.io-k8s-1.16"})
     runChannelBuilderTest(t, "weave", []string{})
-    runChannelBuilderTest(t, "amazonvpc", []string{"networking.amazon-vpc-routed-eni-k8s-1.12", "networking.amazon-vpc-routed-eni-k8s-1.16"})
-    runChannelBuilderTest(t, "amazonvpc-containerd", []string{"networking.amazon-vpc-routed-eni-k8s-1.12", "networking.amazon-vpc-routed-eni-k8s-1.16"})
+    runChannelBuilderTest(t, "amazonvpc", []string{"networking.amazon-vpc-routed-eni-k8s-1.16"})
+    runChannelBuilderTest(t, "amazonvpc-containerd", []string{"networking.amazon-vpc-routed-eni-k8s-1.16"})
     runChannelBuilderTest(t, "awsiamauthenticator", []string{"authentication.aws-k8s-1.12"})
 }

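The identifiers asserted by runChannelBuilderTest follow a "<addon key>-<id>" shape, which is why dropping the k8s-1.12 manifests removes one entry per expectation rather than renaming anything. A tiny illustration (the helper is ours, not the test's):

package main

import "fmt"

// expectedManifestName joins an addon key and its per-version id the way the
// expectations above are written; purely illustrative.
func expectedManifestName(key, id string) string {
    return key + "-" + id
}

func main() {
    fmt.Println(expectedManifestName("networking.amazon-vpc-routed-eni", "k8s-1.16"))
    // networking.amazon-vpc-routed-eni-k8s-1.16
}
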
@@ -23,7 +23,6 @@ import (

     "k8s.io/klog/v2"
     kopsapi "k8s.io/kops/pkg/apis/kops"
-    "k8s.io/kops/pkg/apis/kops/util"
     "k8s.io/kops/pkg/assets"
     "k8s.io/kops/util/pkg/architectures"
     "k8s.io/kops/util/pkg/hashing"

@@ -35,10 +34,6 @@ import (
 // https://github.com/kubernetes/kubernetes/issues/30338

 const (
-    // defaultCNIAssetAmd64K8s_11 is the CNI tarball for k8s >= 1.11
-    defaultCNIAssetAmd64K8s_11 = "https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-amd64-v0.7.5.tgz"
-    defaultCNIAssetArm64K8s_11 = "https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-arm64-v0.7.5.tgz"
-
     // defaultCNIAssetAmd64K8s_15 is the CNI tarball for k8s >= 1.15
     defaultCNIAssetAmd64K8s_15 = "https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-amd64-v0.8.7.tgz"
     defaultCNIAssetArm64K8s_15 = "https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-arm64-v0.8.7.tgz"

@@ -85,25 +80,12 @@ func findCNIAssets(c *kopsapi.Cluster, assetBuilder *assets.AssetBuilder, arch a
         return u, h, nil
     }

-    sv, err := util.ParseKubernetesVersion(c.Spec.KubernetesVersion)
-    if err != nil {
-        return nil, nil, fmt.Errorf("unable to find Kubernetes version: %v", err)
-    }
-
     switch arch {
     case architectures.ArchitectureAmd64:
-        if util.IsKubernetesGTE("1.15", *sv) {
-            cniAssetURL = defaultCNIAssetAmd64K8s_15
-        } else {
-            cniAssetURL = defaultCNIAssetAmd64K8s_11
-        }
+        cniAssetURL = defaultCNIAssetAmd64K8s_15
         klog.V(2).Infof("Adding default AMD64 CNI plugin binaries asset: %s", cniAssetURL)
     case architectures.ArchitectureArm64:
-        if util.IsKubernetesGTE("1.15", *sv) {
-            cniAssetURL = defaultCNIAssetArm64K8s_15
-        } else {
-            cniAssetURL = defaultCNIAssetArm64K8s_11
-        }
+        cniAssetURL = defaultCNIAssetArm64K8s_15
         klog.V(2).Infof("Adding default ARM64 CNI plugin binaries asset: %s", cniAssetURL)
     default:
         return nil, nil, fmt.Errorf("unknown arch for CNI plugin binaries asset: %s", arch)

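After this hunk there is exactly one default CNI tarball per architecture and no Kubernetes-version branching left. A condensed, self-contained sketch of the resulting selection (constants inlined from the diff; the helper name is ours, and the real findCNIAssets also applies asset remapping and hash lookup):

package main

import (
    "fmt"
    "os"
)

const (
    cniAmd64 = "https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-amd64-v0.8.7.tgz"
    cniArm64 = "https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-arm64-v0.8.7.tgz"
)

// defaultCNIAssetURL picks the CNI tarball for an architecture; with the
// 1.11-era assets removed, the choice depends only on arch.
func defaultCNIAssetURL(arch string) (string, error) {
    switch arch {
    case "amd64":
        return cniAmd64, nil
    case "arm64":
        return cniArm64, nil
    default:
        return "", fmt.Errorf("unknown arch for CNI plugin binaries asset: %s", arch)
    }
}

func main() {
    u, err := defaultCNIAssetURL("arm64")
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    fmt.Println(u)
}
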
@@ -350,7 +350,7 @@ func TestPopulateCluster_APIServerCount(t *testing.T) {

 func TestPopulateCluster_AnonymousAuth(t *testing.T) {
     cloud, c := buildMinimalCluster()
-    c.Spec.KubernetesVersion = "1.15.0"
+    c.Spec.KubernetesVersion = "1.20.0"

     err := PerformAssignments(c, cloud)
     if err != nil {

@@ -376,18 +376,6 @@ func TestPopulateCluster_DockerVersion(t *testing.T) {
         KubernetesVersion string
         DockerVersion     string
     }{
-        {
-            KubernetesVersion: "1.13.0",
-            DockerVersion:     "18.06.3",
-        },
-        {
-            KubernetesVersion: "1.15.6",
-            DockerVersion:     "18.06.3",
-        },
-        {
-            KubernetesVersion: "1.16.0",
-            DockerVersion:     "18.09.9",
-        },
         {
             KubernetesVersion: "1.17.0",
             DockerVersion:     "19.03.15",

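The grid is table-driven: each surviving row pins the Docker version that cluster-spec population should default for a given Kubernetes version, and rows at or below the old support floor are simply dropped. A minimal sketch of the pattern with a stand-in lookup (the real mapping lives in the code under test):

package main

import "fmt"

// defaultDockerVersion is a stand-in for the logic under test; the real
// defaults live in kops' cluster-spec population code.
func defaultDockerVersion(k8sVersion string) string {
    defaults := map[string]string{
        "1.17.0": "19.03.15",
    }
    return defaults[k8sVersion]
}

func main() {
    grid := []struct {
        KubernetesVersion string
        DockerVersion     string
    }{
        {KubernetesVersion: "1.17.0", DockerVersion: "19.03.15"},
    }
    for _, g := range grid {
        if got := defaultDockerVersion(g.KubernetesVersion); got != g.DockerVersion {
            fmt.Printf("k8s %s: got %s, want %s\n", g.KubernetesVersion, got, g.DockerVersion)
        }
    }
}
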
@@ -22,7 +22,7 @@ spec:
       name: master-us-test-1a
     name: events
   iam: {}
-  kubernetesVersion: v1.16.0
+  kubernetesVersion: v1.20.0
   masterInternalName: api.internal.minimal.example.com
   masterPublicName: api.minimal.example.com
   additionalSans:

@@ -21,7 +21,7 @@ spec:
       name: master-us-test-1a
     name: events
   iam: {}
-  kubernetesVersion: v1.16.0
+  kubernetesVersion: v1.20.0
   masterInternalName: api.internal.minimal.example.com
   masterPublicName: api.minimal.example.com
   additionalSans: