Remove code supporting dropped k8s versions

John Gardiner Myers 2020-05-31 12:41:00 -07:00
parent 07f5e58cac
commit e88e0cf7ec
26 changed files with 63 additions and 523 deletions

View File

@ -46,7 +46,6 @@ import (
"k8s.io/kops/pkg/commands"
"k8s.io/kops/pkg/dns"
"k8s.io/kops/pkg/featureflag"
"k8s.io/kops/pkg/k8sversion"
"k8s.io/kops/pkg/model/components"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/cloudup"
@ -1143,13 +1142,8 @@ func RunCreateCluster(ctx context.Context, f *util.Factory, out io.Writer, c *Cr
cluster.Spec.MasterPublicName = c.MasterPublicName
}
- kv, err := k8sversion.Parse(cluster.Spec.KubernetesVersion)
- if err != nil {
- return fmt.Errorf("failed to parse kubernetes version: %s", err.Error())
- }
- // check if we should set anonymousAuth to false on k8s versions >=1.11
- if kv.IsGTE("1.11") {
+ // check if we should set anonymousAuth to false
+ {
if cluster.Spec.Kubelet == nil {
cluster.Spec.Kubelet = &api.KubeletConfigSpec{}
}
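
For context, the default this block now applies unconditionally surfaces in the cluster spec as `kubelet.anonymousAuth: false`. A quick, hedged way to confirm it on an existing cluster (`$NAME` is a placeholder):

```bash
# Sketch: inspect the kubelet block of the cluster spec.
kops get cluster $NAME -o yaml | grep -A 3 'kubelet:'
# expected to include something like:
#   kubelet:
#     anonymousAuth: false
```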

View File

@ -32,8 +32,8 @@ var (
`))
setExample = templates.Examples(i18n.T(`
- # Set cluster to run kubernetes version 1.10.0
- kops set cluster k8s-cluster.example.com spec.kubernetesVersion=1.10.0
+ # Set cluster to run kubernetes version 1.17.0
+ kops set cluster k8s-cluster.example.com spec.kubernetesVersion=1.17.0
`))
)

View File

@ -38,8 +38,8 @@ var (
kops set does not update the cloud resources; to apply the changes use "kops update cluster".`))
setClusterExample = templates.Examples(i18n.T(`
- # Set cluster to run kubernetes version 1.10.0
- kops set cluster k8s.cluster.site spec.kubernetesVersion=1.10.0
+ # Set cluster to run kubernetes version 1.17.0
+ kops set cluster k8s.cluster.site spec.kubernetesVersion=1.17.0
`))
)

View File

@ -33,10 +33,6 @@ spec:
## AWS IAM Authenticator
- :exclamation:AWS IAM Authenticator requires Kops 1.10 or newer and Kubernetes 1.10 or newer
To turn on AWS IAM Authenticator, you'll need to add the stanza below
to your cluster configuration.
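
The stanza itself sits just below this hunk's context; as a hedged sketch (assuming the `spec.authentication.aws` field kops uses for this feature), enabling it looks roughly like:

```bash
# Sketch: enable AWS IAM Authenticator on an existing cluster.
kops edit cluster $NAME
# then add under spec:
#   authentication:
#     aws: {}
```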

View File

@ -17,8 +17,8 @@ Note: if you already have kops installed, you need to substitute `upgrade` for `
You can switch between installed releases with:
```bash
- brew switch kops 1.9.0
- brew switch kops 1.10.0
+ brew switch kops 1.17.0
+ brew switch kops 1.18.0
```
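
For an existing installation, the upgrade flow mentioned above is roughly:

```bash
brew update         # refresh formulae
brew upgrade kops   # use `upgrade` in place of `install`
```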
# Releasing kops to Brew

View File

@ -75,4 +75,4 @@ Upgrade uses the latest Kubernetes version considered stable by kops, defined in
* `kops rolling-update cluster $NAME` to preview, then `kops rolling-update cluster $NAME --yes`
### Other Notes:
- * In general, we recommend that you upgrade your cluster one minor release at a time (1.7 --> 1.8 --> 1.9). Although jumping minor versions may work if you have not enabled alpha features, you run a greater risk of running into problems due to version deprecation.
+ * In general, we recommend that you upgrade your cluster one minor release at a time (1.17 --> 1.18 --> 1.19). Although jumping minor versions may work if you have not enabled alpha features, you run a greater risk of running into problems due to version deprecation.
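
Putting the preview-then-apply steps above together (a sketch; `$NAME` is a placeholder for your cluster name):

```bash
kops upgrade cluster $NAME                # preview the recommended version bump
kops upgrade cluster $NAME --yes          # apply it to the cluster spec
kops update cluster $NAME                 # preview the cloud changes
kops update cluster $NAME --yes           # apply them
kops rolling-update cluster $NAME         # preview which instances will restart
kops rolling-update cluster $NAME --yes   # restart them
```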

View File

@ -1,12 +1,12 @@
# Upgrading kubernetes
Upgrading kubernetes is very easy with kops, as long as you are using a compatible version of kops.
- The kops `1.8.x` series (for example) supports the kubernetes 1.6, 1.7 and 1.8 series,
+ The kops `1.18.x` series (for example) supports the kubernetes 1.16, 1.17 and 1.18 series,
as per the kubernetes deprecation policy. Older versions of kubernetes will likely still work, but these
- are on a best-effort basis and will have little if any testing. kops `1.8` will not support the kubernetes
- `1.9` series, and for full support of kubernetes `1.9` it is best to wait for the kops `1.9` series release.
+ are on a best-effort basis and will have little if any testing. kops `1.18` will not support the kubernetes
+ `1.19` series, and for full support of kubernetes `1.19` it is best to wait for the kops `1.19` series release.
We aim to release the next major version of kops within a few weeks of the equivalent major release of kubernetes,
- so kops `1.9.0` will be released within a few weeks of kubernetes `1.9.0`. We try to ensure that a 1.9 pre-release
+ so kops `1.19.0` will be released within a few weeks of kubernetes `1.19.0`. We try to ensure that a 1.19 pre-release
(alpha or beta) is available at the kubernetes release, for early adopters.
Upgrading kubernetes is similar to changing the image on an InstanceGroup, except that the kubernetes version is
@ -45,7 +45,7 @@ spec:
legacy: false
kubernetesApiAccess:
- 0.0.0.0/0
- kubernetesVersion: 1.7.2
+ kubernetesVersion: 1.17.2
masterInternalName: api.internal.simple.k8s.local
masterPublicName: api.simple.k8s.local
networking:
@ -65,7 +65,7 @@ spec:
nodes: public
```
- Edit `kubernetesVersion`, changing it to `1.7.7` for example.
+ Edit `kubernetesVersion`, changing it to `1.17.7` for example.
Apply the changes to the cloud infrastructure using `kops update cluster` and `kops update cluster --yes`:
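
A sketch of that edit-and-apply flow for the example cluster above:

```bash
kops edit cluster simple.k8s.local          # change kubernetesVersion to 1.17.7
kops update cluster simple.k8s.local        # preview the changes
kops update cluster simple.k8s.local --yes  # apply them
```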
@ -118,10 +118,10 @@ Restart the instances with `kops rolling-update cluster --yes`.
```
> kubectl get nodes -owide
NAME STATUS AGE VERSION EXTERNAL-IP OS-IMAGE KERNEL-VERSION
- master-us-central1-a-8fcc Ready 26m v1.7.7 35.194.56.129 Container-Optimized OS from Google 4.4.35+
- nodes-9cml Ready 16m v1.7.7 35.193.12.73 Ubuntu 16.04.3 LTS 4.10.0-35-generic
- nodes-km98 Ready 10m v1.7.7 35.194.25.144 Ubuntu 16.04.3 LTS 4.10.0-35-generic
- nodes-wbb2 Ready 2m v1.7.7 35.188.177.16 Ubuntu 16.04.3 LTS 4.10.0-35-generic
+ master-us-central1-a-8fcc Ready 26m v1.17.7 35.194.56.129 Container-Optimized OS from Google 4.4.35+
+ nodes-9cml Ready 16m v1.17.7 35.193.12.73 Ubuntu 16.04.3 LTS 4.10.0-35-generic
+ nodes-km98 Ready 10m v1.17.7 35.194.25.144 Ubuntu 16.04.3 LTS 4.10.0-35-generic
+ nodes-wbb2 Ready 2m v1.17.7 35.188.177.16 Ubuntu 16.04.3 LTS 4.10.0-35-generic
```
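
The restart step referenced above, spelled out for the same cluster (sketch):

```bash
kops rolling-update cluster simple.k8s.local        # preview which nodes need replacing
kops rolling-update cluster simple.k8s.local --yes  # replace instances so kubelets report the new version
```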
<!-- TODO: Do we drain, validate and then restart -->

View File

@ -824,10 +824,8 @@ func (b *DockerBuilder) buildSystemdService(dockerVersionMajor int, dockerVersio
//# Uncomment TasksMax if your systemd version supports it.
//# Only systemd 226 and above support this version.
//#TasksMax=infinity
if b.IsKubernetesGTE("1.10") {
// Equivalent of https://github.com/kubernetes/kubernetes/pull/51986
manifest.Set("Service", "TasksMax", "infinity")
}
// Equivalent of https://github.com/kubernetes/kubernetes/pull/51986
manifest.Set("Service", "TasksMax", "infinity")
manifest.Set("Service", "Restart", "always")
manifest.Set("Service", "RestartSec", "2s")
@ -913,10 +911,8 @@ func (b *DockerBuilder) buildContainerOSConfigurationDropIn(c *fi.ModelBuilderCo
"EnvironmentFile=/etc/environment",
}
- if b.IsKubernetesGTE("1.10") {
- // Equivalent of https://github.com/kubernetes/kubernetes/pull/51986
- lines = append(lines, "TasksMax=infinity")
- }
+ // Equivalent of https://github.com/kubernetes/kubernetes/pull/51986
+ lines = append(lines, "TasksMax=infinity")
contents := strings.Join(lines, "\n")

View File

@ -359,7 +359,7 @@ func (b *KubeAPIServerBuilder) buildPod() (*v1.Pod, error) {
// most people will still have c.Spec.KubeAPIServer.AdmissionControl references into their configuration we need
// to fix up. A PR https://github.com/kubernetes/kops/pull/5221/ introduced the issue and since the command line
// flags are mutually exclusive the API refuses to come up.
- if b.IsKubernetesGTE("1.10") {
+ {
// @note: not sure if this is the best place to put it, I could place it into validation.go, which has the benefit of
// fixing up the manifests itself, but that feels VERY hacky
// @note: it's fine to use AdmissionControl here and it's not populated by the model, thus the only data could have come from the cluster spec

View File

@ -183,11 +183,9 @@ func (b *KubeletBuilder) buildSystemdEnvironmentFile(kubeletConfig *kops.Kubelet
if kubeletConfig.ExperimentalAllowedUnsafeSysctls != nil {
// The ExperimentalAllowedUnsafeSysctls flag was renamed in k/k #63717
if b.IsKubernetesGTE("1.11") {
klog.V(1).Info("ExperimentalAllowedUnsafeSysctls was renamed in k8s 1.11+, please use AllowedUnsafeSysctls instead.")
kubeletConfig.AllowedUnsafeSysctls = append(kubeletConfig.ExperimentalAllowedUnsafeSysctls, kubeletConfig.AllowedUnsafeSysctls...)
kubeletConfig.ExperimentalAllowedUnsafeSysctls = nil
}
klog.V(1).Info("ExperimentalAllowedUnsafeSysctls was renamed in k8s 1.11+, please use AllowedUnsafeSysctls instead.")
kubeletConfig.AllowedUnsafeSysctls = append(kubeletConfig.ExperimentalAllowedUnsafeSysctls, kubeletConfig.AllowedUnsafeSysctls...)
kubeletConfig.ExperimentalAllowedUnsafeSysctls = nil
}
// TODO: Dump the separate file for flags - just complexity!

View File

@ -436,12 +436,10 @@ func ValidateCluster(c *kops.Cluster, strict bool) field.ErrorList {
// KubeAPIServer
if c.Spec.KubeAPIServer != nil {
- if c.IsKubernetesGTE("1.10") {
- if len(c.Spec.KubeAPIServer.AdmissionControl) > 0 {
- if len(c.Spec.KubeAPIServer.DisableAdmissionPlugins) > 0 {
- allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("kubeAPIServer", "disableAdmissionPlugins"),
- "disableAdmissionPlugins is mutually exclusive, you cannot use both admissionControl and disableAdmissionPlugins together"))
- }
+ if len(c.Spec.KubeAPIServer.AdmissionControl) > 0 {
+ if len(c.Spec.KubeAPIServer.DisableAdmissionPlugins) > 0 {
+ allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("kubeAPIServer", "disableAdmissionPlugins"),
+ "disableAdmissionPlugins is mutually exclusive, you cannot use both admissionControl and disableAdmissionPlugins together"))
+ }
}
}
@ -572,7 +570,7 @@ func validateKubelet(k *kops.KubeletConfigSpec, c *kops.Cluster, kubeletPath *fi
}
}
- if c.IsKubernetesGTE("1.10") {
+ {
// Flag removed in 1.10
if k.RequireKubeconfig != nil {
allErrs = append(allErrs, field.Forbidden(

View File

@ -147,15 +147,6 @@ func (a *AssetBuilder) RemapImage(image string) (string, error) {
asset.DockerImage = image
- // The k8s.gcr.io prefix is an alias, but for CI builds we run from a docker load,
- // and we only double-tag from 1.10 onwards.
- // For versions prior to 1.10, remap k8s.gcr.io to the old name.
- // This also means that we won't start using the aliased names on existing clusters,
- // which could otherwise be surprising to users.
- if !util.IsKubernetesGTE("1.10", a.KubernetesVersion) && strings.HasPrefix(image, "k8s.gcr.io/") {
- image = "gcr.io/google_containers/" + strings.TrimPrefix(image, "k8s.gcr.io/")
- }
if strings.HasPrefix(image, "kope/dns-controller:") {
// To use user-defined DNS Controller:
// 1. DOCKER_REGISTRY=[your docker hub repo] make dns-controller-push
@ -210,11 +201,7 @@ func (a *AssetBuilder) RemapImage(image string) (string, error) {
normalized := image
// Remove the 'standard' kubernetes image prefix, just for sanity
- if !util.IsKubernetesGTE("1.10", a.KubernetesVersion) && strings.HasPrefix(normalized, "gcr.io/google_containers/") {
- normalized = strings.TrimPrefix(normalized, "gcr.io/google_containers/")
- } else {
- normalized = strings.TrimPrefix(normalized, "k8s.gcr.io/")
- }
+ normalized = strings.TrimPrefix(normalized, "k8s.gcr.io/")
// When assembling the cluster spec, kops may call the option more than once until the config converges
// This means that this function may be called more than once on the same image

View File

@ -86,11 +86,9 @@ func (b *KubeAPIServerOptionsBuilder) BuildOptions(o interface{}) error {
} else if clusterSpec.Authorization.RBAC != nil {
var modes []string
- if b.IsKubernetesGTE("1.10") {
- if fi.BoolValue(clusterSpec.KubeAPIServer.EnableBootstrapAuthToken) {
- // Enable the Node authorizer, used for special per-node RBAC policies
- modes = append(modes, "Node")
- }
+ if fi.BoolValue(clusterSpec.KubeAPIServer.EnableBootstrapAuthToken) {
+ // Enable the Node authorizer, used for special per-node RBAC policies
+ modes = append(modes, "Node")
}
modes = append(modes, "RBAC")
@ -164,12 +162,8 @@ func (b *KubeAPIServerOptionsBuilder) BuildOptions(o interface{}) error {
c.LogLevel = 2
c.SecurePort = 443
- if b.IsKubernetesGTE("1.10") {
- c.BindAddress = "0.0.0.0"
- c.InsecureBindAddress = "127.0.0.1"
- } else {
- c.Address = "127.0.0.1"
- }
+ c.BindAddress = "0.0.0.0"
+ c.InsecureBindAddress = "127.0.0.1"
c.AllowPrivileged = fi.Bool(true)
c.ServiceClusterIPRange = clusterSpec.ServiceClusterIPRange
@ -177,24 +171,9 @@ func (b *KubeAPIServerOptionsBuilder) BuildOptions(o interface{}) error {
c.EtcdServersOverrides = []string{"/events#http://127.0.0.1:4002"}
// TODO: We can probably rewrite these more clearly in descending order
if b.IsKubernetesLT("1.10") {
c.AdmissionControl = []string{
"Initializers",
"NamespaceLifecycle",
"LimitRanger",
"ServiceAccount",
"PersistentVolumeLabel",
"DefaultStorageClass",
"DefaultTolerationSeconds",
"MutatingAdmissionWebhook",
"ValidatingAdmissionWebhook",
"NodeRestriction",
"ResourceQuota",
}
}
// Based on recommendations from:
// https://kubernetes.io/docs/admin/admission-controllers/#is-there-a-recommended-set-of-admission-controllers-to-use
if b.IsKubernetesGTE("1.10") && b.IsKubernetesLT("1.12") {
if b.IsKubernetesLT("1.12") {
c.EnableAdmissionPlugins = []string{
"Initializers",
"NamespaceLifecycle",

View File

@ -44,11 +44,7 @@ func (b *ContainerdOptionsBuilder) BuildOptions(o interface{}) error {
containerd := clusterSpec.Containerd
if clusterSpec.ContainerRuntime == "containerd" {
if b.IsKubernetesLT("1.11") {
// Containerd 1.2 is validated against Kubernetes v1.11+
// https://github.com/containerd/containerd/blob/master/releases/v1.2.0.toml#L34
return fmt.Errorf("kubernetes %s is not compatible with containerd", clusterSpec.KubernetesVersion)
} else if b.IsKubernetesLT("1.18") {
if b.IsKubernetesLT("1.18") {
klog.Warningf("kubernetes %s is untested with containerd", clusterSpec.KubernetesVersion)
}
@ -56,7 +52,7 @@ func (b *ContainerdOptionsBuilder) BuildOptions(o interface{}) error {
if fi.StringValue(containerd.Version) == "" {
if b.IsKubernetesGTE("1.18") {
containerd.Version = fi.String("1.3.4")
} else if b.IsKubernetesGTE("1.11") {
} else {
return fmt.Errorf("containerd version is required")
}
}

View File

@ -194,15 +194,6 @@ func Image(component string, architecture string, clusterSpec *kops.ClusterSpec,
image := "k8s.gcr.io/" + imageName + ":" + tag
// When we're using a docker load-ed image, we are likely a CI build.
- // But the k8s.gcr.io prefix is an alias, and we only double-tagged from 1.10 onwards.
- // For versions prior to 1.10, remap k8s.gcr.io to the old name.
- // This also means that we won't start using the aliased names on existing clusters,
- // which could otherwise be surprising to users.
- if !kubernetesVersion.IsGTE("1.10") {
- image = "gcr.io/google_containers/" + strings.TrimPrefix(image, "k8s.gcr.io/")
- }
return image, nil
}

View File

@ -73,17 +73,13 @@ func (b *DockerOptionsBuilder) BuildOptions(o interface{}) error {
docker.IPMasq = fi.Bool(false)
// Note the alternative syntax... with a comma nodeup will try each of the filesystems in turn
if b.IsKubernetesGTE("1.11") {
// TODO(justinsb): figure out whether to use overlay2 on AWS jessie:
// The ContainerOS image now has docker configured to use overlay2 out-of-the-box
// and it is an error to specify the flag twice.
// But Jessie (still our default AWS image) isn't recommended by docker with overlay2
// (though that may be a kernel issue, and we run a custom kernel on our default image)
// But we still need to worry about users running generic AMIs (e.g. stock jessie)
docker.Storage = fi.String("overlay2,overlay,aufs")
} else {
docker.Storage = fi.String("overlay,aufs")
}
// TODO(justinsb): figure out whether to use overlay2 on AWS jessie:
// The ContainerOS image now has docker configured to use overlay2 out-of-the-box
// and it is an error to specify the flag twice.
// But Jessie (still our default AWS image) isn't recommended by docker with overlay2
// (though that may be a kernel issue, and we run a custom kernel on our default image)
// But we still need to worry about users running generic AMIs (e.g. stock jessie)
docker.Storage = fi.String("overlay2,overlay,aufs")
return nil
}

View File

@ -83,10 +83,8 @@ func (b *EtcdOptionsBuilder) BuildOptions(o interface{}) error {
c.Version = DefaultEtcd3Version_1_14
} else if b.IsKubernetesGTE("1.13") {
c.Version = DefaultEtcd3Version_1_13
} else if b.IsKubernetesGTE("1.11") {
c.Version = DefaultEtcd3Version_1_11
} else {
c.Version = DefaultEtcd2Version
c.Version = DefaultEtcd3Version_1_11
}
}

View File

@ -57,12 +57,7 @@ func (b *EtcdManagerOptionsBuilder) BuildOptions(o interface{}) error {
}
if etcdCluster.Version == "" {
if b.IsKubernetesGTE("1.11") {
etcdCluster.Version = "3.2.18"
} else {
// Preserve existing default etcd version
etcdCluster.Version = "2.2.1"
}
etcdCluster.Version = "3.2.18"
}
if !etcdVersionIsSupported(etcdCluster.Version) {

View File

@ -39,52 +39,22 @@ func TestImage(t *testing.T) {
Component: "kube-apiserver",
Cluster: &kops.Cluster{
Spec: kops.ClusterSpec{
KubernetesVersion: "v1.9.0",
KubernetesVersion: "v1.11.0",
},
},
Expected: "gcr.io/google_containers/kube-apiserver:v1.9.0",
Expected: "k8s.gcr.io/kube-apiserver:v1.11.0",
},
- {
- Component: "kube-apiserver",
- Cluster: &kops.Cluster{
- Spec: kops.ClusterSpec{
- KubernetesVersion: "v1.10.0",
- },
- },
- Expected: "k8s.gcr.io/kube-apiserver:v1.10.0",
- },
- {
- Component: "kube-apiserver",
- Cluster: &kops.Cluster{
- Spec: kops.ClusterSpec{
- KubernetesVersion: "1.10.0",
- },
- },
- Expected: "k8s.gcr.io/kube-apiserver:v1.10.0",
- },
{
Component: "kube-apiserver",
Cluster: &kops.Cluster{
Spec: kops.ClusterSpec{
KubernetesVersion: "memfs://v1.9.0-download/",
KubernetesVersion: "memfs://v1.11.0-download/",
},
},
VFS: map[string]string{
"memfs://v1.9.0-download/bin/linux/amd64/kube-apiserver.docker_tag": "1-9-0dockertag",
"memfs://v1.11.0-download/bin/linux/amd64/kube-apiserver.docker_tag": "1-11-0dockertag",
},
Expected: "gcr.io/google_containers/kube-apiserver:1-9-0dockertag",
},
{
Component: "kube-apiserver",
Cluster: &kops.Cluster{
Spec: kops.ClusterSpec{
KubernetesVersion: "memfs://v1.10.0-download/",
},
},
VFS: map[string]string{
"memfs://v1.10.0-download/bin/linux/amd64/kube-apiserver.docker_tag": "1-10-0dockertag",
},
Expected: "k8s.gcr.io/kube-apiserver:1-10-0dockertag",
Expected: "k8s.gcr.io/kube-apiserver:1-11-0dockertag",
},
{
Component: "kube-apiserver",

View File

@ -152,10 +152,8 @@ func (b *KubeControllerManagerOptionsBuilder) BuildOptions(o interface{}) error
// @check if the node authorization is enabled and if so enable the tokencleaner controller (disabled by default)
// This is responsible for cleaning up bootstrap tokens which have expired
- if b.Context.IsKubernetesGTE("1.10") {
- if fi.BoolValue(clusterSpec.KubeAPIServer.EnableBootstrapAuthToken) && len(kcm.Controllers) <= 0 {
- kcm.Controllers = []string{"*", "tokencleaner"}
- }
+ if fi.BoolValue(clusterSpec.KubeAPIServer.EnableBootstrapAuthToken) && len(kcm.Controllers) <= 0 {
+ kcm.Controllers = []string{"*", "tokencleaner"}
}
return nil

View File

@ -31,12 +31,13 @@ func buildCluster() *api.Cluster {
Spec: api.ClusterSpec{
CloudProvider: "aws",
KubernetesVersion: "v1.14.0",
+ KubeAPIServer: &api.KubeAPIServerConfig{},
},
}
}
func Test_Build_KCM_Builder(t *testing.T) {
versions := []string{"v1.9.0", "v2.4.0"}
versions := []string{"v1.11.0", "v2.4.0"}
for _, v := range versions {
c := buildCluster()

View File

@ -1,229 +0,0 @@
# Vendored from https://github.com/aws/amazon-vpc-cni-k8s/blob/v1.3.3/config/v1.3/aws-k8s-cni.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: aws-node
rules:
- apiGroups:
- crd.k8s.amazonaws.com
resources:
- "*"
- namespaces
verbs:
- "*"
- apiGroups: [""]
resources:
- pods
- nodes
- namespaces
verbs: ["list", "watch", "get"]
- apiGroups: ["extensions"]
resources:
- daemonsets
verbs: ["list", "watch"]
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: aws-node
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: aws-node
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: aws-node
subjects:
- kind: ServiceAccount
name: aws-node
namespace: kube-system
---
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: aws-node
namespace: kube-system
labels:
k8s-app: aws-node
spec:
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
k8s-app: aws-node
template:
metadata:
labels:
k8s-app: aws-node
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
serviceAccountName: aws-node
hostNetwork: true
tolerations:
- operator: Exists
containers:
- image: "{{- or .Networking.AmazonVPC.ImageName "602401143452.dkr.ecr.us-west-2.amazonaws.com/amazon-k8s-cni:1.3.3" }}"
ports:
- containerPort: 61678
name: metrics
name: aws-node
env:
- name: CLUSTER_NAME
value: {{ ClusterName }}
- name: AWS_VPC_K8S_CNI_LOGLEVEL
value: DEBUG
- name: MY_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: WATCH_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
{{- range .Networking.AmazonVPC.Env }}
- name: {{ .Name }}
value: "{{ .Value }}"
{{- end }}
resources:
requests:
cpu: 10m
securityContext:
privileged: true
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
- mountPath: /host/var/log
name: log-dir
- mountPath: /var/run/docker.sock
name: dockersock
volumes:
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
- name: log-dir
hostPath:
path: /var/log
- name: dockersock
hostPath:
path: /var/run/docker.sock
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: eniconfigs.crd.k8s.amazonaws.com
spec:
scope: Cluster
group: crd.k8s.amazonaws.com
version: v1alpha1
names:
plural: eniconfigs
singular: eniconfig
kind: ENIConfig
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: k8s-ec2-srcdst
labels:
role.kubernetes.io/networking: "1"
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- update
- patch
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: k8s-ec2-srcdst
namespace: kube-system
labels:
role.kubernetes.io/networking: "1"
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: k8s-ec2-srcdst
labels:
role.kubernetes.io/networking: "1"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: k8s-ec2-srcdst
subjects:
- kind: ServiceAccount
name: k8s-ec2-srcdst
namespace: kube-system
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: k8s-ec2-srcdst
namespace: kube-system
labels:
k8s-app: k8s-ec2-srcdst
role.kubernetes.io/networking: "1"
spec:
replicas: 1
selector:
matchLabels:
k8s-app: k8s-ec2-srcdst
template:
metadata:
labels:
k8s-app: k8s-ec2-srcdst
role.kubernetes.io/networking: "1"
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
hostNetwork: true
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: CriticalAddonsOnly
operator: Exists
serviceAccountName: k8s-ec2-srcdst
containers:
- image: ottoyiu/k8s-ec2-srcdst:v0.2.0-3-gc0c26eca
name: k8s-ec2-srcdst
resources:
requests:
cpu: 10m
memory: 64Mi
env:
- name: AWS_REGION
value: {{ Region }}
volumeMounts:
- name: ssl-certs
mountPath: "/etc/ssl/certs/ca-certificates.crt"
readOnly: true
imagePullPolicy: "Always"
volumes:
- name: ssl-certs
hostPath:
path: "/etc/ssl/certs/ca-certificates.crt"
nodeSelector:
node-role.kubernetes.io/master: ""

View File

@ -1,78 +0,0 @@
---
apiVersion: extensions/v1beta1
kind: PodSecurityPolicy
metadata:
name: kube-system
spec:
allowedCapabilities:
- '*'
fsGroup:
rule: RunAsAny
hostPID: true
hostIPC: true
hostNetwork: true
hostPorts:
- min: 1
max: 65536
privileged: true
runAsUser:
rule: RunAsAny
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
volumes:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: kops:kube-system:psp
rules:
- apiGroups:
- extensions
resources:
- podsecuritypolicies
resourceNames:
- kube-system
verbs:
- use
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: kops:kube-system:psp
roleRef:
kind: ClusterRole
name: kops:kube-system:psp
apiGroup: rbac.authorization.k8s.io
subjects:
# permit the cluster wise admin to use this policy
- kind: Group
name: system:masters
apiGroup: rbac.authorization.k8s.io
# permit the kubelets to access this policy (used for manifests)
- kind: User
name: kubelet
apiGroup: rbac.authorization.k8s.io
## TODO: need to question whether this can move into a rolebinding?
{{- if UseBootstrapTokens }}
- kind: Group
name: system:nodes
apiGroup: rbac.authorization.k8s.io
{{- end }}
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: kops:kube-system:psp
namespace: kube-system
roleRef:
kind: ClusterRole
name: kops:kube-system:psp
apiGroup: rbac.authorization.k8s.io
subjects:
# permit the cluster wise admin to use this policy
- kind: Group
name: system:serviceaccounts:kube-system
apiGroup: rbac.authorization.k8s.io

View File

@ -24,8 +24,6 @@ import (
"path"
"strings"
"k8s.io/kops/pkg/k8sversion"
"github.com/blang/semver"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog"
@ -280,14 +278,8 @@ func (c *ApplyClusterCmd) Run(ctx context.Context) error {
cluster.Spec.KubernetesVersion = versionWithoutV
}
- kv, err := k8sversion.Parse(cluster.Spec.KubernetesVersion)
- if err != nil {
- return err
- }
- // check if we should recommend turning off anonymousAuth on k8s versions gte than 1.10
- // we do 1.10 since this is a really critical issues and 1.10 has it
- if kv.IsGTE("1.10") {
+ // check if we should recommend turning off anonymousAuth
+ {
// we do a check here because modifying the kubelet object messes with the output
warn := false
if cluster.Spec.Kubelet == nil {

View File

@ -156,21 +156,6 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
key := "podsecuritypolicy.addons.k8s.io"
version := "0.0.4"
- {
- location := key + "/k8s-1.9.yaml"
- id := "k8s-1.9"
- addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
- Name: fi.String(key),
- Version: fi.String(version),
- Selector: map[string]string{"k8s-addon": key},
- Manifest: fi.String(location),
- KubernetesVersion: ">=1.9.0 <1.10.0",
- Id: id,
- })
- }
- // In k8s v1.10, the PodSecurityPolicy API has been moved to the policy/v1beta1 API group
{
location := key + "/k8s-1.10.yaml"
id := "k8s-1.10"
@ -890,26 +875,11 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
key := "networking.amazon-vpc-routed-eni"
versions := map[string]string{
"k8s-1.8": "1.5.0-kops.1",
"k8s-1.10": "1.5.0-kops.2",
"k8s-1.12": "1.5.5-kops.1",
"k8s-1.16": "1.6.0-kops.1",
}
- {
- id := "k8s-1.8"
- location := key + "/" + id + ".yaml"
- addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
- Name: fi.String(key),
- Version: fi.String(versions[id]),
- Selector: networkingSelector,
- Manifest: fi.String(location),
- KubernetesVersion: "<1.10.0",
- Id: id,
- })
- }
{
id := "k8s-1.10"
location := key + "/" + id + ".yaml"
@ -1070,7 +1040,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
Version: fi.String(version),
Manifest: fi.String(location),
Selector: map[string]string{"k8s-addon": key},
KubernetesVersion: ">=1.11.0 <1.13.0",
KubernetesVersion: "<1.13.0",
Id: id,
})
}

View File

@ -34,10 +34,6 @@ import (
// https://github.com/kubernetes/kubernetes/issues/30338
const (
- // defaultCNIAssetK8s1_9 is the CNI tarball for 1.9.x k8s.
- defaultCNIAssetK8s1_9 = "https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-amd64-v0.6.0.tgz"
- defaultCNIAssetHashStringK8s1_9 = "d595d3ded6499a64e8dac02466e2f5f2ce257c9f"
// defaultCNIAssetK8s1_11 is the CNI tarball for k8s >= 1.11
defaultCNIAssetK8s1_11 = "https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-amd64-v0.7.5.tgz"
defaultCNIAssetSHA1StringK8s1_11 = "52e9d2de8a5f927307d9397308735658ee44ab8d"
@ -85,14 +81,10 @@ func findCNIAssets(c *kopsapi.Cluster, assetBuilder *assets.AssetBuilder) (*url.
cniAsset = defaultCNIAssetK8s1_15
cniAssetHash = defaultCNIAssetSHA256StringK8s1_15
klog.V(2).Infof("Adding default CNI asset for k8s >= 1.15: %s", cniAsset)
- } else if util.IsKubernetesGTE("1.11", *sv) {
+ } else {
cniAsset = defaultCNIAssetK8s1_11
cniAssetHash = defaultCNIAssetSHA1StringK8s1_11
klog.V(2).Infof("Adding default CNI asset for 1.18 > k8s >= 1.11: %s", cniAsset)
- } else {
- cniAsset = defaultCNIAssetK8s1_9
- cniAssetHash = defaultCNIAssetHashStringK8s1_9
- klog.V(2).Infof("Adding default CNI asset for 1.11 > k8s >= 1.9: %s", cniAsset)
}
u, err := url.Parse(cniAsset)