From 970c9787907d9f26a57be498cc4fb0972126b09a Mon Sep 17 00:00:00 2001 From: Laurent Crisci Date: Wed, 24 Jul 2019 18:49:20 +0100 Subject: [PATCH 01/42] Fix template clusterName behavior This commit allows clusterName to be defined from the template's values file. Previously, the clusterName value was always overridden by the CLI flag (whether or not it was set). Now, if the value is defined in the values file, it takes priority over the CLI flag. This also aligns the behavior with the official documentation under https://github.com/kubernetes/kops/blob/master/docs/cluster_template.md --- cmd/kops/toolbox_template.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/cmd/kops/toolbox_template.go b/cmd/kops/toolbox_template.go index f16b2a48e9..2b279e9e12 100644 --- a/cmd/kops/toolbox_template.go +++ b/cmd/kops/toolbox_template.go @@ -111,7 +111,14 @@ func runToolBoxTemplate(f *util.Factory, out io.Writer, options *toolboxTemplate if err != nil { return err } - context["clusterName"] = options.clusterName + + // @step: set clusterName from template's values or cli flag + value, ok := context["clusterName"].(string) + if ok { + options.clusterName = value + } else { + context["clusterName"] = options.clusterName + } // @check if we are just rendering the config value if options.configValue != "" { From d81566482bc53022f32dc20a12e3e864c86a9701 Mon Sep 17 00:00:00 2001 From: Xiaoyu Zhong Date: Wed, 13 Nov 2019 10:52:32 +0800 Subject: [PATCH 02/42] Alicloud: etcd-manager support --- pkg/model/components/etcdmanager/BUILD.bazel | 1 + pkg/model/components/etcdmanager/model.go | 11 +++++++++++ pkg/model/master_volumes.go | 2 ++ 3 files changed, 14 insertions(+) diff --git a/pkg/model/components/etcdmanager/BUILD.bazel b/pkg/model/components/etcdmanager/BUILD.bazel index 8772b8b99c..62d5bb2395 100644 --- a/pkg/model/components/etcdmanager/BUILD.bazel +++ b/pkg/model/components/etcdmanager/BUILD.bazel @@ -21,6 +21,7 @@ go_library( "//pkg/urls:go_default_library", "//pkg/wellknownports:go_default_library", "//upup/pkg/fi:go_default_library", + "//upup/pkg/fi/cloudup/aliup:go_default_library", "//upup/pkg/fi/cloudup/awsup:go_default_library", "//upup/pkg/fi/cloudup/do:go_default_library", "//upup/pkg/fi/cloudup/gce:go_default_library", diff --git a/pkg/model/components/etcdmanager/model.go b/pkg/model/components/etcdmanager/model.go index 10c895c079..0070992467 100644 --- a/pkg/model/components/etcdmanager/model.go +++ b/pkg/model/components/etcdmanager/model.go @@ -39,6 +39,7 @@ import ( "k8s.io/kops/pkg/model" "k8s.io/kops/pkg/wellknownports" "k8s.io/kops/upup/pkg/fi" + "k8s.io/kops/upup/pkg/fi/cloudup/aliup" "k8s.io/kops/upup/pkg/fi/cloudup/awsup" "k8s.io/kops/upup/pkg/fi/cloudup/do" "k8s.io/kops/upup/pkg/fi/cloudup/gce" @@ -371,6 +372,16 @@ func (b *EtcdManagerBuilder) buildPod(etcdCluster *kops.EtcdClusterSpec) (*v1.Po } config.VolumeNameTag = awsup.TagNameEtcdClusterPrefix + etcdCluster.Name + case kops.CloudProviderALI: + config.VolumeProvider = "alicloud" + + config.VolumeTag = []string{ + fmt.Sprintf("kubernetes.io/cluster/%s=owned", b.Cluster.Name), + aliup.TagNameEtcdClusterPrefix + etcdCluster.Name, + aliup.TagNameRolePrefix + "master=1", + } + config.VolumeNameTag = aliup.TagNameEtcdClusterPrefix + etcdCluster.Name + case kops.CloudProviderGCE: config.VolumeProvider = "gce" diff --git a/pkg/model/master_volumes.go b/pkg/model/master_volumes.go index 3727a6e3cc..d3eb8498d8 100644 --- a/pkg/model/master_volumes.go +++ b/pkg/model/master_volumes.go @@ -308,6
+308,8 @@ func (b *MasterVolumeBuilder) addALIVolume(c *fi.ModelBuilderContext, name strin tags[aliup.TagNameEtcdClusterPrefix+etcd.Name] = m.Name + "/" + strings.Join(allMembers, ",") // This says "only mount on a master" tags[aliup.TagNameRolePrefix+"master"] = "1" + // We always add an owned tag (these can't be shared) + tags["kubernetes.io/cluster/"+b.Cluster.ObjectMeta.Name] = "owned" encrypted := fi.BoolValue(m.EncryptedVolume) From 4c2a4317e7f0ed5ffd824e4820909a840f49d798 Mon Sep 17 00:00:00 2001 From: John Gardiner Myers Date: Sun, 8 Dec 2019 16:21:29 -0800 Subject: [PATCH 03/42] Announce removal of kops/v1alpha1 in kops 1.18 --- docs/releases/1.17-NOTES.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/releases/1.17-NOTES.md b/docs/releases/1.17-NOTES.md index 26e85b49c0..8422e9c30e 100644 --- a/docs/releases/1.17-NOTES.md +++ b/docs/releases/1.17-NOTES.md @@ -21,6 +21,10 @@ the notes prior to the release). * No required actions yet known. +# Deprecations + +* The `kops/v1alpha1` API is deprecated and will be removed in kops 1.18. Users of `kops replace` will need to supply v1alpha2 resources. + # Full change list since 1.16.0 release ## 1.16.0-alpha.1 to 1.17.0-alpha.1 From d30f4ff99583169162e0b4e3a581bae05f71c6eb Mon Sep 17 00:00:00 2001 From: John Gardiner Myers Date: Sun, 8 Dec 2019 16:21:35 -0800 Subject: [PATCH 04/42] Announce removal of kops/v1alpha1 in kops 1.18 --- docs/releases/1.16-NOTES.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/releases/1.16-NOTES.md b/docs/releases/1.16-NOTES.md index 3d2351cad8..422e134bd1 100644 --- a/docs/releases/1.16-NOTES.md +++ b/docs/releases/1.16-NOTES.md @@ -25,6 +25,10 @@ the notes prior to the release). * No required actions yet known. +# Deprecations + +* The `kops/v1alpha1` API is deprecated and will be removed in kops 1.18. Users of `kops replace` will need to supply v1alpha2 resources.
+ # Full change list since 1.15.0 release ## 1.15.0-alpha.1 to 1.16.0-alpha.1 From ce241bc4d1738d691a4d8f7e4cab537836dcfd89 Mon Sep 17 00:00:00 2001 From: John Gardiner Myers Date: Mon, 11 Nov 2019 20:51:28 -0800 Subject: [PATCH 05/42] Add MaxUnavailable setting to cluster and instancegroup --- pkg/apis/kops/BUILD.bazel | 1 + pkg/apis/kops/cluster.go | 19 +++++++++++++++++++ pkg/apis/kops/instancegroup.go | 2 ++ pkg/apis/kops/v1alpha1/BUILD.bazel | 1 + pkg/apis/kops/v1alpha1/cluster.go | 19 +++++++++++++++++++ pkg/apis/kops/v1alpha1/instancegroup.go | 2 ++ pkg/apis/kops/v1alpha2/BUILD.bazel | 1 + pkg/apis/kops/v1alpha2/cluster.go | 19 +++++++++++++++++++ pkg/apis/kops/v1alpha2/instancegroup.go | 2 ++ 9 files changed, 66 insertions(+) diff --git a/pkg/apis/kops/BUILD.bazel b/pkg/apis/kops/BUILD.bazel index f0e62bfacf..8fec778d09 100644 --- a/pkg/apis/kops/BUILD.bazel +++ b/pkg/apis/kops/BUILD.bazel @@ -32,6 +32,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/apis/kops/cluster.go b/pkg/apis/kops/cluster.go index ac30d521ed..da65acc751 100644 --- a/pkg/apis/kops/cluster.go +++ b/pkg/apis/kops/cluster.go @@ -21,6 +21,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/kops/pkg/apis/kops/util" ) @@ -186,6 +187,8 @@ type ClusterSpec struct { // specified, each parameter must follow the form variable=value, the way // it would appear in sysctl.conf. SysctlParameters []string `json:"sysctlParameters,omitempty"` + // RollingUpdate defines the default rolling-update settings for instance groups + RollingUpdate *RollingUpdate `json:"rollingUpdate,omitempty"` } // NodeAuthorizationSpec is used to node authorization @@ -654,3 +657,19 @@ type DNSControllerGossipConfig struct { Secondary *DNSControllerGossipConfig `json:"secondary,omitempty"` Seed *string `json:"seed,omitempty"` } + +type RollingUpdate struct { + // MaxUnavailable is the maximum number of nodes that can be unavailable during the update. + // The value can be an absolute number (for example 5) or a percentage of desired + // nodes (for example 10%). + // The absolute number is calculated from a percentage by rounding down. + // A value of 0 disables rolling updates. + // Defaults to 1. + // Example: when this is set to 30%, the InstanceGroup can be scaled + // down to 70% of desired nodes immediately when the rolling update + // starts. Once new nodes are ready, more old nodes can be drained, + // ensuring that the total number of nodes available at all times + // during the update is at least 70% of desired nodes. + // +optional + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` +} diff --git a/pkg/apis/kops/instancegroup.go b/pkg/apis/kops/instancegroup.go index 116a9b017d..89203f5cce 100644 --- a/pkg/apis/kops/instancegroup.go +++ b/pkg/apis/kops/instancegroup.go @@ -159,6 +159,8 @@ type InstanceGroupSpec struct { // specified, each parameter must follow the form variable=value, the way // it would appear in sysctl.conf. 
SysctlParameters []string `json:"sysctlParameters,omitempty"` + // RollingUpdate defines the rolling-update behavior + RollingUpdate *RollingUpdate `json:"rollingUpdate,omitempty"` } const ( diff --git a/pkg/apis/kops/v1alpha1/BUILD.bazel b/pkg/apis/kops/v1alpha1/BUILD.bazel index 2b021fc41f..b4560c234a 100644 --- a/pkg/apis/kops/v1alpha1/BUILD.bazel +++ b/pkg/apis/kops/v1alpha1/BUILD.bazel @@ -29,6 +29,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/apis/kops/v1alpha1/cluster.go b/pkg/apis/kops/v1alpha1/cluster.go index b22e84e8cf..3ffff40766 100644 --- a/pkg/apis/kops/v1alpha1/cluster.go +++ b/pkg/apis/kops/v1alpha1/cluster.go @@ -19,6 +19,7 @@ package v1alpha1 import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" ) // +genclient @@ -184,6 +185,8 @@ type ClusterSpec struct { // specified, each parameter must follow the form variable=value, the way // it would appear in sysctl.conf. SysctlParameters []string `json:"sysctlParameters,omitempty"` + // RollingUpdate defines the default rolling-update settings for instance groups + RollingUpdate *RollingUpdate `json:"rollingUpdate,omitempty"` } // NodeAuthorizationSpec is used to node authorization @@ -538,3 +541,19 @@ type DNSControllerGossipConfig struct { Secondary *DNSControllerGossipConfig `json:"secondary,omitempty"` Seed *string `json:"seed,omitempty"` } + +type RollingUpdate struct { + // MaxUnavailable is the maximum number of nodes that can be unavailable during the update. + // The value can be an absolute number (for example 5) or a percentage of desired + // nodes (for example 10%). + // The absolute number is calculated from a percentage by rounding down. + // A value of 0 disables rolling updates. + // Defaults to 1. + // Example: when this is set to 30%, the InstanceGroup can be scaled + // down to 70% of desired nodes immediately when the rolling update + // starts. Once new nodes are ready, more old nodes can be drained, + // ensuring that the total number of nodes available at all times + // during the update is at least 70% of desired nodes. + // +optional + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` +} diff --git a/pkg/apis/kops/v1alpha1/instancegroup.go b/pkg/apis/kops/v1alpha1/instancegroup.go index c762fa640a..0e4acdbd65 100644 --- a/pkg/apis/kops/v1alpha1/instancegroup.go +++ b/pkg/apis/kops/v1alpha1/instancegroup.go @@ -146,6 +146,8 @@ type InstanceGroupSpec struct { // specified, each parameter must follow the form variable=value, the way // it would appear in sysctl.conf. 
SysctlParameters []string `json:"sysctlParameters,omitempty"` + // RollingUpdate defines the rolling-update behavior + RollingUpdate *RollingUpdate `json:"rollingUpdate,omitempty"` } const ( diff --git a/pkg/apis/kops/v1alpha2/BUILD.bazel b/pkg/apis/kops/v1alpha2/BUILD.bazel index 21341d46a6..83715fdd75 100644 --- a/pkg/apis/kops/v1alpha2/BUILD.bazel +++ b/pkg/apis/kops/v1alpha2/BUILD.bazel @@ -29,6 +29,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/apis/kops/v1alpha2/cluster.go b/pkg/apis/kops/v1alpha2/cluster.go index 02e9df9043..cc928ff0c8 100644 --- a/pkg/apis/kops/v1alpha2/cluster.go +++ b/pkg/apis/kops/v1alpha2/cluster.go @@ -19,6 +19,7 @@ package v1alpha2 import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" ) // +genclient @@ -184,6 +185,8 @@ type ClusterSpec struct { // specified, each parameter must follow the form variable=value, the way // it would appear in sysctl.conf. SysctlParameters []string `json:"sysctlParameters,omitempty"` + // RollingUpdate defines the default rolling-update settings for instance groups + RollingUpdate *RollingUpdate `json:"rollingUpdate,omitempty"` } // NodeAuthorizationSpec is used to node authorization @@ -551,3 +554,19 @@ type DNSControllerGossipConfig struct { Secondary *DNSControllerGossipConfig `json:"secondary,omitempty"` Seed *string `json:"seed,omitempty"` } + +type RollingUpdate struct { + // MaxUnavailable is the maximum number of nodes that can be unavailable during the update. + // The value can be an absolute number (for example 5) or a percentage of desired + // nodes (for example 10%). + // The absolute number is calculated from a percentage by rounding down. + // A value of 0 disables rolling updates. + // Defaults to 1. + // Example: when this is set to 30%, the InstanceGroup can be scaled + // down to 70% of desired nodes immediately when the rolling update + // starts. Once new nodes are ready, more old nodes can be drained, + // ensuring that the total number of nodes available at all times + // during the update is at least 70% of desired nodes. + // +optional + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` +} diff --git a/pkg/apis/kops/v1alpha2/instancegroup.go b/pkg/apis/kops/v1alpha2/instancegroup.go index bfd47be33a..021e6a9a12 100644 --- a/pkg/apis/kops/v1alpha2/instancegroup.go +++ b/pkg/apis/kops/v1alpha2/instancegroup.go @@ -153,6 +153,8 @@ type InstanceGroupSpec struct { // specified, each parameter must follow the form variable=value, the way // it would appear in sysctl.conf. 
SysctlParameters []string `json:"sysctlParameters,omitempty"` + // RollingUpdate defines the rolling-update behavior + RollingUpdate *RollingUpdate `json:"rollingUpdate,omitempty"` } const ( From 855cd1fe67ab7461fe3a5b16ab446c8f87cd696e Mon Sep 17 00:00:00 2001 From: John Gardiner Myers Date: Sat, 4 Jan 2020 09:37:36 -0800 Subject: [PATCH 06/42] make apimachinery --- .../kops/v1alpha1/zz_generated.conversion.go | 66 +++++++++++++++++++ .../kops/v1alpha1/zz_generated.deepcopy.go | 32 +++++++++ .../kops/v1alpha2/zz_generated.conversion.go | 66 +++++++++++++++++++ .../kops/v1alpha2/zz_generated.deepcopy.go | 32 +++++++++ pkg/apis/kops/zz_generated.deepcopy.go | 32 +++++++++ 5 files changed, 228 insertions(+) diff --git a/pkg/apis/kops/v1alpha1/zz_generated.conversion.go b/pkg/apis/kops/v1alpha1/zz_generated.conversion.go index 4eb618d39f..5220b2bc4a 100644 --- a/pkg/apis/kops/v1alpha1/zz_generated.conversion.go +++ b/pkg/apis/kops/v1alpha1/zz_generated.conversion.go @@ -693,6 +693,16 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*RollingUpdate)(nil), (*kops.RollingUpdate)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_RollingUpdate_To_kops_RollingUpdate(a.(*RollingUpdate), b.(*kops.RollingUpdate), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*kops.RollingUpdate)(nil), (*RollingUpdate)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_kops_RollingUpdate_To_v1alpha1_RollingUpdate(a.(*kops.RollingUpdate), b.(*RollingUpdate), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*RomanaNetworkingSpec)(nil), (*kops.RomanaNetworkingSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha1_RomanaNetworkingSpec_To_kops_RomanaNetworkingSpec(a.(*RomanaNetworkingSpec), b.(*kops.RomanaNetworkingSpec), scope) }); err != nil { @@ -1878,6 +1888,15 @@ func autoConvert_v1alpha1_ClusterSpec_To_kops_ClusterSpec(in *ClusterSpec, out * } out.UseHostCertificates = in.UseHostCertificates out.SysctlParameters = in.SysctlParameters + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(kops.RollingUpdate) + if err := Convert_v1alpha1_RollingUpdate_To_kops_RollingUpdate(*in, *out, s); err != nil { + return err + } + } else { + out.RollingUpdate = nil + } return nil } @@ -2176,6 +2195,15 @@ func autoConvert_kops_ClusterSpec_To_v1alpha1_ClusterSpec(in *kops.ClusterSpec, } out.UseHostCertificates = in.UseHostCertificates out.SysctlParameters = in.SysctlParameters + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdate) + if err := Convert_kops_RollingUpdate_To_v1alpha1_RollingUpdate(*in, *out, s); err != nil { + return err + } + } else { + out.RollingUpdate = nil + } return nil } @@ -3058,6 +3086,15 @@ func autoConvert_v1alpha1_InstanceGroupSpec_To_kops_InstanceGroupSpec(in *Instan out.SecurityGroupOverride = in.SecurityGroupOverride out.InstanceProtection = in.InstanceProtection out.SysctlParameters = in.SysctlParameters + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(kops.RollingUpdate) + if err := Convert_v1alpha1_RollingUpdate_To_kops_RollingUpdate(*in, *out, s); err != nil { + return err + } + } else { + out.RollingUpdate = nil + } return nil } @@ -3181,6 +3218,15 @@ func 
autoConvert_kops_InstanceGroupSpec_To_v1alpha1_InstanceGroupSpec(in *kops.I out.SecurityGroupOverride = in.SecurityGroupOverride out.InstanceProtection = in.InstanceProtection out.SysctlParameters = in.SysctlParameters + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdate) + if err := Convert_kops_RollingUpdate_To_v1alpha1_RollingUpdate(*in, *out, s); err != nil { + return err + } + } else { + out.RollingUpdate = nil + } return nil } @@ -4602,6 +4648,26 @@ func Convert_kops_RBACAuthorizationSpec_To_v1alpha1_RBACAuthorizationSpec(in *ko return autoConvert_kops_RBACAuthorizationSpec_To_v1alpha1_RBACAuthorizationSpec(in, out, s) } +func autoConvert_v1alpha1_RollingUpdate_To_kops_RollingUpdate(in *RollingUpdate, out *kops.RollingUpdate, s conversion.Scope) error { + out.MaxUnavailable = in.MaxUnavailable + return nil +} + +// Convert_v1alpha1_RollingUpdate_To_kops_RollingUpdate is an autogenerated conversion function. +func Convert_v1alpha1_RollingUpdate_To_kops_RollingUpdate(in *RollingUpdate, out *kops.RollingUpdate, s conversion.Scope) error { + return autoConvert_v1alpha1_RollingUpdate_To_kops_RollingUpdate(in, out, s) +} + +func autoConvert_kops_RollingUpdate_To_v1alpha1_RollingUpdate(in *kops.RollingUpdate, out *RollingUpdate, s conversion.Scope) error { + out.MaxUnavailable = in.MaxUnavailable + return nil +} + +// Convert_kops_RollingUpdate_To_v1alpha1_RollingUpdate is an autogenerated conversion function. +func Convert_kops_RollingUpdate_To_v1alpha1_RollingUpdate(in *kops.RollingUpdate, out *RollingUpdate, s conversion.Scope) error { + return autoConvert_kops_RollingUpdate_To_v1alpha1_RollingUpdate(in, out, s) +} + func autoConvert_v1alpha1_RomanaNetworkingSpec_To_kops_RomanaNetworkingSpec(in *RomanaNetworkingSpec, out *kops.RomanaNetworkingSpec, s conversion.Scope) error { out.DaemonServiceIP = in.DaemonServiceIP out.EtcdServiceIP = in.EtcdServiceIP diff --git a/pkg/apis/kops/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/kops/v1alpha1/zz_generated.deepcopy.go index f0e9d888bb..924c5d6e1d 100644 --- a/pkg/apis/kops/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/kops/v1alpha1/zz_generated.deepcopy.go @@ -23,6 +23,7 @@ package v1alpha1 import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -813,6 +814,11 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdate) + (*in).DeepCopyInto(*out) + } return } @@ -1733,6 +1739,11 @@ func (in *InstanceGroupSpec) DeepCopyInto(out *InstanceGroupSpec) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdate) + (*in).DeepCopyInto(*out) + } return } @@ -3229,6 +3240,27 @@ func (in *RBACAuthorizationSpec) DeepCopy() *RBACAuthorizationSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RollingUpdate) DeepCopyInto(out *RollingUpdate) { + *out = *in + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + *out = new(intstr.IntOrString) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdate. +func (in *RollingUpdate) DeepCopy() *RollingUpdate { + if in == nil { + return nil + } + out := new(RollingUpdate) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RomanaNetworkingSpec) DeepCopyInto(out *RomanaNetworkingSpec) { *out = *in diff --git a/pkg/apis/kops/v1alpha2/zz_generated.conversion.go b/pkg/apis/kops/v1alpha2/zz_generated.conversion.go index f4ca303e9f..544051e6de 100644 --- a/pkg/apis/kops/v1alpha2/zz_generated.conversion.go +++ b/pkg/apis/kops/v1alpha2/zz_generated.conversion.go @@ -753,6 +753,16 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*RollingUpdate)(nil), (*kops.RollingUpdate)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_RollingUpdate_To_kops_RollingUpdate(a.(*RollingUpdate), b.(*kops.RollingUpdate), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*kops.RollingUpdate)(nil), (*RollingUpdate)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_kops_RollingUpdate_To_v1alpha2_RollingUpdate(a.(*kops.RollingUpdate), b.(*RollingUpdate), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*RomanaNetworkingSpec)(nil), (*kops.RomanaNetworkingSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha2_RomanaNetworkingSpec_To_kops_RomanaNetworkingSpec(a.(*RomanaNetworkingSpec), b.(*kops.RomanaNetworkingSpec), scope) }); err != nil { @@ -1931,6 +1941,15 @@ func autoConvert_v1alpha2_ClusterSpec_To_kops_ClusterSpec(in *ClusterSpec, out * } out.UseHostCertificates = in.UseHostCertificates out.SysctlParameters = in.SysctlParameters + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(kops.RollingUpdate) + if err := Convert_v1alpha2_RollingUpdate_To_kops_RollingUpdate(*in, *out, s); err != nil { + return err + } + } else { + out.RollingUpdate = nil + } return nil } @@ -2244,6 +2263,15 @@ func autoConvert_kops_ClusterSpec_To_v1alpha2_ClusterSpec(in *kops.ClusterSpec, } out.UseHostCertificates = in.UseHostCertificates out.SysctlParameters = in.SysctlParameters + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdate) + if err := Convert_kops_RollingUpdate_To_v1alpha2_RollingUpdate(*in, *out, s); err != nil { + return err + } + } else { + out.RollingUpdate = nil + } return nil } @@ -3176,6 +3204,15 @@ func autoConvert_v1alpha2_InstanceGroupSpec_To_kops_InstanceGroupSpec(in *Instan out.SecurityGroupOverride = in.SecurityGroupOverride out.InstanceProtection = in.InstanceProtection out.SysctlParameters = in.SysctlParameters + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(kops.RollingUpdate) + if err := Convert_v1alpha2_RollingUpdate_To_kops_RollingUpdate(*in, *out, s); err != nil { + return err + } + } else { + out.RollingUpdate = nil + } return nil } @@ -3304,6 +3341,15 @@ func autoConvert_kops_InstanceGroupSpec_To_v1alpha2_InstanceGroupSpec(in *kops.I 
out.SecurityGroupOverride = in.SecurityGroupOverride out.InstanceProtection = in.InstanceProtection out.SysctlParameters = in.SysctlParameters + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdate) + if err := Convert_kops_RollingUpdate_To_v1alpha2_RollingUpdate(*in, *out, s); err != nil { + return err + } + } else { + out.RollingUpdate = nil + } return nil } @@ -4872,6 +4918,26 @@ func Convert_kops_RBACAuthorizationSpec_To_v1alpha2_RBACAuthorizationSpec(in *ko return autoConvert_kops_RBACAuthorizationSpec_To_v1alpha2_RBACAuthorizationSpec(in, out, s) } +func autoConvert_v1alpha2_RollingUpdate_To_kops_RollingUpdate(in *RollingUpdate, out *kops.RollingUpdate, s conversion.Scope) error { + out.MaxUnavailable = in.MaxUnavailable + return nil +} + +// Convert_v1alpha2_RollingUpdate_To_kops_RollingUpdate is an autogenerated conversion function. +func Convert_v1alpha2_RollingUpdate_To_kops_RollingUpdate(in *RollingUpdate, out *kops.RollingUpdate, s conversion.Scope) error { + return autoConvert_v1alpha2_RollingUpdate_To_kops_RollingUpdate(in, out, s) +} + +func autoConvert_kops_RollingUpdate_To_v1alpha2_RollingUpdate(in *kops.RollingUpdate, out *RollingUpdate, s conversion.Scope) error { + out.MaxUnavailable = in.MaxUnavailable + return nil +} + +// Convert_kops_RollingUpdate_To_v1alpha2_RollingUpdate is an autogenerated conversion function. +func Convert_kops_RollingUpdate_To_v1alpha2_RollingUpdate(in *kops.RollingUpdate, out *RollingUpdate, s conversion.Scope) error { + return autoConvert_kops_RollingUpdate_To_v1alpha2_RollingUpdate(in, out, s) +} + func autoConvert_v1alpha2_RomanaNetworkingSpec_To_kops_RomanaNetworkingSpec(in *RomanaNetworkingSpec, out *kops.RomanaNetworkingSpec, s conversion.Scope) error { out.DaemonServiceIP = in.DaemonServiceIP out.EtcdServiceIP = in.EtcdServiceIP diff --git a/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go b/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go index 57d9723eea..7b4523b123 100644 --- a/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go +++ b/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go @@ -23,6 +23,7 @@ package v1alpha2 import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -786,6 +787,11 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdate) + (*in).DeepCopyInto(*out) + } return } @@ -1695,6 +1701,11 @@ func (in *InstanceGroupSpec) DeepCopyInto(out *InstanceGroupSpec) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdate) + (*in).DeepCopyInto(*out) + } return } @@ -3300,6 +3311,27 @@ func (in *RBACAuthorizationSpec) DeepCopy() *RBACAuthorizationSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollingUpdate) DeepCopyInto(out *RollingUpdate) { + *out = *in + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + *out = new(intstr.IntOrString) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdate. 
+func (in *RollingUpdate) DeepCopy() *RollingUpdate { + if in == nil { + return nil + } + out := new(RollingUpdate) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RomanaNetworkingSpec) DeepCopyInto(out *RomanaNetworkingSpec) { *out = *in diff --git a/pkg/apis/kops/zz_generated.deepcopy.go b/pkg/apis/kops/zz_generated.deepcopy.go index 61719aafd8..0d37a37cc8 100644 --- a/pkg/apis/kops/zz_generated.deepcopy.go +++ b/pkg/apis/kops/zz_generated.deepcopy.go @@ -23,6 +23,7 @@ package kops import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -886,6 +887,11 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdate) + (*in).DeepCopyInto(*out) + } return } @@ -1861,6 +1867,11 @@ func (in *InstanceGroupSpec) DeepCopyInto(out *InstanceGroupSpec) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdate) + (*in).DeepCopyInto(*out) + } return } @@ -3514,6 +3525,27 @@ func (in *RBACAuthorizationSpec) DeepCopy() *RBACAuthorizationSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollingUpdate) DeepCopyInto(out *RollingUpdate) { + *out = *in + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + *out = new(intstr.IntOrString) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdate. +func (in *RollingUpdate) DeepCopy() *RollingUpdate { + if in == nil { + return nil + } + out := new(RollingUpdate) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RomanaNetworkingSpec) DeepCopyInto(out *RomanaNetworkingSpec) { *out = *in From d6ceffed3669882f6fa7dbf44f2d795fbe0106e4 Mon Sep 17 00:00:00 2001 From: John Gardiner Myers Date: Wed, 27 Nov 2019 17:04:15 -0800 Subject: [PATCH 07/42] make crds --- k8s/crds/kops.k8s.io_clusters.yaml | 19 +++++++++++++++++++ k8s/crds/kops.k8s.io_instancegroups.yaml | 18 ++++++++++++++++++ 2 files changed, 37 insertions(+) diff --git a/k8s/crds/kops.k8s.io_clusters.yaml b/k8s/crds/kops.k8s.io_clusters.yaml index c396e5604c..b643f781cf 100644 --- a/k8s/crds/kops.k8s.io_clusters.yaml +++ b/k8s/crds/kops.k8s.io_clusters.yaml @@ -2870,6 +2870,25 @@ spec: description: Project is the cloud project we should use, required on GCE type: string + rollingUpdate: + description: RollingUpdate defines the default rolling-update settings + for instance groups + properties: + maxUnavailable: + anyOf: + - type: string + - type: integer + description: 'MaxUnavailable is the maximum number of nodes that + can be unavailable during the update. The value can be an absolute + number (for example 5) or a percentage of desired nodes (for example + 10%). The absolute number is calculated from a percentage by rounding + down. A value of 0 disables rolling updates. Defaults to 1. 
Example: + when this is set to 30%, the InstanceGroup can be scaled down + to 70% of desired nodes immediately when the rolling update starts. + Once new nodes are ready, more old nodes can be drained, ensuring + that the total number of nodes available at all times during the + update is at least 70% of desired nodes.' + type: object secretStore: description: SecretStore is the VFS path to where secrets are stored type: string diff --git a/k8s/crds/kops.k8s.io_instancegroups.yaml b/k8s/crds/kops.k8s.io_instancegroups.yaml index 566bc566ad..35e0f46e3b 100644 --- a/k8s/crds/kops.k8s.io_instancegroups.yaml +++ b/k8s/crds/kops.k8s.io_instancegroups.yaml @@ -624,6 +624,24 @@ spec: description: 'Type determines the role of instances in this group: masters or nodes' type: string + rollingUpdate: + description: RollingUpdate defines the rolling-update behavior + properties: + maxUnavailable: + anyOf: + - type: string + - type: integer + description: 'MaxUnavailable is the maximum number of nodes that + can be unavailable during the update. The value can be an absolute + number (for example 5) or a percentage of desired nodes (for example + 10%). The absolute number is calculated from a percentage by rounding + down. A value of 0 disables rolling updates. Defaults to 1. Example: + when this is set to 30%, the InstanceGroup can be scaled down + to 70% of desired nodes immediately when the rolling update starts. + Once new nodes are ready, more old nodes can be drained, ensuring + that the total number of nodes available at all times during the + update is at least 70% of desired nodes.' + type: object rootVolumeDeleteOnTermination: description: 'RootVolumeDeleteOnTermination configures root volume retention policy upon instance termination. The root volume is deleted by default. 
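Taken together, patches 05 through 07 surface the new rolling-update control in the kops API types and CRDs. As an illustrative sketch only (not part of the patch series; the group name and sizes are invented for the example), a v1alpha2 InstanceGroup that caps unavailability at 30% might look like:

    # hypothetical example spec, not taken from the patches
    apiVersion: kops.k8s.io/v1alpha2
    kind: InstanceGroup
    metadata:
      name: nodes
    spec:
      role: Node
      minSize: 10
      maxSize: 10
      rollingUpdate:
        maxUnavailable: "30%"

Per the MaxUnavailable documentation above, 30% of 10 desired nodes rounds down to 3, so at most 3 nodes of this group would be unavailable at any time during a rolling update; an absolute value of 0 disables rolling updates for the group, and omitting the field defaults to 1.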
From adaf903b900613914e8411a22fd6134797b5e586 Mon Sep 17 00:00:00 2001 From: John Gardiner Myers Date: Tue, 12 Nov 2019 22:25:45 -0800 Subject: [PATCH 08/42] Create resolveSettings --- pkg/instancegroups/BUILD.bazel | 8 +- pkg/instancegroups/settings.go | 56 ++++++++++ pkg/instancegroups/settings_test.go | 167 ++++++++++++++++++++++++++++ 3 files changed, 230 insertions(+), 1 deletion(-) create mode 100644 pkg/instancegroups/settings.go create mode 100644 pkg/instancegroups/settings_test.go diff --git a/pkg/instancegroups/BUILD.bazel b/pkg/instancegroups/BUILD.bazel index 739a17684d..c8fe351ff7 100644 --- a/pkg/instancegroups/BUILD.bazel +++ b/pkg/instancegroups/BUILD.bazel @@ -6,6 +6,7 @@ go_library( "delete.go", "instancegroups.go", "rollingupdate.go", + "settings.go", ], importpath = "k8s.io/kops/pkg/instancegroups", visibility = ["//visibility:public"], @@ -20,6 +21,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", @@ -29,7 +31,10 @@ go_library( go_test( name = "go_default_test", - srcs = ["rollingupdate_test.go"], + srcs = [ + "rollingupdate_test.go", + "settings_test.go", + ], embed = [":go_default_library"], deps = [ "//cloudmock/aws/mockautoscaling:go_default_library", @@ -42,6 +47,7 @@ go_test( "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library", "//vendor/k8s.io/client-go/testing:go_default_library", diff --git a/pkg/instancegroups/settings.go b/pkg/instancegroups/settings.go new file mode 100644 index 0000000000..3eb5a6371c --- /dev/null +++ b/pkg/instancegroups/settings.go @@ -0,0 +1,56 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package instancegroups + +import ( + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/kops/pkg/apis/kops" +) + +func resolveSettings(cluster *kops.Cluster, group *kops.InstanceGroup, numInstances int) kops.RollingUpdate { + rollingUpdate := kops.RollingUpdate{} + if group.Spec.RollingUpdate != nil { + rollingUpdate = *group.Spec.RollingUpdate + } + + if def := cluster.Spec.RollingUpdate; def != nil { + if rollingUpdate.MaxUnavailable == nil { + rollingUpdate.MaxUnavailable = def.MaxUnavailable + } + } + + if rollingUpdate.MaxUnavailable == nil || rollingUpdate.MaxUnavailable.IntVal < 0 { + one := intstr.FromInt(1) + rollingUpdate.MaxUnavailable = &one + } + + if rollingUpdate.MaxUnavailable.Type == intstr.String { + unavailable, err := intstr.GetValueFromIntOrPercent(rollingUpdate.MaxUnavailable, numInstances, false) + if err != nil { + // If unparseable use the default value + unavailable = 1 + } + if unavailable <= 0 { + // While we round down, percentages should resolve to a minimum of 1 + unavailable = 1 + } + unavailableInt := intstr.FromInt(unavailable) + rollingUpdate.MaxUnavailable = &unavailableInt + } + + return rollingUpdate +} diff --git a/pkg/instancegroups/settings_test.go b/pkg/instancegroups/settings_test.go new file mode 100644 index 0000000000..dd49221f9c --- /dev/null +++ b/pkg/instancegroups/settings_test.go @@ -0,0 +1,167 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package instancegroups + +import ( + "fmt" + "reflect" + "testing" + + "github.com/stretchr/testify/assert" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/kops/pkg/apis/kops" +) + +func TestSettings(t *testing.T) { + for _, tc := range []struct { + name string + defaultValue interface{} + nonDefaultValue interface{} + }{ + { + name: "MaxUnavailable", + defaultValue: intstr.FromInt(1), + nonDefaultValue: intstr.FromInt(2), + }, + } { + t.Run(tc.name, func(t *testing.T) { + defaultCluster := &kops.RollingUpdate{} + setFieldValue(defaultCluster, tc.name, tc.defaultValue) + + nonDefaultCluster := &kops.RollingUpdate{} + setFieldValue(nonDefaultCluster, tc.name, tc.nonDefaultValue) + + defaultGroup := &kops.RollingUpdate{} + setFieldValue(defaultGroup, tc.name, tc.defaultValue) + + nonDefaultGroup := &kops.RollingUpdate{} + setFieldValue(nonDefaultGroup, tc.name, tc.nonDefaultValue) + + assertResolvesValue(t, tc.name, tc.defaultValue, nil, nil, "nil nil") + assertResolvesValue(t, tc.name, tc.defaultValue, &kops.RollingUpdate{}, nil, "{nil} nil") + assertResolvesValue(t, tc.name, tc.defaultValue, defaultCluster, nil, "{default} nil") + assertResolvesValue(t, tc.name, tc.nonDefaultValue, nonDefaultCluster, nil, "{nonDefault} nil") + + assertResolvesValue(t, tc.name, tc.defaultValue, nil, &kops.RollingUpdate{}, "nil {nil}") + assertResolvesValue(t, tc.name, tc.defaultValue, &kops.RollingUpdate{}, &kops.RollingUpdate{}, "{nil} {nil}") + assertResolvesValue(t, tc.name, tc.defaultValue, defaultCluster, &kops.RollingUpdate{}, "{default} {nil}") + assertResolvesValue(t, tc.name, tc.nonDefaultValue, nonDefaultCluster, &kops.RollingUpdate{}, "{nonDefault} {nil}") + + assertResolvesValue(t, tc.name, tc.defaultValue, nil, defaultGroup, "nil {default}") + assertResolvesValue(t, tc.name, tc.defaultValue, &kops.RollingUpdate{}, defaultGroup, "{nil} {default}") + assertResolvesValue(t, tc.name, tc.defaultValue, defaultCluster, defaultGroup, "{default} {default}") + assertResolvesValue(t, tc.name, tc.defaultValue, nonDefaultCluster, defaultGroup, "{nonDefault} {default}") + + assertResolvesValue(t, tc.name, tc.nonDefaultValue, nil, nonDefaultGroup, "nil {nonDefault}") + assertResolvesValue(t, tc.name, tc.nonDefaultValue, &kops.RollingUpdate{}, nonDefaultGroup, "{nil} {nonDefault}") + assertResolvesValue(t, tc.name, tc.nonDefaultValue, defaultCluster, nonDefaultGroup, "{default} {nonDefault}") + assertResolvesValue(t, tc.name, tc.nonDefaultValue, nonDefaultCluster, nonDefaultGroup, "{nonDefault} {nonDefault}") + }) + } +} + +func setFieldValue(aStruct interface{}, fieldName string, fieldValue interface{}) { + field := reflect.ValueOf(aStruct).Elem().FieldByName(fieldName) + value := reflect.New(field.Type().Elem()) + value.Elem().Set(reflect.ValueOf(fieldValue)) + field.Set(value) +} + +func assertResolvesValue(t *testing.T, name string, expected interface{}, rollingUpdateDefault *kops.RollingUpdate, rollingUpdate *kops.RollingUpdate, msg interface{}) bool { + cluster := kops.Cluster{ + Spec: kops.ClusterSpec{ + RollingUpdate: rollingUpdateDefault, + }, + } + instanceGroup := kops.InstanceGroup{ + Spec: kops.InstanceGroupSpec{ + RollingUpdate: rollingUpdate, + }, + } + rollingUpdateDefaultCopy := rollingUpdateDefault.DeepCopy() + rollingUpdateCopy := rollingUpdate.DeepCopy() + + resolved := resolveSettings(&cluster, &instanceGroup, 1) + value := reflect.ValueOf(resolved).FieldByName(name) + + assert.Equal(t, rollingUpdateDefault, cluster.Spec.RollingUpdate, "cluster not modified") + assert.True(t, 
reflect.DeepEqual(rollingUpdateDefault, rollingUpdateDefaultCopy), "RollingUpdate not modified") + assert.Equal(t, rollingUpdate, instanceGroup.Spec.RollingUpdate, "instancegroup not modified") + assert.True(t, reflect.DeepEqual(rollingUpdate, rollingUpdateCopy), "RollingUpdate not modified") + + return assert.NotNil(t, value.Interface(), msg) && + assert.Equal(t, expected, value.Elem().Interface(), msg) +} + +func TestMaxUnavailable(t *testing.T) { + for _, tc := range []struct { + numInstances int + value string + expected int32 + }{ + { + numInstances: 1, + value: "0", + expected: 0, + }, + { + numInstances: 1, + value: "0%", + expected: 1, + }, + { + numInstances: 10, + value: "39%", + expected: 3, + }, + { + numInstances: 10, + value: "100%", + expected: 10, + }, + { + numInstances: 5, + value: "fnord", + expected: 1, + }, + { + numInstances: 5, + value: "-3", + expected: 1, + }, + { + numInstances: 5, + value: "-3%", + expected: 1, + }, + } { + t.Run(fmt.Sprintf("%s %d", tc.value, tc.numInstances), func(t *testing.T) { + value := intstr.Parse(tc.value) + rollingUpdate := kops.RollingUpdate{ + MaxUnavailable: &value, + } + instanceGroup := kops.InstanceGroup{ + Spec: kops.InstanceGroupSpec{ + RollingUpdate: &rollingUpdate, + }, + } + resolved := resolveSettings(&kops.Cluster{}, &instanceGroup, tc.numInstances) + assert.Equal(t, intstr.Int, resolved.MaxUnavailable.Type) + assert.Equal(t, tc.expected, resolved.MaxUnavailable.IntVal) + }) + } +} From 91f49205378faada362f2312e02cf652c2835985 Mon Sep 17 00:00:00 2001 From: John Gardiner Myers Date: Fri, 29 Nov 2019 12:47:36 -0800 Subject: [PATCH 09/42] Extract drainTerminateAndWait() --- pkg/instancegroups/instancegroups.go | 110 +++++++++++++++------------ 1 file changed, 62 insertions(+), 48 deletions(-) diff --git a/pkg/instancegroups/instancegroups.go b/pkg/instancegroups/instancegroups.go index b737512db4..c1987ed2e8 100644 --- a/pkg/instancegroups/instancegroups.go +++ b/pkg/instancegroups/instancegroups.go @@ -149,57 +149,11 @@ func (r *RollingUpdateInstanceGroup) RollingUpdate(rollingUpdateData *RollingUpd } for _, u := range update { - instanceId := u.ID - - nodeName := "" - if u.Node != nil { - nodeName = u.Node.Name - } - - if isBastion { - // We don't want to validate for bastions - they aren't part of the cluster - } else if rollingUpdateData.CloudOnly { - - klog.Warning("Not draining cluster nodes as 'cloudonly' flag is set.") - - } else { - - if u.Node != nil { - klog.Infof("Draining the node: %q.", nodeName) - - if err = r.DrainNode(u, rollingUpdateData); err != nil { - if rollingUpdateData.FailOnDrainError { - return fmt.Errorf("failed to drain node %q: %v", nodeName, err) - } - klog.Infof("Ignoring error draining node %q: %v", nodeName, err) - } - } else { - klog.Warningf("Skipping drain of instance %q, because it is not registered in kubernetes", instanceId) - } - } - - // We unregister the node before deleting it; if the replacement comes up with the same name it would otherwise still be cordoned - // (It often seems like GCE tries to re-use names) - if !isBastion && !rollingUpdateData.CloudOnly { - if u.Node == nil { - klog.Warningf("no kubernetes Node associated with %s, skipping node deletion", instanceId) - } else { - klog.Infof("deleting node %q from kubernetes", nodeName) - if err := r.deleteNode(u.Node, rollingUpdateData); err != nil { - return fmt.Errorf("error deleting node %q: %v", nodeName, err) - } - } - } - - if err = r.DeleteInstance(u); err != nil { - klog.Errorf("error deleting instance %q, node %q: %v", 
instanceId, nodeName, err) + err = r.drainTerminateAndWait(u, rollingUpdateData, isBastion, sleepAfterTerminate) + if err != nil { return err } - // Wait for the minimum interval - klog.Infof("waiting for %v after terminating instance", sleepAfterTerminate) - time.Sleep(sleepAfterTerminate) - if rollingUpdateData.CloudOnly { klog.Warningf("Not validating cluster as cloudonly flag is set.") @@ -218,6 +172,11 @@ func (r *RollingUpdateInstanceGroup) RollingUpdate(rollingUpdateData *RollingUpd } if rollingUpdateData.Interactive { + nodeName := "" + if u.Node != nil { + nodeName = u.Node.Name + } + stopPrompting, err := promptInteractive(u.ID, nodeName) if err != nil { return err @@ -290,6 +249,61 @@ func (r *RollingUpdateInstanceGroup) patchTaint(rollingUpdateData *RollingUpdate return err } +func (r *RollingUpdateInstanceGroup) drainTerminateAndWait(u *cloudinstances.CloudInstanceGroupMember, rollingUpdateData *RollingUpdateCluster, isBastion bool, sleepAfterTerminate time.Duration) error { + instanceId := u.ID + + nodeName := "" + if u.Node != nil { + nodeName = u.Node.Name + } + + if isBastion { + // We don't want to validate for bastions - they aren't part of the cluster + } else if rollingUpdateData.CloudOnly { + + klog.Warning("Not draining cluster nodes as 'cloudonly' flag is set.") + + } else { + + if u.Node != nil { + klog.Infof("Draining the node: %q.", nodeName) + + if err := r.DrainNode(u, rollingUpdateData); err != nil { + if rollingUpdateData.FailOnDrainError { + return fmt.Errorf("failed to drain node %q: %v", nodeName, err) + } + klog.Infof("Ignoring error draining node %q: %v", nodeName, err) + } + } else { + klog.Warningf("Skipping drain of instance %q, because it is not registered in kubernetes", instanceId) + } + } + + // We unregister the node before deleting it; if the replacement comes up with the same name it would otherwise still be cordoned + // (It often seems like GCE tries to re-use names) + if !isBastion && !rollingUpdateData.CloudOnly { + if u.Node == nil { + klog.Warningf("no kubernetes Node associated with %s, skipping node deletion", instanceId) + } else { + klog.Infof("deleting node %q from kubernetes", nodeName) + if err := r.deleteNode(u.Node, rollingUpdateData); err != nil { + return fmt.Errorf("error deleting node %q: %v", nodeName, err) + } + } + } + + if err := r.DeleteInstance(u); err != nil { + klog.Errorf("error deleting instance %q, node %q: %v", instanceId, nodeName, err) + return err + } + + // Wait for the minimum interval + klog.Infof("waiting for %v after terminating instance", sleepAfterTerminate) + time.Sleep(sleepAfterTerminate) + + return nil +} + // validateClusterWithDuration runs validation.ValidateCluster until either we get positive result or the timeout expires func (r *RollingUpdateInstanceGroup) validateClusterWithDuration(rollingUpdateData *RollingUpdateCluster, duration time.Duration) error { // Try to validate cluster at least once, this will handle durations that are lower From 09523740279f9ebb7d514dc261428ab4d7b492a6 Mon Sep 17 00:00:00 2001 From: John Gardiner Myers Date: Wed, 4 Dec 2019 21:28:07 -0800 Subject: [PATCH 10/42] Extract maybeValidate --- pkg/instancegroups/instancegroups.go | 38 +++++++++++++++++----------- 1 file changed, 23 insertions(+), 15 deletions(-) diff --git a/pkg/instancegroups/instancegroups.go b/pkg/instancegroups/instancegroups.go index c1987ed2e8..93d6b8c782 100644 --- a/pkg/instancegroups/instancegroups.go +++ b/pkg/instancegroups/instancegroups.go @@ -154,21 +154,9 @@ func (r 
*RollingUpdateInstanceGroup) RollingUpdate(rollingUpdateData *RollingUpd return err } - if rollingUpdateData.CloudOnly { - klog.Warningf("Not validating cluster as cloudonly flag is set.") - - } else { - klog.Info("Validating the cluster.") - - if err = r.validateClusterWithDuration(rollingUpdateData, validationTimeout); err != nil { - - if rollingUpdateData.FailOnValidate { - klog.Errorf("Cluster did not validate within %s", validationTimeout) - return fmt.Errorf("error validating cluster after removing a node: %v", err) - } - - klog.Warningf("Cluster validation failed after removing instance, proceeding since fail-on-validate is set to false: %v", err) - } + err = r.maybeValidate(rollingUpdateData, validationTimeout) + if err != nil { + return err } if rollingUpdateData.Interactive { @@ -304,6 +292,26 @@ func (r *RollingUpdateInstanceGroup) drainTerminateAndWait(u *cloudinstances.Clo return nil } +func (r *RollingUpdateInstanceGroup) maybeValidate(rollingUpdateData *RollingUpdateCluster, validationTimeout time.Duration) error { + if rollingUpdateData.CloudOnly { + klog.Warningf("Not validating cluster as cloudonly flag is set.") + + } else { + klog.Info("Validating the cluster.") + + if err := r.validateClusterWithDuration(rollingUpdateData, validationTimeout); err != nil { + + if rollingUpdateData.FailOnValidate { + klog.Errorf("Cluster did not validate within %s", validationTimeout) + return fmt.Errorf("error validating cluster after removing a node: %v", err) + } + + klog.Warningf("Cluster validation failed after removing instance, proceeding since fail-on-validate is set to false: %v", err) + } + } + return nil +} + // validateClusterWithDuration runs validation.ValidateCluster until either we get positive result or the timeout expires func (r *RollingUpdateInstanceGroup) validateClusterWithDuration(rollingUpdateData *RollingUpdateCluster, duration time.Duration) error { // Try to validate cluster at least once, this will handle durations that are lower From 0c3651c9c895c6591c8649c1edc56c5119b896c9 Mon Sep 17 00:00:00 2001 From: John Gardiner Myers Date: Sun, 17 Nov 2019 22:57:34 -0800 Subject: [PATCH 11/42] Implement MaxUnavailable --- pkg/instancegroups/BUILD.bazel | 1 + pkg/instancegroups/instancegroups.go | 85 +++++++- pkg/instancegroups/rollingupdate_test.go | 261 ++++++++++++++++++++++- 3 files changed, 336 insertions(+), 11 deletions(-) diff --git a/pkg/instancegroups/BUILD.bazel b/pkg/instancegroups/BUILD.bazel index c8fe351ff7..520c15586c 100644 --- a/pkg/instancegroups/BUILD.bazel +++ b/pkg/instancegroups/BUILD.bazel @@ -44,6 +44,7 @@ go_test( "//upup/pkg/fi/cloudup/awsup:go_default_library", "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library", "//vendor/github.com/aws/aws-sdk-go/service/autoscaling:go_default_library", + "//vendor/github.com/aws/aws-sdk-go/service/autoscaling/autoscalingiface:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/instancegroups/instancegroups.go b/pkg/instancegroups/instancegroups.go index 93d6b8c782..5e5556608b 100644 --- a/pkg/instancegroups/instancegroups.go +++ b/pkg/instancegroups/instancegroups.go @@ -103,7 +103,6 @@ func promptInteractive(upgradedHostId, upgradedHostName string) (stopPrompting b // TODO: Temporarily increase size of ASG? // TODO: Remove from ASG first so status is immediately updated? 
-// TODO: Batch termination, like a rolling-update // RollingUpdate performs a rolling update on a list of ec2 instances. func (r *RollingUpdateInstanceGroup) RollingUpdate(rollingUpdateData *RollingUpdateCluster, cluster *api.Cluster, isBastion bool, sleepAfterTerminate time.Duration, validationTimeout time.Duration) (err error) { @@ -118,6 +117,8 @@ func (r *RollingUpdateInstanceGroup) RollingUpdate(rollingUpdateData *RollingUpd return fmt.Errorf("rollingUpdate is missing a k8s client") } + noneReady := len(r.CloudGroup.Ready) == 0 + numInstances := len(r.CloudGroup.Ready) + len(r.CloudGroup.NeedUpdate) update := r.CloudGroup.NeedUpdate if rollingUpdateData.Force { update = append(update, r.CloudGroup.Ready...) @@ -148,15 +149,40 @@ func (r *RollingUpdateInstanceGroup) RollingUpdate(rollingUpdateData *RollingUpd } } - for _, u := range update { - err = r.drainTerminateAndWait(u, rollingUpdateData, isBastion, sleepAfterTerminate) + settings := resolveSettings(cluster, r.CloudGroup.InstanceGroup, numInstances) + + concurrency := 0 + maxConcurrency := 1 + + if r.CloudGroup.InstanceGroup.Spec.Role == api.InstanceGroupRoleNode && !rollingUpdateData.Interactive { + maxConcurrency = settings.MaxUnavailable.IntValue() + if maxConcurrency == 0 { + klog.Infof("Rolling updates for InstanceGroup %s are disabled", r.CloudGroup.InstanceGroup.Name) + return nil + } + } + + terminateChan := make(chan error, maxConcurrency) + + for uIdx, u := range update { + go r.drainTerminateAndWait(u, rollingUpdateData, terminateChan, isBastion, sleepAfterTerminate) + concurrency++ + + // Wait until after one node is deleted and its replacement validates before the concurrent draining + // in case the current spec does not result in usable nodes. + if concurrency < maxConcurrency && (!noneReady || uIdx > 0) { + continue + } + + err = <-terminateChan + concurrency-- if err != nil { - return err + return waitForPendingBeforeReturningError(concurrency, terminateChan, err) } err = r.maybeValidate(rollingUpdateData, validationTimeout) if err != nil { - return err + return waitForPendingBeforeReturningError(concurrency, terminateChan, err) } if rollingUpdateData.Interactive { @@ -174,11 +200,47 @@ func (r *RollingUpdateInstanceGroup) RollingUpdate(rollingUpdateData *RollingUpd rollingUpdateData.Interactive = false } } + + sweep: + for concurrency > 0 { + select { + case err = <-terminateChan: + concurrency-- + if err != nil { + return waitForPendingBeforeReturningError(concurrency, terminateChan, err) + } + default: + break sweep + } + } + } + + if concurrency > 0 { + for concurrency > 0 { + err = <-terminateChan + concurrency-- + if err != nil { + return waitForPendingBeforeReturningError(concurrency, terminateChan, err) + } + } + + err = r.maybeValidate(rollingUpdateData, validationTimeout) + if err != nil { + return err + } } return nil } +func waitForPendingBeforeReturningError(concurrency int, terminateChan chan error, err error) error { + for concurrency > 0 { + <-terminateChan + concurrency-- + } + return err +} + func (r *RollingUpdateInstanceGroup) taintAllNeedUpdate(update []*cloudinstances.CloudInstanceGroupMember, rollingUpdateData *RollingUpdateCluster) error { var toTaint []*corev1.Node for _, u := range update { @@ -237,7 +299,7 @@ func (r *RollingUpdateInstanceGroup) patchTaint(rollingUpdateData *RollingUpdate return err } -func (r *RollingUpdateInstanceGroup) drainTerminateAndWait(u *cloudinstances.CloudInstanceGroupMember, rollingUpdateData *RollingUpdateCluster, isBastion bool, sleepAfterTerminate 
time.Duration) error { +func (r *RollingUpdateInstanceGroup) drainTerminateAndWait(u *cloudinstances.CloudInstanceGroupMember, rollingUpdateData *RollingUpdateCluster, terminateChan chan error, isBastion bool, sleepAfterTerminate time.Duration) { instanceId := u.ID nodeName := "" @@ -258,7 +320,8 @@ func (r *RollingUpdateInstanceGroup) drainTerminateAndWait(u *cloudinstances.Clo if err := r.DrainNode(u, rollingUpdateData); err != nil { if rollingUpdateData.FailOnDrainError { - return fmt.Errorf("failed to drain node %q: %v", nodeName, err) + terminateChan <- fmt.Errorf("failed to drain node %q: %v", nodeName, err) + return } klog.Infof("Ignoring error draining node %q: %v", nodeName, err) } @@ -275,21 +338,23 @@ func (r *RollingUpdateInstanceGroup) drainTerminateAndWait(u *cloudinstances.Clo } else { klog.Infof("deleting node %q from kubernetes", nodeName) if err := r.deleteNode(u.Node, rollingUpdateData); err != nil { - return fmt.Errorf("error deleting node %q: %v", nodeName, err) + terminateChan <- fmt.Errorf("error deleting node %q: %v", nodeName, err) + return } } } if err := r.DeleteInstance(u); err != nil { klog.Errorf("error deleting instance %q, node %q: %v", instanceId, nodeName, err) - return err + terminateChan <- err + return } // Wait for the minimum interval klog.Infof("waiting for %v after terminating instance", sleepAfterTerminate) time.Sleep(sleepAfterTerminate) - return nil + terminateChan <- nil } func (r *RollingUpdateInstanceGroup) maybeValidate(rollingUpdateData *RollingUpdateCluster, validationTimeout time.Duration) error { diff --git a/pkg/instancegroups/rollingupdate_test.go b/pkg/instancegroups/rollingupdate_test.go index b56a4294bf..ad894abe28 100644 --- a/pkg/instancegroups/rollingupdate_test.go +++ b/pkg/instancegroups/rollingupdate_test.go @@ -19,14 +19,17 @@ package instancegroups import ( "errors" "strings" + "sync" "testing" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/autoscaling" + "github.com/aws/aws-sdk-go/service/autoscaling/autoscalingiface" "github.com/stretchr/testify/assert" v1 "k8s.io/api/core/v1" v1meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/fake" testingclient "k8s.io/client-go/testing" @@ -42,7 +45,7 @@ const ( taintPatch = "{\"spec\":{\"taints\":[{\"effect\":\"PreferNoSchedule\",\"key\":\"kops.k8s.io/scheduled-for-update\"}]}}" ) -func getTestSetup() (*RollingUpdateCluster, awsup.AWSCloud, *kopsapi.Cluster) { +func getTestSetup() (*RollingUpdateCluster, *awsup.MockAWSCloud, *kopsapi.Cluster) { k8sClient := fake.NewSimpleClientset() mockcloud := awsup.BuildMockAWSCloud("us-east-1", "abc") @@ -602,6 +605,262 @@ func TestRollingUpdateTaintAllButOneNeedUpdate(t *testing.T) { assertGroupInstanceCount(t, cloud, "node-1", 1) } +func TestRollingUpdateSettingsIgnoredForMaster(t *testing.T) { + c, cloud, cluster := getTestSetup() + + two := intstr.FromInt(2) + cluster.Spec.RollingUpdate = &kopsapi.RollingUpdate{ + MaxUnavailable: &two, + } + + groups := make(map[string]*cloudinstances.CloudInstanceGroup) + makeGroup(groups, c.K8sClient, cloud, "master-1", kopsapi.InstanceGroupRoleMaster, 3, 2) + err := c.RollingUpdate(groups, cluster, &kopsapi.InstanceGroupList{}) + assert.NoError(t, err, "rolling update") + + cordoned := "" + tainted := map[string]bool{} + deleted := map[string]bool{} + for _, action := range c.K8sClient.(*fake.Clientset).Actions() { + switch a := action.(type) { + case testingclient.PatchAction: + if 
string(a.GetPatch()) == cordonPatch { + assertCordon(t, a) + assert.Equal(t, "", cordoned, "at most one node cordoned at a time") + assert.True(t, tainted[a.GetName()], "node", a.GetName(), "tainted") + cordoned = a.GetName() + } else { + assertTaint(t, a) + assert.Equal(t, "", cordoned, "not tainting while node cordoned") + assert.False(t, tainted[a.GetName()], "node", a.GetName(), "already tainted") + tainted[a.GetName()] = true + } + case testingclient.DeleteAction: + assert.Equal(t, "nodes", a.GetResource().Resource) + assert.Equal(t, cordoned, a.GetName(), "node was cordoned before delete") + assert.False(t, deleted[a.GetName()], "node", a.GetName(), "already deleted") + deleted[a.GetName()] = true + cordoned = "" + case testingclient.ListAction: + // Don't care + default: + t.Errorf("unexpected action %v", a) + } + } + + assertGroupInstanceCount(t, cloud, "master-1", 1) +} + +func TestRollingUpdateDisabled(t *testing.T) { + c, cloud, cluster := getTestSetup() + + zero := intstr.FromInt(0) + cluster.Spec.RollingUpdate = &kopsapi.RollingUpdate{ + MaxUnavailable: &zero, + } + + groups := getGroupsAllNeedUpdate(c.K8sClient, cloud) + err := c.RollingUpdate(groups, cluster, &kopsapi.InstanceGroupList{}) + assert.NoError(t, err, "rolling update") + + assertGroupInstanceCount(t, cloud, "node-1", 3) + assertGroupInstanceCount(t, cloud, "node-2", 3) + assertGroupInstanceCount(t, cloud, "master-1", 0) + assertGroupInstanceCount(t, cloud, "bastion-1", 0) +} + +func TestRollingUpdateDisabledCloudonly(t *testing.T) { + c, cloud, cluster := getTestSetup() + c.CloudOnly = true + + zero := intstr.FromInt(0) + cluster.Spec.RollingUpdate = &kopsapi.RollingUpdate{ + MaxUnavailable: &zero, + } + + groups := getGroupsAllNeedUpdate(c.K8sClient, cloud) + err := c.RollingUpdate(groups, cluster, &kopsapi.InstanceGroupList{}) + assert.NoError(t, err, "rolling update") + + assertGroupInstanceCount(t, cloud, "node-1", 3) + assertGroupInstanceCount(t, cloud, "node-2", 3) + assertGroupInstanceCount(t, cloud, "master-1", 0) + assertGroupInstanceCount(t, cloud, "bastion-1", 0) +} + +// The concurrent update tests attempt to induce the following expected update sequence: +// +// (Only for "all need update" tests, to verify the toe-dipping behavior) +// Request validate (7) --> +// <-- validated +// Request terminate 1 node (7) --> +// <-- 1 node terminated, 6 left +// (end only for "all need update" tests) +// Request validate (6) --> +// <-- validated +// Request terminate 2 nodes (6,5) --> +// <-- 1 node terminated (5), 5 left +// Request validate (4) --> +// <-- 1 node terminated (6), 4 left +// <-- validated +// Request terminate 2 nodes (4,3) --> +// <-- 1 node terminated (3), 3 left +// Request validate (2) --> +// <-- validated +// Request terminate 1 node (2) --> +// <-- 1 node terminated (2), 2 left +// Request validate (1) --> +// <-- 1 node terminated (4), 1 left +// <-- validated +// Request terminate 1 node (1) --> +// <-- 1 node terminated, 0 left +// Request validate (0) --> +// <-- validated + +type concurrentTest struct { + autoscalingiface.AutoScalingAPI + t *testing.T + mutex sync.Mutex + terminationRequestsLeft int + previousValidation int + validationChan chan bool + terminationChan chan bool +} + +func (c *concurrentTest) Validate() (*validation.ValidationCluster, error) { + c.mutex.Lock() + defer c.mutex.Unlock() + + terminationRequestsLeft := c.terminationRequestsLeft + switch terminationRequestsLeft { + case 7, 6, 0: + assert.Equal(c.t, terminationRequestsLeft+1, c.previousValidation, 
"previous validation") + case 5, 3: + c.t.Errorf("unexpected call to Validate with %d termination requests left", terminationRequestsLeft) + case 4: + assert.Equal(c.t, 6, c.previousValidation, "previous validation") + c.terminationChan <- true + c.mutex.Unlock() + select { + case <-c.validationChan: + case <-time.After(1 * time.Second): + c.t.Error("timed out reading from validationChan") + } + c.mutex.Lock() + case 2: + assert.Equal(c.t, 4, c.previousValidation, "previous validation") + case 1: + assert.Equal(c.t, 2, c.previousValidation, "previous validation") + c.terminationChan <- true + c.mutex.Unlock() + select { + case <-c.validationChan: + case <-time.After(1 * time.Second): + c.t.Error("timed out reading from validationChan") + } + c.mutex.Lock() + } + c.previousValidation = terminationRequestsLeft + + return &validation.ValidationCluster{}, nil +} + +func (c *concurrentTest) TerminateInstanceInAutoScalingGroup(input *autoscaling.TerminateInstanceInAutoScalingGroupInput) (*autoscaling.TerminateInstanceInAutoScalingGroupOutput, error) { + c.mutex.Lock() + defer c.mutex.Unlock() + + terminationRequestsLeft := c.terminationRequestsLeft + c.terminationRequestsLeft-- + switch terminationRequestsLeft { + case 7, 2, 1: + assert.Equal(c.t, terminationRequestsLeft, c.previousValidation, "previous validation") + case 6, 4: + assert.Equal(c.t, terminationRequestsLeft, c.previousValidation, "previous validation") + c.mutex.Unlock() + select { + case <-c.terminationChan: + case <-time.After(1 * time.Second): + c.t.Error("timed out reading from terminationChan") + } + c.mutex.Lock() + go c.delayThenWakeValidation() + case 5, 3: + assert.Equal(c.t, terminationRequestsLeft+1, c.previousValidation, "previous validation") + } + return c.AutoScalingAPI.TerminateInstanceInAutoScalingGroup(input) +} + +func (c *concurrentTest) delayThenWakeValidation() { + time.Sleep(2 * time.Millisecond) // NodeInterval plus some + c.validationChan <- true +} + +func (c *concurrentTest) AssertComplete() { + c.mutex.Lock() + defer c.mutex.Unlock() + assert.Equal(c.t, 0, c.previousValidation, "last validation") +} + +func newConcurrentTest(t *testing.T, cloud *awsup.MockAWSCloud, allNeedUpdate bool) *concurrentTest { + test := concurrentTest{ + AutoScalingAPI: cloud.MockAutoscaling, + t: t, + terminationRequestsLeft: 6, + validationChan: make(chan bool), + terminationChan: make(chan bool), + } + if allNeedUpdate { + test.terminationRequestsLeft = 7 + } + test.previousValidation = test.terminationRequestsLeft + 1 + return &test +} + +func TestRollingUpdateMaxUnavailableAllNeedUpdate(t *testing.T) { + c, cloud, cluster := getTestSetup() + + concurrentTest := newConcurrentTest(t, cloud, true) + c.ValidateSuccessDuration = 0 + c.ClusterValidator = concurrentTest + cloud.MockAutoscaling = concurrentTest + + two := intstr.FromInt(2) + cluster.Spec.RollingUpdate = &kopsapi.RollingUpdate{ + MaxUnavailable: &two, + } + + groups := make(map[string]*cloudinstances.CloudInstanceGroup) + makeGroup(groups, c.K8sClient, cloud, "node-1", kopsapi.InstanceGroupRoleNode, 7, 7) + + err := c.RollingUpdate(groups, cluster, &kopsapi.InstanceGroupList{}) + assert.NoError(t, err, "rolling update") + + assertGroupInstanceCount(t, cloud, "node-1", 0) + concurrentTest.AssertComplete() +} + +func TestRollingUpdateMaxUnavailableAllButOneNeedUpdate(t *testing.T) { + c, cloud, cluster := getTestSetup() + + concurrentTest := newConcurrentTest(t, cloud, false) + c.ValidateSuccessDuration = 0 + c.ClusterValidator = concurrentTest + 
cloud.MockAutoscaling = concurrentTest + + two := intstr.FromInt(2) + cluster.Spec.RollingUpdate = &kopsapi.RollingUpdate{ + MaxUnavailable: &two, + } + + groups := make(map[string]*cloudinstances.CloudInstanceGroup) + makeGroup(groups, c.K8sClient, cloud, "node-1", kopsapi.InstanceGroupRoleNode, 7, 6) + err := c.RollingUpdate(groups, cluster, &kopsapi.InstanceGroupList{}) + assert.NoError(t, err, "rolling update") + + assertGroupInstanceCount(t, cloud, "node-1", 1) + concurrentTest.AssertComplete() +} + func assertCordon(t *testing.T, action testingclient.PatchAction) { assert.Equal(t, "nodes", action.GetResource().Resource) assert.Equal(t, cordonPatch, string(action.GetPatch())) From 10d6416b8e5062bb19dd103a9f0c2fd75ed483c8 Mon Sep 17 00:00:00 2001 From: John Gardiner Myers Date: Sat, 11 Jan 2020 18:50:35 -0800 Subject: [PATCH 12/42] Allow MaxConcurrency for masters and bastions --- pkg/instancegroups/instancegroups.go | 15 ++--- pkg/instancegroups/rollingupdate_test.go | 77 +++++++++--------------- 2 files changed, 35 insertions(+), 57 deletions(-) diff --git a/pkg/instancegroups/instancegroups.go b/pkg/instancegroups/instancegroups.go index 5e5556608b..ae9b1317e7 100644 --- a/pkg/instancegroups/instancegroups.go +++ b/pkg/instancegroups/instancegroups.go @@ -152,14 +152,15 @@ func (r *RollingUpdateInstanceGroup) RollingUpdate(rollingUpdateData *RollingUpd settings := resolveSettings(cluster, r.CloudGroup.InstanceGroup, numInstances) concurrency := 0 - maxConcurrency := 1 + maxConcurrency := settings.MaxUnavailable.IntValue() - if r.CloudGroup.InstanceGroup.Spec.Role == api.InstanceGroupRoleNode && !rollingUpdateData.Interactive { - maxConcurrency = settings.MaxUnavailable.IntValue() - if maxConcurrency == 0 { - klog.Infof("Rolling updates for InstanceGroup %s are disabled", r.CloudGroup.InstanceGroup.Name) - return nil - } + if maxConcurrency == 0 { + klog.Infof("Rolling updates for InstanceGroup %s are disabled", r.CloudGroup.InstanceGroup.Name) + return nil + } + + if rollingUpdateData.Interactive { + maxConcurrency = 1 } terminateChan := make(chan error, maxConcurrency) diff --git a/pkg/instancegroups/rollingupdate_test.go b/pkg/instancegroups/rollingupdate_test.go index ad894abe28..7084c40f51 100644 --- a/pkg/instancegroups/rollingupdate_test.go +++ b/pkg/instancegroups/rollingupdate_test.go @@ -605,52 +605,6 @@ func TestRollingUpdateTaintAllButOneNeedUpdate(t *testing.T) { assertGroupInstanceCount(t, cloud, "node-1", 1) } -func TestRollingUpdateSettingsIgnoredForMaster(t *testing.T) { - c, cloud, cluster := getTestSetup() - - two := intstr.FromInt(2) - cluster.Spec.RollingUpdate = &kopsapi.RollingUpdate{ - MaxUnavailable: &two, - } - - groups := make(map[string]*cloudinstances.CloudInstanceGroup) - makeGroup(groups, c.K8sClient, cloud, "master-1", kopsapi.InstanceGroupRoleMaster, 3, 2) - err := c.RollingUpdate(groups, cluster, &kopsapi.InstanceGroupList{}) - assert.NoError(t, err, "rolling update") - - cordoned := "" - tainted := map[string]bool{} - deleted := map[string]bool{} - for _, action := range c.K8sClient.(*fake.Clientset).Actions() { - switch a := action.(type) { - case testingclient.PatchAction: - if string(a.GetPatch()) == cordonPatch { - assertCordon(t, a) - assert.Equal(t, "", cordoned, "at most one node cordoned at a time") - assert.True(t, tainted[a.GetName()], "node", a.GetName(), "tainted") - cordoned = a.GetName() - } else { - assertTaint(t, a) - assert.Equal(t, "", cordoned, "not tainting while node cordoned") - assert.False(t, tainted[a.GetName()], "node", 
a.GetName(), "already tainted") - tainted[a.GetName()] = true - } - case testingclient.DeleteAction: - assert.Equal(t, "nodes", a.GetResource().Resource) - assert.Equal(t, cordoned, a.GetName(), "node was cordoned before delete") - assert.False(t, deleted[a.GetName()], "node", a.GetName(), "already deleted") - deleted[a.GetName()] = true - cordoned = "" - case testingclient.ListAction: - // Don't care - default: - t.Errorf("unexpected action %v", a) - } - } - - assertGroupInstanceCount(t, cloud, "master-1", 1) -} - func TestRollingUpdateDisabled(t *testing.T) { c, cloud, cluster := getTestSetup() @@ -665,8 +619,8 @@ func TestRollingUpdateDisabled(t *testing.T) { assertGroupInstanceCount(t, cloud, "node-1", 3) assertGroupInstanceCount(t, cloud, "node-2", 3) - assertGroupInstanceCount(t, cloud, "master-1", 0) - assertGroupInstanceCount(t, cloud, "bastion-1", 0) + assertGroupInstanceCount(t, cloud, "master-1", 2) + assertGroupInstanceCount(t, cloud, "bastion-1", 1) } func TestRollingUpdateDisabledCloudonly(t *testing.T) { @@ -684,8 +638,8 @@ func TestRollingUpdateDisabledCloudonly(t *testing.T) { assertGroupInstanceCount(t, cloud, "node-1", 3) assertGroupInstanceCount(t, cloud, "node-2", 3) - assertGroupInstanceCount(t, cloud, "master-1", 0) - assertGroupInstanceCount(t, cloud, "bastion-1", 0) + assertGroupInstanceCount(t, cloud, "master-1", 2) + assertGroupInstanceCount(t, cloud, "bastion-1", 1) } // The concurrent update tests attempt to induce the following expected update sequence: @@ -861,6 +815,29 @@ func TestRollingUpdateMaxUnavailableAllButOneNeedUpdate(t *testing.T) { concurrentTest.AssertComplete() } +func TestRollingUpdateMaxUnavailableAllNeedUpdateMaster(t *testing.T) { + c, cloud, cluster := getTestSetup() + + concurrentTest := newConcurrentTest(t, cloud, true) + c.ValidateSuccessDuration = 0 + c.ClusterValidator = concurrentTest + cloud.MockAutoscaling = concurrentTest + + two := intstr.FromInt(2) + cluster.Spec.RollingUpdate = &kopsapi.RollingUpdate{ + MaxUnavailable: &two, + } + + groups := make(map[string]*cloudinstances.CloudInstanceGroup) + makeGroup(groups, c.K8sClient, cloud, "master-1", kopsapi.InstanceGroupRoleMaster, 7, 7) + + err := c.RollingUpdate(groups, cluster, &kopsapi.InstanceGroupList{}) + assert.NoError(t, err, "rolling update") + + assertGroupInstanceCount(t, cloud, "master-1", 0) + concurrentTest.AssertComplete() +} + func assertCordon(t *testing.T, action testingclient.PatchAction) { assert.Equal(t, "nodes", action.GetResource().Resource) assert.Equal(t, cordonPatch, string(action.GetPatch())) From 2a6aeaff7c2f2102d6abb44cd6b2a1fe7f55e71d Mon Sep 17 00:00:00 2001 From: Ciprian Hacman Date: Thu, 26 Dec 2019 22:55:30 +0200 Subject: [PATCH 13/42] Add support for containerd tar.gz package --- Makefile | 10 +-- nodeup/pkg/distros/identify.go | 3 +- nodeup/pkg/model/containerd.go | 63 ++++++++------ nodeup/pkg/model/convenience.go | 11 ++- nodeup/pkg/model/docker.go | 87 ++++++------------- nodeup/pkg/model/miscutils.go | 1 - nodeup/pkg/model/packages.go | 27 +++++- .../tests/containerdbuilder/simple/tasks.yaml | 13 +-- upup/pkg/fi/nodeup/nodetasks/archive.go | 47 +++++++--- upup/pkg/fi/nodeup/nodetasks/package.go | 23 ++++- 10 files changed, 163 insertions(+), 122 deletions(-) diff --git a/Makefile b/Makefile index 5ce152e66e..6856155b61 100644 --- a/Makefile +++ b/Makefile @@ -371,21 +371,21 @@ push: crossbuild-nodeup .PHONY: push-gce-dry push-gce-dry: push - ssh ${TARGET} sudo SKIP_PACKAGE_UPDATE=1 /tmp/nodeup --conf=metadata://gce/config --dryrun --v=8 + ssh 
${TARGET} sudo /tmp/nodeup --conf=metadata://gce/config --dryrun --v=8 .PHONY: push-gce-dry push-aws-dry: push - ssh ${TARGET} sudo SKIP_PACKAGE_UPDATE=1 /tmp/nodeup --conf=/var/cache/kubernetes-install/kube_env.yaml --dryrun --v=8 + ssh ${TARGET} sudo /tmp/nodeup --conf=/opt/kops/conf/kube_env.yaml --dryrun --v=8 .PHONY: push-gce-run push-gce-run: push ssh ${TARGET} sudo cp /tmp/nodeup /var/lib/toolbox/kubernetes-install/nodeup - ssh ${TARGET} sudo SKIP_PACKAGE_UPDATE=1 /var/lib/toolbox/kubernetes-install/nodeup --conf=/var/lib/toolbox/kubernetes-install/kube_env.yaml --v=8 + ssh ${TARGET} sudo /var/lib/toolbox/kubernetes-install/nodeup --conf=/var/lib/toolbox/kubernetes-install/kube_env.yaml --v=8 # -t is for CentOS http://unix.stackexchange.com/questions/122616/why-do-i-need-a-tty-to-run-sudo-if-i-can-sudo-without-a-password .PHONY: push-aws-run push-aws-run: push - ssh -t ${TARGET} sudo SKIP_PACKAGE_UPDATE=1 /tmp/nodeup --conf=/var/cache/kubernetes-install/kube_env.yaml --v=8 + ssh -t ${TARGET} sudo /tmp/nodeup --conf=/opt/kops/conf/kube_env.yaml --v=8 .PHONY: ${PROTOKUBE} ${PROTOKUBE}: @@ -727,7 +727,7 @@ bazel-push-gce-run: bazel-push .PHONY: bazel-push-aws-run bazel-push-aws-run: bazel-push ssh ${TARGET} chmod +x /tmp/nodeup - ssh -t ${TARGET} sudo SKIP_PACKAGE_UPDATE=1 /tmp/nodeup --conf=/var/cache/kubernetes-install/kube_env.yaml --v=8 + ssh -t ${TARGET} sudo SKIP_PACKAGE_UPDATE=1 /tmp/nodeup --conf=/opt/kops/conf/kube_env.yaml --v=8 .PHONY: gazelle gazelle: diff --git a/nodeup/pkg/distros/identify.go b/nodeup/pkg/distros/identify.go index 29d190aa84..f73f487a91 100644 --- a/nodeup/pkg/distros/identify.go +++ b/nodeup/pkg/distros/identify.go @@ -113,7 +113,8 @@ func FindDistribution(rootfs string) (Distribution, error) { return DistributionContainerOS, nil } if strings.HasPrefix(line, "PRETTY_NAME=\"Amazon Linux 2") { - return DistributionCentos7, nil + // TODO: This is a hack. 
Amazon Linux is "special" and should get its own distro entry + return DistributionRhel7, nil } } klog.Warningf("unhandled /etc/os-release info %q", string(osRelease)) diff --git a/nodeup/pkg/model/containerd.go b/nodeup/pkg/model/containerd.go index c59afb7f50..63bf8b3c26 100644 --- a/nodeup/pkg/model/containerd.go +++ b/nodeup/pkg/model/containerd.go @@ -47,7 +47,6 @@ var containerdVersions = []packageVersion{ Version: "1.2.4-1", Source: "https://download.docker.com/linux/debian/dists/stretch/pool/stable/amd64/containerd.io_1.2.4-1_amd64.deb", Hash: "48c6ab0c908316af9a183de5aad64703bc516bdf", - Dependencies: []string{"libseccomp2", "pigz"}, }, // 1.2.10 - Debian Stretch @@ -59,7 +58,6 @@ var containerdVersions = []packageVersion{ Version: "1.2.10-3", Source: "https://download.docker.com/linux/debian/dists/stretch/pool/stable/amd64/containerd.io_1.2.10-3_amd64.deb", Hash: "186f2f2c570f37b363102e6b879073db6dec671d", - Dependencies: []string{"libseccomp2", "pigz"}, }, // 1.2.10 - Debian Buster @@ -71,7 +69,6 @@ var containerdVersions = []packageVersion{ Version: "1.2.10-3", Source: "https://download.docker.com/linux/debian/dists/buster/pool/stable/amd64/containerd.io_1.2.10-3_amd64.deb", Hash: "365e4a7541ce2cf3c3036ea2a9bf6b40a50893a8", - Dependencies: []string{"libseccomp2", "pigz"}, }, // 1.2.10 - Ubuntu Xenial @@ -83,7 +80,6 @@ var containerdVersions = []packageVersion{ Version: "1.2.10-3", Source: "https://download.docker.com/linux/ubuntu/dists/xenial/pool/stable/amd64/containerd.io_1.2.10-3_amd64.deb", Hash: "b64e7170d9176bc38967b2e12147c69b65bdd0fc", - Dependencies: []string{"libseccomp2", "pigz"}, }, // 1.2.10 - Ubuntu Bionic @@ -95,7 +91,6 @@ var containerdVersions = []packageVersion{ Version: "1.2.10-3", Source: "https://download.docker.com/linux/ubuntu/dists/bionic/pool/stable/amd64/containerd.io_1.2.10-3_amd64.deb", Hash: "f4c941807310e3fa470dddfb068d599174a3daec", - Dependencies: []string{"libseccomp2", "pigz"}, }, // 1.2.10 - CentOS / Rhel 7 @@ -107,14 +102,6 @@ var containerdVersions = []packageVersion{ Version: "1.2.10", Source: "https://download.docker.com/linux/centos/7/x86_64/stable/Packages/containerd.io-1.2.10-3.2.el7.x86_64.rpm", Hash: "f6447e84479df3a58ce04a3da87ccc384663493b", - ExtraPackages: map[string]packageInfo{ - "container-selinux": { - Version: "2.107", - Source: "http://vault.centos.org/7.6.1810/extras/x86_64/Packages/container-selinux-2.107-1.el7_6.noarch.rpm", - Hash: "7de4211fa0dfd240d8827b93763e1eb5f0d56411", - }, - }, - Dependencies: []string{"libseccomp", "policycoreutils-python"}, }, // 1.2.10 - CentOS / Rhel 8 @@ -126,7 +113,26 @@ var containerdVersions = []packageVersion{ Version: "1.2.10", Source: "https://download.docker.com/linux/centos/7/x86_64/stable/Packages/containerd.io-1.2.10-3.2.el7.x86_64.rpm", Hash: "f6447e84479df3a58ce04a3da87ccc384663493b", - Dependencies: []string{"container-selinux", "libseccomp", "pigz"}, + }, + + // 1.2.11 - Linux Generic + { + PackageVersion: "1.2.11", + PlainBinary: true, + Architectures: []Architecture{ArchitectureAmd64}, + Version: "1.2.11", + Source: "https://storage.googleapis.com/cri-containerd-release/cri-containerd-1.2.11.linux-amd64.tar.gz", + Hash: "c98c9fdfd0984557e5b1a1f209213d2d8ad8471c", + }, + + // 1.3.2 - Linux Generic + { + PackageVersion: "1.3.2", + PlainBinary: true, + Architectures: []Architecture{ArchitectureAmd64}, + Version: "1.3.2", + Source: "https://storage.googleapis.com/cri-containerd-release/cri-containerd-1.3.2.linux-amd64.tar.gz", + Hash: "f451d46280104588f236bee277bca1da8babc0e8", }, 
// TIP: When adding the next version, copy the previous version, string replace the version and run: @@ -220,11 +226,14 @@ func (b *ContainerdBuilder) Build(c *fi.ModelBuilderContext) error { var packageTask fi.Task if dv.PlainBinary { packageTask = &nodetasks.Archive{ - Name: "containerd", - Source: dv.Source, - Hash: dv.Hash, - TargetDir: "/usr/bin/", - StripComponents: 1, + Name: "containerd.io", + Source: dv.Source, + Hash: dv.Hash, + TargetDir: "/", + MapFiles: map[string]string{ + "./usr/local/bin": "/usr", + "./usr/local/sbin": "/usr", + }, } c.AddTask(packageTask) } else { @@ -283,6 +292,8 @@ func (b *ContainerdBuilder) Build(c *fi.ModelBuilderContext) error { } func (b *ContainerdBuilder) buildSystemdService() *nodetasks.Service { + // Based on https://github.com/containerd/cri/blob/master/contrib/systemd-units/containerd.service + manifest := &systemd.Manifest{} manifest.Set("Unit", "Description", "containerd container runtime") manifest.Set("Unit", "Documentation", "https://containerd.io") @@ -293,21 +304,21 @@ func (b *ContainerdBuilder) buildSystemdService() *nodetasks.Service { manifest.Set("Service", "ExecStartPre", "-/sbin/modprobe overlay") manifest.Set("Service", "ExecStart", "/usr/bin/containerd -c /etc/containerd/config-kops.toml \"$CONTAINERD_OPTS\"") - // kill only the containerd process, not all processes in the cgroup - manifest.Set("Service", "KillMode", "process") + manifest.Set("Service", "Restart", "always") + manifest.Set("Service", "RestartSec", "5") + // set delegate yes so that systemd does not reset the cgroups of containerd containers manifest.Set("Service", "Delegate", "yes") + // kill only the containerd process, not all processes in the cgroup + manifest.Set("Service", "KillMode", "process") + // make killing of processes of this unit under memory pressure very unlikely + manifest.Set("Service", "OOMScoreAdjust", "-999") manifest.Set("Service", "LimitNOFILE", "1048576") manifest.Set("Service", "LimitNPROC", "infinity") manifest.Set("Service", "LimitCORE", "infinity") manifest.Set("Service", "TasksMax", "infinity") - manifest.Set("Service", "Restart", "always") - manifest.Set("Service", "RestartSec", "2s") - manifest.Set("Service", "StartLimitInterval", "0") - manifest.Set("Service", "TimeoutStartSec", "0") - manifest.Set("Install", "WantedBy", "multi-user.target") manifestString := manifest.Render() diff --git a/nodeup/pkg/model/convenience.go b/nodeup/pkg/model/convenience.go index 0bed51626d..27587845fe 100644 --- a/nodeup/pkg/model/convenience.go +++ b/nodeup/pkg/model/convenience.go @@ -158,10 +158,15 @@ func (d *packageVersion) matches(arch Architecture, packageVersion string, distr return false } foundDistro := false - for _, d := range d.Distros { - if d == distro { - foundDistro = true + if len(d.Distros) > 0 { + for _, d := range d.Distros { + if d == distro { + foundDistro = true + } } + } else { + // Distro list is empty, assuming ANY + foundDistro = true } if !foundDistro { return false diff --git a/nodeup/pkg/model/docker.go b/nodeup/pkg/model/docker.go index 3395fc8892..831020e154 100644 --- a/nodeup/pkg/model/docker.go +++ b/nodeup/pkg/model/docker.go @@ -188,7 +188,7 @@ var dockerVersions = []packageVersion{ Hash: "a6b0243af348140236ed96f2e902b259c590eefa", }, }, - Dependencies: []string{"libtool-ltdl", "libseccomp"}, + Dependencies: []string{"libtool-ltdl"}, }, // 1.12.6 - k8s 1.6 @@ -216,7 +216,7 @@ var dockerVersions = []packageVersion{ Version: "1.12.6-0~debian-stretch", Source: 
"http://apt.dockerproject.org/repo/pool/main/d/docker-engine/docker-engine_1.12.6-0~debian-stretch_amd64.deb", Hash: "18bb7d024658f27a1221eae4de78d792bf00611b", - Dependencies: []string{"bridge-utils", "libapparmor1", "libltdl7", "perl", "libseccomp2"}, + Dependencies: []string{"bridge-utils", "libapparmor1", "libltdl7", "perl"}, //Depends: iptables, init-system-helpers (>= 1.18~), libapparmor1 (>= 2.6~devel), libc6 (>= 2.17), libdevmapper1.02.1 (>= 2:1.02.97), libltdl7 (>= 2.4.6), libseccomp2 (>= 2.1.0), libsystemd0 //Recommends: aufs-tools, ca-certificates, cgroupfs-mount | cgroup-lite, git, xz-utils }, @@ -242,7 +242,7 @@ var dockerVersions = []packageVersion{ Version: "1.12.6-0~ubuntu-xenial", Source: "http://apt.dockerproject.org/repo/pool/main/d/docker-engine/docker-engine_1.12.6-0~ubuntu-xenial_amd64.deb", Hash: "fffc22da4ad5b20715bbb6c485b2d2bb7e84fd33", - Dependencies: []string{"bridge-utils", "iptables", "libapparmor1", "libltdl7", "perl"}, + Dependencies: []string{"bridge-utils", "libapparmor1", "libltdl7", "perl"}, // Depends: iptables, init-system-helpers (>= 1.18~), lsb-base (>= 4.1+Debian11ubuntu7), libapparmor1 (>= 2.6~devel), libc6 (>= 2.17), libdevmapper1.02.1 (>= 2:1.02.97), libltdl7 (>= 2.4.6), libseccomp2 (>= 2.1.0), libsystemd0 }, @@ -262,7 +262,7 @@ var dockerVersions = []packageVersion{ Hash: "9a6ee0d631ca911b6927450a3c396e9a5be75047", }, }, - Dependencies: []string{"libtool-ltdl", "libseccomp", "libcgroup", "policycoreutils-python"}, + Dependencies: []string{"libtool-ltdl", "libcgroup"}, }, // 1.13.1 - k8s 1.8 @@ -316,7 +316,7 @@ var dockerVersions = []packageVersion{ Version: "1.13.1-0~ubuntu-xenial", Source: "http://apt.dockerproject.org/repo/pool/main/d/docker-engine/docker-engine_1.13.1-0~ubuntu-xenial_amd64.deb", Hash: "d12cbd686f44536c679a03cf0137df163f0bba5f", - Dependencies: []string{"bridge-utils", "iptables", "libapparmor1", "libltdl7", "perl"}, + Dependencies: []string{"bridge-utils", "libapparmor1", "libltdl7", "perl"}, // Depends: iptables, init-system-helpers (>= 1.18~), lsb-base (>= 4.1+Debian11ubuntu7), libapparmor1 (>= 2.6~devel), libc6 (>= 2.17), libdevmapper1.02.1 (>= 2:1.02.97), libltdl7 (>= 2.4.6), libseccomp2 (>= 2.1.0), libsystemd0 }, @@ -336,7 +336,7 @@ var dockerVersions = []packageVersion{ Hash: "948c518a610af631fa98aa32d9bcd43e9ddd5ebc", }, }, - Dependencies: []string{"libtool-ltdl", "libseccomp", "libcgroup", "policycoreutils-python", "selinux-policy-base", "selinux-policy-targeted"}, + Dependencies: []string{"libtool-ltdl", "libcgroup", "selinux-policy-base", "selinux-policy-targeted"}, }, // 17.03.2 - k8s 1.8 @@ -389,7 +389,7 @@ var dockerVersions = []packageVersion{ Version: "17.03.2~ce-0~ubuntu-xenial", Source: "http://download.docker.com/linux/ubuntu/dists/xenial/pool/stable/amd64/docker-ce_17.03.2~ce-0~ubuntu-xenial_amd64.deb", Hash: "4dcee1a05ec592e8a76e53e5b464ea43085a2849", - Dependencies: []string{"bridge-utils", "iptables", "libapparmor1", "libltdl7", "perl"}, + Dependencies: []string{"bridge-utils", "libapparmor1", "libltdl7", "perl"}, MarkImmutable: []string{"/usr/bin/docker-runc"}, }, @@ -401,7 +401,7 @@ var dockerVersions = []packageVersion{ Architectures: []Architecture{ArchitectureAmd64}, Source: "http://download.docker.com/linux/static/stable/x86_64/docker-17.03.2-ce.tgz", Hash: "141716ae046016a1792ce232a0f4c8eed7fe37d1", - Dependencies: []string{"bridge-utils", "iptables", "libapparmor1", "libltdl7", "perl"}, + Dependencies: []string{"bridge-utils", "libapparmor1", "libltdl7", "perl"}, MarkImmutable: 
[]string{"/usr/bin/docker-runc"}, }, @@ -421,7 +421,7 @@ var dockerVersions = []packageVersion{ Hash: "4659c937b66519c88ef2a82a906bb156db29d191", }, }, - Dependencies: []string{"libtool-ltdl", "libseccomp", "libcgroup", "policycoreutils-python"}, + Dependencies: []string{"libtool-ltdl", "libcgroup"}, MarkImmutable: []string{"/usr/bin/docker-runc"}, }, // 17.09.0 - k8s 1.8 @@ -471,7 +471,7 @@ var dockerVersions = []packageVersion{ Version: "17.09.0~ce-0~ubuntu", Source: "http://download.docker.com/linux/ubuntu/dists/xenial/pool/stable/amd64/docker-ce_17.09.0~ce-0~ubuntu_amd64.deb", Hash: "94f6e89be6d45d9988269a237eb27c7d6a844d7f", - Dependencies: []string{"bridge-utils", "iptables", "libapparmor1", "libltdl7", "perl"}, + Dependencies: []string{"bridge-utils", "libapparmor1", "libltdl7", "perl"}, //Depends: iptables, init-system-helpers, lsb-base, libapparmor1, libc6, libdevmapper1.02.1, libltdl7, libeseccomp2, libsystemd0 //Recommends: aufs-tools, ca-certificates, cgroupfs-mount | cgroup-lite, git, xz-utils, apparmor }, @@ -485,7 +485,7 @@ var dockerVersions = []packageVersion{ Version: "18.06.2~ce~3-0~ubuntu", Source: "https://download.docker.com/linux/ubuntu/dists/xenial/pool/stable/amd64/docker-ce_18.06.2~ce~3-0~ubuntu_amd64.deb", Hash: "03e5eaae9c84b144e1140d9b418e43fce0311892", - Dependencies: []string{"bridge-utils", "iptables", "libapparmor1", "libltdl7", "perl"}, + Dependencies: []string{"bridge-utils", "libapparmor1", "libltdl7", "perl"}, //Depends: iptables, init-system-helpers, lsb-base, libapparmor1, libc6, libdevmapper1.02.1, libltdl7, libeseccomp2, libsystemd0 //Recommends: aufs-tools, ca-certificates, cgroupfs-mount | cgroup-lite, git, xz-utils, apparmor }, @@ -499,7 +499,7 @@ var dockerVersions = []packageVersion{ Version: "18.06.3~ce~3-0~ubuntu", Source: "https://download.docker.com/linux/ubuntu/dists/xenial/pool/stable/amd64/docker-ce_18.06.3~ce~3-0~ubuntu_amd64.deb", Hash: "c06eda4e934cce6a7941a6af6602d4315b500a22", - Dependencies: []string{"bridge-utils", "iptables", "libapparmor1", "libltdl7", "perl"}, + Dependencies: []string{"bridge-utils", "libapparmor1", "libltdl7", "perl"}, //Depends: iptables, init-system-helpers, lsb-base, libc6, libdevmapper1.02.1, libltdl7, libseccomp2, libsystemd0 //Recommends: aufs-tools, ca-certificates, cgroupfs-mount | cgroup-lite, git, pigz, xz-utils, apparmor }, @@ -513,14 +513,7 @@ var dockerVersions = []packageVersion{ Version: "17.09.0.ce", Source: "https://download.docker.com/linux/centos/7/x86_64/stable/Packages/docker-ce-17.09.0.ce-1.el7.centos.x86_64.rpm", Hash: "b4ce72e80ff02926de943082821bbbe73958f87a", - ExtraPackages: map[string]packageInfo{ - "container-selinux": { - Version: "2.68", - Source: "http://vault.centos.org/7.6.1810/extras/x86_64/Packages/container-selinux-2.68-1.el7.noarch.rpm", - Hash: "d9f87f7f4f2e8e611f556d873a17b8c0c580fec0", - }, - }, - Dependencies: []string{"libtool-ltdl", "libseccomp", "libcgroup", "policycoreutils-python"}, + Dependencies: []string{"libtool-ltdl", "libcgroup"}, }, // 18.03.1 - Bionic @@ -532,7 +525,7 @@ var dockerVersions = []packageVersion{ Version: "18.03.1~ce~3-0~ubuntu", Source: "https://download.docker.com/linux/ubuntu/dists/bionic/pool/stable/amd64/docker-ce_18.03.1~ce~3-0~ubuntu_amd64.deb", Hash: "b55b32bd0e9176dd32b1e6128ad9fda10a65cc8b", - Dependencies: []string{"bridge-utils", "iptables", "libapparmor1", "libltdl7", "perl"}, + Dependencies: []string{"bridge-utils", "libapparmor1", "libltdl7", "perl"}, //Depends: iptables, init-system-helpers, lsb-base, libapparmor1, libc6, 
libdevmapper1.02.1, libltdl7, libeseccomp2, libsystemd0 //Recommends: aufs-tools, ca-certificates, cgroupfs-mount | cgroup-lite, git, xz-utils, apparmor }, @@ -546,7 +539,7 @@ var dockerVersions = []packageVersion{ Version: "18.06.2~ce~3-0~ubuntu", Source: "https://download.docker.com/linux/ubuntu/dists/bionic/pool/stable/amd64/docker-ce_18.06.2~ce~3-0~ubuntu_amd64.deb", Hash: "9607c67644e3e1ad9661267c99499004f2e84e05", - Dependencies: []string{"bridge-utils", "iptables", "libapparmor1", "libltdl7", "perl"}, + Dependencies: []string{"bridge-utils", "libapparmor1", "libltdl7", "perl"}, //Depends: iptables, init-system-helpers, lsb-base, libapparmor1, libc6, libdevmapper1.02.1, libltdl7, libeseccomp2, libsystemd0 //Recommends: aufs-tools, ca-certificates, cgroupfs-mount | cgroup-lite, git, xz-utils, apparmor }, @@ -610,14 +603,7 @@ var dockerVersions = []packageVersion{ Version: "18.06.1.ce", Source: "https://download.docker.com/linux/centos/7/x86_64/stable/Packages/docker-ce-18.06.1.ce-3.el7.x86_64.rpm", Hash: "0a1325e570c5e54111a79623c9fd0c0c714d3a11", - ExtraPackages: map[string]packageInfo{ - "container-selinux": { - Version: "2.68", - Source: "http://vault.centos.org/7.6.1810/extras/x86_64/Packages/container-selinux-2.68-1.el7.noarch.rpm", - Hash: "d9f87f7f4f2e8e611f556d873a17b8c0c580fec0", - }, - }, - Dependencies: []string{"libtool-ltdl", "libseccomp", "libcgroup", "policycoreutils-python"}, + Dependencies: []string{"libtool-ltdl", "libcgroup"}, }, // 18.09.3 - Debian Stretch @@ -639,16 +625,6 @@ var dockerVersions = []packageVersion{ }, // 18.06.2 - CentOS / Rhel7 (two packages) - { - PackageVersion: "18.06.2", - Name: "container-selinux", - Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7}, - Architectures: []Architecture{ArchitectureAmd64}, - Version: "2.68", - Source: "http://vault.centos.org/7.6.1810/extras/x86_64/Packages/container-selinux-2.68-1.el7.noarch.rpm", - Hash: "d9f87f7f4f2e8e611f556d873a17b8c0c580fec0", - Dependencies: []string{"policycoreutils-python"}, - }, { PackageVersion: "18.06.2", Name: "docker-ce", @@ -657,7 +633,7 @@ var dockerVersions = []packageVersion{ Version: "18.06.2.ce", Source: "https://download.docker.com/linux/centos/7/x86_64/stable/Packages/docker-ce-18.06.2.ce-3.el7.x86_64.rpm", Hash: "456eb7c5bfb37fac342e9ade21b602c076c5b367", - Dependencies: []string{"libtool-ltdl", "libseccomp", "libcgroup"}, + Dependencies: []string{"libtool-ltdl", "libcgroup"}, }, // 18.06.3 (contains fix for CVE-2019-5736) @@ -671,7 +647,7 @@ var dockerVersions = []packageVersion{ Version: "18.06.3~ce~3-0~ubuntu", Source: "https://download.docker.com/linux/ubuntu/dists/bionic/pool/stable/amd64/docker-ce_18.06.3~ce~3-0~ubuntu_amd64.deb", Hash: "b396678a8b70f0503a7b944fa6e3297ab27b345b", - Dependencies: []string{"bridge-utils", "iptables", "libapparmor1", "libltdl7", "perl"}, + Dependencies: []string{"bridge-utils", "libapparmor1", "libltdl7", "perl"}, //Depends: iptables, init-system-helpers, lsb-base, libapparmor1, libc6, libdevmapper1.02.1, libltdl7, libeseccomp2, libsystemd0 //Recommends: aufs-tools, ca-certificates, cgroupfs-mount | cgroup-lite, git, xz-utils, apparmor }, @@ -710,14 +686,7 @@ var dockerVersions = []packageVersion{ Version: "18.06.3.ce", Source: "https://download.docker.com/linux/centos/7/x86_64/stable/Packages/docker-ce-18.06.3.ce-3.el7.x86_64.rpm", Hash: "5369602f88406d4fb9159dc1d3fd44e76fb4cab8", - ExtraPackages: map[string]packageInfo{ - "container-selinux": { - Version: "2.68", - Source: 
"http://vault.centos.org/7.6.1810/extras/x86_64/Packages/container-selinux-2.68-1.el7.noarch.rpm", - Hash: "d9f87f7f4f2e8e611f556d873a17b8c0c580fec0", - }, - }, - Dependencies: []string{"libtool-ltdl", "libseccomp", "libcgroup", "policycoreutils-python"}, + Dependencies: []string{"libtool-ltdl", "libcgroup"}, }, // 18.06.3 - CentOS / Rhel8 (two packages) { @@ -728,7 +697,7 @@ var dockerVersions = []packageVersion{ Version: "18.06.3.ce", Source: "https://download.docker.com/linux/centos/7/x86_64/stable/Packages/docker-ce-18.06.3.ce-3.el7.x86_64.rpm", Hash: "5369602f88406d4fb9159dc1d3fd44e76fb4cab8", - Dependencies: []string{"container-selinux", "libtool-ltdl", "libseccomp", "libcgroup", "policycoreutils-python-utils", "python3-policycoreutils"}, + Dependencies: []string{"libtool-ltdl", "libcgroup", "policycoreutils-python-utils", "python3-policycoreutils"}, }, // 18.09.9 - k8s 1.14 - https://github.com/kubernetes/kubernetes/pull/72823 @@ -749,7 +718,7 @@ var dockerVersions = []packageVersion{ Hash: "88f8f3103d2e5011e2f1a73b9e6dbf03d6e6698a", }, }, - Dependencies: []string{"bridge-utils", "iptables", "libapparmor1", "libltdl7", "perl"}, + Dependencies: []string{"bridge-utils", "libapparmor1", "libltdl7", "perl"}, }, // 18.09.9 - Debian Buster @@ -768,7 +737,7 @@ var dockerVersions = []packageVersion{ Hash: "510eee5b6884867be0d2b360f8ff8cf7f0c0d11a", }, }, - Dependencies: []string{"bridge-utils", "iptables", "libapparmor1", "libltdl7", "perl"}, + Dependencies: []string{"bridge-utils", "libapparmor1", "libltdl7", "perl"}, }, // 18.09.9 - Xenial @@ -825,7 +794,7 @@ var dockerVersions = []packageVersion{ Hash: "0c51b1339a95bd732ca305f07b7bcc95f132b9c8", }, }, - Dependencies: []string{"libtool-ltdl", "iptables"}, + Dependencies: []string{"libtool-ltdl"}, }, // 18.09.9 - CentOS / Rhel8 @@ -844,7 +813,7 @@ var dockerVersions = []packageVersion{ Hash: "0c51b1339a95bd732ca305f07b7bcc95f132b9c8", }, }, - Dependencies: []string{"libtool-ltdl", "iptables"}, + Dependencies: []string{"libtool-ltdl"}, }, // 19.03.4 - k8s 1.17 - https://github.com/kubernetes/kubernetes/pull/84476 @@ -865,7 +834,7 @@ var dockerVersions = []packageVersion{ Hash: "57f71ee764abb19a0b4c580ff14b1eb3de3a9e08", }, }, - Dependencies: []string{"bridge-utils", "iptables", "libapparmor1", "libltdl7", "perl"}, + Dependencies: []string{"bridge-utils", "libapparmor1", "libltdl7", "perl"}, }, // 19.03.4 - Debian Buster @@ -884,7 +853,7 @@ var dockerVersions = []packageVersion{ Hash: "2549a364f0e5ce489c79b292b78e349751385dd5", }, }, - Dependencies: []string{"bridge-utils", "iptables", "libapparmor1", "libltdl7", "perl"}, + Dependencies: []string{"bridge-utils", "libapparmor1", "libltdl7", "perl"}, }, // 19.03.4 - Xenial @@ -941,7 +910,7 @@ var dockerVersions = []packageVersion{ Hash: "1fffcc716e74a59f753f8898ba96693a00e79e26", }, }, - Dependencies: []string{"libtool-ltdl", "iptables"}, + Dependencies: []string{"libtool-ltdl"}, }, // 19.03.4 - CentOS / Rhel8 @@ -960,7 +929,7 @@ var dockerVersions = []packageVersion{ Hash: "1fffcc716e74a59f753f8898ba96693a00e79e26", }, }, - Dependencies: []string{"libtool-ltdl", "iptables"}, + Dependencies: []string{"libtool-ltdl"}, }, // TIP: When adding the next version, copy the previous version, string replace the version and run: diff --git a/nodeup/pkg/model/miscutils.go b/nodeup/pkg/model/miscutils.go index 08bd2ae3ee..53dc2e8a42 100644 --- a/nodeup/pkg/model/miscutils.go +++ b/nodeup/pkg/model/miscutils.go @@ -50,7 +50,6 @@ func (b *MiscUtilsBuilder) Build(c *fi.ModelBuilderContext) error { var 
packages []string if b.Distribution.IsDebianFamily() { - packages = append(packages, "socat") packages = append(packages, "curl") packages = append(packages, "wget") packages = append(packages, "nfs-common") diff --git a/nodeup/pkg/model/packages.go b/nodeup/pkg/model/packages.go index b4bf4c14bf..1f49cc92d6 100644 --- a/nodeup/pkg/model/packages.go +++ b/nodeup/pkg/model/packages.go @@ -17,6 +17,7 @@ limitations under the License. package model import ( + "k8s.io/kops/nodeup/pkg/distros" "k8s.io/kops/upup/pkg/fi" "k8s.io/kops/upup/pkg/fi/nodeup/nodetasks" @@ -37,17 +38,39 @@ func (b *PackagesBuilder) Build(c *fi.ModelBuilderContext) error { // ebtables - kops #1711 // ethtool - kops #1830 if b.Distribution.IsDebianFamily() { + // From containerd: https://github.com/containerd/cri/blob/master/contrib/ansible/tasks/bootstrap_ubuntu.yaml c.AddTask(&nodetasks.Package{Name: "conntrack"}) c.AddTask(&nodetasks.Package{Name: "ebtables"}) c.AddTask(&nodetasks.Package{Name: "ethtool"}) + c.AddTask(&nodetasks.Package{Name: "iptables"}) + c.AddTask(&nodetasks.Package{Name: "libseccomp2"}) + c.AddTask(&nodetasks.Package{Name: "pigz"}) + c.AddTask(&nodetasks.Package{Name: "socat"}) + c.AddTask(&nodetasks.Package{Name: "util-linux"}) } else if b.Distribution.IsRHELFamily() { + // From containerd: https://github.com/containerd/cri/blob/master/contrib/ansible/tasks/bootstrap_centos.yaml c.AddTask(&nodetasks.Package{Name: "conntrack-tools"}) c.AddTask(&nodetasks.Package{Name: "ebtables"}) c.AddTask(&nodetasks.Package{Name: "ethtool"}) + c.AddTask(&nodetasks.Package{Name: "iptables"}) + c.AddTask(&nodetasks.Package{Name: "libseccomp"}) c.AddTask(&nodetasks.Package{Name: "socat"}) + c.AddTask(&nodetasks.Package{Name: "util-linux"}) + + // Handle RHEL 7 and Amazon Linux 2 differently when installing "extras" + if b.Distribution != distros.DistributionRhel7 { + c.AddTask(&nodetasks.Package{Name: "container-selinux"}) + c.AddTask(&nodetasks.Package{Name: "pigz"}) + } else { + c.AddTask(&nodetasks.Package{ + Name: "container-selinux", + Source: s("http://vault.centos.org/7.6.1810/extras/x86_64/Packages/container-selinux-2.107-1.el7_6.noarch.rpm"), + Hash: s("7de4211fa0dfd240d8827b93763e1eb5f0d56411"), + }) + } } else { - // Hopefully it's already installed - klog.Infof("ebtables package not known for distro %q", b.Distribution) + // Hopefully they are already installed + klog.Warningf("unknown distribution, skipping required packages install: %v", b.Distribution) } return nil diff --git a/nodeup/pkg/model/tests/containerdbuilder/simple/tasks.yaml b/nodeup/pkg/model/tests/containerdbuilder/simple/tasks.yaml index 49ce5cffdb..11717a8360 100644 --- a/nodeup/pkg/model/tests/containerdbuilder/simple/tasks.yaml +++ b/nodeup/pkg/model/tests/containerdbuilder/simple/tasks.yaml @@ -208,10 +208,6 @@ preventStart: true source: https://download.docker.com/linux/ubuntu/dists/xenial/pool/stable/amd64/containerd.io_1.2.10-3_amd64.deb version: 1.2.10-3 --- -Name: libseccomp2 ---- -Name: pigz ---- Name: containerd.service definition: | [Unit] @@ -224,16 +220,15 @@ definition: | EnvironmentFile=/etc/environment ExecStartPre=-/sbin/modprobe overlay ExecStart=/usr/bin/containerd -c /etc/containerd/config-kops.toml "$CONTAINERD_OPTS" - KillMode=process + Restart=always + RestartSec=5 Delegate=yes + KillMode=process + OOMScoreAdjust=-999 LimitNOFILE=1048576 LimitNPROC=infinity LimitCORE=infinity TasksMax=infinity - Restart=always - RestartSec=2s - StartLimitInterval=0 - TimeoutStartSec=0 [Install] WantedBy=multi-user.target diff 
--git a/upup/pkg/fi/nodeup/nodetasks/archive.go b/upup/pkg/fi/nodeup/nodetasks/archive.go
index 8151a9cb06..d6306d1fb7 100644
--- a/upup/pkg/fi/nodeup/nodetasks/archive.go
+++ b/upup/pkg/fi/nodeup/nodetasks/archive.go
@@ -23,8 +23,10 @@ import (
 	"os"
 	"os/exec"
 	"path"
+	"path/filepath"
 	"reflect"
 	"strconv"
+	"strings"
 
 	"k8s.io/klog"
 	"k8s.io/kops/upup/pkg/fi"
@@ -47,6 +49,9 @@ type Archive struct {
 
 	// StripComponents is the number of components to remove when expanding the archive
 	StripComponents int `json:"stripComponents,omitempty"`
+
+	// MapFiles maps source paths in the archive to the directories (relative to TargetDir) into which they are extracted
+	MapFiles map[string]string `json:"mapFiles,omitempty"`
 }
 
 const (
@@ -155,20 +160,38 @@ func (_ *Archive) RenderLocal(t *local.LocalTarget, a, e, changes *Archive) erro
 		return err
 	}
 
-	targetDir := e.TargetDir
-	if err := os.MkdirAll(targetDir, 0755); err != nil {
-		return fmt.Errorf("error creating directories %q: %v", targetDir, err)
-	}
+	if len(e.MapFiles) == 0 {
+		targetDir := e.TargetDir
+		if err := os.MkdirAll(targetDir, 0755); err != nil {
+			return fmt.Errorf("error creating directories %q: %v", targetDir, err)
+		}
 
-	args := []string{"tar", "xf", localFile, "-C", targetDir}
-	if e.StripComponents != 0 {
-		args = append(args, "--strip-components="+strconv.Itoa(e.StripComponents))
-	}
+		args := []string{"tar", "xf", localFile, "-C", targetDir}
+		if e.StripComponents != 0 {
+			args = append(args, "--strip-components="+strconv.Itoa(e.StripComponents))
+		}
 
-	klog.Infof("running command %s", args)
-	cmd := exec.Command(args[0], args[1:]...)
-	if output, err := cmd.CombinedOutput(); err != nil {
-		return fmt.Errorf("error installing archive %q: %v: %s", e.Name, err, string(output))
+		klog.Infof("running command %s", args)
+		cmd := exec.Command(args[0], args[1:]...)
+		if output, err := cmd.CombinedOutput(); err != nil {
+			return fmt.Errorf("error installing archive %q: %v: %s", e.Name, err, string(output))
+		}
+	} else {
+		for src, dest := range e.MapFiles {
+			stripCount := strings.Count(src, "/")
+			targetDir := filepath.Join(e.TargetDir, dest)
+			if err := os.MkdirAll(targetDir, 0755); err != nil {
+				return fmt.Errorf("error creating directories %q: %v", targetDir, err)
+			}
+
+			args := []string{"tar", "xf", localFile, "-C", targetDir, "--strip-components=" + strconv.Itoa(stripCount), src}
+
+			klog.Infof("running command %s", args)
+			cmd := exec.Command(args[0], args[1:]...)
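+			// For example, with TargetDir "/" and the containerd builder's
+			// mapping "./usr/local/bin" -> "/usr", stripCount is 3 and
+			// targetDir is "/usr", so the command built above is roughly:
+			//   tar xf <archive> -C /usr --strip-components=3 ./usr/local/bin
+			// which should place the archive's ./usr/local/bin/* under /usr/bin/.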
+ if output, err := cmd.CombinedOutput(); err != nil { + return fmt.Errorf("error installing archive %q: %v: %s", e.Name, err, string(output)) + } + } } // We write a marker file to prevent re-execution diff --git a/upup/pkg/fi/nodeup/nodetasks/package.go b/upup/pkg/fi/nodeup/nodetasks/package.go index 5d78805bec..614970e784 100644 --- a/upup/pkg/fi/nodeup/nodetasks/package.go +++ b/upup/pkg/fi/nodeup/nodetasks/package.go @@ -54,9 +54,10 @@ type Package struct { } const ( - localPackageDir = "/var/cache/nodeup/packages/" - containerdPackageName = "containerd.io" - dockerPackageName = "docker-ce" + localPackageDir = "/var/cache/nodeup/packages/" + containerSelinuxPackageName = "container-selinux" + containerdPackageName = "containerd.io" + dockerPackageName = "docker-ce" ) var _ fi.HasDependencies = &Package{} @@ -83,10 +84,24 @@ func (e *Package) GetDependencies(tasks map[string]fi.Task) []fi.Task { } } - // Docker should wait for containerd to be installed + // containerd should wait for container-selinux to be installed + if e.Name == containerdPackageName { + for _, v := range tasks { + if vp, ok := v.(*Package); ok { + if vp.Name == containerSelinuxPackageName { + deps = append(deps, v) + } + } + } + } + + // Docker should wait for container-selinux and containerd to be installed if e.Name == dockerPackageName { for _, v := range tasks { if vp, ok := v.(*Package); ok { + if vp.Name == containerSelinuxPackageName { + deps = append(deps, v) + } if vp.Name == containerdPackageName { deps = append(deps, v) } From 6b1a131528d039590490dd2296473a82ef9824d8 Mon Sep 17 00:00:00 2001 From: John Gardiner Myers Date: Fri, 27 Dec 2019 20:38:46 -0800 Subject: [PATCH 14/42] Remove addons only applicable to unsupported versions of Kubernetes --- .../pre-k8s-1.6.yaml.template | 39 -- .../pre-k8s-1.6.yaml.template | 39 -- .../k8s-1.7.yaml.template | 226 -------- .../pre-k8s-1.6.yaml.template | 109 ---- .../networking.kope.io/pre-k8s-1.6.yaml | 40 -- .../k8s-1.6.yaml.template | 373 ------------- .../k8s-1.8.yaml.template | 456 --------------- .../pre-k8s-1.6.yaml.template | 215 ------- .../k8s-1.6.yaml.template | 523 ------------------ .../pre-k8s-1.6.yaml.template | 272 --------- .../networking.weave/k8s-1.6.yaml.template | 241 -------- .../networking.weave/k8s-1.7.yaml.template | 258 --------- .../pre-k8s-1.6.yaml.template | 129 ----- .../v1.8.0.yaml.template | 138 ----- .../storage-aws.addons.k8s.io/v1.6.0.yaml | 24 - .../storage-gce.addons.k8s.io/v1.6.0.yaml | 13 - .../pkg/fi/cloudup/bootstrapchannelbuilder.go | 359 ++---------- 17 files changed, 53 insertions(+), 3401 deletions(-) delete mode 100644 upup/models/cloudup/resources/addons/dns-controller.addons.k8s.io/pre-k8s-1.6.yaml.template delete mode 100644 upup/models/cloudup/resources/addons/external-dns.addons.k8s.io/pre-k8s-1.6.yaml.template delete mode 100644 upup/models/cloudup/resources/addons/networking.amazon-vpc-routed-eni/k8s-1.7.yaml.template delete mode 100644 upup/models/cloudup/resources/addons/networking.flannel/pre-k8s-1.6.yaml.template delete mode 100644 upup/models/cloudup/resources/addons/networking.kope.io/pre-k8s-1.6.yaml delete mode 100644 upup/models/cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.6.yaml.template delete mode 100644 upup/models/cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.8.yaml.template delete mode 100644 upup/models/cloudup/resources/addons/networking.projectcalico.org.canal/pre-k8s-1.6.yaml.template delete mode 100644 
upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.6.yaml.template delete mode 100644 upup/models/cloudup/resources/addons/networking.projectcalico.org/pre-k8s-1.6.yaml.template delete mode 100644 upup/models/cloudup/resources/addons/networking.weave/k8s-1.6.yaml.template delete mode 100644 upup/models/cloudup/resources/addons/networking.weave/k8s-1.7.yaml.template delete mode 100644 upup/models/cloudup/resources/addons/networking.weave/pre-k8s-1.6.yaml.template delete mode 100644 upup/models/cloudup/resources/addons/spotinst-kubernetes-cluster-controller.addons.k8s.io/v1.8.0.yaml.template delete mode 100644 upup/models/cloudup/resources/addons/storage-aws.addons.k8s.io/v1.6.0.yaml delete mode 100644 upup/models/cloudup/resources/addons/storage-gce.addons.k8s.io/v1.6.0.yaml diff --git a/upup/models/cloudup/resources/addons/dns-controller.addons.k8s.io/pre-k8s-1.6.yaml.template b/upup/models/cloudup/resources/addons/dns-controller.addons.k8s.io/pre-k8s-1.6.yaml.template deleted file mode 100644 index c872597d8a..0000000000 --- a/upup/models/cloudup/resources/addons/dns-controller.addons.k8s.io/pre-k8s-1.6.yaml.template +++ /dev/null @@ -1,39 +0,0 @@ -kind: Deployment -apiVersion: extensions/v1beta1 -metadata: - name: dns-controller - namespace: kube-system - labels: - k8s-addon: dns-controller.addons.k8s.io - k8s-app: dns-controller - version: v1.17.0-alpha.1 -spec: - replicas: 1 - selector: - matchLabels: - k8s-app: dns-controller - template: - metadata: - labels: - k8s-addon: dns-controller.addons.k8s.io - k8s-app: dns-controller - version: v1.17.0-alpha.1 - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - scheduler.alpha.kubernetes.io/tolerations: '[{"key": "dedicated", "value": "master"}]' - spec: - nodeSelector: - kubernetes.io/role: master - dnsPolicy: Default # Don't use cluster DNS (we are likely running before kube-dns) - hostNetwork: true - containers: - - name: dns-controller - image: kope/dns-controller:1.17.0-alpha.1 - command: -{{ range $arg := DnsControllerArgv }} - - "{{ $arg }}" -{{ end }} - resources: - requests: - cpu: 50m - memory: 50Mi diff --git a/upup/models/cloudup/resources/addons/external-dns.addons.k8s.io/pre-k8s-1.6.yaml.template b/upup/models/cloudup/resources/addons/external-dns.addons.k8s.io/pre-k8s-1.6.yaml.template deleted file mode 100644 index 9d111d8c93..0000000000 --- a/upup/models/cloudup/resources/addons/external-dns.addons.k8s.io/pre-k8s-1.6.yaml.template +++ /dev/null @@ -1,39 +0,0 @@ -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: external-dns - namespace: kube-system - labels: - k8s-addon: external-dns.addons.k8s.io - k8s-app: external-dns - version: v0.4.4 -spec: - replicas: 1 - selector: - matchLabels: - k8s-app: external-dns - template: - metadata: - labels: - k8s-addon: external-dns.addons.k8s.io - k8s-app: external-dns - version: v0.4.4 - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - scheduler.alpha.kubernetes.io/tolerations: '[{"key": "dedicated", "value": "master"}]' - spec: - nodeSelector: - kubernetes.io/role: master - dnsPolicy: Default # Don't use cluster DNS (we are likely running before kube-dns) - hostNetwork: true - containers: - - name: external-dns - image: registry.opensource.zalan.do/teapot/external-dns:v0.4.4 - args: -{{ range $arg := ExternalDnsArgv }} - - "{{ $arg }}" -{{ end }} - resources: - requests: - cpu: 50m - memory: 50Mi diff --git a/upup/models/cloudup/resources/addons/networking.amazon-vpc-routed-eni/k8s-1.7.yaml.template 
b/upup/models/cloudup/resources/addons/networking.amazon-vpc-routed-eni/k8s-1.7.yaml.template deleted file mode 100644 index bbd93ed638..0000000000 --- a/upup/models/cloudup/resources/addons/networking.amazon-vpc-routed-eni/k8s-1.7.yaml.template +++ /dev/null @@ -1,226 +0,0 @@ -# Vendored from https://github.com/aws/amazon-vpc-cni-k8s/blob/v1.3.3/config/v1.3/aws-k8s-cni.yaml - -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole -metadata: - name: aws-node -rules: -- apiGroups: - - crd.k8s.amazonaws.com - resources: - - "*" - - namespaces - verbs: - - "*" -- apiGroups: [""] - resources: - - pods - - nodes - - namespaces - verbs: ["list", "watch", "get"] -- apiGroups: ["extensions"] - resources: - - daemonsets - verbs: ["list", "watch"] ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: aws-node - namespace: kube-system ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: aws-node -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: aws-node -subjects: -- kind: ServiceAccount - name: aws-node - namespace: kube-system ---- -kind: DaemonSet -apiVersion: extensions/v1beta1 -metadata: - name: aws-node - namespace: kube-system - labels: - k8s-app: aws-node -spec: - updateStrategy: - type: RollingUpdate - selector: - matchLabels: - k8s-app: aws-node - template: - metadata: - labels: - k8s-app: aws-node - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - serviceAccountName: aws-node - hostNetwork: true - tolerations: - - operator: Exists - containers: - - image: "{{- or .Networking.AmazonVPC.ImageName "602401143452.dkr.ecr.us-west-2.amazonaws.com/amazon-k8s-cni:1.3.3" }}" - ports: - - containerPort: 61678 - name: metrics - name: aws-node - env: - - name: CLUSTER_NAME - value: {{ ClusterName }} - - name: AWS_VPC_K8S_CNI_LOGLEVEL - value: DEBUG - - name: MY_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: WATCH_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - resources: - requests: - cpu: 10m - securityContext: - privileged: true - volumeMounts: - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - - mountPath: /host/etc/cni/net.d - name: cni-net-dir - - mountPath: /host/var/log - name: log-dir - - mountPath: /var/run/docker.sock - name: dockersock - volumes: - - name: cni-bin-dir - hostPath: - path: /opt/cni/bin - - name: cni-net-dir - hostPath: - path: /etc/cni/net.d - - name: log-dir - hostPath: - path: /var/log - - name: dockersock - hostPath: - path: /var/run/docker.sock ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: eniconfigs.crd.k8s.amazonaws.com -spec: - scope: Cluster - group: crd.k8s.amazonaws.com - version: v1alpha1 - names: - plural: eniconfigs - singular: eniconfig - kind: ENIConfig - ---- - -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: k8s-ec2-srcdst - labels: - role.kubernetes.io/networking: "1" -rules: -- apiGroups: - - "" - resources: - - nodes - verbs: - - get - - list - - watch - - update - - patch - ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: k8s-ec2-srcdst - namespace: kube-system - labels: - role.kubernetes.io/networking: "1" ---- - -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: k8s-ec2-srcdst - labels: - role.kubernetes.io/networking: "1" -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: k8s-ec2-srcdst -subjects: -- kind: ServiceAccount - name: 
k8s-ec2-srcdst - namespace: kube-system - ---- - -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: k8s-ec2-srcdst - namespace: kube-system - labels: - k8s-app: k8s-ec2-srcdst - role.kubernetes.io/networking: "1" -spec: - replicas: 1 - selector: - matchLabels: - k8s-app: k8s-ec2-srcdst - template: - metadata: - labels: - k8s-app: k8s-ec2-srcdst - role.kubernetes.io/networking: "1" - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - hostNetwork: true - tolerations: - - key: node-role.kubernetes.io/master - effect: NoSchedule - - key: CriticalAddonsOnly - operator: Exists - serviceAccountName: k8s-ec2-srcdst - containers: - - image: ottoyiu/k8s-ec2-srcdst:v0.2.0-3-gc0c26eca - name: k8s-ec2-srcdst - resources: - requests: - cpu: 10m - memory: 64Mi - env: - - name: AWS_REGION - value: {{ Region }} - volumeMounts: - - name: ssl-certs - mountPath: "/etc/ssl/certs/ca-certificates.crt" - readOnly: true - imagePullPolicy: "Always" - volumes: - - name: ssl-certs - hostPath: - path: "/etc/ssl/certs/ca-certificates.crt" - nodeSelector: - node-role.kubernetes.io/master: "" - diff --git a/upup/models/cloudup/resources/addons/networking.flannel/pre-k8s-1.6.yaml.template b/upup/models/cloudup/resources/addons/networking.flannel/pre-k8s-1.6.yaml.template deleted file mode 100644 index f314c36205..0000000000 --- a/upup/models/cloudup/resources/addons/networking.flannel/pre-k8s-1.6.yaml.template +++ /dev/null @@ -1,109 +0,0 @@ -kind: ServiceAccount -apiVersion: v1 -metadata: - name: flannel - namespace: kube-system - labels: - role.kubernetes.io/networking: "1" ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: kube-flannel-cfg - namespace: kube-system - labels: - k8s-app: flannel - role.kubernetes.io/networking: "1" -data: - cni-conf.json: | - { - "name": "cbr0", - "type": "flannel", - "delegate": { - "forceAddress": true, - "isDefaultGateway": true - } - } - net-conf.json: | - { - "Network": "{{ .NonMasqueradeCIDR }}", - "Backend": { - "Type": "{{ FlannelBackendType }}" - } - } ---- -kind: DaemonSet -apiVersion: extensions/v1beta1 -metadata: - name: kube-flannel-ds - namespace: kube-system - labels: - k8s-app: flannel - role.kubernetes.io/networking: "1" -spec: - template: - metadata: - labels: - tier: node - app: flannel - role.kubernetes.io/networking: "1" - spec: - hostNetwork: true - nodeSelector: - beta.kubernetes.io/arch: amd64 - serviceAccountName: flannel - containers: - - name: kube-flannel - image: quay.io/coreos/flannel:v0.11.0-amd64 - command: - - "/opt/bin/flanneld" - - "--ip-masq" - - "--kube-subnet-mgr" - - "--iptables-resync={{- or .Networking.Flannel.IptablesResyncSeconds "5" }}" - securityContext: - privileged: true - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - resources: - limits: - cpu: 100m - memory: 100Mi - requests: - memory: 100Mi - volumeMounts: - - name: run - mountPath: /run - - name: flannel-cfg - mountPath: /etc/kube-flannel/ - - name: install-cni - image: quay.io/coreos/flannel:v0.11.0-amd64 - command: [ "/bin/sh", "-c", "set -e -x; cp -f /etc/kube-flannel/cni-conf.json /etc/cni/net.d/10-flannel.conf; while true; do sleep 3600; done" ] - resources: - limits: - cpu: 10m - memory: 25Mi - requests: - cpu: 10m - memory: 25Mi - volumeMounts: - - name: cni - mountPath: /etc/cni/net.d - - name: flannel-cfg - mountPath: /etc/kube-flannel/ - volumes: - - name: run - hostPath: - path: /run - - name: cni - hostPath: - path: 
/etc/cni/net.d - - name: flannel-cfg - configMap: - name: kube-flannel-cfg diff --git a/upup/models/cloudup/resources/addons/networking.kope.io/pre-k8s-1.6.yaml b/upup/models/cloudup/resources/addons/networking.kope.io/pre-k8s-1.6.yaml deleted file mode 100644 index 8babc915ae..0000000000 --- a/upup/models/cloudup/resources/addons/networking.kope.io/pre-k8s-1.6.yaml +++ /dev/null @@ -1,40 +0,0 @@ -apiVersion: extensions/v1beta1 -kind: DaemonSet -metadata: - name: kopeio-networking-agent - namespace: kube-system - labels: - k8s-addon: networking.kope.io - role.kubernetes.io/networking: "1" -spec: - template: - metadata: - labels: - name: kopeio-networking-agent - role.kubernetes.io/networking: "1" - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]' - spec: - hostPID: true - hostIPC: true - hostNetwork: true - containers: - - resources: - requests: - cpu: 50m - memory: 100Mi - limits: - memory: 100Mi - securityContext: - privileged: true - image: kopeio/networking-agent:1.0.20181028 - name: networking-agent - volumeMounts: - - name: lib-modules - mountPath: /lib/modules - readOnly: true - volumes: - - name: lib-modules - hostPath: - path: /lib/modules diff --git a/upup/models/cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.6.yaml.template b/upup/models/cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.6.yaml.template deleted file mode 100644 index e005b7e09a..0000000000 --- a/upup/models/cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.6.yaml.template +++ /dev/null @@ -1,373 +0,0 @@ -# This ConfigMap can be used to configure a self-hosted Canal installation. -kind: ConfigMap -apiVersion: v1 -metadata: - name: canal-config - namespace: kube-system -data: - # The interface used by canal for host <-> host communication. - # If left blank, then the interface is chosen using the node's - # default route. - canal_iface: "" - - # Whether or not to masquerade traffic to destinations not within - # the pod network. - masquerade: "true" - - # The CNI network configuration to install on each node. - cni_network_config: |- - { - "name": "k8s-pod-network", - "type": "calico", - "log_level": "info", - "datastore_type": "kubernetes", - "hostname": "__KUBERNETES_NODE_NAME__", - "ipam": { - "type": "host-local", - "subnet": "usePodCidr" - }, - "policy": { - "type": "k8s", - "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__" - }, - "kubernetes": { - "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__", - "kubeconfig": "__KUBECONFIG_FILEPATH__" - } - } - - # Flannel network configuration. Mounted into the flannel container. - net-conf.json: | - { - "Network": "{{ .NonMasqueradeCIDR }}", - "Backend": { - "Type": "vxlan" - } - } - ---- - -# This manifest installs the calico/node container, as well -# as the Calico CNI plugins and network config on -# each master and worker node in a Kubernetes cluster. -kind: DaemonSet -apiVersion: extensions/v1beta1 -metadata: - name: canal - namespace: kube-system - labels: - k8s-app: canal -spec: - selector: - matchLabels: - k8s-app: canal - template: - metadata: - labels: - k8s-app: canal - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - hostNetwork: true - serviceAccountName: canal - tolerations: - # Mark the pod as a critical add-on for rescheduling. 
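The scheduler.alpha.kubernetes.io/critical-pod annotation and the CriticalAddonsOnly toleration used throughout these retired manifests were the pre-priority way of keeping add-ons schedulable; Kubernetes has since removed the alpha annotation in favor of pod priority. A minimal sketch of the modern equivalent, assuming the built-in priority class (the surrounding fields are illustrative only):

    spec:
      template:
        spec:
          priorityClassName: system-node-critical
          tolerations:
          - key: CriticalAddonsOnly
            operator: Exists
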
- - key: "CriticalAddonsOnly" - operator: "Exists" - - effect: NoExecute - operator: Exists - # Allow the pod to run on all nodes. This is required - # for cluster communication - - effect: NoSchedule - operator: Exists - containers: - # Runs calico/node container on each Kubernetes node. This - # container programs network policy and routes on each - # host. - - name: calico-node - image: quay.io/calico/node:v2.4.1 - env: - # Use Kubernetes API as the backing datastore. - - name: DATASTORE_TYPE - value: "kubernetes" - # Enable felix logging. - - name: FELIX_LOGSEVERITYSYS - value: "{{- or .Networking.Canal.LogSeveritySys "INFO" }}" - # Period, in seconds, at which felix re-applies all iptables state - - name: FELIX_IPTABLESREFRESHINTERVAL - value: "60" - # Disable IPV6 support in Felix. - - name: FELIX_IPV6SUPPORT - value: "false" - # Don't enable BGP. - - name: CALICO_NETWORKING_BACKEND - value: "none" - # Cluster type to identify the deployment type - - name: CLUSTER_TYPE - value: "kops,canal" - # Disable file logging so `kubectl logs` works. - - name: CALICO_DISABLE_FILE_LOGGING - value: "true" - - name: WAIT_FOR_DATASTORE - value: "true" - # No IP address needed. - - name: IP - value: "" - - name: HOSTNAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - # Set Felix endpoint to host default action to ACCEPT. - - name: FELIX_DEFAULTENDPOINTTOHOSTACTION - value: "{{- or .Networking.Canal.DefaultEndpointToHostAction "ACCEPT" }}" - # Controls whether Felix inserts rules to the top of iptables chains, or appends to the bottom - - name: FELIX_CHAININSERTMODE - value: "{{- or .Networking.Canal.ChainInsertMode "insert" }}" - # Set to enable the experimental Prometheus metrics server - - name: FELIX_PROMETHEUSMETRICSENABLED - value: "{{- or .Networking.Canal.PrometheusMetricsEnabled "false" }}" - # TCP port that the Prometheus metrics server should bind to - - name: FELIX_PROMETHEUSMETRICSPORT - value: "{{- or .Networking.Canal.PrometheusMetricsPort "9091" }}" - # Enable Prometheus Go runtime metrics collection - - name: FELIX_PROMETHEUSGOMETRICSENABLED - value: "{{- or .Networking.Canal.PrometheusGoMetricsEnabled "true" }}" - # Enable Prometheus process metrics collection - - name: FELIX_PROMETHEUSPROCESSMETRICSENABLED - value: "{{- or .Networking.Canal.PrometheusProcessMetricsEnabled "true" }}" - securityContext: - privileged: true - resources: - requests: - cpu: 100m - volumeMounts: - - mountPath: /lib/modules - name: lib-modules - readOnly: true - - mountPath: /var/run/calico - name: var-run-calico - readOnly: false - # This container installs the Calico CNI binaries - # and CNI network config file on each node. - - name: install-cni - image: quay.io/calico/cni:v1.10.0 - command: ["/install-cni.sh"] - env: - # The CNI network config to install on each node. - - name: CNI_NETWORK_CONFIG - valueFrom: - configMapKeyRef: - name: canal-config - key: cni_network_config - - name: KUBERNETES_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - volumeMounts: - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - - mountPath: /host/etc/cni/net.d - name: cni-net-dir - # This container runs flannel using the kube-subnet-mgr backend - # for allocating subnets. 
- - name: kube-flannel - image: quay.io/coreos/flannel:v0.9.0 - command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ] - securityContext: - privileged: true - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: FLANNELD_IFACE - valueFrom: - configMapKeyRef: - name: canal-config - key: canal_iface - - name: FLANNELD_IP_MASQ - valueFrom: - configMapKeyRef: - name: canal-config - key: masquerade - volumeMounts: - - name: run - mountPath: /run - - name: flannel-cfg - mountPath: /etc/kube-flannel/ - volumes: - # Used by calico/node. - - name: lib-modules - hostPath: - path: /lib/modules - - name: var-run-calico - hostPath: - path: /var/run/calico - # Used to install CNI. - - name: cni-bin-dir - hostPath: - path: /opt/cni/bin - - name: cni-net-dir - hostPath: - path: /etc/cni/net.d - # Used by flannel. - - name: run - hostPath: - path: /run - - name: flannel-cfg - configMap: - name: canal-config - ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: canal - namespace: kube-system - ---- - -# Calico Roles -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: calico - namespace: kube-system -rules: - - apiGroups: [""] - resources: - - namespaces - verbs: - - get - - list - - watch - - apiGroups: [""] - resources: - - pods/status - verbs: - - update - - apiGroups: [""] - resources: - - pods - verbs: - - get - - list - - watch - - apiGroups: [""] - resources: - - nodes - verbs: - - get - - list - - update - - watch - - apiGroups: ["extensions"] - resources: - - thirdpartyresources - verbs: - - create - - get - - list - - watch - - apiGroups: ["extensions"] - resources: - - networkpolicies - verbs: - - get - - list - - watch - - apiGroups: ["projectcalico.org"] - resources: - - globalbgppeers - verbs: - - get - - list - - apiGroups: ["projectcalico.org"] - resources: - - globalconfigs - - globalbgpconfigs - verbs: - - create - - get - - list - - update - - watch - - apiGroups: ["projectcalico.org"] - resources: - - ippools - verbs: - - create - - get - - list - - update - - watch - - apiGroups: ["alpha.projectcalico.org"] - resources: - - systemnetworkpolicies - verbs: - - get - - list - - watch - ---- - -# Flannel roles -# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: flannel -rules: - - apiGroups: - - "" - resources: - - pods - verbs: - - get - - apiGroups: - - "" - resources: - - nodes - verbs: - - list - - watch - - apiGroups: - - "" - resources: - - nodes/status - verbs: - - patch ---- - -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: flannel -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: flannel -subjects: -- kind: ServiceAccount - name: canal - namespace: kube-system - ---- - -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: calico -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: calico -subjects: -- kind: ServiceAccount - name: canal - namespace: kube-system diff --git a/upup/models/cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.8.yaml.template b/upup/models/cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.8.yaml.template deleted file mode 100644 index 9e8271f779..0000000000 --- 
a/upup/models/cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.8.yaml.template +++ /dev/null @@ -1,456 +0,0 @@ -# Canal w/ Calico Version v2.6.2 -# https://docs.projectcalico.org/v2.6/releases#v2.6.2 -# This manifest includes the following component versions: -# calico/node:v2.6.2 -# calico/cni:v1.11.0 -# coreos/flannel:v0.9.0 (bug with v0.9.1: https://github.com/kubernetes/kops/issues/4037) - -# This ConfigMap can be used to configure a self-hosted Canal installation. -kind: ConfigMap -apiVersion: v1 -metadata: - name: canal-config - namespace: kube-system -data: - # The interface used by canal for host <-> host communication. - # If left blank, then the interface is chosen using the node's - # default route. - canal_iface: "" - - # Whether or not to masquerade traffic to destinations not within - # the pod network. - masquerade: "true" - - # The CNI network configuration to install on each node. - cni_network_config: |- - { - "name": "k8s-pod-network", - "cniVersion": "0.3.0", - "plugins": [ - { - "type": "calico", - "log_level": "info", - "datastore_type": "kubernetes", - "nodename": "__KUBERNETES_NODE_NAME__", - "ipam": { - "type": "host-local", - "subnet": "usePodCidr" - }, - "policy": { - "type": "k8s", - "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__" - }, - "kubernetes": { - "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__", - "kubeconfig": "__KUBECONFIG_FILEPATH__" - } - }, - { - "type": "portmap", - "capabilities": {"portMappings": true}, - "snat": true - } - ] - } - - - # Flannel network configuration. Mounted into the flannel container. - net-conf.json: | - { - "Network": "{{ .NonMasqueradeCIDR }}", - "Backend": { - "Type": "vxlan" - } - } - ---- - -# This manifest installs the calico/node container, as well -# as the Calico CNI plugins and network config on -# each master and worker node in a Kubernetes cluster. -kind: DaemonSet -apiVersion: extensions/v1beta1 -metadata: - name: canal - namespace: kube-system - labels: - k8s-app: canal -spec: - selector: - matchLabels: - k8s-app: canal - template: - metadata: - labels: - k8s-app: canal - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - hostNetwork: true - serviceAccountName: canal - tolerations: - # Mark the pod as a critical add-on for rescheduling. - - key: "CriticalAddonsOnly" - operator: "Exists" - - effect: NoExecute - operator: Exists - # Allow the pod to run on all nodes. This is required - # for cluster communication - - effect: NoSchedule - operator: Exists - # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force - # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. - terminationGracePeriodSeconds: 0 - containers: - # Runs calico/node container on each Kubernetes node. This - # container programs network policy and routes on each - # host. - - name: calico-node - image: quay.io/calico/node:v2.6.7 - env: - # Use Kubernetes API as the backing datastore. - - name: DATASTORE_TYPE - value: "kubernetes" - # Enable felix logging. - - name: FELIX_LOGSEVERITYSYS - value: "{{- or .Networking.Canal.LogSeveritySys "INFO" }}" - # Don't enable BGP. - - name: CALICO_NETWORKING_BACKEND - value: "none" - # Cluster type to identify the deployment type - - name: CLUSTER_TYPE - value: "kops,canal" - # Disable file logging so `kubectl logs` works. 
- - name: CALICO_DISABLE_FILE_LOGGING - value: "true" - # Period, in seconds, at which felix re-applies all iptables state - - name: FELIX_IPTABLESREFRESHINTERVAL - value: "60" - # Disable IPV6 support in Felix. - - name: FELIX_IPV6SUPPORT - value: "false" - # Wait for the datastore. - - name: WAIT_FOR_DATASTORE - value: "true" - # No IP address needed. - - name: IP - value: "" - - name: NODENAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - # Set Felix endpoint to host default action to ACCEPT. - - name: FELIX_DEFAULTENDPOINTTOHOSTACTION - value: "{{- or .Networking.Canal.DefaultEndpointToHostAction "ACCEPT" }}" - # Controls whether Felix inserts rules to the top of iptables chains, or appends to the bottom - - name: FELIX_CHAININSERTMODE - value: "{{- or .Networking.Canal.ChainInsertMode "insert" }}" - # Set to enable the experimental Prometheus metrics server - - name: FELIX_PROMETHEUSMETRICSENABLED - value: "{{- or .Networking.Canal.PrometheusMetricsEnabled "false" }}" - # TCP port that the Prometheus metrics server should bind to - - name: FELIX_PROMETHEUSMETRICSPORT - value: "{{- or .Networking.Canal.PrometheusMetricsPort "9091" }}" - # Enable Prometheus Go runtime metrics collection - - name: FELIX_PROMETHEUSGOMETRICSENABLED - value: "{{- or .Networking.Canal.PrometheusGoMetricsEnabled "true" }}" - # Enable Prometheus process metrics collection - - name: FELIX_PROMETHEUSPROCESSMETRICSENABLED - value: "{{- or .Networking.Canal.PrometheusProcessMetricsEnabled "true" }}" - - name: FELIX_HEALTHENABLED - value: "true" - securityContext: - privileged: true - resources: - requests: - cpu: 50m - livenessProbe: - httpGet: - path: /liveness - port: 9099 - periodSeconds: 10 - initialDelaySeconds: 10 - failureThreshold: 6 - readinessProbe: - httpGet: - path: /readiness - port: 9099 - periodSeconds: 10 - volumeMounts: - - mountPath: /lib/modules - name: lib-modules - readOnly: true - - mountPath: /var/run/calico - name: var-run-calico - readOnly: false - # This container installs the Calico CNI binaries - # and CNI network config file on each node. - - name: install-cni - image: quay.io/calico/cni:v1.11.2 - command: ["/install-cni.sh"] - env: - - name: CNI_CONF_NAME - value: "10-calico.conflist" - # The CNI network config to install on each node. - - name: CNI_NETWORK_CONFIG - valueFrom: - configMapKeyRef: - name: canal-config - key: cni_network_config - - name: KUBERNETES_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - volumeMounts: - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - - mountPath: /host/etc/cni/net.d - name: cni-net-dir - # This container runs flannel using the kube-subnet-mgr backend - # for allocating subnets. - - name: kube-flannel - image: quay.io/coreos/flannel:v0.9.0 - command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ] - securityContext: - privileged: true - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: FLANNELD_IFACE - valueFrom: - configMapKeyRef: - name: canal-config - key: canal_iface - - name: FLANNELD_IP_MASQ - valueFrom: - configMapKeyRef: - name: canal-config - key: masquerade - resources: - limits: - memory: 100Mi - requests: - cpu: 50m - memory: 100Mi - volumeMounts: - - name: run - mountPath: /run - - name: flannel-cfg - mountPath: /etc/kube-flannel/ - volumes: - # Used by calico/node. 
- - name: lib-modules - hostPath: - path: /lib/modules - - name: var-run-calico - hostPath: - path: /var/run/calico - # Used to install CNI. - - name: cni-bin-dir - hostPath: - path: /opt/cni/bin - - name: cni-net-dir - hostPath: - path: /etc/cni/net.d - # Used by flannel. - - name: run - hostPath: - path: /run - - name: flannel-cfg - configMap: - name: canal-config - - -# Create all the CustomResourceDefinitions needed for -# Calico policy-only mode. ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: globalfelixconfigs.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: GlobalFelixConfig - plural: globalfelixconfigs - singular: globalfelixconfig - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: globalbgpconfigs.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: GlobalBGPConfig - plural: globalbgpconfigs - singular: globalbgpconfig - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: ippools.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: IPPool - plural: ippools - singular: ippool - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: globalnetworkpolicies.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: GlobalNetworkPolicy - plural: globalnetworkpolicies - singular: globalnetworkpolicy - ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: canal - namespace: kube-system - ---- - -# Calico Roles -# Pulled from https://docs.projectcalico.org/v2.5/getting-started/kubernetes/installation/hosted/rbac-kdd.yaml -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: calico - namespace: kube-system -rules: - - apiGroups: [""] - resources: - - namespaces - verbs: - - get - - list - - watch - - apiGroups: [""] - resources: - - pods/status - verbs: - - update - - apiGroups: [""] - resources: - - pods - verbs: - - get - - list - - watch - - apiGroups: [""] - resources: - - nodes - verbs: - - get - - list - - update - - watch - - apiGroups: ["extensions"] - resources: - - networkpolicies - verbs: - - get - - list - - watch - - apiGroups: ["crd.projectcalico.org"] - resources: - - globalfelixconfigs - - bgppeers - - globalbgpconfigs - - ippools - - globalnetworkpolicies - verbs: - - create - - get - - list - - update - - watch - ---- - -# Flannel roles -# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: flannel -rules: - - apiGroups: - - "" - resources: - - pods - verbs: - - get - - apiGroups: - - "" - resources: - - nodes - verbs: - - list - - watch - - apiGroups: - - "" - resources: - - nodes/status - verbs: - - patch ---- - -# Bind the flannel ClusterRole to the canal ServiceAccount. -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: canal-flannel -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: flannel -subjects: -- kind: ServiceAccount - name: canal - namespace: kube-system - ---- - -# Bind the calico ClusterRole to the canal ServiceAccount. 
-apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: canal-calico -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: calico -subjects: -- kind: ServiceAccount - name: canal - namespace: kube-system diff --git a/upup/models/cloudup/resources/addons/networking.projectcalico.org.canal/pre-k8s-1.6.yaml.template b/upup/models/cloudup/resources/addons/networking.projectcalico.org.canal/pre-k8s-1.6.yaml.template deleted file mode 100644 index bc46bda55c..0000000000 --- a/upup/models/cloudup/resources/addons/networking.projectcalico.org.canal/pre-k8s-1.6.yaml.template +++ /dev/null @@ -1,215 +0,0 @@ -# This ConfigMap can be used to configure a self-hosted Canal installation. -kind: ConfigMap -apiVersion: v1 -metadata: - name: canal-config - namespace: kube-system -data: - # The interface used by canal for host <-> host communication. - # If left blank, then the interface is chosen using the node's - # default route. - canal_iface: "" - - # Whether or not to masquerade traffic to destinations not within - # the pod network. - masquerade: "true" - - # The CNI network configuration to install on each node. - cni_network_config: |- - { - "name": "k8s-pod-network", - "type": "calico", - "log_level": "info", - "datastore_type": "kubernetes", - "hostname": "__KUBERNETES_NODE_NAME__", - "ipam": { - "type": "host-local", - "subnet": "usePodCidr" - }, - "policy": { - "type": "k8s", - "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__" - }, - "kubernetes": { - "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__", - "kubeconfig": "__KUBECONFIG_FILEPATH__" - } - } - - # Flannel network configuration. Mounted into the flannel container. - net-conf.json: | - { - "Network": "{{ .NonMasqueradeCIDR }}", - "Backend": { - "Type": "vxlan" - } - } - ---- - -# This manifest installs the calico/node container, as well -# as the Calico CNI plugins and network config on -# each master and worker node in a Kubernetes cluster. -kind: DaemonSet -apiVersion: extensions/v1beta1 -metadata: - name: canal - namespace: kube-system - labels: - k8s-app: canal -spec: - selector: - matchLabels: - k8s-app: canal - template: - metadata: - labels: - k8s-app: canal - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - scheduler.alpha.kubernetes.io/tolerations: | - [{"key": "dedicated", "value": "master", "effect": "NoSchedule" }, - {"key": "CriticalAddonsOnly", "operator": "Exists"}] - spec: - hostNetwork: true - containers: - # Runs calico/node container on each Kubernetes node. This - # container programs network policy and routes on each - # host. - - name: calico-node - image: quay.io/calico/node:v2.4.1 - env: - # Use Kubernetes API as the backing datastore. - - name: DATASTORE_TYPE - value: "kubernetes" - # Enable felix logging. - - name: FELIX_LOGSEVERITYSYS - value: "{{- or .Networking.Canal.LogSeveritySys "INFO" }}" - # Period, in seconds, at which felix re-applies all iptables state - - name: FELIX_IPTABLESREFRESHINTERVAL - value: "60" - # Disable IPV6 support in Felix. - - name: FELIX_IPV6SUPPORT - value: "false" - # Don't enable BGP. - - name: CALICO_NETWORKING_BACKEND - value: "none" - # Cluster type to identify the deployment type - - name: CLUSTER_TYPE - value: "kops,canal" - # Disable file logging so `kubectl logs` works. - - name: CALICO_DISABLE_FILE_LOGGING - value: "true" - - name: WAIT_FOR_DATASTORE - value: "true" - # No IP address needed. 
- - name: IP - value: "" - - name: HOSTNAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - # Set Felix endpoint to host default action to ACCEPT. - - name: FELIX_DEFAULTENDPOINTTOHOSTACTION - value: "{{- or .Networking.Canal.DefaultEndpointToHostAction "ACCEPT" }}" - # Controls whether Felix inserts rules to the top of iptables chains, or appends to the bottom - - name: FELIX_CHAININSERTMODE - value: "{{- or .Networking.Canal.ChainInsertMode "insert" }}" - # Set to enable the experimental Prometheus metrics server - - name: FELIX_PROMETHEUSMETRICSENABLED - value: "{{- or .Networking.Canal.PrometheusMetricsEnabled "false" }}" - # TCP port that the Prometheus metrics server should bind to - - name: FELIX_PROMETHEUSMETRICSPORT - value: "{{- or .Networking.Canal.PrometheusMetricsPort "9091" }}" - # Enable Prometheus Go runtime metrics collection - - name: FELIX_PROMETHEUSGOMETRICSENABLED - value: "{{- or .Networking.Canal.PrometheusGoMetricsEnabled "true" }}" - # Enable Prometheus process metrics collection - - name: FELIX_PROMETHEUSPROCESSMETRICSENABLED - value: "{{- or .Networking.Canal.PrometheusProcessMetricsEnabled "true" }}" - securityContext: - privileged: true - resources: - requests: - cpu: 100m - volumeMounts: - - mountPath: /lib/modules - name: lib-modules - readOnly: true - - mountPath: /var/run/calico - name: var-run-calico - readOnly: false - # This container installs the Calico CNI binaries - # and CNI network config file on each node. - - name: install-cni - image: quay.io/calico/cni:v1.10.0 - command: ["/install-cni.sh"] - env: - # The CNI network config to install on each node. - - name: CNI_NETWORK_CONFIG - valueFrom: - configMapKeyRef: - name: canal-config - key: cni_network_config - - name: KUBERNETES_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - volumeMounts: - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - - mountPath: /host/etc/cni/net.d - name: cni-net-dir - # This container runs flannel using the kube-subnet-mgr backend - # for allocating subnets. - - name: kube-flannel - image: quay.io/coreos/flannel:v0.9.1 - command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ] - securityContext: - privileged: true - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: FLANNELD_IFACE - valueFrom: - configMapKeyRef: - name: canal-config - key: canal_iface - - name: FLANNELD_IP_MASQ - valueFrom: - configMapKeyRef: - name: canal-config - key: masquerade - volumeMounts: - - name: run - mountPath: /run - - name: flannel-cfg - mountPath: /etc/kube-flannel/ - volumes: - # Used by calico/node. - - name: lib-modules - hostPath: - path: /lib/modules - - name: var-run-calico - hostPath: - path: /var/run/calico - # Used to install CNI. - - name: cni-bin-dir - hostPath: - path: /opt/cni/bin - - name: cni-net-dir - hostPath: - path: /etc/cni/net.d - # Used by flannel. - - name: run - hostPath: - path: /run - - name: flannel-cfg - configMap: - name: canal-config diff --git a/upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.6.yaml.template b/upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.6.yaml.template deleted file mode 100644 index 6c97cb9c57..0000000000 --- a/upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.6.yaml.template +++ /dev/null @@ -1,523 +0,0 @@ -{{- $etcd_scheme := EtcdScheme }} -# This ConfigMap is used to configure a self-hosted Calico installation. 
-kind: ConfigMap -apiVersion: v1 -metadata: - name: calico-config - namespace: kube-system -data: - # etcd servers - etcd_endpoints: "{{ $cluster := index .EtcdClusters 0 -}} - {{- range $j, $member := $cluster.Members -}} - {{- if $j }},{{ end -}} - {{ $etcd_scheme }}://etcd-{{ $member.Name }}.internal.{{ ClusterName }}:4001 - {{- end }}" - - # Configure the Calico backend to use. - calico_backend: "bird" - - # The CNI network configuration to install on each node. - cni_network_config: |- - { - "name": "k8s-pod-network", - "type": "calico", - "etcd_endpoints": "__ETCD_ENDPOINTS__", - {{- if eq $etcd_scheme "https" }} - "etcd_ca_cert_file": "/srv/kubernetes/calico/ca.pem", - "etcd_cert_file": "/srv/kubernetes/calico/calico-client.pem", - "etcd_key_file": "/srv/kubernetes/calico/calico-client-key.pem", - "etcd_scheme": "https", - {{- end }} - "log_level": "info", - "ipam": { - "type": "calico-ipam" - }, - "policy": { - "type": "k8s", - "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__", - "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__" - }, - "kubernetes": { - "kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__" - } - } ---- - -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: calico - labels: - role.kubernetes.io/networking: "1" -rules: -- apiGroups: - - "" - resources: - - pods - - namespaces - - nodes - verbs: - - get - - list - - watch -- apiGroups: - - extensions - resources: - - networkpolicies - verbs: - - get - - list - - watch - ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: calico - namespace: kube-system - labels: - role.kubernetes.io/networking: "1" ---- - -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: calico - labels: - role.kubernetes.io/networking: "1" -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: calico -subjects: -- kind: ServiceAccount - name: calico - namespace: kube-system - ---- - -# This manifest installs the calico/node container, as well -# as the Calico CNI plugins and network config on -# each master and worker node in a Kubernetes cluster. -kind: DaemonSet -apiVersion: extensions/v1beta1 -metadata: - name: calico-node - namespace: kube-system - labels: - k8s-app: calico-node - role.kubernetes.io/networking: "1" -spec: - selector: - matchLabels: - k8s-app: calico-node - updateStrategy: - rollingUpdate: - maxUnavailable: 1 - type: RollingUpdate - template: - metadata: - labels: - k8s-app: calico-node - role.kubernetes.io/networking: "1" - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - hostNetwork: true - serviceAccountName: calico - tolerations: - - key: CriticalAddonsOnly - operator: Exists - - effect: NoExecute - operator: Exists - - effect: NoSchedule - operator: Exists - # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force - # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. - terminationGracePeriodSeconds: 0 - containers: - # Runs calico/node container on each Kubernetes node. This - # container programs network policy and routes on each - # host. - - name: calico-node - image: quay.io/calico/node:v2.6.9 - resources: - requests: - cpu: 10m - env: - # The location of the Calico etcd cluster. 
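The ETCD_ENDPOINTS value below is not hard-coded; it is read from the calico-config ConfigMap above, whose etcd_endpoints entry is rendered from the first cluster in .EtcdClusters. As a worked example with an invented cluster name example.k8s.local and members a, b, and c, the https branch of that template renders:

    etcd_endpoints: "https://etcd-a.internal.example.k8s.local:4001,https://etcd-b.internal.example.k8s.local:4001,https://etcd-c.internal.example.k8s.local:4001"
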
- - name: ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - {{- if eq $etcd_scheme "https" }} - - name: ETCD_CERT_FILE - value: /certs/calico-client.pem - - name: ETCD_KEY_FILE - value: /certs/calico-client-key.pem - - name: ETCD_CA_CERT_FILE - value: /certs/ca.pem - {{- end }} - # Enable BGP. Disable to enforce policy only. - - name: CALICO_NETWORKING_BACKEND - valueFrom: - configMapKeyRef: - name: calico-config - key: calico_backend - # Configure the IP Pool from which Pod IPs will be chosen. - - name: CALICO_IPV4POOL_CIDR - value: "{{ .KubeControllerManager.ClusterCIDR }}" - - name: CALICO_IPV4POOL_IPIP - value: "{{- if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) -}}cross-subnet{{- else -}}always{{- end -}}" - # Cluster type to identify the deployment type - - name: CLUSTER_TYPE - value: "kops,bgp" - # Disable file logging so `kubectl logs` works. - - name: CALICO_DISABLE_FILE_LOGGING - value: "true" - # Set noderef for node controller. - - name: CALICO_K8S_NODE_REF - valueFrom: - fieldRef: - fieldPath: spec.nodeName - # Auto-detect the BGP IP address. - - name: IP - value: "" - # Disable IPv6 on Kubernetes. - - name: FELIX_IPV6SUPPORT - value: "false" - # Set Felix logging to the desired level - - name: FELIX_LOGSEVERITYSCREEN - value: "{{- or .Networking.Calico.LogSeverityScreen "info" }}" - # Set to enable the experimental Prometheus metrics server - - name: FELIX_PROMETHEUSMETRICSENABLED - value: "{{- or .Networking.Calico.PrometheusMetricsEnabled "false" }}" - # TCP port that the Prometheus metrics server should bind to - - name: FELIX_PROMETHEUSMETRICSPORT - value: "{{- or .Networking.Calico.PrometheusMetricsPort "9091" }}" - # Enable Prometheus Go runtime metrics collection - - name: FELIX_PROMETHEUSGOMETRICSENABLED - value: "{{- or .Networking.Calico.PrometheusGoMetricsEnabled "true" }}" - # Enable Prometheus process metrics collection - - name: FELIX_PROMETHEUSPROCESSMETRICSENABLED - value: "{{- or .Networking.Calico.PrometheusProcessMetricsEnabled "true" }}" - - name: FELIX_HEALTHENABLED - value: "true" - securityContext: - privileged: true - volumeMounts: - - mountPath: /lib/modules - name: lib-modules - readOnly: true - - mountPath: /var/run/calico - name: var-run-calico - readOnly: false - # Necessary for gossip based DNS - - mountPath: /etc/hosts - name: etc-hosts - readOnly: true - {{- if eq $etcd_scheme "https" }} - - mountPath: /certs - name: calico - readOnly: true - {{- end }} - # This container installs the Calico CNI binaries - # and CNI network config file on each node. - - name: install-cni - image: quay.io/calico/cni:v1.11.5 - resources: - requests: - cpu: 10m - imagePullPolicy: Always - command: ["/install-cni.sh"] - env: - # The name of calico config file - - name: CNI_CONF_NAME - value: 10-calico.conf - # The location of the Calico etcd cluster. - - name: ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - # The CNI network config to install on each node. - - name: CNI_NETWORK_CONFIG - valueFrom: - configMapKeyRef: - name: calico-config - key: cni_network_config - volumeMounts: - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - - mountPath: /host/etc/cni/net.d - name: cni-net-dir - # Necessary for gossip based DNS - - mountPath: /etc/hosts - name: etc-hosts - readOnly: true - volumes: - # Used by calico/node. - - name: lib-modules - hostPath: - path: /lib/modules - - name: var-run-calico - hostPath: - path: /var/run/calico - # Used to install CNI. 
- - name: cni-bin-dir - hostPath: - path: /opt/cni/bin - - name: cni-net-dir - hostPath: - path: /etc/cni/net.d - - name: etc-hosts - hostPath: - path: /etc/hosts - {{- if eq $etcd_scheme "https" }} - - name: calico - hostPath: - path: /srv/kubernetes/calico - {{- end }} - ---- - -# This deployment turns off the old "policy-controller". It should remain at 0 replicas, and then -# be removed entirely once the new kube-controllers deployment has been deployed above. -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: calico-policy-controller - namespace: kube-system - labels: - k8s-app: calico-policy -spec: - # Turn this deployment off in favor of the kube-controllers deployment above. - replicas: 0 - strategy: - type: Recreate - template: - metadata: - name: calico-policy-controller - namespace: kube-system - labels: - k8s-app: calico-policy - spec: - hostNetwork: true - serviceAccountName: calico - containers: - - name: calico-policy-controller - # This shouldn't get updated, since this is the last version we shipped that should be used. - image: quay.io/calico/kube-policy-controller:v0.7.0 - env: - # The location of the Calico etcd cluster. - - name: ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - {{- if eq $etcd_scheme "https" }} - - name: ETCD_CERT_FILE - value: /certs/calico-client.pem - - name: ETCD_KEY_FILE - value: /certs/calico-client-key.pem - - name: ETCD_CA_CERT_FILE - value: /certs/ca.pem - {{- end }} - volumeMounts: - - mountPath: /etc/hosts - name: etc-hosts - readOnly: true - {{- if eq $etcd_scheme "https" }} - - mountPath: /certs - name: calico - readOnly: true - {{- end }} - volumes: - - name: etc-hosts - hostPath: - path: /etc/hosts - {{- if eq $etcd_scheme "https" }} - - name: calico - hostPath: - path: /srv/kubernetes/calico - {{- end }} ---- - -# This manifest deploys the Calico Kubernetes controllers. -# See https://github.com/projectcalico/kube-controllers -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: calico-kube-controllers - namespace: kube-system - labels: - k8s-app: calico-kube-controllers - role.kubernetes.io/networking: "1" -spec: - # The controllers can only have a single active instance. - replicas: 1 - template: - metadata: - name: calico-kube-controllers - namespace: kube-system - labels: - k8s-app: calico-kube-controllers - role.kubernetes.io/networking: "1" - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - # The controllers must run in the host network namespace so that - # it isn't governed by policy that would prevent it from working. - hostNetwork: true - serviceAccountName: calico - tolerations: - - key: node-role.kubernetes.io/master - effect: NoSchedule - - key: CriticalAddonsOnly - operator: Exists - containers: - - name: calico-kube-controllers - image: quay.io/calico/kube-controllers:v1.0.4 - resources: - requests: - cpu: 10m - env: - # By default only policy, profile, workloadendpoint are turned - # on, node controller will decommission nodes that do not exist anymore - # this and CALICO_K8S_NODE_REF in calico-node fixes #3224, but invalid nodes that are - # already registered in calico needs to be deleted manually, see - # https://docs.projectcalico.org/v2.6/usage/decommissioning-a-node - - name: ENABLED_CONTROLLERS - value: policy,profile,workloadendpoint,node - # The location of the Calico etcd cluster. 
- - name: ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - {{- if eq $etcd_scheme "https" }} - - name: ETCD_CERT_FILE - value: /certs/calico-client.pem - - name: ETCD_KEY_FILE - value: /certs/calico-client-key.pem - - name: ETCD_CA_CERT_FILE - value: /certs/ca.pem - volumeMounts: - - mountPath: /certs - name: calico - readOnly: true - {{- end }} - volumes: - - name: etc-hosts - hostPath: - path: /etc/hosts - {{- if eq $etcd_scheme "https" }} - - name: calico - hostPath: - path: /srv/kubernetes/calico - {{- end }} - -{{ if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) -}} -# This manifest installs the k8s-ec2-srcdst container, which disables -# src/dst ip checks to allow BGP to function for calico for hosts within subnets -# This only applies for AWS environments. ---- - -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: k8s-ec2-srcdst - labels: - role.kubernetes.io/networking: "1" -rules: -- apiGroups: - - "" - resources: - - nodes - verbs: - - get - - list - - watch - - update - - patch - ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: k8s-ec2-srcdst - namespace: kube-system - labels: - role.kubernetes.io/networking: "1" ---- - -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: k8s-ec2-srcdst - labels: - role.kubernetes.io/networking: "1" -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: k8s-ec2-srcdst -subjects: -- kind: ServiceAccount - name: k8s-ec2-srcdst - namespace: kube-system - ---- - -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: k8s-ec2-srcdst - namespace: kube-system - labels: - k8s-app: k8s-ec2-srcdst - role.kubernetes.io/networking: "1" -spec: - replicas: 1 - selector: - matchLabels: - k8s-app: k8s-ec2-srcdst - template: - metadata: - labels: - k8s-app: k8s-ec2-srcdst - role.kubernetes.io/networking: "1" - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - hostNetwork: true - tolerations: - - key: node-role.kubernetes.io/master - effect: NoSchedule - - key: CriticalAddonsOnly - operator: Exists - serviceAccountName: k8s-ec2-srcdst - containers: - - image: ottoyiu/k8s-ec2-srcdst:v0.2.0-3-gc0c26eca - name: k8s-ec2-srcdst - resources: - requests: - cpu: 10m - memory: 64Mi - env: - - name: AWS_REGION - value: {{ Region }} - volumeMounts: - - name: ssl-certs - mountPath: "/etc/ssl/certs/ca-certificates.crt" - readOnly: true - imagePullPolicy: "Always" - volumes: - - name: ssl-certs - hostPath: - path: "/etc/ssl/certs/ca-certificates.crt" - nodeSelector: - node-role.kubernetes.io/master: "" -{{- end -}} diff --git a/upup/models/cloudup/resources/addons/networking.projectcalico.org/pre-k8s-1.6.yaml.template b/upup/models/cloudup/resources/addons/networking.projectcalico.org/pre-k8s-1.6.yaml.template deleted file mode 100644 index afff9eead1..0000000000 --- a/upup/models/cloudup/resources/addons/networking.projectcalico.org/pre-k8s-1.6.yaml.template +++ /dev/null @@ -1,272 +0,0 @@ -# This ConfigMap is used to configure a self-hosted Calico installation. -kind: ConfigMap -apiVersion: v1 -metadata: - name: calico-config - namespace: kube-system -data: - # The calico-etcd PetSet service IP:port - etcd_endpoints: "{{ $cluster := index .EtcdClusters 0 -}} - {{- range $j, $member := $cluster.Members -}} - {{- if $j }},{{ end -}} - http://etcd-{{ $member.Name }}.internal.{{ ClusterName }}:4001 - {{- end }}" - -# Configure the Calico backend to use. 
- calico_backend: "bird" - - # The CNI network configuration to install on each node. - cni_network_config: |- - { - "name": "k8s-pod-network", - "type": "calico", - "etcd_endpoints": "__ETCD_ENDPOINTS__", - "log_level": "info", - "ipam": { - "type": "calico-ipam" - }, - "policy": { - "type": "k8s", - "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__", - "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__" - }, - "kubernetes": { - "kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__" - } - } - ---- - -# This manifest installs the calico/node container, as well -# as the Calico CNI plugins and network config on -# each master and worker node in a Kubernetes cluster. -kind: DaemonSet -apiVersion: extensions/v1beta1 -metadata: - name: calico-node - namespace: kube-system - labels: - k8s-app: calico-node - role.kubernetes.io/networking: "1" -spec: - selector: - matchLabels: - k8s-app: calico-node - template: - metadata: - labels: - k8s-app: calico-node - role.kubernetes.io/networking: "1" - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - scheduler.alpha.kubernetes.io/tolerations: | - [{"key": "dedicated", "value": "master", "effect": "NoSchedule" }, - {"key":"CriticalAddonsOnly", "operator":"Exists"}] - spec: - hostNetwork: true - containers: - # Runs calico/node container on each Kubernetes node. This - # container programs network policy and routes on each - # host. - - name: calico-node - image: quay.io/calico/node:v2.4.0 - resources: - requests: - cpu: 10m - env: - # The location of the Calico etcd cluster. - - name: ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - # Choose the backend to use. - - name: CALICO_NETWORKING_BACKEND - valueFrom: - configMapKeyRef: - name: calico-config - key: calico_backend - # Cluster type to identify the deployment type - - name: CLUSTER_TYPE - value: "kops,bgp" - # Disable file logging so `kubectl logs` works. - - name: CALICO_DISABLE_FILE_LOGGING - value: "true" - # Configure the IP Pool from which Pod IPs will be chosen. - - name: CALICO_IPV4POOL_CIDR - value: "{{ .KubeControllerManager.ClusterCIDR }}" - - name: CALICO_IPV4POOL_IPIP - value: "{{- if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) -}}cross-subnet{{- else -}}always{{- end -}}" - # Auto-detect the BGP IP address. - - name: IP - value: "" - # Set to enable the experimental Prometheus metrics server - - name: FELIX_PROMETHEUSMETRICSENABLED - value: "{{- or .Networking.Calico.PrometheusMetricsEnabled "false" }}" - # TCP port that the Prometheus metrics server should bind to - - name: FELIX_PROMETHEUSMETRICSPORT - value: "{{- or .Networking.Calico.PrometheusMetricsPort "9091" }}" - # Enable Prometheus Go runtime metrics collection - - name: FELIX_PROMETHEUSGOMETRICSENABLED - value: "{{- or .Networking.Calico.PrometheusGoMetricsEnabled "true" }}" - # Enable Prometheus process metrics collection - - name: FELIX_PROMETHEUSPROCESSMETRICSENABLED - value: "{{- or .Networking.Calico.PrometheusProcessMetricsEnabled "true" }}" - securityContext: - privileged: true - volumeMounts: - - mountPath: /lib/modules - name: lib-modules - readOnly: true - - mountPath: /var/run/calico - name: var-run-calico - readOnly: false - # This container installs the Calico CNI binaries - # and CNI network config file on each node. 
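The install-cni container below runs the image's /install-cni.sh: it copies the CNI plugin binaries into /opt/cni/bin on the host and writes the network config from CNI_NETWORK_CONFIG into /etc/cni/net.d, substituting the __ETCD_ENDPOINTS__ and kubeconfig placeholders first. A sketch of the rendered result, with an invented endpoint and the conventional file and kubeconfig names assumed:

    # /etc/cni/net.d/10-calico.conf
    {
      "name": "k8s-pod-network",
      "type": "calico",
      "etcd_endpoints": "http://etcd-a.internal.example.k8s.local:4001",
      "log_level": "info",
      "ipam": { "type": "calico-ipam" },
      "kubernetes": { "kubeconfig": "/etc/cni/net.d/calico-kubeconfig" }
    }
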
- - name: install-cni - image: quay.io/calico/cni:v1.10.0 - resources: - requests: - cpu: 10m - imagePullPolicy: Always - command: ["/install-cni.sh"] - env: - # The location of the Calico etcd cluster. - - name: ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - # The CNI network config to install on each node. - - name: CNI_NETWORK_CONFIG - valueFrom: - configMapKeyRef: - name: calico-config - key: cni_network_config - volumeMounts: - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - - mountPath: /host/etc/cni/net.d - name: cni-net-dir - volumes: - # Used by calico/node. - - name: lib-modules - hostPath: - path: /lib/modules - - name: var-run-calico - hostPath: - path: /var/run/calico - # Used to install CNI. - - name: cni-bin-dir - hostPath: - path: /opt/cni/bin - - name: cni-net-dir - hostPath: - path: /etc/cni/net.d - ---- - -# This manifest deploys the Calico policy controller on Kubernetes. -# See https://github.com/projectcalico/k8s-policy -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: calico-policy-controller - namespace: kube-system - labels: - k8s-app: calico-policy - role.kubernetes.io/networking: "1" -spec: - # The policy controller can only have a single active instance. - replicas: 1 - template: - metadata: - name: calico-policy-controller - namespace: kube-system - labels: - k8s-app: calico-policy-controller - role.kubernetes.io/networking: "1" - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - scheduler.alpha.kubernetes.io/tolerations: | - [{"key": "dedicated", "value": "master", "effect": "NoSchedule" }, - {"key":"CriticalAddonsOnly", "operator":"Exists"}] - spec: - # The policy controller must run in the host network namespace so that - # it isn't governed by policy that would prevent it from working. - hostNetwork: true - containers: - - name: calico-policy-controller - image: quay.io/calico/kube-policy-controller:v0.7.0 - resources: - requests: - cpu: 10m - env: - # The location of the Calico etcd cluster. - - name: ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - # The location of the Kubernetes API. Use the default Kubernetes - # service for API access. - - name: K8S_API - value: "https://kubernetes.default:443" - # Since we're running in the host namespace and might not have KubeDNS - # access, configure the container's /etc/hosts to resolve - # kubernetes.default to the correct service clusterIP. - - name: CONFIGURE_ETC_HOSTS - value: "true" - -{{ if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) -}} ---- -# This manifest installs the k8s-ec2-srcdst container, which disables -# src/dst ip checks to allow BGP to function for calico for hosts within subnets -# This only applies for AWS environments. 
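Everything from the {{ if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) }} guard above to the matching {{ end }} is emitted only when the cluster opts into cross-subnet IPIP on AWS, where unencapsulated pod traffic between instances in the same subnet requires the EC2 source/dest check to be off. A cluster-spec excerpt that would take this branch (field path per the kops Cluster API; the rest of the spec is elided):

    spec:
      cloudProvider: aws
      networking:
        calico:
          crossSubnet: true
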
-apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: k8s-ec2-srcdst - namespace: kube-system - labels: - k8s-app: k8s-ec2-srcdst - role.kubernetes.io/networking: "1" -spec: - replicas: 1 - selector: - matchLabels: - k8s-app: k8s-ec2-srcdst - template: - metadata: - labels: - k8s-app: k8s-ec2-srcdst - role.kubernetes.io/networking: "1" - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - scheduler.alpha.kubernetes.io/tolerations: | - [{"key": "dedicated", "value": "master", "effect": "NoSchedule" }, - {"key":"CriticalAddonsOnly", "operator":"Exists"}] - spec: - hostNetwork: true - containers: - - image: ottoyiu/k8s-ec2-srcdst:v0.2.0-3-gc0c26eca - name: k8s-ec2-srcdst - resources: - requests: - cpu: 10m - memory: 64Mi - env: - - name: AWS_REGION - value: {{ Region }} - volumeMounts: - - name: ssl-certs - mountPath: "/etc/ssl/certs/ca-certificates.crt" - readOnly: true - imagePullPolicy: "Always" - volumes: - - name: ssl-certs - hostPath: - path: "/etc/ssl/certs/ca-certificates.crt" - nodeSelector: - kubernetes.io/role: master -{{- end -}} diff --git a/upup/models/cloudup/resources/addons/networking.weave/k8s-1.6.yaml.template b/upup/models/cloudup/resources/addons/networking.weave/k8s-1.6.yaml.template deleted file mode 100644 index 2b7d79831e..0000000000 --- a/upup/models/cloudup/resources/addons/networking.weave/k8s-1.6.yaml.template +++ /dev/null @@ -1,241 +0,0 @@ -{{- if WeaveSecret }} -apiVersion: v1 -kind: Secret -metadata: - name: weave-net - namespace: kube-system -stringData: - network-password: {{ WeaveSecret }} ---- -{{- end }} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: weave-net - namespace: kube-system - labels: - name: weave-net ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole -metadata: - name: weave-net - namespace: kube-system - labels: - name: weave-net - role.kubernetes.io/networking: "1" -rules: - - apiGroups: - - '' - resources: - - pods - - namespaces - - nodes - verbs: - - get - - list - - watch - - apiGroups: - - extensions - resources: - - networkpolicies - verbs: - - get - - list - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: weave-net - namespace: kube-system - labels: - name: weave-net - role.kubernetes.io/networking: "1" -roleRef: - kind: ClusterRole - name: weave-net - apiGroup: rbac.authorization.k8s.io -subjects: - - kind: ServiceAccount - name: weave-net - namespace: kube-system ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: Role -metadata: - name: weave-net - namespace: kube-system - labels: - name: weave-net -rules: - - apiGroups: - - '' - resources: - - configmaps - resourceNames: - - weave-net - verbs: - - get - - update - - apiGroups: - - '' - resources: - - configmaps - verbs: - - create ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: RoleBinding -metadata: - name: weave-net - namespace: kube-system - labels: - name: weave-net -roleRef: - kind: Role - name: weave-net - apiGroup: rbac.authorization.k8s.io -subjects: - - kind: ServiceAccount - name: weave-net - namespace: kube-system ---- -apiVersion: extensions/v1beta1 -kind: DaemonSet -metadata: - name: weave-net - namespace: kube-system - labels: - name: weave-net - role.kubernetes.io/networking: "1" -spec: - template: - metadata: - labels: - name: weave-net - role.kubernetes.io/networking: "1" - annotations: - prometheus.io/scrape: "true" - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - containers: - - name: weave - command: - - 
/home/weave/launch.sh - env: - - name: HOSTNAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: spec.nodeName - - name: IPALLOC_RANGE - value: {{ .KubeControllerManager.ClusterCIDR }} - {{- if .Networking.Weave.MTU }} - - name: WEAVE_MTU - value: "{{ .Networking.Weave.MTU }}" - {{- end }} - {{- if .Networking.Weave.ConnLimit }} - - name: CONN_LIMIT - value: "{{ .Networking.Weave.ConnLimit }}" - {{- end }} - {{- if .Networking.Weave.NetExtraArgs }} - - name: EXTRA_ARGS - value: "{{ .Networking.Weave.NetExtraArgs }}" - {{- end }} - {{- if WeaveSecret }} - - name: WEAVE_PASSWORD - valueFrom: - secretKeyRef: - name: weave-net - key: network-password - {{- end }} - image: 'weaveworks/weave-kube:2.3.0' - ports: - - name: metrics - containerPort: 6782 - readinessProbe: - httpGet: - host: 127.0.0.1 - path: /status - port: 6784 - resources: - requests: - cpu: {{ or .Networking.Weave.CPURequest "50m" }} - memory: {{ or .Networking.Weave.MemoryRequest "200Mi" }} - limits: - {{- if .Networking.Weave.CPULimit }} - cpu: {{ .Networking.Weave.CPULimit }} - {{- end }} - memory: {{ or .Networking.Weave.MemoryLimit "200Mi" }} - securityContext: - privileged: true - volumeMounts: - - name: weavedb - mountPath: /weavedb - - name: cni-bin - mountPath: /host/opt - - name: cni-bin2 - mountPath: /host/home - - name: cni-conf - mountPath: /host/etc - - name: dbus - mountPath: /host/var/lib/dbus - - name: lib-modules - mountPath: /lib/modules - - name: weave-npc - args: - - '--use-legacy-netpol' - env: - - name: HOSTNAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: spec.nodeName - image: 'weaveworks/weave-npc:2.3.0' - ports: - - name: metrics - containerPort: 6781 - resources: - requests: - cpu: {{ or .Networking.Weave.NPCCPURequest "50m" }} - memory: {{ or .Networking.Weave.NPCMemoryRequest "200Mi" }} - limits: - {{- if .Networking.Weave.NPCCPULimit }} - cpu: {{ .Networking.Weave.NPCCPULimit }} - {{- end }} - memory: {{ or .Networking.Weave.NPCMemoryLimit "200Mi" }} - securityContext: - privileged: true - hostNetwork: true - hostPID: true - restartPolicy: Always - securityContext: - seLinuxOptions: {} - serviceAccountName: weave-net - tolerations: - - effect: NoSchedule - operator: Exists - - key: CriticalAddonsOnly - operator: Exists - volumes: - - name: weavedb - hostPath: - path: /var/lib/weave - - name: cni-bin - hostPath: - path: /opt - - name: cni-bin2 - hostPath: - path: /home - - name: cni-conf - hostPath: - path: /etc - - name: dbus - hostPath: - path: /var/lib/dbus - - name: lib-modules - hostPath: - path: /lib/modules - updateStrategy: - type: RollingUpdate diff --git a/upup/models/cloudup/resources/addons/networking.weave/k8s-1.7.yaml.template b/upup/models/cloudup/resources/addons/networking.weave/k8s-1.7.yaml.template deleted file mode 100644 index c4401df975..0000000000 --- a/upup/models/cloudup/resources/addons/networking.weave/k8s-1.7.yaml.template +++ /dev/null @@ -1,258 +0,0 @@ -{{- if WeaveSecret }} -apiVersion: v1 -kind: Secret -metadata: - name: weave-net - namespace: kube-system -stringData: - network-password: {{ WeaveSecret }} ---- -{{- end }} - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: weave-net - namespace: kube-system - labels: - name: weave-net ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole -metadata: - name: weave-net - namespace: kube-system - labels: - name: weave-net - role.kubernetes.io/networking: "1" -rules: - - apiGroups: - - '' - resources: - - pods - - namespaces - - nodes - verbs: - - get - - list - - watch - - 
apiGroups: - - 'networking.k8s.io' - resources: - - networkpolicies - verbs: - - get - - list - - watch - - apiGroups: - - '' - resources: - - nodes/status - verbs: - - patch - - update ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: weave-net - namespace: kube-system - labels: - name: weave-net - role.kubernetes.io/networking: "1" -roleRef: - kind: ClusterRole - name: weave-net - apiGroup: rbac.authorization.k8s.io -subjects: - - kind: ServiceAccount - name: weave-net - namespace: kube-system ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: Role -metadata: - name: weave-net - namespace: kube-system - labels: - name: weave-net -rules: - - apiGroups: - - '' - resources: - - configmaps - resourceNames: - - weave-net - verbs: - - get - - update - - apiGroups: - - '' - resources: - - configmaps - verbs: - - create ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: RoleBinding -metadata: - name: weave-net - namespace: kube-system - labels: - name: weave-net -roleRef: - kind: Role - name: weave-net - apiGroup: rbac.authorization.k8s.io -subjects: - - kind: ServiceAccount - name: weave-net - namespace: kube-system ---- -apiVersion: extensions/v1beta1 -kind: DaemonSet -metadata: - name: weave-net - namespace: kube-system - labels: - name: weave-net - role.kubernetes.io/networking: "1" -spec: - # Wait 5 seconds to let pod connect before rolling next pod - minReadySeconds: 5 - template: - metadata: - labels: - name: weave-net - role.kubernetes.io/networking: "1" - annotations: - prometheus.io/scrape: "true" - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - containers: - - name: weave - command: - - /home/weave/launch.sh - env: - - name: HOSTNAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: spec.nodeName - - name: IPALLOC_RANGE - value: {{ .KubeControllerManager.ClusterCIDR }} - {{- if .Networking.Weave.MTU }} - - name: WEAVE_MTU - value: "{{ .Networking.Weave.MTU }}" - {{- end }} - {{- if .Networking.Weave.ConnLimit }} - - name: CONN_LIMIT - value: "{{ .Networking.Weave.ConnLimit }}" - {{- end }} - {{- if .Networking.Weave.NetExtraArgs }} - - name: EXTRA_ARGS - value: "{{ .Networking.Weave.NetExtraArgs }}" - {{- end }} - {{- if WeaveSecret }} - - name: WEAVE_PASSWORD - valueFrom: - secretKeyRef: - name: weave-net - key: network-password - {{- end }} - image: 'weaveworks/weave-kube:2.6.0' - ports: - - name: metrics - containerPort: 6782 - readinessProbe: - httpGet: - host: 127.0.0.1 - path: /status - port: 6784 - resources: - requests: - cpu: {{ or .Networking.Weave.CPURequest "50m" }} - memory: {{ or .Networking.Weave.MemoryRequest "200Mi" }} - limits: - {{- if .Networking.Weave.CPULimit }} - cpu: {{ .Networking.Weave.CPULimit }} - {{- end }} - memory: {{ or .Networking.Weave.MemoryLimit "200Mi" }} - securityContext: - privileged: true - volumeMounts: - - name: weavedb - mountPath: /weavedb - - name: cni-bin - mountPath: /host/opt - - name: cni-bin2 - mountPath: /host/home - - name: cni-conf - mountPath: /host/etc - - name: dbus - mountPath: /host/var/lib/dbus - - name: lib-modules - mountPath: /lib/modules - - name: xtables-lock - mountPath: /run/xtables.lock - - name: weave-npc - args: [] - env: - - name: HOSTNAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: spec.nodeName - image: 'weaveworks/weave-npc:2.6.0' - ports: - - name: metrics - containerPort: 6781 - resources: - requests: - cpu: {{ or .Networking.Weave.NPCCPURequest "50m" }} - memory: {{ or .Networking.Weave.NPCMemoryRequest "200Mi" }} - 
limits: - {{- if .Networking.Weave.NPCCPULimit }} - cpu: {{ .Networking.Weave.NPCCPULimit }} - {{- end }} - memory: {{ or .Networking.Weave.NPCMemoryLimit "200Mi" }} - securityContext: - privileged: true - volumeMounts: - - name: xtables-lock - mountPath: /run/xtables.lock - hostNetwork: true - hostPID: true - restartPolicy: Always - securityContext: - seLinuxOptions: {} - serviceAccountName: weave-net - tolerations: - - effect: NoSchedule - operator: Exists - - key: CriticalAddonsOnly - operator: Exists - volumes: - - name: weavedb - hostPath: - path: /var/lib/weave - - name: cni-bin - hostPath: - path: /opt - - name: cni-bin2 - hostPath: - path: /home - - name: cni-conf - hostPath: - path: /etc - - name: dbus - hostPath: - path: /var/lib/dbus - - name: lib-modules - hostPath: - path: /lib/modules - - name: xtables-lock - hostPath: - path: /run/xtables.lock - updateStrategy: - type: RollingUpdate diff --git a/upup/models/cloudup/resources/addons/networking.weave/pre-k8s-1.6.yaml.template b/upup/models/cloudup/resources/addons/networking.weave/pre-k8s-1.6.yaml.template deleted file mode 100644 index 453b6c106b..0000000000 --- a/upup/models/cloudup/resources/addons/networking.weave/pre-k8s-1.6.yaml.template +++ /dev/null @@ -1,129 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: weave-net - labels: - name: weave-net - role.kubernetes.io/networking: "1" - namespace: kube-system ---- -apiVersion: extensions/v1beta1 -kind: DaemonSet -metadata: - name: weave-net - labels: - name: weave-net - role.kubernetes.io/networking: "1" - namespace: kube-system -spec: - template: - metadata: - annotations: - prometheus.io/scrape: "true" - scheduler.alpha.kubernetes.io/critical-pod: '' - scheduler.alpha.kubernetes.io/tolerations: >- - [{"key":"dedicated","operator":"Equal","value":"master","effect":"NoSchedule"},{"key":"CriticalAddonsOnly", "operator":"Exists"}] - labels: - name: weave-net - role.kubernetes.io/networking: "1" - spec: - containers: - - name: weave - command: - - /home/weave/launch.sh - env: - - name: HOSTNAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: spec.nodeName - - name: IPALLOC_RANGE - value: {{ .KubeControllerManager.ClusterCIDR }} - {{- if .Networking.Weave.MTU }} - - name: WEAVE_MTU - value: "{{ .Networking.Weave.MTU }}" - {{- end }} - {{- if .Networking.Weave.ConnLimit }} - - name: CONN_LIMIT - value: "{{ .Networking.Weave.ConnLimit }}" - {{- end }} - image: 'weaveworks/weave-kube:2.3.0' - ports: - - name: metrics - containerPort: 6782 - livenessProbe: - httpGet: - host: 127.0.0.1 - path: /status - port: 6784 - initialDelaySeconds: 30 - resources: - requests: - cpu: {{ or .Networking.Weave.CPURequest "50m" }} - memory: {{ or .Networking.Weave.MemoryRequest "200Mi" }} - limits: - {{- if .Networking.Weave.CPULimit }} - cpu: {{ .Networking.Weave.CPULimit }} - {{- end }} - memory: {{ or .Networking.Weave.MemoryLimit "200Mi" }} - securityContext: - privileged: true - volumeMounts: - - name: weavedb - mountPath: /weavedb - - name: cni-bin - mountPath: /host/opt - - name: cni-bin2 - mountPath: /host/home - - name: cni-conf - mountPath: /host/etc - - name: dbus - mountPath: /host/var/lib/dbus - - name: lib-modules - mountPath: /lib/modules - - name: weave-npc - args: - - '--use-legacy-netpol' - env: - - name: HOSTNAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: spec.nodeName - image: 'weaveworks/weave-npc:2.3.0' - ports: - - name: metrics - containerPort: 6781 - resources: - requests: - cpu: 50m - memory: 200Mi - limits: - memory: 200Mi - 
securityContext: - privileged: true - hostNetwork: true - hostPID: true - restartPolicy: Always - securityContext: - seLinuxOptions: {} - serviceAccountName: weave-net - volumes: - - name: weavedb - hostPath: - path: /var/lib/weave - - name: cni-bin - hostPath: - path: /opt - - name: cni-bin2 - hostPath: - path: /home - - name: cni-conf - hostPath: - path: /etc - - name: dbus - hostPath: - path: /var/lib/dbus - - name: lib-modules - hostPath: - path: /lib/modules diff --git a/upup/models/cloudup/resources/addons/spotinst-kubernetes-cluster-controller.addons.k8s.io/v1.8.0.yaml.template b/upup/models/cloudup/resources/addons/spotinst-kubernetes-cluster-controller.addons.k8s.io/v1.8.0.yaml.template deleted file mode 100644 index 7baaf7df1e..0000000000 --- a/upup/models/cloudup/resources/addons/spotinst-kubernetes-cluster-controller.addons.k8s.io/v1.8.0.yaml.template +++ /dev/null @@ -1,138 +0,0 @@ -# ------------------------------------------ -# Config Map -# ------------------------------------------ -apiVersion: v1 -kind: ConfigMap -metadata: - name: spotinst-kubernetes-cluster-controller-config - namespace: kube-system -data: - spotinst.token: {{ SpotinstToken }} - spotinst.account: {{ SpotinstAccount }} - spotinst.cluster-identifier: {{ ClusterName }} ---- -# ------------------------------------------ -# Secret -# ------------------------------------------ -apiVersion: v1 -kind: Secret -metadata: - name: spotinst-kubernetes-cluster-controller-certs - namespace: kube-system -type: Opaque ---- -# ------------------------------------------ -# Service Account -# ------------------------------------------ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: spotinst-kubernetes-cluster-controller - namespace: kube-system ---- -# ------------------------------------------ -# Cluster Role -# ------------------------------------------ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: spotinst-kubernetes-cluster-controller - namespace: kube-system -rules: -- apiGroups: [""] - resources: ["pods", "nodes", "replicationcontrollers", "events", "limitranges", "services", "persistentvolumes", "persistentvolumeclaims", "namespaces"] - verbs: ["get", "delete", "list", "patch", "update"] -- apiGroups: ["apps"] - resources: ["deployments"] - verbs: ["get","list","patch"] -- apiGroups: ["extensions"] - resources: ["replicasets"] - verbs: ["get","list"] -- apiGroups: ["rbac.authorization.k8s.io"] - resources: ["clusterroles"] - verbs: ["patch", "update", "escalate"] -- apiGroups: ["policy"] - resources: ["poddisruptionbudgets"] - verbs: ["list"] -- apiGroups: ["metrics.k8s.io"] - resources: ["pods"] - verbs: ["list"] -- nonResourceURLs: ["/version/", "/version"] - verbs: ["get"] ---- -# ------------------------------------------ -# Cluster Role Binding -# ------------------------------------------ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: spotinst-kubernetes-cluster-controller - namespace: kube-system -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: spotinst-kubernetes-cluster-controller -subjects: -- kind: ServiceAccount - name: spotinst-kubernetes-cluster-controller - namespace: kube-system ---- -# ------------------------------------------ -# Deployment -# ------------------------------------------ -apiVersion: apps/v1beta1 -kind: Deployment -metadata: - labels: - k8s-addon: spotinst-kubernetes-cluster-controller.addons.k8s.io - name: spotinst-kubernetes-cluster-controller - namespace: kube-system 
-spec: - replicas: 1 - revisionHistoryLimit: 10 - selector: - matchLabels: - k8s-addon: spotinst-kubernetes-cluster-controller.addons.k8s.io - template: - metadata: - labels: - k8s-addon: spotinst-kubernetes-cluster-controller.addons.k8s.io - spec: - containers: - - name: spotinst-kubernetes-cluster-controller - imagePullPolicy: Always - image: spotinst/kubernetes-cluster-controller:1.0.39 - volumeMounts: - - name: spotinst-kubernetes-cluster-controller-certs - mountPath: /certs - livenessProbe: - httpGet: - path: /healthcheck - port: 4401 - initialDelaySeconds: 300 - periodSeconds: 30 - env: - - name: SPOTINST_TOKEN - valueFrom: - configMapKeyRef: - name: spotinst-kubernetes-cluster-controller-config - key: spotinst.token - - name: SPOTINST_ACCOUNT - valueFrom: - configMapKeyRef: - name: spotinst-kubernetes-cluster-controller-config - key: spotinst.account - - name: CLUSTER_IDENTIFIER - valueFrom: - configMapKeyRef: - name: spotinst-kubernetes-cluster-controller-config - key: spotinst.cluster-identifier - volumes: - - name: spotinst-kubernetes-cluster-controller-certs - secret: - secretName: spotinst-kubernetes-cluster-controller-certs - serviceAccountName: spotinst-kubernetes-cluster-controller - tolerations: - - key: node-role.kubernetes.io/master - effect: NoSchedule ---- diff --git a/upup/models/cloudup/resources/addons/storage-aws.addons.k8s.io/v1.6.0.yaml b/upup/models/cloudup/resources/addons/storage-aws.addons.k8s.io/v1.6.0.yaml deleted file mode 100644 index 7c8b21c084..0000000000 --- a/upup/models/cloudup/resources/addons/storage-aws.addons.k8s.io/v1.6.0.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: storage.k8s.io/v1beta1 -kind: StorageClass -metadata: - name: default - labels: - k8s-addon: storage-aws.addons.k8s.io -provisioner: kubernetes.io/aws-ebs -parameters: - type: gp2 - ---- - -apiVersion: storage.k8s.io/v1beta1 -kind: StorageClass -metadata: - name: gp2 - annotations: - storageclass.beta.kubernetes.io/is-default-class: "true" - labels: - k8s-addon: storage-aws.addons.k8s.io -provisioner: kubernetes.io/aws-ebs -parameters: - type: gp2 - diff --git a/upup/models/cloudup/resources/addons/storage-gce.addons.k8s.io/v1.6.0.yaml b/upup/models/cloudup/resources/addons/storage-gce.addons.k8s.io/v1.6.0.yaml deleted file mode 100644 index 2174c9c2c3..0000000000 --- a/upup/models/cloudup/resources/addons/storage-gce.addons.k8s.io/v1.6.0.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: storage.k8s.io/v1beta1 -kind: StorageClass -metadata: - name: standard - annotations: - storageclass.beta.kubernetes.io/is-default-class: "true" - labels: - kubernetes.io/cluster-service: "true" - k8s-addon: storage-gce.addons.k8s.io - addonmanager.kubernetes.io/mode: EnsureExists -provisioner: kubernetes.io/gce-pd -parameters: - type: pd-standard diff --git a/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go b/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go index 272cb00221..3e59bec4b5 100644 --- a/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go +++ b/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go @@ -238,20 +238,6 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons { key := "kube-dns.addons.k8s.io" version := "1.14.13-kops.1" - { - location := key + "/pre-k8s-1.6.yaml" - id := "pre-k8s-1.6" - - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(version), - Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), - KubernetesVersion: "<1.6.0", - Id: id, - }) - } - { location := key + 
"/k8s-1.6.yaml" id := "k8s-1.6" @@ -261,7 +247,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons { Version: fi.String(version), Selector: map[string]string{"k8s-addon": key}, Manifest: fi.String(location), - KubernetesVersion: ">=1.6.0 <1.12.0", + KubernetesVersion: "<1.12.0", Id: id, }) } @@ -296,7 +282,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons { Version: fi.String(version), Selector: map[string]string{"k8s-addon": key}, Manifest: fi.String(location), - KubernetesVersion: ">=1.6.0 <1.12.0", + KubernetesVersion: "<1.12.0", Id: id, }) } @@ -345,12 +331,11 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons { id := "k8s-1.8" addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(version), - Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), - KubernetesVersion: ">=1.8.0", - Id: id, + Name: fi.String(key), + Version: fi.String(version), + Selector: map[string]string{"k8s-addon": key}, + Manifest: fi.String(location), + Id: id, }) } } @@ -368,12 +353,11 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons { id := "k8s-1.9" addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(version), - Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), - KubernetesVersion: ">=1.9.0", - Id: id, + Name: fi.String(key), + Version: fi.String(version), + Selector: map[string]string{"k8s-addon": key}, + Manifest: fi.String(location), + Id: id, }) } } @@ -398,20 +382,6 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons { key := "dns-controller.addons.k8s.io" version := "1.17.0-alpha.1" - { - location := key + "/pre-k8s-1.6.yaml" - id := "pre-k8s-1.6" - - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(version), - Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), - KubernetesVersion: "<1.6.0", - Id: id, - }) - } - { location := key + "/k8s-1.6.yaml" id := "k8s-1.6" @@ -421,7 +391,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons { Version: fi.String(version), Selector: map[string]string{"k8s-addon": key}, Manifest: fi.String(location), - KubernetesVersion: ">=1.6.0 <1.12.0", + KubernetesVersion: "<1.12.0", Id: id, }) } @@ -447,20 +417,6 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons { key := "external-dns.addons.k8s.io" version := "0.4.4" - { - location := key + "/pre-k8s-1.6.yaml" - id := "pre-k8s-1.6" - - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(version), - Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), - KubernetesVersion: "<1.6.0", - Id: id, - }) - } - { location := key + "/k8s-1.6.yaml" id := "k8s-1.6" @@ -470,7 +426,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons { Version: fi.String(version), Selector: map[string]string{"k8s-addon": key}, Manifest: fi.String(location), - KubernetesVersion: ">=1.6.0 <1.12.0", + KubernetesVersion: "<1.12.0", Id: id, }) } @@ -518,21 +474,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons { Version: fi.String(version), Selector: map[string]string{"k8s-addon": key}, Manifest: fi.String(location), - KubernetesVersion: ">=1.7.0 <1.15.0", - Id: id, - }) - } - - { - id := "v1.6.0" - location := key + 
"/" + id + ".yaml" - - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(version), - Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), - KubernetesVersion: "<1.7.0", + KubernetesVersion: "<1.15.0", Id: id, }) } @@ -547,12 +489,11 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons { location := key + "/" + id + ".yaml" addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(version), - Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), - KubernetesVersion: ">=1.8.0", - Id: id, + Name: fi.String(key), + Version: fi.String(version), + Selector: map[string]string{"k8s-addon": key}, + Manifest: fi.String(location), + Id: id, }) } } @@ -561,31 +502,16 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons { key := "storage-gce.addons.k8s.io" version := "1.7.0" - { - id := "v1.6.0" - location := key + "/" + id + ".yaml" - - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(version), - Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), - KubernetesVersion: "<1.7.0", - Id: id, - }) - } - { id := "v1.7.0" location := key + "/" + id + ".yaml" addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(version), - Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), - KubernetesVersion: ">=1.7.0", - Id: id, + Name: fi.String(key), + Version: fi.String(version), + Selector: map[string]string{"k8s-addon": key}, + Manifest: fi.String(location), + Id: id, }) } } @@ -593,33 +519,17 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons { if featureflag.Spotinst.Enabled() { key := "spotinst-kubernetes-cluster-controller.addons.k8s.io" - { - id := "v1.8.0" - location := key + "/" + id + ".yaml" - version := "1.0.39" - - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(version), - Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), - KubernetesVersion: "<1.9.0", - Id: id, - }) - } - { id := "v1.9.0" location := key + "/" + id + ".yaml" version := "1.0.39" addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(version), - Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), - KubernetesVersion: ">=1.9.0", - Id: id, + Name: fi.String(key), + Version: fi.String(version), + Selector: map[string]string{"k8s-addon": key}, + Manifest: fi.String(location), + Id: id, }) } @@ -662,20 +572,6 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons { key := "networking.kope.io" version := "1.0.20181028-kops.1" - { - location := key + "/pre-k8s-1.6.yaml" - id := "pre-k8s-1.6" - - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(version), - Selector: networkingSelector, - Manifest: fi.String(location), - KubernetesVersion: "<1.6.0", - Id: id, - }) - } - { location := key + "/k8s-1.6.yaml" id := "k8s-1.6" @@ -685,7 +581,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons { Version: fi.String(version), Selector: networkingSelector, Manifest: fi.String(location), - KubernetesVersion: ">=1.6.0 <1.12.0", + KubernetesVersion: 
"<1.12.0", Id: id, }) } @@ -708,53 +604,8 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons { if b.cluster.Spec.Networking.Weave != nil { key := "networking.weave" versions := map[string]string{ - "pre-k8s-1.6": "2.3.0-kops.3", - "k8s-1.6": "2.3.0-kops.3", - "k8s-1.7": "2.6.0-kops.2", - "k8s-1.8": "2.6.0-kops.2", - "k8s-1.12": "2.6.0-kops.3", - } - - { - location := key + "/pre-k8s-1.6.yaml" - id := "pre-k8s-1.6" - - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(versions[id]), - Selector: networkingSelector, - Manifest: fi.String(location), - KubernetesVersion: "<1.6.0", - Id: id, - }) - } - - { - location := key + "/k8s-1.6.yaml" - id := "k8s-1.6" - - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(versions[id]), - Selector: networkingSelector, - Manifest: fi.String(location), - KubernetesVersion: ">=1.6.0 <1.7.0", - Id: id, - }) - } - - { - location := key + "/k8s-1.7.yaml" - id := "k8s-1.7" - - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(versions[id]), - Selector: networkingSelector, - Manifest: fi.String(location), - KubernetesVersion: ">=1.7.0 <1.8.0", - Id: id, - }) + "k8s-1.8": "2.6.0-kops.2", + "k8s-1.12": "2.6.0-kops.3", } { @@ -766,7 +617,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons { Version: fi.String(versions[id]), Selector: networkingSelector, Manifest: fi.String(location), - KubernetesVersion: ">=1.8.0 <1.12.0", + KubernetesVersion: "<1.12.0", Id: id, }) } @@ -789,23 +640,8 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons { if b.cluster.Spec.Networking.Flannel != nil { key := "networking.flannel" versions := map[string]string{ - "pre-k8s-1.6": "0.11.0-kops.1", - "k8s-1.6": "0.11.0-kops.2", - "k8s-1.12": "0.11.0-kops.3", - } - - { - location := key + "/pre-k8s-1.6.yaml" - id := "pre-k8s-1.6" - - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(versions[id]), - Selector: networkingSelector, - Manifest: fi.String(location), - KubernetesVersion: "<1.6.0", - Id: id, - }) + "k8s-1.6": "0.11.0-kops.2", + "k8s-1.12": "0.11.0-kops.3", } { @@ -817,7 +653,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons { Version: fi.String(versions[id]), Selector: networkingSelector, Manifest: fi.String(location), - KubernetesVersion: ">=1.6.0 <1.12.0", + KubernetesVersion: "<1.12.0", Id: id, }) } @@ -840,12 +676,10 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons { if b.cluster.Spec.Networking.Calico != nil { key := "networking.projectcalico.org" versions := map[string]string{ - "pre-k8s-1.6": "2.4.2-kops.1", - "k8s-1.6": "2.6.9-kops.1", - "k8s-1.7": "2.6.12-kops.1", - "k8s-1.7-v3": "3.8.0-kops.2", - "k8s-1.12": "3.9.3-kops.2", - "k8s-1.16": "3.10.2-kops.1", + "k8s-1.7": "2.6.12-kops.1", + "k8s-1.7-v3": "3.8.0-kops.2", + "k8s-1.12": "3.9.3-kops.2", + "k8s-1.16": "3.10.2-kops.1", } { @@ -886,39 +720,11 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons { Version: fi.String(versions[id]), Selector: networkingSelector, Manifest: fi.String(location), - KubernetesVersion: ">=1.7.0 <1.12.0", + KubernetesVersion: "<1.12.0", Id: id, }) } } else { - { - id := "pre-k8s-1.6" - location := key + "/" + id + ".yaml" - - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: 
fi.String(key), - Version: fi.String(versions[id]), - Selector: networkingSelector, - Manifest: fi.String(location), - KubernetesVersion: "<1.6.0", - Id: id, - }) - } - - { - id := "k8s-1.6" - location := key + "/" + id + ".yaml" - - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(versions[id]), - Selector: networkingSelector, - Manifest: fi.String(location), - KubernetesVersion: ">=1.6.0 <1.7.0", - Id: id, - }) - } - { id := "k8s-1.7" location := key + "/" + id + ".yaml" @@ -928,7 +734,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons { Version: fi.String(versions[id]), Selector: networkingSelector, Manifest: fi.String(location), - KubernetesVersion: ">=1.7.0 <1.12.0", + KubernetesVersion: "<1.12.0", Id: id, }) } @@ -938,53 +744,9 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons { if b.cluster.Spec.Networking.Canal != nil { key := "networking.projectcalico.org.canal" versions := map[string]string{ - "pre-k8s-1.6": "2.4.2-kops.2", - "k8s-1.6": "2.4.2-kops.2", - "k8s-1.8": "2.6.7-kops.3", - "k8s-1.9": "3.2.3-kops.1", - "k8s-1.12": "3.7.4-kops.1", - "k8s-1.15": "3.10.2-kops.1", - } - { - id := "pre-k8s-1.6" - location := key + "/" + id + ".yaml" - - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(versions[id]), - Selector: networkingSelector, - Manifest: fi.String(location), - KubernetesVersion: "<1.6.0", - Id: id, - }) - } - - { - id := "k8s-1.6" - location := key + "/" + id + ".yaml" - - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(versions[id]), - Selector: networkingSelector, - Manifest: fi.String(location), - KubernetesVersion: ">=1.6.0 <1.8.0", - Id: id, - }) - } - - { - id := "k8s-1.8" - location := key + "/" + id + ".yaml" - - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(versions[id]), - Selector: networkingSelector, - Manifest: fi.String(location), - KubernetesVersion: ">=1.8.0 <1.9.0", - Id: id, - }) + "k8s-1.9": "3.2.3-kops.1", + "k8s-1.12": "3.7.4-kops.1", + "k8s-1.15": "3.10.2-kops.1", } { id := "k8s-1.9" @@ -995,7 +757,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons { Version: fi.String(versions[id]), Selector: networkingSelector, Manifest: fi.String(location), - KubernetesVersion: ">=1.9.0 <1.12.0", + KubernetesVersion: "<1.12.0", Id: id, }) } @@ -1040,7 +802,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons { Version: fi.String(version), Selector: networkingSelector, Manifest: fi.String(location), - KubernetesVersion: ">=1.6.0 <1.12.0", + KubernetesVersion: "<1.12.0", Id: id, }) } @@ -1073,7 +835,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons { Version: fi.String(version), Selector: networkingSelector, Manifest: fi.String(location), - KubernetesVersion: ">=1.7.0 <1.12.0", + KubernetesVersion: "<1.12.0", Id: id, }) } @@ -1097,26 +859,11 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons { key := "networking.amazon-vpc-routed-eni" versions := map[string]string{ - "k8s-1.7": "1.5.0-kops.1", "k8s-1.8": "1.5.0-kops.1", "k8s-1.10": "1.5.0-kops.1", "k8s-1.12": "1.5.5-kops.1", } - { - id := "k8s-1.7" - location := key + "/" + id + ".yaml" - - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: 
fi.String(versions[id]), - Selector: networkingSelector, - Manifest: fi.String(location), - KubernetesVersion: ">=1.7.0 <1.8.0", - Id: id, - }) - } - { id := "k8s-1.8" location := key + "/" + id + ".yaml" @@ -1126,7 +873,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons { Version: fi.String(versions[id]), Selector: networkingSelector, Manifest: fi.String(location), - KubernetesVersion: ">=1.8.0 <1.10.0", + KubernetesVersion: "<1.10.0", Id: id, }) } @@ -1173,7 +920,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons { Version: fi.String(version), Selector: networkingSelector, Manifest: fi.String(location), - KubernetesVersion: ">=1.7.0 <1.12.0", + KubernetesVersion: "<1.12.0", Id: id, }) } @@ -1209,7 +956,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons { Version: fi.String(version), Selector: authenticationSelector, Manifest: fi.String(location), - KubernetesVersion: ">=1.8.0 <1.12.0", + KubernetesVersion: "<1.12.0", Id: id, }) } @@ -1310,7 +1057,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons { Version: fi.String(version), Selector: map[string]string{"k8s-addon": key}, Manifest: fi.String(location), - KubernetesVersion: ">=1.7.0 <1.12.0", + KubernetesVersion: "<1.12.0", Id: id, }) } From 789a5404c8fd09e23d35cbe22e4ddbddbd0d1892 Mon Sep 17 00:00:00 2001 From: John Gardiner Myers Date: Fri, 27 Dec 2019 20:41:15 -0800 Subject: [PATCH 15/42] update-expected.sh --- .../amazonvpc/manifest.yaml | 42 ++------------ .../cilium/manifest.yaml | 34 ++--------- .../simple/manifest.yaml | 32 +--------- .../weave/manifest.yaml | 58 ++----------------- 4 files changed, 15 insertions(+), 151 deletions(-) diff --git a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/amazonvpc/manifest.yaml b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/amazonvpc/manifest.yaml index 6588b5aca4..65f809ae6b 100644 --- a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/amazonvpc/manifest.yaml +++ b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/amazonvpc/manifest.yaml @@ -18,16 +18,8 @@ spec: selector: k8s-addon: core.addons.k8s.io version: 1.4.0 - - id: pre-k8s-1.6 - kubernetesVersion: <1.6.0 - manifest: kube-dns.addons.k8s.io/pre-k8s-1.6.yaml - manifestHash: 895c961cb9365cbedb22edd20a7648182ae7ed3f - name: kube-dns.addons.k8s.io - selector: - k8s-addon: kube-dns.addons.k8s.io - version: 1.14.13-kops.1 - id: k8s-1.6 - kubernetesVersion: '>=1.6.0 <1.12.0' + kubernetesVersion: <1.12.0 manifest: kube-dns.addons.k8s.io/k8s-1.6.yaml manifestHash: 555f952a8b955ce7a5dd0bcd06a5be9e72bd2895 name: kube-dns.addons.k8s.io @@ -43,7 +35,6 @@ spec: k8s-addon: kube-dns.addons.k8s.io version: 1.14.13-kops.1 - id: k8s-1.8 - kubernetesVersion: '>=1.8.0' manifest: rbac.addons.k8s.io/k8s-1.8.yaml manifestHash: 5d53ce7b920cd1e8d65d2306d80a041420711914 name: rbac.addons.k8s.io @@ -51,7 +42,6 @@ spec: k8s-addon: rbac.addons.k8s.io version: 1.8.0 - id: k8s-1.9 - kubernetesVersion: '>=1.9.0' manifest: kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml manifestHash: e1508d77cb4e527d7a2939babe36dc350dd83745 name: kubelet-api.rbac.addons.k8s.io @@ -64,16 +54,8 @@ spec: selector: k8s-addon: limit-range.addons.k8s.io version: 1.5.0 - - id: pre-k8s-1.6 - kubernetesVersion: <1.6.0 - manifest: dns-controller.addons.k8s.io/pre-k8s-1.6.yaml - manifestHash: e19c5456a31381c08dd166ce1faf85ce7acc15e3 - name: dns-controller.addons.k8s.io - selector: - k8s-addon: dns-controller.addons.k8s.io - version: 1.17.0-alpha.1 - id: k8s-1.6 - kubernetesVersion: '>=1.6.0 <1.12.0' + 
kubernetesVersion: <1.12.0 manifest: dns-controller.addons.k8s.io/k8s-1.6.yaml manifestHash: 2d6fa6910077fecdf1c98da4303631588cfc9c01 name: dns-controller.addons.k8s.io @@ -97,31 +79,15 @@ spec: k8s-addon: storage-aws.addons.k8s.io version: 1.15.0 - id: v1.7.0 - kubernetesVersion: '>=1.7.0 <1.15.0' + kubernetesVersion: <1.15.0 manifest: storage-aws.addons.k8s.io/v1.7.0.yaml manifestHash: 62705a596142e6cc283280e8aa973e51536994c5 name: storage-aws.addons.k8s.io selector: k8s-addon: storage-aws.addons.k8s.io version: 1.15.0 - - id: v1.6.0 - kubernetesVersion: <1.7.0 - manifest: storage-aws.addons.k8s.io/v1.6.0.yaml - manifestHash: 7de4b2eb0521d669172038759c521418711d8266 - name: storage-aws.addons.k8s.io - selector: - k8s-addon: storage-aws.addons.k8s.io - version: 1.15.0 - - id: k8s-1.7 - kubernetesVersion: '>=1.7.0 <1.8.0' - manifest: networking.amazon-vpc-routed-eni/k8s-1.7.yaml - manifestHash: 394edf46a78e6d1f6dda920b0214afcd4ce34bc3 - name: networking.amazon-vpc-routed-eni - selector: - role.kubernetes.io/networking: "1" - version: 1.5.0-kops.1 - id: k8s-1.8 - kubernetesVersion: '>=1.8.0 <1.10.0' + kubernetesVersion: <1.10.0 manifest: networking.amazon-vpc-routed-eni/k8s-1.8.yaml manifestHash: 544fd24d754b32e8896dba6113f1053a4ba86694 name: networking.amazon-vpc-routed-eni diff --git a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/cilium/manifest.yaml b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/cilium/manifest.yaml index a455fcc990..14e4f3ae50 100644 --- a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/cilium/manifest.yaml +++ b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/cilium/manifest.yaml @@ -18,16 +18,8 @@ spec: selector: k8s-addon: core.addons.k8s.io version: 1.4.0 - - id: pre-k8s-1.6 - kubernetesVersion: <1.6.0 - manifest: kube-dns.addons.k8s.io/pre-k8s-1.6.yaml - manifestHash: 90f1e4bedea6da183eb4c6788879f7297119ff3e - name: kube-dns.addons.k8s.io - selector: - k8s-addon: kube-dns.addons.k8s.io - version: 1.14.13-kops.1 - id: k8s-1.6 - kubernetesVersion: '>=1.6.0 <1.12.0' + kubernetesVersion: <1.12.0 manifest: kube-dns.addons.k8s.io/k8s-1.6.yaml manifestHash: c74ca65f461c764fc9682c6d9ec171b241bec335 name: kube-dns.addons.k8s.io @@ -43,7 +35,6 @@ spec: k8s-addon: kube-dns.addons.k8s.io version: 1.14.13-kops.1 - id: k8s-1.8 - kubernetesVersion: '>=1.8.0' manifest: rbac.addons.k8s.io/k8s-1.8.yaml manifestHash: 5d53ce7b920cd1e8d65d2306d80a041420711914 name: rbac.addons.k8s.io @@ -51,7 +42,6 @@ spec: k8s-addon: rbac.addons.k8s.io version: 1.8.0 - id: k8s-1.9 - kubernetesVersion: '>=1.9.0' manifest: kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml manifestHash: e1508d77cb4e527d7a2939babe36dc350dd83745 name: kubelet-api.rbac.addons.k8s.io @@ -64,16 +54,8 @@ spec: selector: k8s-addon: limit-range.addons.k8s.io version: 1.5.0 - - id: pre-k8s-1.6 - kubernetesVersion: <1.6.0 - manifest: dns-controller.addons.k8s.io/pre-k8s-1.6.yaml - manifestHash: e19c5456a31381c08dd166ce1faf85ce7acc15e3 - name: dns-controller.addons.k8s.io - selector: - k8s-addon: dns-controller.addons.k8s.io - version: 1.17.0-alpha.1 - id: k8s-1.6 - kubernetesVersion: '>=1.6.0 <1.12.0' + kubernetesVersion: <1.12.0 manifest: dns-controller.addons.k8s.io/k8s-1.6.yaml manifestHash: 2d6fa6910077fecdf1c98da4303631588cfc9c01 name: dns-controller.addons.k8s.io @@ -97,23 +79,15 @@ spec: k8s-addon: storage-aws.addons.k8s.io version: 1.15.0 - id: v1.7.0 - kubernetesVersion: '>=1.7.0 <1.15.0' + kubernetesVersion: <1.15.0 manifest: storage-aws.addons.k8s.io/v1.7.0.yaml manifestHash: 
62705a596142e6cc283280e8aa973e51536994c5 name: storage-aws.addons.k8s.io selector: k8s-addon: storage-aws.addons.k8s.io version: 1.15.0 - - id: v1.6.0 - kubernetesVersion: <1.7.0 - manifest: storage-aws.addons.k8s.io/v1.6.0.yaml - manifestHash: 7de4b2eb0521d669172038759c521418711d8266 - name: storage-aws.addons.k8s.io - selector: - k8s-addon: storage-aws.addons.k8s.io - version: 1.15.0 - id: k8s-1.7 - kubernetesVersion: '>=1.7.0 <1.12.0' + kubernetesVersion: <1.12.0 manifest: networking.cilium.io/k8s-1.7.yaml manifestHash: 2d40b9ab7453b4a0a413196fae4c8bdcd62c69ce name: networking.cilium.io diff --git a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/simple/manifest.yaml b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/simple/manifest.yaml index e07c739f65..db1410f2da 100644 --- a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/simple/manifest.yaml +++ b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/simple/manifest.yaml @@ -18,16 +18,8 @@ spec: selector: k8s-addon: core.addons.k8s.io version: 1.4.0 - - id: pre-k8s-1.6 - kubernetesVersion: <1.6.0 - manifest: kube-dns.addons.k8s.io/pre-k8s-1.6.yaml - manifestHash: 90f1e4bedea6da183eb4c6788879f7297119ff3e - name: kube-dns.addons.k8s.io - selector: - k8s-addon: kube-dns.addons.k8s.io - version: 1.14.13-kops.1 - id: k8s-1.6 - kubernetesVersion: '>=1.6.0 <1.12.0' + kubernetesVersion: <1.12.0 manifest: kube-dns.addons.k8s.io/k8s-1.6.yaml manifestHash: c74ca65f461c764fc9682c6d9ec171b241bec335 name: kube-dns.addons.k8s.io @@ -43,7 +35,6 @@ spec: k8s-addon: kube-dns.addons.k8s.io version: 1.14.13-kops.1 - id: k8s-1.8 - kubernetesVersion: '>=1.8.0' manifest: rbac.addons.k8s.io/k8s-1.8.yaml manifestHash: 5d53ce7b920cd1e8d65d2306d80a041420711914 name: rbac.addons.k8s.io @@ -51,7 +42,6 @@ spec: k8s-addon: rbac.addons.k8s.io version: 1.8.0 - id: k8s-1.9 - kubernetesVersion: '>=1.9.0' manifest: kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml manifestHash: e1508d77cb4e527d7a2939babe36dc350dd83745 name: kubelet-api.rbac.addons.k8s.io @@ -64,16 +54,8 @@ spec: selector: k8s-addon: limit-range.addons.k8s.io version: 1.5.0 - - id: pre-k8s-1.6 - kubernetesVersion: <1.6.0 - manifest: dns-controller.addons.k8s.io/pre-k8s-1.6.yaml - manifestHash: e19c5456a31381c08dd166ce1faf85ce7acc15e3 - name: dns-controller.addons.k8s.io - selector: - k8s-addon: dns-controller.addons.k8s.io - version: 1.17.0-alpha.1 - id: k8s-1.6 - kubernetesVersion: '>=1.6.0 <1.12.0' + kubernetesVersion: <1.12.0 manifest: dns-controller.addons.k8s.io/k8s-1.6.yaml manifestHash: 2d6fa6910077fecdf1c98da4303631588cfc9c01 name: dns-controller.addons.k8s.io @@ -97,18 +79,10 @@ spec: k8s-addon: storage-aws.addons.k8s.io version: 1.15.0 - id: v1.7.0 - kubernetesVersion: '>=1.7.0 <1.15.0' + kubernetesVersion: <1.15.0 manifest: storage-aws.addons.k8s.io/v1.7.0.yaml manifestHash: 62705a596142e6cc283280e8aa973e51536994c5 name: storage-aws.addons.k8s.io selector: k8s-addon: storage-aws.addons.k8s.io version: 1.15.0 - - id: v1.6.0 - kubernetesVersion: <1.7.0 - manifest: storage-aws.addons.k8s.io/v1.6.0.yaml - manifestHash: 7de4b2eb0521d669172038759c521418711d8266 - name: storage-aws.addons.k8s.io - selector: - k8s-addon: storage-aws.addons.k8s.io - version: 1.15.0 diff --git a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/weave/manifest.yaml b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/weave/manifest.yaml index a53832104d..538eea9c43 100644 --- a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/weave/manifest.yaml +++ b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/weave/manifest.yaml @@ 
-18,16 +18,8 @@ spec: selector: k8s-addon: core.addons.k8s.io version: 1.4.0 - - id: pre-k8s-1.6 - kubernetesVersion: <1.6.0 - manifest: kube-dns.addons.k8s.io/pre-k8s-1.6.yaml - manifestHash: 90f1e4bedea6da183eb4c6788879f7297119ff3e - name: kube-dns.addons.k8s.io - selector: - k8s-addon: kube-dns.addons.k8s.io - version: 1.14.13-kops.1 - id: k8s-1.6 - kubernetesVersion: '>=1.6.0 <1.12.0' + kubernetesVersion: <1.12.0 manifest: kube-dns.addons.k8s.io/k8s-1.6.yaml manifestHash: c74ca65f461c764fc9682c6d9ec171b241bec335 name: kube-dns.addons.k8s.io @@ -43,7 +35,6 @@ spec: k8s-addon: kube-dns.addons.k8s.io version: 1.14.13-kops.1 - id: k8s-1.8 - kubernetesVersion: '>=1.8.0' manifest: rbac.addons.k8s.io/k8s-1.8.yaml manifestHash: 5d53ce7b920cd1e8d65d2306d80a041420711914 name: rbac.addons.k8s.io @@ -51,7 +42,6 @@ spec: k8s-addon: rbac.addons.k8s.io version: 1.8.0 - id: k8s-1.9 - kubernetesVersion: '>=1.9.0' manifest: kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml manifestHash: e1508d77cb4e527d7a2939babe36dc350dd83745 name: kubelet-api.rbac.addons.k8s.io @@ -64,16 +54,8 @@ spec: selector: k8s-addon: limit-range.addons.k8s.io version: 1.5.0 - - id: pre-k8s-1.6 - kubernetesVersion: <1.6.0 - manifest: dns-controller.addons.k8s.io/pre-k8s-1.6.yaml - manifestHash: e19c5456a31381c08dd166ce1faf85ce7acc15e3 - name: dns-controller.addons.k8s.io - selector: - k8s-addon: dns-controller.addons.k8s.io - version: 1.17.0-alpha.1 - id: k8s-1.6 - kubernetesVersion: '>=1.6.0 <1.12.0' + kubernetesVersion: <1.12.0 manifest: dns-controller.addons.k8s.io/k8s-1.6.yaml manifestHash: 2d6fa6910077fecdf1c98da4303631588cfc9c01 name: dns-controller.addons.k8s.io @@ -97,47 +79,15 @@ spec: k8s-addon: storage-aws.addons.k8s.io version: 1.15.0 - id: v1.7.0 - kubernetesVersion: '>=1.7.0 <1.15.0' + kubernetesVersion: <1.15.0 manifest: storage-aws.addons.k8s.io/v1.7.0.yaml manifestHash: 62705a596142e6cc283280e8aa973e51536994c5 name: storage-aws.addons.k8s.io selector: k8s-addon: storage-aws.addons.k8s.io version: 1.15.0 - - id: v1.6.0 - kubernetesVersion: <1.7.0 - manifest: storage-aws.addons.k8s.io/v1.6.0.yaml - manifestHash: 7de4b2eb0521d669172038759c521418711d8266 - name: storage-aws.addons.k8s.io - selector: - k8s-addon: storage-aws.addons.k8s.io - version: 1.15.0 - - id: pre-k8s-1.6 - kubernetesVersion: <1.6.0 - manifest: networking.weave/pre-k8s-1.6.yaml - manifestHash: 8e7a361fff381e0ed84e0011506ff3bfdc7bc202 - name: networking.weave - selector: - role.kubernetes.io/networking: "1" - version: 2.3.0-kops.3 - - id: k8s-1.6 - kubernetesVersion: '>=1.6.0 <1.7.0' - manifest: networking.weave/k8s-1.6.yaml - manifestHash: 6dcb06c0178143b534dac093fcad00c331b12319 - name: networking.weave - selector: - role.kubernetes.io/networking: "1" - version: 2.3.0-kops.3 - - id: k8s-1.7 - kubernetesVersion: '>=1.7.0 <1.8.0' - manifest: networking.weave/k8s-1.7.yaml - manifestHash: 29f0b9379ffda0cc4288c9769371dc5adc75687a - name: networking.weave - selector: - role.kubernetes.io/networking: "1" - version: 2.6.0-kops.2 - id: k8s-1.8 - kubernetesVersion: '>=1.8.0 <1.12.0' + kubernetesVersion: <1.12.0 manifest: networking.weave/k8s-1.8.yaml manifestHash: 75cc6479f36f443600f567e492707efcbb9fbd31 name: networking.weave From 5f72d121320e3388a017478ed29e77e5d63abcb9 Mon Sep 17 00:00:00 2001 From: John Gardiner Myers Date: Sun, 12 Jan 2020 21:27:23 -0800 Subject: [PATCH 16/42] Reduce test flakiness --- pkg/instancegroups/rollingupdate_test.go | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git 
a/pkg/instancegroups/rollingupdate_test.go b/pkg/instancegroups/rollingupdate_test.go index 7084c40f51..6863b6b14d 100644 --- a/pkg/instancegroups/rollingupdate_test.go +++ b/pkg/instancegroups/rollingupdate_test.go @@ -693,7 +693,11 @@ func (c *concurrentTest) Validate() (*validation.ValidationCluster, error) { c.t.Errorf("unexpected call to Validate with %d termination requests left", terminationRequestsLeft) case 4: assert.Equal(c.t, 6, c.previousValidation, "previous validation") - c.terminationChan <- true + select { + case c.terminationChan <- true: + default: + c.t.Error("terminationChan is full") + } c.mutex.Unlock() select { case <-c.validationChan: @@ -705,7 +709,11 @@ func (c *concurrentTest) Validate() (*validation.ValidationCluster, error) { assert.Equal(c.t, 4, c.previousValidation, "previous validation") case 1: assert.Equal(c.t, 2, c.previousValidation, "previous validation") - c.terminationChan <- true + select { + case c.terminationChan <- true: + default: + c.t.Error("terminationChan is full") + } c.mutex.Unlock() select { case <-c.validationChan: @@ -745,8 +753,12 @@ func (c *concurrentTest) TerminateInstanceInAutoScalingGroup(input *autoscaling. } func (c *concurrentTest) delayThenWakeValidation() { - time.Sleep(2 * time.Millisecond) // NodeInterval plus some - c.validationChan <- true + time.Sleep(20 * time.Millisecond) // NodeInterval plus some + select { + case c.validationChan <- true: + default: + c.t.Error("validationChan is full") + } } func (c *concurrentTest) AssertComplete() { From b4bfdcbfacea1ed2e415fff37bd21ca2305a36c2 Mon Sep 17 00:00:00 2001 From: GuyTempleton Date: Tue, 14 Jan 2020 10:43:22 +0000 Subject: [PATCH 17/42] CoreDNS default image bump to 1.6.6 Also updates the default corefile config to make use of the new lameduck functionality for healthcheck --- .../addons/coredns.addons.k8s.io/k8s-1.12.yaml.template | 9 ++++----- .../addons/coredns.addons.k8s.io/k8s-1.6.yaml.template | 6 ++++-- upup/pkg/fi/cloudup/bootstrapchannelbuilder.go | 4 ++-- 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/upup/models/cloudup/resources/addons/coredns.addons.k8s.io/k8s-1.12.yaml.template b/upup/models/cloudup/resources/addons/coredns.addons.k8s.io/k8s-1.12.yaml.template index 2edf93ae1d..8c8c20080a 100644 --- a/upup/models/cloudup/resources/addons/coredns.addons.k8s.io/k8s-1.12.yaml.template +++ b/upup/models/cloudup/resources/addons/coredns.addons.k8s.io/k8s-1.12.yaml.template @@ -64,7 +64,9 @@ data: {{- else }} .:53 { errors - health + health { + lameduck 5s + } kubernetes {{ KubeDNS.Domain }}. in-addr.arpa ip6.arpa { pods insecure upstream @@ -165,7 +167,7 @@ spec: beta.kubernetes.io/os: linux containers: - name: coredns - image: {{ if KubeDNS.CoreDNSImage }}{{ KubeDNS.CoreDNSImage }}{{ else }}k8s.gcr.io/coredns:1.3.1{{ end }} + image: {{ if KubeDNS.CoreDNSImage }}{{ KubeDNS.CoreDNSImage }}{{ else }}k8s.gcr.io/coredns:1.6.6{{ end }} imagePullPolicy: IfNotPresent resources: limits: @@ -175,9 +177,6 @@ spec: memory: {{ KubeDNS.MemoryRequest }} args: [ "-conf", "/etc/coredns/Corefile" ] volumeMounts: - # Workaround for 1.3.1 bug, can be removed after bumping to 1.4+. 
See: https://github.com/coredns/coredns/pull/2529 - - name: tmp - mountPath: /tmp - name: config-volume mountPath: /etc/coredns readOnly: true diff --git a/upup/models/cloudup/resources/addons/coredns.addons.k8s.io/k8s-1.6.yaml.template b/upup/models/cloudup/resources/addons/coredns.addons.k8s.io/k8s-1.6.yaml.template index bfdba31fc7..c10d748d5b 100644 --- a/upup/models/cloudup/resources/addons/coredns.addons.k8s.io/k8s-1.6.yaml.template +++ b/upup/models/cloudup/resources/addons/coredns.addons.k8s.io/k8s-1.6.yaml.template @@ -64,7 +64,9 @@ data: {{- else }} .:53 { errors - health + health { + lameduck 5s + } kubernetes {{ KubeDNS.Domain }}. in-addr.arpa ip6.arpa { pods insecure upstream @@ -111,7 +113,7 @@ spec: beta.kubernetes.io/os: linux containers: - name: coredns - image: {{ if KubeDNS.CoreDNSImage }}{{ KubeDNS.CoreDNSImage }}{{ else }}k8s.gcr.io/coredns:1.3.1{{ end }} + image: {{ if KubeDNS.CoreDNSImage }}{{ KubeDNS.CoreDNSImage }}{{ else }}k8s.gcr.io/coredns:1.6.6{{ end }} imagePullPolicy: IfNotPresent resources: limits: diff --git a/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go b/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go index e003a076ac..646b41b2ad 100644 --- a/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go +++ b/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go @@ -285,7 +285,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons { if kubeDNS.Provider == "CoreDNS" { { key := "coredns.addons.k8s.io" - version := "1.3.1-kops.5" + version := "1.6.6-kops.1" { location := key + "/k8s-1.6.yaml" @@ -304,7 +304,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons { { key := "coredns.addons.k8s.io" - version := "1.3.1-kops.5" + version := "1.6.6-kops.1" { location := key + "/k8s-1.12.yaml" From 4236ce115c0fa71c6e720b8209afb90504578cc5 Mon Sep 17 00:00:00 2001 From: John Gardiner Myers Date: Wed, 15 Jan 2020 14:06:51 -0800 Subject: [PATCH 18/42] Don't load nonexistent calico-client cert when CNI is Cilium --- upup/pkg/fi/nodeup/command.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/upup/pkg/fi/nodeup/command.go b/upup/pkg/fi/nodeup/command.go index 25559e3351..c9449c644f 100644 --- a/upup/pkg/fi/nodeup/command.go +++ b/upup/pkg/fi/nodeup/command.go @@ -254,7 +254,7 @@ func (c *NodeUpCommand) Run(out io.Writer) error { } else { loader.Builders = append(loader.Builders, &model.KubeRouterBuilder{NodeupModelContext: modelContext}) } - if c.cluster.Spec.Networking.Calico != nil || c.cluster.Spec.Networking.Cilium != nil { + if c.cluster.Spec.Networking.Calico != nil { loader.Builders = append(loader.Builders, &model.EtcdTLSBuilder{NodeupModelContext: modelContext}) } From 549f54de48a05661a5061b91355fcd537da64ef5 Mon Sep 17 00:00:00 2001 From: Christian van der Leeden Date: Sun, 15 Dec 2019 12:38:23 +0100 Subject: [PATCH 19/42] Enabling JSON output for Terraform instead of writing the HCL syntax tf file. JSON syntax is officially supported in 0.12 and a terraform version requirement will be set. For previous installations you need to delete the .tf file by hand. JSON generation will fail if kubernetes.tf is present. Added Integration Test using minimal test setup Added documentation. 
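A minimal way to try this out by hand (a sketch assuming an existing cluster and state store; only the feature flag is new here, the update command itself is unchanged):

    export KOPS_FEATURE_FLAGS=TerraformJSON
    kops update cluster ${CLUSTER_NAME} --target=terraform --out=.
    # now writes kubernetes.tf.json instead of kubernetes.tf
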
For terraform 0.12 support the resource names need to be changed still --- cmd/kops/integration_test.go | 99 ++-- docs/advanced/experimental.md | 1 + docs/terraform.md | 13 + pkg/featureflag/featureflag.go | 2 + .../update_cluster/minimal-json/id_rsa.pub | 1 + .../minimal-json/in-v1alpha0.yaml | 76 +++ .../minimal-json/kubernetes.tf.json | 507 ++++++++++++++++++ upup/pkg/fi/cloudup/terraform/target.go | 17 +- 8 files changed, 671 insertions(+), 45 deletions(-) create mode 100755 tests/integration/update_cluster/minimal-json/id_rsa.pub create mode 100644 tests/integration/update_cluster/minimal-json/in-v1alpha0.yaml create mode 100644 tests/integration/update_cluster/minimal-json/kubernetes.tf.json diff --git a/cmd/kops/integration_test.go b/cmd/kops/integration_test.go index 70a8d69d5f..a8b8215ab2 100644 --- a/cmd/kops/integration_test.go +++ b/cmd/kops/integration_test.go @@ -52,20 +52,20 @@ const updateClusterTestBase = "../../tests/integration/update_cluster/" // TestMinimal runs the test on a minimum configuration, similar to kops create cluster minimal.example.com --zones us-west-1a func TestMinimal(t *testing.T) { - runTestAWS(t, "minimal.example.com", "minimal", "v1alpha0", false, 1, true, false, nil, true) - runTestAWS(t, "minimal.example.com", "minimal", "v1alpha1", false, 1, true, false, nil, true) - runTestAWS(t, "minimal.example.com", "minimal", "v1alpha2", false, 1, true, false, nil, true) + runTestAWS(t, "minimal.example.com", "minimal", "v1alpha0", false, 1, true, false, nil, true, false) + runTestAWS(t, "minimal.example.com", "minimal", "v1alpha1", false, 1, true, false, nil, true, false) + runTestAWS(t, "minimal.example.com", "minimal", "v1alpha2", false, 1, true, false, nil, true, false) } // TestRestrictAccess runs the test on a simple SG configuration, similar to kops create cluster minimal.example.com --ssh-access=$(IPS) --admin-access=$(IPS) --master-count=3 func TestRestrictAccess(t *testing.T) { - runTestAWS(t, "restrictaccess.example.com", "restrict_access", "v1alpha2", false, 1, true, false, nil, true) + runTestAWS(t, "restrictaccess.example.com", "restrict_access", "v1alpha2", false, 1, true, false, nil, true, false) } // TestHA runs the test on a simple HA configuration, similar to kops create cluster minimal.example.com --zones us-west-1a,us-west-1b,us-west-1c --master-count=3 func TestHA(t *testing.T) { - runTestAWS(t, "ha.example.com", "ha", "v1alpha1", false, 3, true, false, nil, true) - runTestAWS(t, "ha.example.com", "ha", "v1alpha2", false, 3, true, false, nil, true) + runTestAWS(t, "ha.example.com", "ha", "v1alpha1", false, 3, true, false, nil, true, false) + runTestAWS(t, "ha.example.com", "ha", "v1alpha2", false, 3, true, false, nil, true, false) } // TestHighAvailabilityGCE runs the test on a simple HA GCE configuration, similar to kops create cluster ha-gce.example.com @@ -76,14 +76,14 @@ func TestHighAvailabilityGCE(t *testing.T) { // TestComplex runs the test on a more complex configuration, intended to hit more of the edge cases func TestComplex(t *testing.T) { - runTestAWS(t, "complex.example.com", "complex", "v1alpha2", false, 1, true, false, nil, true) - runTestAWS(t, "complex.example.com", "complex", "legacy-v1alpha2", false, 1, true, false, nil, true) + runTestAWS(t, "complex.example.com", "complex", "v1alpha2", false, 1, true, false, nil, true, false) + runTestAWS(t, "complex.example.com", "complex", "legacy-v1alpha2", false, 1, true, false, nil, true, false) runTestCloudformation(t, "complex.example.com", "complex", "v1alpha2", false, nil, 
true) } func TestNoSSHKey(t *testing.T) { - runTestAWS(t, "nosshkey.example.com", "nosshkey", "v1alpha2", false, 1, true, false, nil, false) + runTestAWS(t, "nosshkey.example.com", "nosshkey", "v1alpha2", false, 1, true, false, nil, false, false) } func TestNoSSHKeyCloudformation(t *testing.T) { @@ -92,7 +92,7 @@ func TestNoSSHKeyCloudformation(t *testing.T) { // TestCrossZone tests that the cross zone setting on the API ELB is set properly func TestCrossZone(t *testing.T) { - runTestAWS(t, "crosszone.example.com", "api_elb_cross_zone", "v1alpha2", false, 1, true, false, nil, true) + runTestAWS(t, "crosszone.example.com", "api_elb_cross_zone", "v1alpha2", false, 1, true, false, nil, true, false) } // TestMinimalCloudformation runs the test on a minimum configuration, similar to kops create cluster minimal.example.com --zones us-west-1a @@ -108,7 +108,7 @@ func TestExistingIAMCloudformation(t *testing.T) { // TestExistingSG runs the test with existing Security Group, similar to kops create cluster minimal.example.com --zones us-west-1a func TestExistingSG(t *testing.T) { - runTestAWS(t, "existingsg.example.com", "existing_sg", "v1alpha2", false, 3, true, false, nil, true) + runTestAWS(t, "existingsg.example.com", "existing_sg", "v1alpha2", false, 3, true, false, nil, true, false) } // TestAdditionalUserData runs the test on passing additional user-data to an instance at bootstrap. @@ -118,83 +118,93 @@ func TestAdditionalUserData(t *testing.T) { // TestBastionAdditionalUserData runs the test on passing additional user-data to a bastion instance group func TestBastionAdditionalUserData(t *testing.T) { - runTestAWS(t, "bastionuserdata.example.com", "bastionadditional_user-data", "v1alpha2", true, 1, true, false, nil, true) + runTestAWS(t, "bastionuserdata.example.com", "bastionadditional_user-data", "v1alpha2", true, 1, true, false, nil, true, false) +} + +// TestMinimal_JSON runs the test on a minimal data set and outputs JSON +func TestMinimal_json(t *testing.T) { + featureflag.ParseFlags("+TerraformJSON") + unsetFeaureFlag := func() { + featureflag.ParseFlags("-TerraformJSON") + } + defer unsetFeaureFlag() + runTestAWS(t, "minimal-json.example.com", "minimal-json", "v1alpha0", false, 1, true, false, nil, true, true) } // TestMinimal_141 runs the test on a configuration from 1.4.1 release func TestMinimal_141(t *testing.T) { - runTestAWS(t, "minimal-141.example.com", "minimal-141", "v1alpha0", false, 1, true, false, nil, true) + runTestAWS(t, "minimal-141.example.com", "minimal-141", "v1alpha0", false, 1, true, false, nil, true, false) } // TestPrivateWeave runs the test on a configuration with private topology, weave networking func TestPrivateWeave(t *testing.T) { - runTestAWS(t, "privateweave.example.com", "privateweave", "v1alpha1", true, 1, true, false, nil, true) - runTestAWS(t, "privateweave.example.com", "privateweave", "v1alpha2", true, 1, true, false, nil, true) + runTestAWS(t, "privateweave.example.com", "privateweave", "v1alpha1", true, 1, true, false, nil, true, false) + runTestAWS(t, "privateweave.example.com", "privateweave", "v1alpha2", true, 1, true, false, nil, true, false) } // TestPrivateFlannel runs the test on a configuration with private topology, flannel networking func TestPrivateFlannel(t *testing.T) { - runTestAWS(t, "privateflannel.example.com", "privateflannel", "v1alpha1", true, 1, true, false, nil, true) - runTestAWS(t, "privateflannel.example.com", "privateflannel", "v1alpha2", true, 1, true, false, nil, true) + runTestAWS(t, "privateflannel.example.com", 
"privateflannel", "v1alpha1", true, 1, true, false, nil, true, false) + runTestAWS(t, "privateflannel.example.com", "privateflannel", "v1alpha2", true, 1, true, false, nil, true, false) } // TestPrivateCalico runs the test on a configuration with private topology, calico networking func TestPrivateCalico(t *testing.T) { - runTestAWS(t, "privatecalico.example.com", "privatecalico", "v1alpha1", true, 1, true, false, nil, true) - runTestAWS(t, "privatecalico.example.com", "privatecalico", "v1alpha2", true, 1, true, false, nil, true) + runTestAWS(t, "privatecalico.example.com", "privatecalico", "v1alpha1", true, 1, true, false, nil, true, false) + runTestAWS(t, "privatecalico.example.com", "privatecalico", "v1alpha2", true, 1, true, false, nil, true, false) runTestCloudformation(t, "privatecalico.example.com", "privatecalico", "v1alpha2", true, nil, true) } // TestPrivateCanal runs the test on a configuration with private topology, canal networking func TestPrivateCanal(t *testing.T) { - runTestAWS(t, "privatecanal.example.com", "privatecanal", "v1alpha1", true, 1, true, false, nil, true) - runTestAWS(t, "privatecanal.example.com", "privatecanal", "v1alpha2", true, 1, true, false, nil, true) + runTestAWS(t, "privatecanal.example.com", "privatecanal", "v1alpha1", true, 1, true, false, nil, true, false) + runTestAWS(t, "privatecanal.example.com", "privatecanal", "v1alpha2", true, 1, true, false, nil, true, false) } // TestPrivateKopeio runs the test on a configuration with private topology, kopeio networking func TestPrivateKopeio(t *testing.T) { - runTestAWS(t, "privatekopeio.example.com", "privatekopeio", "v1alpha2", true, 1, true, false, nil, true) + runTestAWS(t, "privatekopeio.example.com", "privatekopeio", "v1alpha2", true, 1, true, false, nil, true, false) } // TestUnmanaged is a test where all the subnets opt-out of route management func TestUnmanaged(t *testing.T) { - runTestAWS(t, "unmanaged.example.com", "unmanaged", "v1alpha2", true, 1, true, false, nil, true) + runTestAWS(t, "unmanaged.example.com", "unmanaged", "v1alpha2", true, 1, true, false, nil, true, false) } // TestPrivateSharedSubnet runs the test on a configuration with private topology & shared subnets func TestPrivateSharedSubnet(t *testing.T) { - runTestAWS(t, "private-shared-subnet.example.com", "private-shared-subnet", "v1alpha2", true, 1, true, false, nil, true) + runTestAWS(t, "private-shared-subnet.example.com", "private-shared-subnet", "v1alpha2", true, 1, true, false, nil, true, false) } // TestPrivateDns1 runs the test on a configuration with private topology, private dns func TestPrivateDns1(t *testing.T) { - runTestAWS(t, "privatedns1.example.com", "privatedns1", "v1alpha2", true, 1, true, false, nil, true) + runTestAWS(t, "privatedns1.example.com", "privatedns1", "v1alpha2", true, 1, true, false, nil, true, false) } // TestPrivateDns2 runs the test on a configuration with private topology, private dns, extant vpc func TestPrivateDns2(t *testing.T) { - runTestAWS(t, "privatedns2.example.com", "privatedns2", "v1alpha2", true, 1, true, false, nil, true) + runTestAWS(t, "privatedns2.example.com", "privatedns2", "v1alpha2", true, 1, true, false, nil, true, false) } // TestSharedSubnet runs the test on a configuration with a shared subnet (and VPC) func TestSharedSubnet(t *testing.T) { - runTestAWS(t, "sharedsubnet.example.com", "shared_subnet", "v1alpha2", false, 1, true, false, nil, true) + runTestAWS(t, "sharedsubnet.example.com", "shared_subnet", "v1alpha2", false, 1, true, false, nil, true, false) } // 
TestSharedVPC runs the test on a configuration with a shared VPC func TestSharedVPC(t *testing.T) { - runTestAWS(t, "sharedvpc.example.com", "shared_vpc", "v1alpha2", false, 1, true, false, nil, true) + runTestAWS(t, "sharedvpc.example.com", "shared_vpc", "v1alpha2", false, 1, true, false, nil, true, false) } // TestExistingIAM runs the test on a configuration with existing IAM instance profiles func TestExistingIAM(t *testing.T) { lifecycleOverrides := []string{"IAMRole=ExistsAndWarnIfChanges", "IAMRolePolicy=ExistsAndWarnIfChanges", "IAMInstanceProfileRole=ExistsAndWarnIfChanges"} - runTestAWS(t, "existing-iam.example.com", "existing_iam", "v1alpha2", false, 3, false, false, lifecycleOverrides, true) + runTestAWS(t, "existing-iam.example.com", "existing_iam", "v1alpha2", false, 3, false, false, lifecycleOverrides, true, false) } // TestAdditionalCIDR runs the test on a configuration with a shared VPC func TestAdditionalCIDR(t *testing.T) { - runTestAWS(t, "additionalcidr.example.com", "additional_cidr", "v1alpha3", false, 3, true, false, nil, true) + runTestAWS(t, "additionalcidr.example.com", "additional_cidr", "v1alpha3", false, 3, true, false, nil, true, false) runTestCloudformation(t, "additionalcidr.example.com", "additional_cidr", "v1alpha2", false, nil, true) } @@ -204,7 +214,7 @@ func TestPhaseNetwork(t *testing.T) { } func TestExternalLoadBalancer(t *testing.T) { - runTestAWS(t, "externallb.example.com", "externallb", "v1alpha2", false, 1, true, false, nil, true) + runTestAWS(t, "externallb.example.com", "externallb", "v1alpha2", false, 1, true, false, nil, true, false) runTestCloudformation(t, "externallb.example.com", "externallb", "v1alpha2", false, nil, true) } @@ -223,13 +233,13 @@ func TestPhaseCluster(t *testing.T) { // TestMixedInstancesASG tests ASGs using a mixed instance policy func TestMixedInstancesASG(t *testing.T) { - runTestAWS(t, "mixedinstances.example.com", "mixed_instances", "v1alpha2", false, 3, true, true, nil, true) + runTestAWS(t, "mixedinstances.example.com", "mixed_instances", "v1alpha2", false, 3, true, true, nil, true, false) runTestCloudformation(t, "mixedinstances.example.com", "mixed_instances", "v1alpha2", false, nil, true) } // TestMixedInstancesSpotASG tests ASGs using a mixed instance policy and spot instances func TestMixedInstancesSpotASG(t *testing.T) { - runTestAWS(t, "mixedinstances.example.com", "mixed_instances_spot", "v1alpha2", false, 3, true, true, nil, true) + runTestAWS(t, "mixedinstances.example.com", "mixed_instances_spot", "v1alpha2", false, 3, true, true, nil, true, false) runTestCloudformation(t, "mixedinstances.example.com", "mixed_instances_spot", "v1alpha2", false, nil, true) } @@ -238,7 +248,7 @@ func TestContainerdCloudformation(t *testing.T) { runTestCloudformation(t, "containerd.example.com", "containerd-cloudformation", "v1alpha2", false, nil, true) } -func runTest(t *testing.T, h *testutils.IntegrationTestHarness, clusterName string, srcDir string, version string, private bool, zones int, expectedDataFilenames []string, tfFileName string, phase *cloudup.Phase, lifecycleOverrides []string, sshKey bool) { +func runTest(t *testing.T, h *testutils.IntegrationTestHarness, clusterName string, srcDir string, version string, private bool, zones int, expectedDataFilenames []string, tfFileName string, expectedTfFileName string, phase *cloudup.Phase, lifecycleOverrides []string, sshKey bool) { var stdout bytes.Buffer srcDir = updateClusterTestBase + srcDir @@ -250,6 +260,10 @@ func runTest(t *testing.T, h 
*testutils.IntegrationTestHarness, clusterName stri
 	testDataTFPath = tfFileName
 	}
 
+	if expectedTfFileName != "" {
+		actualTFPath = expectedTfFileName
+	}
+
 	factoryOptions := &util.FactoryOptions{}
 	factoryOptions.RegistryPath = "memfs://tests"
 
@@ -312,10 +326,10 @@ func runTest(t *testing.T, h *testutils.IntegrationTestHarness, clusterName stri
 	sort.Strings(fileNames)
 	actualFilenames := strings.Join(fileNames, ",")
 
-	expectedFilenames := "kubernetes.tf"
+	expectedFilenames := actualTFPath
 
 	if len(expectedDataFilenames) > 0 {
-		expectedFilenames = "data,kubernetes.tf"
+		expectedFilenames = "data," + actualTFPath
 	}
 
 	if actualFilenames != expectedFilenames {
@@ -392,10 +406,15 @@ func runTest(t *testing.T, h *testutils.IntegrationTestHarness, clusterName stri
 	}
 }
 
-func runTestAWS(t *testing.T, clusterName string, srcDir string, version string, private bool, zones int, expectPolicies bool, launchTemplate bool, lifecycleOverrides []string, sshKey bool) {
+func runTestAWS(t *testing.T, clusterName string, srcDir string, version string, private bool, zones int, expectPolicies bool, launchTemplate bool, lifecycleOverrides []string, sshKey bool, jsonOutput bool) {
+	tfFileName := ""
 	h := testutils.NewIntegrationTestHarness(t)
 	defer h.Close()
 
+	if jsonOutput {
+		tfFileName = "kubernetes.tf.json"
+	}
+
 	h.MockKopsVersion("1.15.0")
 	h.SetupMockAWS()
 
@@ -431,7 +450,7 @@ func runTestAWS(t *testing.T, clusterName string, srcDir string, version string,
 		}...)
 		}
 	}
-	runTest(t, h, clusterName, srcDir, version, private, zones, expectedFilenames, "", nil, lifecycleOverrides, sshKey)
+	runTest(t, h, clusterName, srcDir, version, private, zones, expectedFilenames, tfFileName, tfFileName, nil, lifecycleOverrides, sshKey)
 }
 
 func runTestPhase(t *testing.T, clusterName string, srcDir string, version string, private bool, zones int, phase cloudup.Phase, sshKey bool) {
@@ -475,7 +494,7 @@ func runTestPhase(t *testing.T, clusterName string, srcDir string, version strin
 		}
 	}
 
-	runTest(t, h, clusterName, srcDir, version, private, zones, expectedFilenames, tfFileName, &phase, nil, sshKey)
+	runTest(t, h, clusterName, srcDir, version, private, zones, expectedFilenames, tfFileName, "", &phase, nil, sshKey)
 }
 
 func runTestGCE(t *testing.T, clusterName string, srcDir string, version string, private bool, zones int, sshKey bool) {
@@ -504,7 +523,7 @@ func runTestGCE(t *testing.T, clusterName string, srcDir string, version string,
 		expectedFilenames = append(expectedFilenames, prefix+"kops-k8s-io-instance-group-name")
 	}
 
-	runTest(t, h, clusterName, srcDir, version, private, zones, expectedFilenames, "", nil, nil, sshKey)
+	runTest(t, h, clusterName, srcDir, version, private, zones, expectedFilenames, "", "", nil, nil, sshKey)
 }
 
 func runTestCloudformation(t *testing.T, clusterName string, srcDir string, version string, private bool, lifecycleOverrides []string, sshKey bool) {
diff --git a/docs/advanced/experimental.md b/docs/advanced/experimental.md
index 0e5c15c081..cdf0899819 100644
--- a/docs/advanced/experimental.md
+++ b/docs/advanced/experimental.md
@@ -21,3 +21,4 @@ The following experimental features are currently available:
 * `+Spotinst` - Enables the use of the Spotinst cloud provider
 * `+SpotinstOcean` - Enables the use of Spotinst Ocean instance groups
 * `+SkipEtcdVersionCheck` - Bypasses the check that etcd-manager is using a supported etcd version
+* `+TerraformJSON` - Produce a kubernetes.tf.json file instead of writing HCL v1 syntax. The output can be consumed by Terraform 0.12.
diff --git a/docs/terraform.md b/docs/terraform.md
index 31c623492d..9a11b056ff 100644
--- a/docs/terraform.md
+++ b/docs/terraform.md
@@ -159,3 +159,16 @@ $ terraform apply
 ```
 
 You should still run `kops delete cluster ${CLUSTER_NAME}`, to remove the kops cluster specification and any dynamically created Kubernetes resources (ELBs or volumes), but under this workaround also to remove the primary ELB volumes from the `proto` phase.
+
+#### Terraform JSON output
+
+As of Terraform 0.12, JSON is officially supported as a configuration language. To produce JSON output instead of HCLv1 output, enable it through a feature flag:
+```
+export KOPS_FEATURE_FLAGS=TerraformJSON
+kops update cluster .....
+```
+
+This is an alternative to using Terraform's own configuration syntax, HCL. Be sure to delete any existing kubernetes.tf file, otherwise Terraform will read both and complain.
+
+Kops requires Terraform 0.12 for JSON configuration. Unofficially (and only partially), JSON was also supported by Terraform 0.11, so you can try removing the `required_version` constraint in `kubernetes.tf.json`.
+
diff --git a/pkg/featureflag/featureflag.go b/pkg/featureflag/featureflag.go
index e2a48e6de1..bd34e10a0e 100644
--- a/pkg/featureflag/featureflag.go
+++ b/pkg/featureflag/featureflag.go
@@ -82,6 +82,8 @@ var (
 	VSphereCloudProvider = New("VSphereCloudProvider", Bool(false))
 	// SkipEtcdVersionCheck will bypass the check that etcd-manager is using a supported etcd version
 	SkipEtcdVersionCheck = New("SkipEtcdVersionCheck", Bool(false))
+	// TerraformJSON enables terraform JSON output instead of HCL output. The JSON output can also be parsed by terraform 0.12
+	TerraformJSON = New("TerraformJSON", Bool(false))
 )
 
 // FeatureFlag defines a feature flag
diff --git a/tests/integration/update_cluster/minimal-json/id_rsa.pub b/tests/integration/update_cluster/minimal-json/id_rsa.pub
new file mode 100755
index 0000000000..81cb012783
--- /dev/null
+++ b/tests/integration/update_cluster/minimal-json/id_rsa.pub
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ==
diff --git a/tests/integration/update_cluster/minimal-json/in-v1alpha0.yaml b/tests/integration/update_cluster/minimal-json/in-v1alpha0.yaml
new file mode 100644
index 0000000000..0ea5e03ad3
--- /dev/null
+++ b/tests/integration/update_cluster/minimal-json/in-v1alpha0.yaml
@@ -0,0 +1,76 @@
+apiVersion: kops.k8s.io/v1alpha1
+kind: Cluster
+metadata:
+  creationTimestamp: "2016-12-10T22:42:27Z"
+  name: minimal-json.example.com
+spec:
+  adminAccess:
+  - 0.0.0.0/0
+  channel: stable
+  cloudProvider: aws
+  configBase: memfs://clusters.example.com/minimal-json.example.com
+  etcdClusters:
+  - etcdMembers:
+    - name: us-test-1a
+      zone: us-test-1a
+    name: main
+  - etcdMembers:
+    - name: us-test-1a
+      zone: us-test-1a
+    name: events
+  kubernetesVersion: v1.14.0
+  masterInternalName: api.internal.minimal-json.example.com
+  masterPublicName: api.minimal-json.example.com
+  networkCIDR: 172.20.0.0/16
+  networking:
+    kubenet: {}
+  nonMasqueradeCIDR: 100.64.0.0/10
+  topology:
+    bastion:
+      idleTimeout: 120
+      machineType: t2.medium
+    masters: public
+    nodes: public
+  zones:
+  - cidr: 172.20.32.0/19
+    name: us-test-1a
+
+---
+
+apiVersion: kops.k8s.io/v1alpha1
+kind: InstanceGroup
+metadata:
+  creationTimestamp: "2016-12-10T22:42:28Z"
+  name: nodes
+  labels:
+    kops.k8s.io/cluster: 
minimal-json.example.com +spec: + associatePublicIp: true + image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21 + machineType: t2.medium + maxSize: 2 + minSize: 2 + role: Node + zones: + - us-test-1a + +--- + +apiVersion: kops.k8s.io/v1alpha1 +kind: InstanceGroup +metadata: + creationTimestamp: "2016-12-10T22:42:28Z" + name: master-us-test-1a + labels: + kops.k8s.io/cluster: minimal-json.example.com +spec: + associatePublicIp: true + image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21 + machineType: m3.medium + maxSize: 1 + minSize: 1 + role: Master + zones: + - us-test-1a + + diff --git a/tests/integration/update_cluster/minimal-json/kubernetes.tf.json b/tests/integration/update_cluster/minimal-json/kubernetes.tf.json new file mode 100644 index 0000000000..9cc84d0566 --- /dev/null +++ b/tests/integration/update_cluster/minimal-json/kubernetes.tf.json @@ -0,0 +1,507 @@ +{ + "locals": { + "cluster_name": "minimal-json.example.com", + "master_autoscaling_group_ids": [ + "${aws_autoscaling_group.master-us-test-1a-masters-minimal-json-example-com.id}" + ], + "master_security_group_ids": [ + "${aws_security_group.masters-minimal-json-example-com.id}" + ], + "masters_role_arn": "${aws_iam_role.masters-minimal-json-example-com.arn}", + "masters_role_name": "${aws_iam_role.masters-minimal-json-example-com.name}", + "node_autoscaling_group_ids": [ + "${aws_autoscaling_group.nodes-minimal-json-example-com.id}" + ], + "node_security_group_ids": [ + "${aws_security_group.nodes-minimal-json-example-com.id}" + ], + "node_subnet_ids": [ + "${aws_subnet.us-test-1a-minimal-json-example-com.id}" + ], + "nodes_role_arn": "${aws_iam_role.nodes-minimal-json-example-com.arn}", + "nodes_role_name": "${aws_iam_role.nodes-minimal-json-example-com.name}", + "region": "us-test-1", + "route_table_public_id": "${aws_route_table.minimal-json-example-com.id}", + "subnet_us-test-1a_id": "${aws_subnet.us-test-1a-minimal-json-example-com.id}", + "vpc_cidr_block": "${aws_vpc.minimal-json-example-com.cidr_block}", + "vpc_id": "${aws_vpc.minimal-json-example-com.id}" + }, + "output": { + "cluster_name": { + "value": "minimal-json.example.com" + }, + "master_autoscaling_group_ids": { + "value": [ + "${aws_autoscaling_group.master-us-test-1a-masters-minimal-json-example-com.id}" + ] + }, + "master_security_group_ids": { + "value": [ + "${aws_security_group.masters-minimal-json-example-com.id}" + ] + }, + "masters_role_arn": { + "value": "${aws_iam_role.masters-minimal-json-example-com.arn}" + }, + "masters_role_name": { + "value": "${aws_iam_role.masters-minimal-json-example-com.name}" + }, + "node_autoscaling_group_ids": { + "value": [ + "${aws_autoscaling_group.nodes-minimal-json-example-com.id}" + ] + }, + "node_security_group_ids": { + "value": [ + "${aws_security_group.nodes-minimal-json-example-com.id}" + ] + }, + "node_subnet_ids": { + "value": [ + "${aws_subnet.us-test-1a-minimal-json-example-com.id}" + ] + }, + "nodes_role_arn": { + "value": "${aws_iam_role.nodes-minimal-json-example-com.arn}" + }, + "nodes_role_name": { + "value": "${aws_iam_role.nodes-minimal-json-example-com.name}" + }, + "region": { + "value": "us-test-1" + }, + "route_table_public_id": { + "value": "${aws_route_table.minimal-json-example-com.id}" + }, + "subnet_us-test-1a_id": { + "value": "${aws_subnet.us-test-1a-minimal-json-example-com.id}" + }, + "vpc_cidr_block": { + "value": "${aws_vpc.minimal-json-example-com.cidr_block}" + }, + "vpc_id": { + "value": "${aws_vpc.minimal-json-example-com.id}" + } + }, + "provider": { + 
"aws": { + "region": "us-test-1" + } + }, + "resource": { + "aws_autoscaling_group": { + "master-us-test-1a-masters-minimal-json-example-com": { + "name": "master-us-test-1a.masters.minimal-json.example.com", + "launch_configuration": "${aws_launch_configuration.master-us-test-1a-masters-minimal-json-example-com.id}", + "max_size": 1, + "min_size": 1, + "vpc_zone_identifier": [ + "${aws_subnet.us-test-1a-minimal-json-example-com.id}" + ], + "tag": [ + { + "key": "KubernetesCluster", + "value": "minimal-json.example.com", + "propagate_at_launch": true + }, + { + "key": "Name", + "value": "master-us-test-1a.masters.minimal-json.example.com", + "propagate_at_launch": true + }, + { + "key": "k8s.io/role/master", + "value": "1", + "propagate_at_launch": true + }, + { + "key": "kops.k8s.io/instancegroup", + "value": "master-us-test-1a", + "propagate_at_launch": true + } + ], + "metrics_granularity": "1Minute", + "enabled_metrics": [ + "GroupDesiredCapacity", + "GroupInServiceInstances", + "GroupMaxSize", + "GroupMinSize", + "GroupPendingInstances", + "GroupStandbyInstances", + "GroupTerminatingInstances", + "GroupTotalInstances" + ] + }, + "nodes-minimal-json-example-com": { + "name": "nodes.minimal-json.example.com", + "launch_configuration": "${aws_launch_configuration.nodes-minimal-json-example-com.id}", + "max_size": 2, + "min_size": 2, + "vpc_zone_identifier": [ + "${aws_subnet.us-test-1a-minimal-json-example-com.id}" + ], + "tag": [ + { + "key": "KubernetesCluster", + "value": "minimal-json.example.com", + "propagate_at_launch": true + }, + { + "key": "Name", + "value": "nodes.minimal-json.example.com", + "propagate_at_launch": true + }, + { + "key": "k8s.io/role/node", + "value": "1", + "propagate_at_launch": true + }, + { + "key": "kops.k8s.io/instancegroup", + "value": "nodes", + "propagate_at_launch": true + } + ], + "metrics_granularity": "1Minute", + "enabled_metrics": [ + "GroupDesiredCapacity", + "GroupInServiceInstances", + "GroupMaxSize", + "GroupMinSize", + "GroupPendingInstances", + "GroupStandbyInstances", + "GroupTerminatingInstances", + "GroupTotalInstances" + ] + } + }, + "aws_ebs_volume": { + "us-test-1a-etcd-events-minimal-json-example-com": { + "availability_zone": "us-test-1a", + "size": 20, + "type": "gp2", + "encrypted": false, + "tags": { + "KubernetesCluster": "minimal-json.example.com", + "Name": "us-test-1a.etcd-events.minimal-json.example.com", + "k8s.io/etcd/events": "us-test-1a/us-test-1a", + "k8s.io/role/master": "1", + "kubernetes.io/cluster/minimal-json.example.com": "owned" + } + }, + "us-test-1a-etcd-main-minimal-json-example-com": { + "availability_zone": "us-test-1a", + "size": 20, + "type": "gp2", + "encrypted": false, + "tags": { + "KubernetesCluster": "minimal-json.example.com", + "Name": "us-test-1a.etcd-main.minimal-json.example.com", + "k8s.io/etcd/main": "us-test-1a/us-test-1a", + "k8s.io/role/master": "1", + "kubernetes.io/cluster/minimal-json.example.com": "owned" + } + } + }, + "aws_iam_instance_profile": { + "masters-minimal-json-example-com": { + "name": "masters.minimal-json.example.com", + "role": "${aws_iam_role.masters-minimal-json-example-com.name}" + }, + "nodes-minimal-json-example-com": { + "name": "nodes.minimal-json.example.com", + "role": "${aws_iam_role.nodes-minimal-json-example-com.name}" + } + }, + "aws_iam_role": { + "masters-minimal-json-example-com": { + "name": "masters.minimal-json.example.com", + "assume_role_policy": "${file(\"${path.module}/data/aws_iam_role_masters.minimal-json.example.com_policy\")}" + }, + 
"nodes-minimal-json-example-com": { + "name": "nodes.minimal-json.example.com", + "assume_role_policy": "${file(\"${path.module}/data/aws_iam_role_nodes.minimal-json.example.com_policy\")}" + } + }, + "aws_iam_role_policy": { + "masters-minimal-json-example-com": { + "name": "masters.minimal-json.example.com", + "role": "${aws_iam_role.masters-minimal-json-example-com.name}", + "policy": "${file(\"${path.module}/data/aws_iam_role_policy_masters.minimal-json.example.com_policy\")}" + }, + "nodes-minimal-json-example-com": { + "name": "nodes.minimal-json.example.com", + "role": "${aws_iam_role.nodes-minimal-json-example-com.name}", + "policy": "${file(\"${path.module}/data/aws_iam_role_policy_nodes.minimal-json.example.com_policy\")}" + } + }, + "aws_internet_gateway": { + "minimal-json-example-com": { + "vpc_id": "${aws_vpc.minimal-json-example-com.id}", + "tags": { + "KubernetesCluster": "minimal-json.example.com", + "Name": "minimal-json.example.com", + "kubernetes.io/cluster/minimal-json.example.com": "owned" + } + } + }, + "aws_key_pair": { + "kubernetes-minimal-json-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157": { + "key_name": "kubernetes.minimal-json.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", + "public_key": "${file(\"${path.module}/data/aws_key_pair_kubernetes.minimal-json.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key\")}" + } + }, + "aws_launch_configuration": { + "master-us-test-1a-masters-minimal-json-example-com": { + "name_prefix": "master-us-test-1a.masters.minimal-json.example.com-", + "image_id": "ami-12345678", + "instance_type": "m3.medium", + "key_name": "${aws_key_pair.kubernetes-minimal-json-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id}", + "iam_instance_profile": "${aws_iam_instance_profile.masters-minimal-json-example-com.id}", + "security_groups": [ + "${aws_security_group.masters-minimal-json-example-com.id}" + ], + "associate_public_ip_address": true, + "user_data": "${file(\"${path.module}/data/aws_launch_configuration_master-us-test-1a.masters.minimal-json.example.com_user_data\")}", + "root_block_device": { + "volume_type": "gp2", + "volume_size": 64, + "delete_on_termination": true + }, + "ephemeral_block_device": [ + { + "device_name": "/dev/sdc", + "virtual_name": "ephemeral0" + } + ], + "lifecycle": { + "create_before_destroy": true + }, + "enable_monitoring": false + }, + "nodes-minimal-json-example-com": { + "name_prefix": "nodes.minimal-json.example.com-", + "image_id": "ami-12345678", + "instance_type": "t2.medium", + "key_name": "${aws_key_pair.kubernetes-minimal-json-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id}", + "iam_instance_profile": "${aws_iam_instance_profile.nodes-minimal-json-example-com.id}", + "security_groups": [ + "${aws_security_group.nodes-minimal-json-example-com.id}" + ], + "associate_public_ip_address": true, + "user_data": "${file(\"${path.module}/data/aws_launch_configuration_nodes.minimal-json.example.com_user_data\")}", + "root_block_device": { + "volume_type": "gp2", + "volume_size": 128, + "delete_on_termination": true + }, + "lifecycle": { + "create_before_destroy": true + }, + "enable_monitoring": false + } + }, + "aws_route": { + "route-0-0-0-0--0": { + "route_table_id": "${aws_route_table.minimal-json-example-com.id}", + "destination_cidr_block": "0.0.0.0/0", + "gateway_id": "${aws_internet_gateway.minimal-json-example-com.id}" + } + }, + "aws_route_table": { + "minimal-json-example-com": { + "vpc_id": "${aws_vpc.minimal-json-example-com.id}", + "tags": { + "KubernetesCluster": 
"minimal-json.example.com", + "Name": "minimal-json.example.com", + "kubernetes.io/cluster/minimal-json.example.com": "owned", + "kubernetes.io/kops/role": "public" + } + } + }, + "aws_route_table_association": { + "us-test-1a-minimal-json-example-com": { + "subnet_id": "${aws_subnet.us-test-1a-minimal-json-example-com.id}", + "route_table_id": "${aws_route_table.minimal-json-example-com.id}" + } + }, + "aws_security_group": { + "masters-minimal-json-example-com": { + "name": "masters.minimal-json.example.com", + "vpc_id": "${aws_vpc.minimal-json-example-com.id}", + "description": "Security group for masters", + "tags": { + "KubernetesCluster": "minimal-json.example.com", + "Name": "masters.minimal-json.example.com", + "kubernetes.io/cluster/minimal-json.example.com": "owned" + } + }, + "nodes-minimal-json-example-com": { + "name": "nodes.minimal-json.example.com", + "vpc_id": "${aws_vpc.minimal-json-example-com.id}", + "description": "Security group for nodes", + "tags": { + "KubernetesCluster": "minimal-json.example.com", + "Name": "nodes.minimal-json.example.com", + "kubernetes.io/cluster/minimal-json.example.com": "owned" + } + } + }, + "aws_security_group_rule": { + "all-master-to-master": { + "type": "ingress", + "security_group_id": "${aws_security_group.masters-minimal-json-example-com.id}", + "source_security_group_id": "${aws_security_group.masters-minimal-json-example-com.id}", + "from_port": 0, + "to_port": 0, + "protocol": "-1" + }, + "all-master-to-node": { + "type": "ingress", + "security_group_id": "${aws_security_group.nodes-minimal-json-example-com.id}", + "source_security_group_id": "${aws_security_group.masters-minimal-json-example-com.id}", + "from_port": 0, + "to_port": 0, + "protocol": "-1" + }, + "all-node-to-node": { + "type": "ingress", + "security_group_id": "${aws_security_group.nodes-minimal-json-example-com.id}", + "source_security_group_id": "${aws_security_group.nodes-minimal-json-example-com.id}", + "from_port": 0, + "to_port": 0, + "protocol": "-1" + }, + "https-external-to-master-0-0-0-0--0": { + "type": "ingress", + "security_group_id": "${aws_security_group.masters-minimal-json-example-com.id}", + "from_port": 443, + "to_port": 443, + "protocol": "tcp", + "cidr_blocks": [ + "0.0.0.0/0" + ] + }, + "master-egress": { + "type": "egress", + "security_group_id": "${aws_security_group.masters-minimal-json-example-com.id}", + "from_port": 0, + "to_port": 0, + "protocol": "-1", + "cidr_blocks": [ + "0.0.0.0/0" + ] + }, + "node-egress": { + "type": "egress", + "security_group_id": "${aws_security_group.nodes-minimal-json-example-com.id}", + "from_port": 0, + "to_port": 0, + "protocol": "-1", + "cidr_blocks": [ + "0.0.0.0/0" + ] + }, + "node-to-master-tcp-1-2379": { + "type": "ingress", + "security_group_id": "${aws_security_group.masters-minimal-json-example-com.id}", + "source_security_group_id": "${aws_security_group.nodes-minimal-json-example-com.id}", + "from_port": 1, + "to_port": 2379, + "protocol": "tcp" + }, + "node-to-master-tcp-2382-4000": { + "type": "ingress", + "security_group_id": "${aws_security_group.masters-minimal-json-example-com.id}", + "source_security_group_id": "${aws_security_group.nodes-minimal-json-example-com.id}", + "from_port": 2382, + "to_port": 4000, + "protocol": "tcp" + }, + "node-to-master-tcp-4003-65535": { + "type": "ingress", + "security_group_id": "${aws_security_group.masters-minimal-json-example-com.id}", + "source_security_group_id": "${aws_security_group.nodes-minimal-json-example-com.id}", + "from_port": 4003, + 
"to_port": 65535, + "protocol": "tcp" + }, + "node-to-master-udp-1-65535": { + "type": "ingress", + "security_group_id": "${aws_security_group.masters-minimal-json-example-com.id}", + "source_security_group_id": "${aws_security_group.nodes-minimal-json-example-com.id}", + "from_port": 1, + "to_port": 65535, + "protocol": "udp" + }, + "ssh-external-to-master-0-0-0-0--0": { + "type": "ingress", + "security_group_id": "${aws_security_group.masters-minimal-json-example-com.id}", + "from_port": 22, + "to_port": 22, + "protocol": "tcp", + "cidr_blocks": [ + "0.0.0.0/0" + ] + }, + "ssh-external-to-node-0-0-0-0--0": { + "type": "ingress", + "security_group_id": "${aws_security_group.nodes-minimal-json-example-com.id}", + "from_port": 22, + "to_port": 22, + "protocol": "tcp", + "cidr_blocks": [ + "0.0.0.0/0" + ] + } + }, + "aws_subnet": { + "us-test-1a-minimal-json-example-com": { + "vpc_id": "${aws_vpc.minimal-json-example-com.id}", + "cidr_block": "172.20.32.0/19", + "availability_zone": "us-test-1a", + "tags": { + "KubernetesCluster": "minimal-json.example.com", + "Name": "us-test-1a.minimal-json.example.com", + "SubnetType": "Public", + "kubernetes.io/cluster/minimal-json.example.com": "owned", + "kubernetes.io/role/elb": "1" + } + } + }, + "aws_vpc": { + "minimal-json-example-com": { + "cidr_block": "172.20.0.0/16", + "enable_dns_hostnames": true, + "enable_dns_support": true, + "tags": { + "KubernetesCluster": "minimal-json.example.com", + "Name": "minimal-json.example.com", + "kubernetes.io/cluster/minimal-json.example.com": "owned" + } + } + }, + "aws_vpc_dhcp_options": { + "minimal-json-example-com": { + "domain_name": "us-test-1.compute.internal", + "domain_name_servers": [ + "AmazonProvidedDNS" + ], + "tags": { + "KubernetesCluster": "minimal-json.example.com", + "Name": "minimal-json.example.com", + "kubernetes.io/cluster/minimal-json.example.com": "owned" + } + } + }, + "aws_vpc_dhcp_options_association": { + "minimal-json-example-com": { + "vpc_id": "${aws_vpc.minimal-json-example-com.id}", + "dhcp_options_id": "${aws_vpc_dhcp_options.minimal-json-example-com.id}" + } + } + }, + "terraform": { + "required_version": "\u003e= 0.12.0" + } +} diff --git a/upup/pkg/fi/cloudup/terraform/target.go b/upup/pkg/fi/cloudup/terraform/target.go index 91dc64dd7d..2cfc829a9f 100644 --- a/upup/pkg/fi/cloudup/terraform/target.go +++ b/upup/pkg/fi/cloudup/terraform/target.go @@ -28,6 +28,7 @@ import ( hcl_parser "github.com/hashicorp/hcl/json/parser" "k8s.io/klog" "k8s.io/kops/pkg/apis/kops" + "k8s.io/kops/pkg/featureflag" "k8s.io/kops/upup/pkg/fi" ) @@ -258,7 +259,11 @@ func (t *TerraformTarget) Finish(taskMap map[string]fi.Task) error { // See https://github.com/kubernetes/kops/pull/2424 for why we require 0.9.3 terraformConfiguration := make(map[string]interface{}) - terraformConfiguration["required_version"] = ">= 0.9.3" + if featureflag.TerraformJSON.Enabled() { + terraformConfiguration["required_version"] = ">= 0.12.0" + } else { + terraformConfiguration["required_version"] = ">= 0.9.3" + } data := make(map[string]interface{}) data["terraform"] = terraformConfiguration @@ -278,10 +283,12 @@ func (t *TerraformTarget) Finish(taskMap map[string]fi.Task) error { return fmt.Errorf("error marshaling terraform data to json: %v", err) } - useJson := false - - if useJson { - t.files["kubernetes.tf"] = jsonBytes + if featureflag.TerraformJSON.Enabled() { + t.files["kubernetes.tf.json"] = jsonBytes + p := path.Join(t.outDir, "kubernetes.tf") + if _, err := os.Stat(p); err == nil { + return fmt.Errorf("Error 
generating kubernetes.tf.json: If you are upgrading from terraform 0.11 or earlier please read the release notes. Also, the kubernetes.tf file is already present. Please move the file away since it will be replaced by the kubernetes.tf.json file. ") + } } else { f, err := hcl_parser.Parse(jsonBytes) if err != nil { From aa6c429cc8744853c5cfc4d95d3986b0f33072b8 Mon Sep 17 00:00:00 2001 From: LinshanYu <1149439148@qq.com> Date: Sun, 19 Jan 2020 09:51:18 +0800 Subject: [PATCH 20/42] Edit author name --- docs/releases/1.9-NOTES.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/releases/1.9-NOTES.md b/docs/releases/1.9-NOTES.md index bed0105301..fe292d7d02 100644 --- a/docs/releases/1.9-NOTES.md +++ b/docs/releases/1.9-NOTES.md @@ -483,11 +483,11 @@ None known at this time * Add go 1.10 testing to travis CI [@tvi](https://github.com/tvi) [#4926](https://github.com/kubernetes/kops/pull/4926) * digitalocean: use pagination for all list requests [@andrewsykim](https://github.com/andrewsykim) [#4923](https://github.com/kubernetes/kops/pull/4923) * Fix spelling [@inthecloud247](https://github.com/inthecloud247) [#4939](https://github.com/kubernetes/kops/pull/4939) -* Fix grammar mistake [@mycapatin](https://github.com/mycapatin) [#4936](https://github.com/kubernetes/kops/pull/4936) +* Fix grammar mistake [@mahuihuang](https://github.com/mahuihuang) [#4936](https://github.com/kubernetes/kops/pull/4936) * Update the recommended Ubuntu Image [@ofersadgat](https://github.com/ofersadgat) [#4934](https://github.com/kubernetes/kops/pull/4934) * Typo fix dont'->don't [@AdamDang](https://github.com/AdamDang) [#4929](https://github.com/kubernetes/kops/pull/4929) * Update rules go and use more recent debian snapshot [@mikesplain](https://github.com/mikesplain) [#4948](https://github.com/kubernetes/kops/pull/4948) -* fix typo [@mycapatin](https://github.com/mycapatin) [#4943](https://github.com/kubernetes/kops/pull/4943) +* fix typo [@mahuihuang](https://github.com/mahuihuang) [#4943](https://github.com/kubernetes/kops/pull/4943) * digitalocean: external cloud controller manager avoid circular dependencies [@andrewsykim](https://github.com/andrewsykim) [#4947](https://github.com/kubernetes/kops/pull/4947) * implement subnet task for OpenStack platform [@zengchen1024](https://github.com/zengchen1024) [#4945](https://github.com/kubernetes/kops/pull/4945) * Add warning about google cloud repository versions [@tombull](https://github.com/tombull) [#4944](https://github.com/kubernetes/kops/pull/4944) From 93492226fb0d861fef3cf86c076406b3637c632f Mon Sep 17 00:00:00 2001 From: Peter Rifel Date: Sat, 18 Jan 2020 17:33:36 -0600 Subject: [PATCH 21/42] Prefix git tags with `v` Go modules require the v prefix [0], and k/k also tags with the v prefix [1] We have some inconsistent tags already, for 1.11.0 we have tags of both `1.11.0` and `v1.11.0` which is the most recent tag with the prefix. This is also why 1.11.0 is the default version imported by `go get`: ``` go get -v k8s.io/kops go: downloading k8s.io/kops v1.11.0 ``` and the latest version in `go list`: ``` go list -m -versions k8s.io/kops k8s.io/kops v1.4.0-alpha.1 v1.4.0 v1.4.1 v1.4.2 v1.4.3 v1.4.4 v1.10.0 v1.11.0 ``` I'm proposing we switch to only tagging with the v prefix. I'm only updating the actual git tag and not the entire version string used throughout kops due to its larger impact: * Output by `kops version` * Public URLs for kops assets * Protokube tag I'm hoping this is the least invasive way we can make this change. 
If we think advance notice is required, we could tag with both formats for a number of releases before tagging only with the v prefix.

[0] https://github.com/golang/go/wiki/Modules#modules
[1] https://github.com/kubernetes/kubernetes/releases/tag/v1.17.0
---
 Makefile | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/Makefile b/Makefile
index 3d8a27db3e..555afd3f0c 100644
--- a/Makefile
+++ b/Makefile
@@ -585,10 +585,11 @@ ${CHANNELS}:
 
 .PHONY: release-tag
 release-tag:
 	git tag ${KOPS_RELEASE_VERSION}
+	git tag v${KOPS_RELEASE_VERSION}
 
 .PHONY: release-github
 release-github:
-	shipbot -tag ${KOPS_RELEASE_VERSION} -config .shipbot.yaml
+	shipbot -tag v${KOPS_RELEASE_VERSION} -config .shipbot.yaml
 
 # --------------------------------------------------
 # API / embedding examples

From 9f76785f5896d9533f9f8f7d5a79b5d9533ca0f0 Mon Sep 17 00:00:00 2001
From: Ole Markus With
Date: Tue, 21 Jan 2020 19:39:52 +0100
Subject: [PATCH 22/42] Bump alpha k8s version to stable

---
 channels/stable | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/channels/stable b/channels/stable
index dbeb4c12c4..fca11ebfbb 100644
--- a/channels/stable
+++ b/channels/stable
@@ -49,13 +49,13 @@ spec:
       recommendedVersion: 1.17.0
       requiredVersion: 1.17.0
     - range: ">=1.16.0"
-      recommendedVersion: 1.16.3
+      recommendedVersion: 1.16.4
       requiredVersion: 1.16.0
     - range: ">=1.15.0"
-      recommendedVersion: 1.15.6
+      recommendedVersion: 1.15.7
       requiredVersion: 1.15.0
     - range: ">=1.14.0"
-      recommendedVersion: 1.14.9
+      recommendedVersion: 1.14.10
       requiredVersion: 1.14.0
     - range: ">=1.13.0"
       recommendedVersion: 1.13.12
@@ -77,15 +77,15 @@ spec:
     - range: ">=1.16.0-alpha.1"
       #recommendedVersion: "1.16.0"
       #requiredVersion: 1.16.0
-      kubernetesVersion: 1.16.3
+      kubernetesVersion: 1.16.4
     - range: ">=1.15.0-alpha.1"
       #recommendedVersion: "1.15.0"
       #requiredVersion: 1.15.0
-      kubernetesVersion: 1.15.6
+      kubernetesVersion: 1.15.7
     - range: ">=1.14.0-alpha.1"
       #recommendedVersion: "1.14.0"
       #requiredVersion: 1.14.0
-      kubernetesVersion: 1.14.9
+      kubernetesVersion: 1.14.10
     - range: ">=1.13.0-alpha.1"
       #recommendedVersion: "1.13.0"
       #requiredVersion: 1.13.0

From 95464ebdc17b86c9b3fe05a4ebd57c9d431c1c59 Mon Sep 17 00:00:00 2001
From: Ole Markus With
Date: Tue, 21 Jan 2020 19:46:38 +0100
Subject: [PATCH 23/42] Bump alpha k8s versions

---
 channels/alpha | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/channels/alpha b/channels/alpha
index a0518da32d..ab35b16d09 100644
--- a/channels/alpha
+++ b/channels/alpha
@@ -56,13 +56,13 @@ spec:
         kubenet: {}
   kubernetesVersions:
   - range: ">=1.17.0"
-    recommendedVersion: 1.17.0
+    recommendedVersion: 1.17.2
     requiredVersion: 1.17.0
   - range: ">=1.16.0"
-    recommendedVersion: 1.16.4
+    recommendedVersion: 1.16.6
     requiredVersion: 1.16.0
   - range: ">=1.15.0"
-    recommendedVersion: 1.15.7
+    recommendedVersion: 1.15.9
     requiredVersion: 1.15.0
   - range: ">=1.14.0"
     recommendedVersion: 1.14.10
@@ -83,15 +83,15 @@ spec:
   - range: ">=1.17.0-alpha.1"
     #recommendedVersion: "1.17.0"
     #requiredVersion: 1.17.0
-    kubernetesVersion: 1.17.0
+    kubernetesVersion: 1.17.2
   - range: ">=1.16.0-alpha.1"
     #recommendedVersion: "1.16.0"
     #requiredVersion: 1.16.0
-    kubernetesVersion: 1.16.4
+    kubernetesVersion: 1.16.6
   - range: ">=1.15.0-alpha.1"
     #recommendedVersion: "1.15.0"
     #requiredVersion: 1.15.0
-    kubernetesVersion: 1.15.7
+    kubernetesVersion: 1.15.9
  - range: ">=1.14.0-alpha.1"
     #recommendedVersion: "1.14.0"
     #requiredVersion: 1.14.0

From 751ce5d19fd2e05154c596cd2d7ba77a46b9a5a3 Mon Sep 17 00:00:00 2001
From: mmerrill3 Date: Wed, 22 Jan 2020 13:56:14 -0500 Subject: [PATCH 24/42] Bump etcd-manager to 3.0.20200116 (#8310) Signed-off-by: mmerrill3 --- pkg/model/components/etcd.go | 12 +++++++++--- pkg/model/components/etcdmanager/model.go | 2 +- pkg/model/components/etcdmanager/options.go | 2 +- .../components/etcdmanager/tests/minimal/tasks.yaml | 4 ++-- .../tests/old_versions_mount_hosts/tasks.yaml | 4 ++-- .../components/etcdmanager/tests/proxy/tasks.yaml | 4 ++-- 6 files changed, 17 insertions(+), 11 deletions(-) diff --git a/pkg/model/components/etcd.go b/pkg/model/components/etcd.go index a5b9ded4ae..7ce55d401c 100644 --- a/pkg/model/components/etcd.go +++ b/pkg/model/components/etcd.go @@ -24,7 +24,7 @@ import ( "k8s.io/kops/upup/pkg/fi/loader" ) -const DefaultBackupImage = "kopeio/etcd-backup:3.0.20191025" +const DefaultBackupImage = "kopeio/etcd-backup:3.0.20200116" // EtcdOptionsBuilder adds options for etcd to the model type EtcdOptionsBuilder struct { @@ -42,6 +42,8 @@ const ( DefaultEtcd3Version_1_13 = "3.2.24" DefaultEtcd3Version_1_14 = "3.3.10" + + DefaultEtcd3Version_1_17 = "3.4.3" ) // BuildOptions is responsible for filling in the defaults for the etcd cluster model @@ -62,7 +64,9 @@ func (b *EtcdOptionsBuilder) BuildOptions(o interface{}) error { // Ensure the version is set if c.Version == "" && c.Provider == kops.EtcdProviderTypeLegacy { // Even if in legacy mode, etcd version 2 is unsupported as of k8s 1.13 - if b.IsKubernetesGTE("1.14") { + if b.IsKubernetesGTE("1.17") { + c.Version = DefaultEtcd3Version_1_17 + } else if b.IsKubernetesGTE("1.14") { c.Version = DefaultEtcd3Version_1_14 } else if b.IsKubernetesGTE("1.13") { c.Version = DefaultEtcd3Version_1_13 @@ -73,7 +77,9 @@ func (b *EtcdOptionsBuilder) BuildOptions(o interface{}) error { if c.Version == "" && c.Provider == kops.EtcdProviderTypeManager { // From 1.11, we run the k8s-recommended versions of etcd when using the manager - if b.IsKubernetesGTE("1.14") { + if b.IsKubernetesGTE("1.17") { + c.Version = DefaultEtcd3Version_1_17 + } else if b.IsKubernetesGTE("1.14") { c.Version = DefaultEtcd3Version_1_14 } else if b.IsKubernetesGTE("1.13") { c.Version = DefaultEtcd3Version_1_13 diff --git a/pkg/model/components/etcdmanager/model.go b/pkg/model/components/etcdmanager/model.go index eb60e10e3c..c18f9e1493 100644 --- a/pkg/model/components/etcdmanager/model.go +++ b/pkg/model/components/etcdmanager/model.go @@ -189,7 +189,7 @@ metadata: namespace: kube-system spec: containers: - - image: kopeio/etcd-manager:3.0.20191025 + - image: kopeio/etcd-manager:3.0.20200116 name: etcd-manager resources: requests: diff --git a/pkg/model/components/etcdmanager/options.go b/pkg/model/components/etcdmanager/options.go index a218c6989c..0f82eabb58 100644 --- a/pkg/model/components/etcdmanager/options.go +++ b/pkg/model/components/etcdmanager/options.go @@ -79,7 +79,7 @@ func (b *EtcdManagerOptionsBuilder) BuildOptions(o interface{}) error { return nil } -var supportedEtcdVersions = []string{"2.2.1", "3.1.12", "3.2.18", "3.2.24", "3.3.10", "3.3.13"} +var supportedEtcdVersions = []string{"2.2.1", "3.1.12", "3.2.18", "3.2.24", "3.3.10", "3.3.13", "3.4.3"} func etcdVersionIsSupported(version string) bool { version = strings.TrimPrefix(version, "v") diff --git a/pkg/model/components/etcdmanager/tests/minimal/tasks.yaml b/pkg/model/components/etcdmanager/tests/minimal/tasks.yaml index 53fa6c5b9e..188f9014e5 100644 --- a/pkg/model/components/etcdmanager/tests/minimal/tasks.yaml +++ 
b/pkg/model/components/etcdmanager/tests/minimal/tasks.yaml
@@ -89,7 +89,7 @@ Contents:
         --v=6 --volume-name-tag=k8s.io/etcd/events --volume-provider=aws --volume-tag=k8s.io/etcd/events
         --volume-tag=k8s.io/role/master=1 --volume-tag=kubernetes.io/cluster/minimal.example.com=owned
         > /tmp/pipe 2>&1
-      image: kopeio/etcd-manager:3.0.20191025
+      image: kopeio/etcd-manager:3.0.20200116
       name: etcd-manager
       resources:
         requests:
@@ -154,7 +154,7 @@ Contents:
         --v=6 --volume-name-tag=k8s.io/etcd/main --volume-provider=aws --volume-tag=k8s.io/etcd/main
         --volume-tag=k8s.io/role/master=1 --volume-tag=kubernetes.io/cluster/minimal.example.com=owned
         > /tmp/pipe 2>&1
-      image: kopeio/etcd-manager:3.0.20191025
+      image: kopeio/etcd-manager:3.0.20200116
       name: etcd-manager
       resources:
         requests:
diff --git a/pkg/model/components/etcdmanager/tests/old_versions_mount_hosts/tasks.yaml b/pkg/model/components/etcdmanager/tests/old_versions_mount_hosts/tasks.yaml
index b3d575d89c..d509ac10f7 100644
--- a/pkg/model/components/etcdmanager/tests/old_versions_mount_hosts/tasks.yaml
+++ b/pkg/model/components/etcdmanager/tests/old_versions_mount_hosts/tasks.yaml
@@ -89,7 +89,7 @@ Contents:
         --v=6 --volume-name-tag=k8s.io/etcd/events --volume-provider=aws --volume-tag=k8s.io/etcd/events
         --volume-tag=k8s.io/role/master=1 --volume-tag=kubernetes.io/cluster/minimal.example.com=owned
         > /tmp/pipe 2>&1
-      image: kopeio/etcd-manager:3.0.20191025
+      image: kopeio/etcd-manager:3.0.20200116
       name: etcd-manager
       resources:
         requests:
@@ -160,7 +160,7 @@ Contents:
         --v=6 --volume-name-tag=k8s.io/etcd/main --volume-provider=aws --volume-tag=k8s.io/etcd/main
         --volume-tag=k8s.io/role/master=1 --volume-tag=kubernetes.io/cluster/minimal.example.com=owned
         > /tmp/pipe 2>&1
-      image: kopeio/etcd-manager:3.0.20191025
+      image: kopeio/etcd-manager:3.0.20200116
       name: etcd-manager
       resources:
         requests:
diff --git a/pkg/model/components/etcdmanager/tests/proxy/tasks.yaml b/pkg/model/components/etcdmanager/tests/proxy/tasks.yaml
index 27816eea07..b988eb1ff4 100644
--- a/pkg/model/components/etcdmanager/tests/proxy/tasks.yaml
+++ b/pkg/model/components/etcdmanager/tests/proxy/tasks.yaml
@@ -98,7 +98,7 @@ Contents:
         value: http://proxy.example.com
       - name: no_proxy
         value: noproxy.example.com
-      image: kopeio/etcd-manager:3.0.20191025
+      image: kopeio/etcd-manager:3.0.20200116
       name: etcd-manager
       resources:
         requests:
@@ -178,7 +178,7 @@ Contents:
         value: http://proxy.example.com
       - name: no_proxy
         value: noproxy.example.com
-      image: kopeio/etcd-manager:3.0.20191025
+      image: kopeio/etcd-manager:3.0.20200116
       name: etcd-manager
       resources:
         requests:

From a9f3db63fc83a517cfb5baf80c50f5896bb22edd Mon Sep 17 00:00:00 2001
From: Roberto Rodriguez Alcala
Date: Thu, 23 Jan 2020 15:32:28 -0800
Subject: [PATCH 25/42] Support additional kube-scheduler config parameters via config file

Mentioned in #6942

This change allows using the --config flag with a generated config file to set options that were not previously supported as flags, or for which flag-based configuration is deprecated (https://kubernetes.io/docs/reference/command-line-tools-reference/kube-scheduler/).

I thought it might be better to have them in a config file to ensure support in newer Kubernetes versions. It also makes it easy to add more options later; a sketch of the generated file follows.
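For illustration, assuming only qps is configured, the generated file (written to /var/lib/kube-scheduler/config and passed to kube-scheduler via --config) should look roughly like this sketch; the kubeconfig path shown is the kops default, and the exact set of fields depends on which options are set:

```
apiVersion: kubescheduler.config.k8s.io/v1alpha1
kind: KubeSchedulerConfiguration
clientConnection:
  kubeconfig: /var/lib/kube-scheduler/kubeconfig
  qps: 3
```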
--- nodeup/pkg/model/kube_scheduler.go | 22 +++- pkg/apis/kops/componentconfig.go | 6 ++ pkg/configbuilder/BUILD.bazel | 26 +++++ pkg/configbuilder/buildconfigfile.go | 119 ++++++++++++++++++++++ pkg/configbuilder/buildconfigfile_test.go | 85 ++++++++++++++++ 5 files changed, 257 insertions(+), 1 deletion(-) create mode 100644 pkg/configbuilder/BUILD.bazel create mode 100644 pkg/configbuilder/buildconfigfile.go create mode 100644 pkg/configbuilder/buildconfigfile_test.go diff --git a/nodeup/pkg/model/kube_scheduler.go b/nodeup/pkg/model/kube_scheduler.go index cd9dd4a69f..7b7ed63bec 100644 --- a/nodeup/pkg/model/kube_scheduler.go +++ b/nodeup/pkg/model/kube_scheduler.go @@ -20,6 +20,7 @@ import ( "fmt" "strconv" + "k8s.io/kops/pkg/configbuilder" "k8s.io/kops/pkg/flagbuilder" "k8s.io/kops/pkg/k8scodecs" "k8s.io/kops/pkg/kubemanifest" @@ -41,6 +42,8 @@ type KubeSchedulerBuilder struct { var _ fi.ModelBuilder = &KubeSchedulerBuilder{} +var defaultKubeConfig = "/var/lib/kube-scheduler/kubeconfig" + // Build is responsible for building the manifest for the kube-scheduler func (b *KubeSchedulerBuilder) Build(c *fi.ModelBuilderContext) error { if !b.IsMaster { @@ -79,6 +82,23 @@ func (b *KubeSchedulerBuilder) Build(c *fi.ModelBuilderContext) error { }) } + if b.Cluster.Spec.KubeScheduler.KubeConfig == nil { + b.Cluster.Spec.KubeScheduler.KubeConfig = &defaultKubeConfig + } + { + config, err := configbuilder.BuildConfigYaml(b.Cluster.Spec.KubeScheduler) + if err != nil { + return err + } + + c.AddTask(&nodetasks.File{ + Path: "/var/lib/kube-scheduler/config", + Contents: fi.NewBytesResource(config), + Type: nodetasks.FileType_File, + Mode: s("0400"), + }) + } + { c.AddTask(&nodetasks.File{ Path: "/var/log/kube-scheduler.log", @@ -101,7 +121,7 @@ func (b *KubeSchedulerBuilder) buildPod() (*v1.Pod, error) { return nil, fmt.Errorf("error building kube-scheduler flags: %v", err) } // Add kubeconfig flag - flags = append(flags, "--kubeconfig="+"/var/lib/kube-scheduler/kubeconfig") + flags = append(flags, "--config="+"/var/lib/kube-scheduler/config") if c.UsePolicyConfigMap != nil { flags = append(flags, "--policy-configmap=scheduler-policy", "--policy-configmap-namespace=kube-system") diff --git a/pkg/apis/kops/componentconfig.go b/pkg/apis/kops/componentconfig.go index 0d50c02aa1..5469a8bf91 100644 --- a/pkg/apis/kops/componentconfig.go +++ b/pkg/apis/kops/componentconfig.go @@ -609,6 +609,12 @@ type KubeSchedulerConfig struct { // which has been supported as far back as Kubernetes 1.7. The default depends on the version and the cloud provider // as outlined: https://kubernetes.io/docs/concepts/storage/storage-limits/ MaxPersistentVolumes *int32 `json:"maxPersistentVolumes,omitempty"` + // Qps sets the maximum qps to send to apiserver after the burst quota is exhausted + Qps *float32 `json:"qps,omitempty" configfile:"ClientConnection.QPS"` + // Burst sets the maximum qps to send to apiserver after the burst quota is exhausted + Burst *float32 `json:"qps,omitempty" configfile:"ClientConnection.Burst"` + // Overrides the default kubeconfig path. 
+ KubeConfig *string `json:"kubeConfig,omitempty" configfile:"ClientConnection.Kubeconfig"` } // LeaderElectionConfiguration defines the configuration of leader election diff --git a/pkg/configbuilder/BUILD.bazel b/pkg/configbuilder/BUILD.bazel new file mode 100644 index 0000000000..c377d6dd2c --- /dev/null +++ b/pkg/configbuilder/BUILD.bazel @@ -0,0 +1,26 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["buildconfigfile.go"], + importpath = "k8s.io/kops/pkg/configbuilder", + visibility = ["//visibility:public"], + deps = [ + "//util/pkg/reflectutils:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["buildconfigfile_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/apis/kops:go_default_library", + "//upup/pkg/fi:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + ], +) diff --git a/pkg/configbuilder/buildconfigfile.go b/pkg/configbuilder/buildconfigfile.go new file mode 100644 index 0000000000..fcb56d4a9f --- /dev/null +++ b/pkg/configbuilder/buildconfigfile.go @@ -0,0 +1,119 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package configbuilder
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+
+	"gopkg.in/yaml.v2"
+
+	"k8s.io/klog"
+	"k8s.io/kops/util/pkg/reflectutils"
+)
+
+// ClientConnectionConfig for kube-scheduler
+type ClientConnectionConfig struct {
+	Burst      *int32   `yaml:"burst,omitempty"`
+	Kubeconfig *string  `yaml:"kubeconfig"`
+	QPS        *float32 `yaml:"qps,omitempty"`
+}
+
+// SchedulerConfig used to generate the config file
+type SchedulerConfig struct {
+	APIVersion         string                  `yaml:"apiVersion"`
+	Kind               string                  `yaml:"Kind"`
+	BindTimeoutSeconds *int64                  `yaml:"bindTimeoutSeconds,omitempty"`
+	ClientConnection   *ClientConnectionConfig `yaml:"clientConnection,omitempty"`
+}
+
+// BuildConfigYaml reflects the options interface and extracts the parameters for the config file
+func BuildConfigYaml(options interface{}) ([]byte, error) {
+
+	schedConfig := new(SchedulerConfig)
+	schedConfig.APIVersion = "kubescheduler.config.k8s.io/v1alpha1"
+	schedConfig.Kind = "KubeSchedulerConfiguration"
+	schedConfig.ClientConnection = new(ClientConnectionConfig)
+
+	walker := func(path string, field *reflect.StructField, val reflect.Value) error {
+		if field == nil {
+			klog.V(8).Infof("ignoring non-field: %s", path)
+			return nil
+		}
+		tag := field.Tag.Get("configfile")
+		if tag == "" {
+			klog.V(4).Infof("not writing field with no flag tag: %s", path)
+			// We want to descend - it could be a structure containing flags
+			return nil
+		}
+		if tag == "-" {
+			klog.V(4).Infof("skipping field with %q flag tag: %s", tag, path)
+			return reflectutils.SkipReflection
+		}
+
+		tokens := strings.Split(tag, ",")
+
+		flagName := tokens[0]
+
+		targetValue, error := getValueFromStruct(flagName, schedConfig)
+		if error != nil {
+			return fmt.Errorf("conversion error for field %s: %s", flagName, error)
+		}
+		// We do have to do this, even though the recursive walk will do it for us
+		// because when we descend we won't have `field` set
+		if val.Kind() == reflect.Ptr {
+			if val.IsNil() {
+				return nil
+			}
+		}
+		targetValue.Set(val)
+
+		return reflectutils.SkipReflection
+	}
+
+	err := reflectutils.ReflectRecursive(reflect.ValueOf(options), walker)
+	if err != nil {
+		return nil, fmt.Errorf("BuildConfigYaml failed to reflect value: %s", err)
+	}
+
+	configFile, err := yaml.Marshal(schedConfig)
+	if err != nil {
+		return nil, err
+	}
+
+	return configFile, nil
+}
+
+func getValueFromStruct(keyWithDots string, object *SchedulerConfig) (*reflect.Value, error) {
+	keySlice := strings.Split(keyWithDots, ".")
+	v := reflect.ValueOf(object)
+	// iterate through the field names, ignoring the first name as it might be the current instance name
+	// this could also be made recursive to support types like slice and map along with structs
+	for _, key := range keySlice {
+		for v.Kind() == reflect.Ptr {
+			v = v.Elem()
+		}
+		// we only accept structs
+		if v.Kind() != reflect.Struct {
+			return nil, fmt.Errorf("only accepts structs; got %T", v)
+		}
+		v = v.FieldByName(key)
+	}
+
+	return &v, nil
+}
diff --git a/pkg/configbuilder/buildconfigfile_test.go b/pkg/configbuilder/buildconfigfile_test.go
new file mode 100644
index 0000000000..7caf9c109e
--- /dev/null
+++ b/pkg/configbuilder/buildconfigfile_test.go
@@ -0,0 +1,85 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package configbuilder
+
+import (
+	"bytes"
+	"k8s.io/apimachinery/pkg/api/resource"
+	"k8s.io/kops/pkg/apis/kops"
+	"testing"
+)
+
+func resourceValue(s string) *resource.Quantity {
+	q := resource.MustParse(s)
+	return &q
+}
+
+func TestParseBasic(t *testing.T) {
+	expect := []byte(
+		`apiVersion: kubescheduler.config.k8s.io/v1alpha1
+Kind: KubeSchedulerConfiguration
+clientConnection:
+  kubeconfig: null
+  qps: 3
+`)
+	qps := float32(3.0)
+	s := &kops.KubeSchedulerConfig{Qps: &qps}
+
+	yaml, err := BuildConfigYaml(s)
+	if err != nil {
+		t.Errorf("unexpected error: %s", err)
+	}
+
+	if !bytes.Equal(yaml, expect) {
+		t.Errorf("unexpected result: %v, expected: %v", yaml, expect)
+	}
+}
+
+func TestGetStructVal(t *testing.T) {
+	str := "test"
+	s := &SchedulerConfig{
+		ClientConnection: &ClientConnectionConfig{
+			Kubeconfig: &str,
+		},
+	}
+	v, err := getValueFromStruct("ClientConnection.Kubeconfig", s)
+	if err != nil {
+		t.Errorf("unexpected error: %s", err)
+	}
+	inStruct := v.Elem().String()
+	if inStruct != str {
+		t.Errorf("unexpected value: %s, expected: %s", inStruct, str)
+	}
+
+}
+
+func TestWrongStructField(t *testing.T) {
+	str := "test"
+	s := &SchedulerConfig{
+		ClientConnection: &ClientConnectionConfig{
+			Kubeconfig: &str,
+		},
+	}
+	v, err := getValueFromStruct("ClientConnection.NotExistent", s)
+	if err != nil {
+		t.Errorf("unexpected error: %s", err)
+	}
+	if v.IsValid() {
+		t.Errorf("unexpected Valid value from non-existent field lookup")
+	}
+
+}

From 13047dbb26568086bf159b2f92f4b1d408b269aa Mon Sep 17 00:00:00 2001
From: Roberto Rodriguez Alcala
Date: Thu, 23 Jan 2020 19:23:23 -0800
Subject: [PATCH 26/42] Removes unused func

---
 pkg/configbuilder/buildconfigfile_test.go | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

diff --git a/pkg/configbuilder/buildconfigfile_test.go b/pkg/configbuilder/buildconfigfile_test.go
index 7caf9c109e..70015c08f8 100644
--- a/pkg/configbuilder/buildconfigfile_test.go
+++ b/pkg/configbuilder/buildconfigfile_test.go
@@ -18,15 +18,10 @@ package configbuilder
 
 import (
 	"bytes"
-	"k8s.io/apimachinery/pkg/api/resource"
-	"k8s.io/kops/pkg/apis/kops"
 	"testing"
-)
 
-func resourceValue(s string) *resource.Quantity {
-	q := resource.MustParse(s)
-	return &q
-}
+	"k8s.io/kops/pkg/apis/kops"
+)

From 7a017396ba77b99be95caf9f0dc0bacb71ab7ad8 Mon Sep 17 00:00:00 2001
From: Roberto Rodriguez Alcala
Date: Thu, 23 Jan 2020 19:58:45 -0800
Subject: [PATCH 27/42] Fixes incorrect tag name and copyright year

---
 nodeup/pkg/model/kube_scheduler.go        | 2 +-
 pkg/apis/kops/componentconfig.go          | 4 ++--
 pkg/configbuilder/buildconfigfile.go      | 2 +-
 pkg/configbuilder/buildconfigfile_test.go | 2 +-
 4 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/nodeup/pkg/model/kube_scheduler.go b/nodeup/pkg/model/kube_scheduler.go
index 7b7ed63bec..0a19ebefd6 100644
--- a/nodeup/pkg/model/kube_scheduler.go
+++ b/nodeup/pkg/model/kube_scheduler.go
@@ -1,5 +1,5 @@
 /*
-Copyright 2019 The Kubernetes Authors.
+Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/kops/componentconfig.go b/pkg/apis/kops/componentconfig.go index 5469a8bf91..4598bed940 100644 --- a/pkg/apis/kops/componentconfig.go +++ b/pkg/apis/kops/componentconfig.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Kubernetes Authors. +Copyright 2020 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -612,7 +612,7 @@ type KubeSchedulerConfig struct { // Qps sets the maximum qps to send to apiserver after the burst quota is exhausted Qps *float32 `json:"qps,omitempty" configfile:"ClientConnection.QPS"` // Burst sets the maximum qps to send to apiserver after the burst quota is exhausted - Burst *float32 `json:"qps,omitempty" configfile:"ClientConnection.Burst"` + Burst *float32 `json:"burst,omitempty" configfile:"ClientConnection.Burst"` // Overrides the default kubeconfig path. KubeConfig *string `json:"kubeConfig,omitempty" configfile:"ClientConnection.Kubeconfig"` } diff --git a/pkg/configbuilder/buildconfigfile.go b/pkg/configbuilder/buildconfigfile.go index fcb56d4a9f..00c70367ca 100644 --- a/pkg/configbuilder/buildconfigfile.go +++ b/pkg/configbuilder/buildconfigfile.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Kubernetes Authors. +Copyright 2020 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/configbuilder/buildconfigfile_test.go b/pkg/configbuilder/buildconfigfile_test.go index 70015c08f8..ae7fff17c0 100644 --- a/pkg/configbuilder/buildconfigfile_test.go +++ b/pkg/configbuilder/buildconfigfile_test.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Kubernetes Authors. +Copyright 2020 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. From 447b46d10918643fce7058c90f5c3611615c20ed Mon Sep 17 00:00:00 2001 From: Roberto Rodriguez Alcala Date: Thu, 23 Jan 2020 20:11:41 -0800 Subject: [PATCH 28/42] Fix for copyright and ran update-bazel.sh --- nodeup/pkg/model/BUILD.bazel | 1 + nodeup/pkg/model/kube_scheduler.go | 2 +- pkg/apis/kops/componentconfig.go | 2 +- pkg/configbuilder/BUILD.bazel | 10 ++-------- 4 files changed, 5 insertions(+), 10 deletions(-) diff --git a/nodeup/pkg/model/BUILD.bazel b/nodeup/pkg/model/BUILD.bazel index acb16f8fdf..b503b2f1d7 100644 --- a/nodeup/pkg/model/BUILD.bazel +++ b/nodeup/pkg/model/BUILD.bazel @@ -46,6 +46,7 @@ go_library( "//pkg/apis/kops/util:go_default_library", "//pkg/apis/nodeup:go_default_library", "//pkg/assets:go_default_library", + "//pkg/configbuilder:go_default_library", "//pkg/dns:go_default_library", "//pkg/flagbuilder:go_default_library", "//pkg/k8scodecs:go_default_library", diff --git a/nodeup/pkg/model/kube_scheduler.go b/nodeup/pkg/model/kube_scheduler.go index 0a19ebefd6..7b7ed63bec 100644 --- a/nodeup/pkg/model/kube_scheduler.go +++ b/nodeup/pkg/model/kube_scheduler.go @@ -1,5 +1,5 @@ /* -Copyright 2020 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/apis/kops/componentconfig.go b/pkg/apis/kops/componentconfig.go index 4598bed940..7e1eb2b493 100644 --- a/pkg/apis/kops/componentconfig.go +++ b/pkg/apis/kops/componentconfig.go @@ -1,5 +1,5 @@ /* -Copyright 2020 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/configbuilder/BUILD.bazel b/pkg/configbuilder/BUILD.bazel index c377d6dd2c..ece484caba 100644 --- a/pkg/configbuilder/BUILD.bazel +++ b/pkg/configbuilder/BUILD.bazel @@ -7,8 +7,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//util/pkg/reflectutils:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/gopkg.in/yaml.v2:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], ) @@ -17,10 +16,5 @@ go_test( name = "go_default_test", srcs = ["buildconfigfile_test.go"], embed = [":go_default_library"], - deps = [ - "//pkg/apis/kops:go_default_library", - "//upup/pkg/fi:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - ], + deps = ["//pkg/apis/kops:go_default_library"], ) From b9945594fd40dd8bfe06c6fef77b5b068f18a249 Mon Sep 17 00:00:00 2001 From: Roberto Rodriguez Alcala Date: Thu, 23 Jan 2020 20:31:18 -0800 Subject: [PATCH 29/42] Updates based on feedback --- hack/.packages | 1 + pkg/apis/kops/componentconfig.go | 2 +- pkg/configbuilder/buildconfigfile.go | 6 +++--- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/hack/.packages b/hack/.packages index 479e397c9a..0819aa77fa 100644 --- a/hack/.packages +++ b/hack/.packages @@ -82,6 +82,7 @@ k8s.io/kops/pkg/client/simple/api k8s.io/kops/pkg/client/simple/vfsclientset k8s.io/kops/pkg/cloudinstances k8s.io/kops/pkg/commands +k8s.io/kops/pkg/configbuilder k8s.io/kops/pkg/diff k8s.io/kops/pkg/dns k8s.io/kops/pkg/drain diff --git a/pkg/apis/kops/componentconfig.go b/pkg/apis/kops/componentconfig.go index 7e1eb2b493..44f0d843aa 100644 --- a/pkg/apis/kops/componentconfig.go +++ b/pkg/apis/kops/componentconfig.go @@ -613,7 +613,7 @@ type KubeSchedulerConfig struct { Qps *float32 `json:"qps,omitempty" configfile:"ClientConnection.QPS"` // Burst sets the maximum qps to send to apiserver after the burst quota is exhausted Burst *float32 `json:"burst,omitempty" configfile:"ClientConnection.Burst"` - // Overrides the default kubeconfig path. + // KubeConfig overrides the default kubeconfig path. 
KubeConfig *string `json:"kubeConfig,omitempty" configfile:"ClientConnection.Kubeconfig"` } diff --git a/pkg/configbuilder/buildconfigfile.go b/pkg/configbuilder/buildconfigfile.go index 00c70367ca..1b94b3e3b9 100644 --- a/pkg/configbuilder/buildconfigfile.go +++ b/pkg/configbuilder/buildconfigfile.go @@ -27,17 +27,17 @@ import ( "k8s.io/kops/util/pkg/reflectutils" ) -// ClientConnectionConfig for kube-scheduler +// ClientConnectionConfig is used by kube-scheduler to talk to the api server type ClientConnectionConfig struct { Burst *int32 `yaml:"burst,omitempty"` Kubeconfig *string `yaml:"kubeconfig"` QPS *float32 `yaml:"qps,omitempty"` } -// SchedulerConfig used to generate the config file +// SchedulerConfig is used to generate the config file type SchedulerConfig struct { APIVersion string `yaml:"apiVersion"` - Kind string `yaml:"Kind"` + Kind string `yaml:"kind"` BindTimeoutSeconds *int64 `yaml:"bindTimeoutSeconds,omitempty"` ClientConnection *ClientConnectionConfig `yaml:"clientConnection,omitempty"` } From ac9c368cc36a0adab804bd698fb4ec28b5c77245 Mon Sep 17 00:00:00 2001 From: Roberto Rodriguez Alcala Date: Thu, 23 Jan 2020 20:41:41 -0800 Subject: [PATCH 30/42] Fix broken test after last change --- pkg/configbuilder/buildconfigfile_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/configbuilder/buildconfigfile_test.go b/pkg/configbuilder/buildconfigfile_test.go index ae7fff17c0..af267c9649 100644 --- a/pkg/configbuilder/buildconfigfile_test.go +++ b/pkg/configbuilder/buildconfigfile_test.go @@ -26,7 +26,7 @@ import ( func TestParseBasic(t *testing.T) { expect := []byte( `apiVersion: kubescheduler.config.k8s.io/v1alpha1 -Kind: KubeSchedulerConfiguration +kind: KubeSchedulerConfiguration clientConnection: kubeconfig: null qps: 3 From 117353c808713323408120dcc0dacbb76960d1d1 Mon Sep 17 00:00:00 2001 From: Roberto Rodriguez Alcala Date: Fri, 24 Jan 2020 01:42:49 -0800 Subject: [PATCH 31/42] Fixing ci tests --- k8s/crds/kops.k8s.io_clusters.yaml | 12 ++++++++++++ pkg/apis/kops/componentconfig.go | 4 ++-- pkg/apis/kops/v1alpha1/componentconfig.go | 6 ++++++ .../kops/v1alpha1/zz_generated.conversion.go | 6 ++++++ pkg/apis/kops/v1alpha1/zz_generated.deepcopy.go | 15 +++++++++++++++ pkg/apis/kops/v1alpha2/componentconfig.go | 6 ++++++ .../kops/v1alpha2/zz_generated.conversion.go | 6 ++++++ pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go | 15 +++++++++++++++ pkg/apis/kops/zz_generated.deepcopy.go | 15 +++++++++++++++ pkg/configbuilder/BUILD.bazel | 6 +++++- pkg/configbuilder/buildconfigfile.go | 17 ++++++++++++++--- pkg/configbuilder/buildconfigfile_test.go | 8 +++++--- 12 files changed, 107 insertions(+), 9 deletions(-) diff --git a/k8s/crds/kops.k8s.io_clusters.yaml b/k8s/crds/kops.k8s.io_clusters.yaml index 71f4093b3a..f363349665 100644 --- a/k8s/crds/kops.k8s.io_clusters.yaml +++ b/k8s/crds/kops.k8s.io_clusters.yaml @@ -1609,6 +1609,11 @@ spec: kubeScheduler: description: KubeSchedulerConfig is the configuration for the kube-scheduler properties: + burst: + description: Burst sets the maximum qps to send to apiserver after + the burst quota is exhausted + format: int32 + type: integer featureGates: additionalProperties: type: string @@ -1618,6 +1623,9 @@ spec: image: description: Image is the docker image to use type: string + kubeConfig: + description: KubeConfig overrides the default kubeconfig path. + type: string leaderElection: description: LeaderElection defines the configuration of leader election client. 
@@ -1677,6 +1685,10 @@ spec: and the cloud provider as outlined: https://kubernetes.io/docs/concepts/storage/storage-limits/' format: int32 type: integer + qps: + description: Qps sets the maximum qps to send to apiserver after + the burst quota is exhausted + type: string usePolicyConfigMap: description: UsePolicyConfigMap enable setting the scheduler policy from a configmap diff --git a/pkg/apis/kops/componentconfig.go b/pkg/apis/kops/componentconfig.go index 44f0d843aa..901ba60307 100644 --- a/pkg/apis/kops/componentconfig.go +++ b/pkg/apis/kops/componentconfig.go @@ -610,9 +610,9 @@ type KubeSchedulerConfig struct { // as outlined: https://kubernetes.io/docs/concepts/storage/storage-limits/ MaxPersistentVolumes *int32 `json:"maxPersistentVolumes,omitempty"` // Qps sets the maximum qps to send to apiserver after the burst quota is exhausted - Qps *float32 `json:"qps,omitempty" configfile:"ClientConnection.QPS"` + Qps *resource.Quantity `json:"qps,omitempty" configfile:"ClientConnection.QPS"` // Burst sets the maximum qps to send to apiserver after the burst quota is exhausted - Burst *float32 `json:"burst,omitempty" configfile:"ClientConnection.Burst"` + Burst *int32 `json:"burst,omitempty" configfile:"ClientConnection.Burst"` // KubeConfig overrides the default kubeconfig path. KubeConfig *string `json:"kubeConfig,omitempty" configfile:"ClientConnection.Kubeconfig"` } diff --git a/pkg/apis/kops/v1alpha1/componentconfig.go b/pkg/apis/kops/v1alpha1/componentconfig.go index 2b7fa7c587..958417146e 100644 --- a/pkg/apis/kops/v1alpha1/componentconfig.go +++ b/pkg/apis/kops/v1alpha1/componentconfig.go @@ -609,6 +609,12 @@ type KubeSchedulerConfig struct { // which has been supported as far back as Kubernetes 1.7. The default depends on the version and the cloud provider // as outlined: https://kubernetes.io/docs/concepts/storage/storage-limits/ MaxPersistentVolumes *int32 `json:"maxPersistentVolumes,omitempty"` + // Qps sets the maximum qps to send to apiserver after the burst quota is exhausted + Qps *resource.Quantity `json:"qps,omitempty" configfile:"ClientConnection.QPS"` + // Burst sets the maximum qps to send to apiserver after the burst quota is exhausted + Burst *int32 `json:"burst,omitempty" configfile:"ClientConnection.Burst"` + // KubeConfig overrides the default kubeconfig path. 
+ KubeConfig *string `json:"kubeConfig,omitempty" configfile:"ClientConnection.Kubeconfig"` } // LeaderElectionConfiguration defines the configuration of leader election diff --git a/pkg/apis/kops/v1alpha1/zz_generated.conversion.go b/pkg/apis/kops/v1alpha1/zz_generated.conversion.go index 3bf36384dd..d287172d9e 100644 --- a/pkg/apis/kops/v1alpha1/zz_generated.conversion.go +++ b/pkg/apis/kops/v1alpha1/zz_generated.conversion.go @@ -3656,6 +3656,9 @@ func autoConvert_v1alpha1_KubeSchedulerConfig_To_kops_KubeSchedulerConfig(in *Ku out.UsePolicyConfigMap = in.UsePolicyConfigMap out.FeatureGates = in.FeatureGates out.MaxPersistentVolumes = in.MaxPersistentVolumes + out.Qps = in.Qps + out.Burst = in.Burst + out.KubeConfig = in.KubeConfig return nil } @@ -3680,6 +3683,9 @@ func autoConvert_kops_KubeSchedulerConfig_To_v1alpha1_KubeSchedulerConfig(in *ko out.UsePolicyConfigMap = in.UsePolicyConfigMap out.FeatureGates = in.FeatureGates out.MaxPersistentVolumes = in.MaxPersistentVolumes + out.Qps = in.Qps + out.Burst = in.Burst + out.KubeConfig = in.KubeConfig return nil } diff --git a/pkg/apis/kops/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/kops/v1alpha1/zz_generated.deepcopy.go index c27185539a..8cfcfb319b 100644 --- a/pkg/apis/kops/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/kops/v1alpha1/zz_generated.deepcopy.go @@ -2415,6 +2415,21 @@ func (in *KubeSchedulerConfig) DeepCopyInto(out *KubeSchedulerConfig) { *out = new(int32) **out = **in } + if in.Qps != nil { + in, out := &in.Qps, &out.Qps + x := (*in).DeepCopy() + *out = &x + } + if in.Burst != nil { + in, out := &in.Burst, &out.Burst + *out = new(int32) + **out = **in + } + if in.KubeConfig != nil { + in, out := &in.KubeConfig, &out.KubeConfig + *out = new(string) + **out = **in + } return } diff --git a/pkg/apis/kops/v1alpha2/componentconfig.go b/pkg/apis/kops/v1alpha2/componentconfig.go index 25ef37748a..77931026d2 100644 --- a/pkg/apis/kops/v1alpha2/componentconfig.go +++ b/pkg/apis/kops/v1alpha2/componentconfig.go @@ -610,6 +610,12 @@ type KubeSchedulerConfig struct { // which has been supported as far back as Kubernetes 1.7. The default depends on the version and the cloud provider // as outlined: https://kubernetes.io/docs/concepts/storage/storage-limits/ MaxPersistentVolumes *int32 `json:"maxPersistentVolumes,omitempty"` + // Qps sets the maximum qps to send to apiserver after the burst quota is exhausted + Qps *resource.Quantity `json:"qps,omitempty" configfile:"ClientConnection.QPS"` + // Burst sets the maximum qps to send to apiserver after the burst quota is exhausted + Burst *int32 `json:"burst,omitempty" configfile:"ClientConnection.Burst"` + // KubeConfig overrides the default kubeconfig path. 
+ KubeConfig *string `json:"kubeConfig,omitempty" configfile:"ClientConnection.Kubeconfig"` } // LeaderElectionConfiguration defines the configuration of leader election diff --git a/pkg/apis/kops/v1alpha2/zz_generated.conversion.go b/pkg/apis/kops/v1alpha2/zz_generated.conversion.go index 6668f16534..44d08a6371 100644 --- a/pkg/apis/kops/v1alpha2/zz_generated.conversion.go +++ b/pkg/apis/kops/v1alpha2/zz_generated.conversion.go @@ -3926,6 +3926,9 @@ func autoConvert_v1alpha2_KubeSchedulerConfig_To_kops_KubeSchedulerConfig(in *Ku out.UsePolicyConfigMap = in.UsePolicyConfigMap out.FeatureGates = in.FeatureGates out.MaxPersistentVolumes = in.MaxPersistentVolumes + out.Qps = in.Qps + out.Burst = in.Burst + out.KubeConfig = in.KubeConfig return nil } @@ -3950,6 +3953,9 @@ func autoConvert_kops_KubeSchedulerConfig_To_v1alpha2_KubeSchedulerConfig(in *ko out.UsePolicyConfigMap = in.UsePolicyConfigMap out.FeatureGates = in.FeatureGates out.MaxPersistentVolumes = in.MaxPersistentVolumes + out.Qps = in.Qps + out.Burst = in.Burst + out.KubeConfig = in.KubeConfig return nil } diff --git a/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go b/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go index 2994fda089..b20569e95b 100644 --- a/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go +++ b/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go @@ -2486,6 +2486,21 @@ func (in *KubeSchedulerConfig) DeepCopyInto(out *KubeSchedulerConfig) { *out = new(int32) **out = **in } + if in.Qps != nil { + in, out := &in.Qps, &out.Qps + x := (*in).DeepCopy() + *out = &x + } + if in.Burst != nil { + in, out := &in.Burst, &out.Burst + *out = new(int32) + **out = **in + } + if in.KubeConfig != nil { + in, out := &in.KubeConfig, &out.KubeConfig + *out = new(string) + **out = **in + } return } diff --git a/pkg/apis/kops/zz_generated.deepcopy.go b/pkg/apis/kops/zz_generated.deepcopy.go index f54d81b551..d304a46484 100644 --- a/pkg/apis/kops/zz_generated.deepcopy.go +++ b/pkg/apis/kops/zz_generated.deepcopy.go @@ -2668,6 +2668,21 @@ func (in *KubeSchedulerConfig) DeepCopyInto(out *KubeSchedulerConfig) { *out = new(int32) **out = **in } + if in.Qps != nil { + in, out := &in.Qps, &out.Qps + x := (*in).DeepCopy() + *out = &x + } + if in.Burst != nil { + in, out := &in.Burst, &out.Burst + *out = new(int32) + **out = **in + } + if in.KubeConfig != nil { + in, out := &in.KubeConfig, &out.KubeConfig + *out = new(string) + **out = **in + } return } diff --git a/pkg/configbuilder/BUILD.bazel b/pkg/configbuilder/BUILD.bazel index ece484caba..534fbbea45 100644 --- a/pkg/configbuilder/BUILD.bazel +++ b/pkg/configbuilder/BUILD.bazel @@ -8,6 +8,7 @@ go_library( deps = [ "//util/pkg/reflectutils:go_default_library", "//vendor/gopkg.in/yaml.v2:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], ) @@ -16,5 +17,8 @@ go_test( name = "go_default_test", srcs = ["buildconfigfile_test.go"], embed = [":go_default_library"], - deps = ["//pkg/apis/kops:go_default_library"], + deps = [ + "//pkg/apis/kops:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + ], ) diff --git a/pkg/configbuilder/buildconfigfile.go b/pkg/configbuilder/buildconfigfile.go index 1b94b3e3b9..49f7191d54 100644 --- a/pkg/configbuilder/buildconfigfile.go +++ b/pkg/configbuilder/buildconfigfile.go @@ -19,10 +19,11 @@ package configbuilder import ( "fmt" "reflect" + "strconv" "strings" "gopkg.in/yaml.v2" - + "k8s.io/apimachinery/pkg/api/resource" "k8s.io/klog" 
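	// reflectutils supplies ReflectRecursive, the struct walker used below to visit every configfile-tagged field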
"k8s.io/kops/util/pkg/reflectutils" ) @@ -31,7 +32,7 @@ import ( type ClientConnectionConfig struct { Burst *int32 `yaml:"burst,omitempty"` Kubeconfig *string `yaml:"kubeconfig"` - QPS *float32 `yaml:"qps,omitempty"` + QPS *float64 `yaml:"qps,omitempty"` } // SchedulerConfig is used to generate the config file @@ -81,9 +82,19 @@ func BuildConfigYaml(options interface{}) ([]byte, error) { return nil } } - targetValue.Set(val) + switch v := val.Interface().(type) { + case *resource.Quantity: + floatVal, err := strconv.ParseFloat(v.AsDec().String(), 64) + if err != nil { + return fmt.Errorf("unable to convert from Quantity %v to float", v) + } + targetValue.Set(reflect.ValueOf(&floatVal)) + default: + targetValue.Set(val) + } return reflectutils.SkipReflection + } err := reflectutils.ReflectRecursive(reflect.ValueOf(options), walker) diff --git a/pkg/configbuilder/buildconfigfile_test.go b/pkg/configbuilder/buildconfigfile_test.go index af267c9649..08dec4ae50 100644 --- a/pkg/configbuilder/buildconfigfile_test.go +++ b/pkg/configbuilder/buildconfigfile_test.go @@ -20,6 +20,7 @@ import ( "bytes" "testing" + "k8s.io/apimachinery/pkg/api/resource" "k8s.io/kops/pkg/apis/kops" ) @@ -29,9 +30,10 @@ func TestParseBasic(t *testing.T) { kind: KubeSchedulerConfiguration clientConnection: kubeconfig: null - qps: 3 + qps: 3.1 `) - qps := float32(3.0) + qps, _ := resource.ParseQuantity("3.1") + s := &kops.KubeSchedulerConfig{Qps: &qps} yaml, err := BuildConfigYaml(s) @@ -40,7 +42,7 @@ clientConnection: } if !bytes.Equal(yaml, expect) { - t.Errorf("unexpected result: %v, expected: %v", expect, yaml) + t.Errorf("unexpected result: \n%s, expected: \n%s", expect, yaml) } } From d15971e240d4c72f59267cf9989cac9fa13aaaca Mon Sep 17 00:00:00 2001 From: GuyTempleton Date: Fri, 24 Jan 2020 15:56:13 +0000 Subject: [PATCH 32/42] Remove tmp volume as well as mount --- .../addons/coredns.addons.k8s.io/k8s-1.12.yaml.template | 3 --- 1 file changed, 3 deletions(-) diff --git a/upup/models/cloudup/resources/addons/coredns.addons.k8s.io/k8s-1.12.yaml.template b/upup/models/cloudup/resources/addons/coredns.addons.k8s.io/k8s-1.12.yaml.template index 8c8c20080a..e7c08d6497 100644 --- a/upup/models/cloudup/resources/addons/coredns.addons.k8s.io/k8s-1.12.yaml.template +++ b/upup/models/cloudup/resources/addons/coredns.addons.k8s.io/k8s-1.12.yaml.template @@ -214,9 +214,6 @@ spec: scheme: HTTP dnsPolicy: Default volumes: - # Workaround for 1.3.1 bug, can be removed after bumping to 1.4+. 
See: https://github.com/coredns/coredns/pull/2529 - - name: tmp - emptyDir: {} - name: config-volume configMap: name: coredns From 069e74a27fea438c19f58348166a7d163f4f9dce Mon Sep 17 00:00:00 2001 From: Roberto Rodriguez Alcala <1328701+rralcala@users.noreply.github.com> Date: Sat, 25 Jan 2020 10:07:02 -0800 Subject: [PATCH 33/42] Update pkg/configbuilder/buildconfigfile.go Co-Authored-By: John Gardiner Myers --- pkg/configbuilder/buildconfigfile.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/configbuilder/buildconfigfile.go b/pkg/configbuilder/buildconfigfile.go index 49f7191d54..3f3bf2f8b6 100644 --- a/pkg/configbuilder/buildconfigfile.go +++ b/pkg/configbuilder/buildconfigfile.go @@ -58,7 +58,7 @@ func BuildConfigYaml(options interface{}) ([]byte, error) { } tag := field.Tag.Get("configfile") if tag == "" { - klog.V(4).Infof("not writing field with no flag tag: %s", path) + klog.V(4).Infof("not writing field with no configfile tag: %s", path) // We want to descend - it could be a structure containing flags return nil } From 99c57b6d8791b3b12bace6e84b4dff988e071b37 Mon Sep 17 00:00:00 2001 From: Roberto Rodriguez Alcala <1328701+rralcala@users.noreply.github.com> Date: Sat, 25 Jan 2020 10:07:13 -0800 Subject: [PATCH 34/42] Update pkg/configbuilder/buildconfigfile.go Co-Authored-By: John Gardiner Myers --- pkg/configbuilder/buildconfigfile.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/configbuilder/buildconfigfile.go b/pkg/configbuilder/buildconfigfile.go index 3f3bf2f8b6..056beea363 100644 --- a/pkg/configbuilder/buildconfigfile.go +++ b/pkg/configbuilder/buildconfigfile.go @@ -63,7 +63,7 @@ func BuildConfigYaml(options interface{}) ([]byte, error) { return nil } if tag == "-" { - klog.V(4).Infof("skipping field with %q flag tag: %s", tag, path) + klog.V(4).Infof("skipping field with %q configfile tag: %s", tag, path) return reflectutils.SkipReflection } From f5c8e46ae243d747002a95f7ed12be48bc16a583 Mon Sep 17 00:00:00 2001 From: Roberto Rodriguez Alcala <1328701+rralcala@users.noreply.github.com> Date: Sat, 25 Jan 2020 10:07:21 -0800 Subject: [PATCH 35/42] Update pkg/configbuilder/buildconfigfile.go Co-Authored-By: John Gardiner Myers --- pkg/configbuilder/buildconfigfile.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/configbuilder/buildconfigfile.go b/pkg/configbuilder/buildconfigfile.go index 056beea363..e8986fab05 100644 --- a/pkg/configbuilder/buildconfigfile.go +++ b/pkg/configbuilder/buildconfigfile.go @@ -113,7 +113,7 @@ func BuildConfigYaml(options interface{}) ([]byte, error) { func getValueFromStruct(keyWithDots string, object *SchedulerConfig) (*reflect.Value, error) { keySlice := strings.Split(keyWithDots, ".") v := reflect.ValueOf(object) - // iterate through field names ,ignore the first name as it might be the current instance name + // iterate through field names, ignoring the first name as it might be the current instance name // you can make it recursive also if want to support types like slice,map etc along with struct for _, key := range keySlice { for v.Kind() == reflect.Ptr { From 1298d541ccec9064cd36b942a991a08baf822809 Mon Sep 17 00:00:00 2001 From: Roberto Rodriguez Alcala Date: Sat, 25 Jan 2020 12:05:35 -0800 Subject: [PATCH 36/42] Updates based on feedback --- k8s/crds/kops.k8s.io_clusters.yaml | 12 ----- nodeup/pkg/model/BUILD.bazel | 2 + nodeup/pkg/model/kube_scheduler.go | 49 ++++++++++++++----- nodeup/pkg/model/kube_scheduler_test.go | 48 ++++++++++++++++++ 
pkg/apis/kops/componentconfig.go | 4 +- pkg/apis/kops/v1alpha1/componentconfig.go | 6 --- pkg/apis/kops/v1alpha1/conversion.go | 4 ++ .../kops/v1alpha1/zz_generated.conversion.go | 18 +++---- .../kops/v1alpha1/zz_generated.deepcopy.go | 15 ------ pkg/apis/kops/v1alpha2/BUILD.bazel | 1 + pkg/apis/kops/v1alpha2/componentconfig.go | 6 --- pkg/apis/kops/v1alpha2/conversion.go | 26 ++++++++++ .../kops/v1alpha2/zz_generated.conversion.go | 18 +++---- .../kops/v1alpha2/zz_generated.deepcopy.go | 15 ------ pkg/apis/kops/zz_generated.deepcopy.go | 10 ---- pkg/configbuilder/BUILD.bazel | 5 +- pkg/configbuilder/buildconfigfile.go | 31 +++--------- pkg/configbuilder/buildconfigfile_test.go | 45 ++++++----------- 18 files changed, 155 insertions(+), 160 deletions(-) create mode 100644 nodeup/pkg/model/kube_scheduler_test.go create mode 100644 pkg/apis/kops/v1alpha2/conversion.go diff --git a/k8s/crds/kops.k8s.io_clusters.yaml b/k8s/crds/kops.k8s.io_clusters.yaml index f363349665..71f4093b3a 100644 --- a/k8s/crds/kops.k8s.io_clusters.yaml +++ b/k8s/crds/kops.k8s.io_clusters.yaml @@ -1609,11 +1609,6 @@ spec: kubeScheduler: description: KubeSchedulerConfig is the configuration for the kube-scheduler properties: - burst: - description: Burst sets the maximum qps to send to apiserver after - the burst quota is exhausted - format: int32 - type: integer featureGates: additionalProperties: type: string @@ -1623,9 +1618,6 @@ spec: image: description: Image is the docker image to use type: string - kubeConfig: - description: KubeConfig overrides the default kubeconfig path. - type: string leaderElection: description: LeaderElection defines the configuration of leader election client. @@ -1685,10 +1677,6 @@ spec: and the cloud provider as outlined: https://kubernetes.io/docs/concepts/storage/storage-limits/' format: int32 type: integer - qps: - description: Qps sets the maximum qps to send to apiserver after - the burst quota is exhausted - type: string usePolicyConfigMap: description: UsePolicyConfigMap enable setting the scheduler policy from a configmap diff --git a/nodeup/pkg/model/BUILD.bazel b/nodeup/pkg/model/BUILD.bazel index b503b2f1d7..9f786cf1c0 100644 --- a/nodeup/pkg/model/BUILD.bazel +++ b/nodeup/pkg/model/BUILD.bazel @@ -89,6 +89,7 @@ go_test( "docker_test.go", "kube_apiserver_test.go", "kube_proxy_test.go", + "kube_scheduler_test.go", "kubelet_test.go", "protokube_test.go", ], @@ -98,6 +99,7 @@ go_test( "//nodeup/pkg/distros:go_default_library", "//pkg/apis/kops:go_default_library", "//pkg/apis/nodeup:go_default_library", + "//pkg/configbuilder:go_default_library", "//pkg/flagbuilder:go_default_library", "//pkg/testutils:go_default_library", "//upup/pkg/fi:go_default_library", diff --git a/nodeup/pkg/model/kube_scheduler.go b/nodeup/pkg/model/kube_scheduler.go index 7b7ed63bec..b8cff08983 100644 --- a/nodeup/pkg/model/kube_scheduler.go +++ b/nodeup/pkg/model/kube_scheduler.go @@ -35,6 +35,21 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" ) +// ClientConnectionConfig is used by kube-scheduler to talk to the api server +type ClientConnectionConfig struct { + Burst int32 `yaml:"burst,omitempty"` + Kubeconfig *string `yaml:"kubeconfig"` + QPS *float64 `yaml:"qps,omitempty"` +} + +// SchedulerConfig is used to generate the config file +type SchedulerConfig struct { + APIVersion string `yaml:"apiVersion"` + Kind string `yaml:"kind"` + BindTimeoutSeconds *int64 `yaml:"bindTimeoutSeconds,omitempty"` + ClientConnection *ClientConnectionConfig `yaml:"clientConnection,omitempty"` +} + // 
KubeSchedulerBuilder install kube-scheduler type KubeSchedulerBuilder struct { *NodeupModelContext @@ -49,9 +64,9 @@ func (b *KubeSchedulerBuilder) Build(c *fi.ModelBuilderContext) error { if !b.IsMaster { return nil } - + useConfigFile := b.IsKubernetesGTE("1.11") { - pod, err := b.buildPod() + pod, err := b.buildPod(useConfigFile) if err != nil { return fmt.Errorf("error building kube-scheduler pod: %v", err) } @@ -81,18 +96,14 @@ func (b *KubeSchedulerBuilder) Build(c *fi.ModelBuilderContext) error { Mode: s("0400"), }) } - - if b.Cluster.Spec.KubeScheduler.KubeConfig == nil { - b.Cluster.Spec.KubeScheduler.KubeConfig = &defaultKubeConfig - } - { - config, err := configbuilder.BuildConfigYaml(b.Cluster.Spec.KubeScheduler) + if useConfigFile { + config, err := configbuilder.BuildConfigYaml(b.Cluster.Spec.KubeScheduler, NewSchedulerConfig()) if err != nil { return err } c.AddTask(&nodetasks.File{ - Path: "/var/lib/kube-scheduler/config", + Path: "/var/lib/kube-scheduler/config.yaml", Contents: fi.NewBytesResource(config), Type: nodetasks.FileType_File, Mode: s("0400"), @@ -112,16 +123,30 @@ func (b *KubeSchedulerBuilder) Build(c *fi.ModelBuilderContext) error { return nil } +// NewSchedulerConfig initializes a new kube-scheduler config file +func NewSchedulerConfig() *SchedulerConfig { + schedConfig := new(SchedulerConfig) + schedConfig.APIVersion = "kubescheduler.config.k8s.io/v1alpha1" + schedConfig.Kind = "KubeSchedulerConfiguration" + schedConfig.ClientConnection = new(ClientConnectionConfig) + schedConfig.ClientConnection.Kubeconfig = &defaultKubeConfig + return schedConfig +} + // buildPod is responsible for constructing the pod specification -func (b *KubeSchedulerBuilder) buildPod() (*v1.Pod, error) { +func (b *KubeSchedulerBuilder) buildPod(useConfigFile bool) (*v1.Pod, error) { c := b.Cluster.Spec.KubeScheduler flags, err := flagbuilder.BuildFlagsList(c) if err != nil { return nil, fmt.Errorf("error building kube-scheduler flags: %v", err) } - // Add kubeconfig flag - flags = append(flags, "--config="+"/var/lib/kube-scheduler/config") + if useConfigFile { + flags = append(flags, "--config="+"/var/lib/kube-scheduler/config.yaml") + } else { + // Add kubeconfig flag + flags = append(flags, "--config="+defaultKubeConfig) + } if c.UsePolicyConfigMap != nil { flags = append(flags, "--policy-configmap=scheduler-policy", "--policy-configmap-namespace=kube-system") diff --git a/nodeup/pkg/model/kube_scheduler_test.go b/nodeup/pkg/model/kube_scheduler_test.go new file mode 100644 index 0000000000..140da7eb81 --- /dev/null +++ b/nodeup/pkg/model/kube_scheduler_test.go @@ -0,0 +1,48 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package model + +import ( + "bytes" + "testing" + + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/kops/pkg/apis/kops" + "k8s.io/kops/pkg/configbuilder" +) + +func TestParseBasic(t *testing.T) { + expect := []byte( + `apiVersion: kubescheduler.config.k8s.io/v1alpha1 +kind: KubeSchedulerConfiguration +clientConnection: + kubeconfig: /var/lib/kube-scheduler/kubeconfig + qps: 3.1 +`) + qps, _ := resource.ParseQuantity("3.1") + + s := &kops.KubeSchedulerConfig{Qps: &qps} + + yaml, err := configbuilder.BuildConfigYaml(s, NewSchedulerConfig()) + if err != nil { + t.Errorf("unexpected error: %s", err) + } + + if !bytes.Equal(yaml, expect) { + t.Errorf("unexpected result: \n%s, expected: \n%s", yaml, expect) + } +} diff --git a/pkg/apis/kops/componentconfig.go b/pkg/apis/kops/componentconfig.go index 901ba60307..9abd3d4451 100644 --- a/pkg/apis/kops/componentconfig.go +++ b/pkg/apis/kops/componentconfig.go @@ -612,9 +612,7 @@ type KubeSchedulerConfig struct { // Qps sets the maximum qps to send to apiserver after the burst quota is exhausted Qps *resource.Quantity `json:"qps,omitempty" configfile:"ClientConnection.QPS"` // Burst sets the maximum qps to send to apiserver after the burst quota is exhausted - Burst *int32 `json:"burst,omitempty" configfile:"ClientConnection.Burst"` - // KubeConfig overrides the default kubeconfig path. - KubeConfig *string `json:"kubeConfig,omitempty" configfile:"ClientConnection.Kubeconfig"` + Burst int32 `json:"burst,omitempty" configfile:"ClientConnection.Burst"` } // LeaderElectionConfiguration defines the configuration of leader election diff --git a/pkg/apis/kops/v1alpha1/componentconfig.go b/pkg/apis/kops/v1alpha1/componentconfig.go index 958417146e..2b7fa7c587 100644 --- a/pkg/apis/kops/v1alpha1/componentconfig.go +++ b/pkg/apis/kops/v1alpha1/componentconfig.go @@ -609,12 +609,6 @@ type KubeSchedulerConfig struct { // which has been supported as far back as Kubernetes 1.7. The default depends on the version and the cloud provider // as outlined: https://kubernetes.io/docs/concepts/storage/storage-limits/ MaxPersistentVolumes *int32 `json:"maxPersistentVolumes,omitempty"` - // Qps sets the maximum qps to send to apiserver after the burst quota is exhausted - Qps *resource.Quantity `json:"qps,omitempty" configfile:"ClientConnection.QPS"` - // Burst sets the maximum qps to send to apiserver after the burst quota is exhausted - Burst *int32 `json:"burst,omitempty" configfile:"ClientConnection.Burst"` - // KubeConfig overrides the default kubeconfig path. 
- KubeConfig *string `json:"kubeConfig,omitempty" configfile:"ClientConnection.Kubeconfig"` } // LeaderElectionConfiguration defines the configuration of leader election diff --git a/pkg/apis/kops/v1alpha1/conversion.go b/pkg/apis/kops/v1alpha1/conversion.go index 69f7d549e0..5e9ec20310 100644 --- a/pkg/apis/kops/v1alpha1/conversion.go +++ b/pkg/apis/kops/v1alpha1/conversion.go @@ -38,6 +38,10 @@ func Convert_v1alpha1_BastionSpec_To_kops_BastionSpec(in *BastionSpec, out *kops return nil } +func Convert_kops_KubeSchedulerConfig_To_v1alpha1_KubeSchedulerConfig(in *kops.KubeSchedulerConfig, out *KubeSchedulerConfig, s conversion.Scope) error { + return autoConvert_kops_KubeSchedulerConfig_To_v1alpha1_KubeSchedulerConfig(in, out, s) +} + func Convert_kops_BastionSpec_To_v1alpha1_BastionSpec(in *kops.BastionSpec, out *BastionSpec, s conversion.Scope) error { out.PublicName = in.BastionPublicName out.IdleTimeout = in.IdleTimeoutSeconds diff --git a/pkg/apis/kops/v1alpha1/zz_generated.conversion.go b/pkg/apis/kops/v1alpha1/zz_generated.conversion.go index d287172d9e..c8ae1505f8 100644 --- a/pkg/apis/kops/v1alpha1/zz_generated.conversion.go +++ b/pkg/apis/kops/v1alpha1/zz_generated.conversion.go @@ -813,6 +813,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*kops.KubeSchedulerConfig)(nil), (*KubeSchedulerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_kops_KubeSchedulerConfig_To_v1alpha1_KubeSchedulerConfig(a.(*kops.KubeSchedulerConfig), b.(*KubeSchedulerConfig), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*kops.TopologySpec)(nil), (*TopologySpec)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_kops_TopologySpec_To_v1alpha1_TopologySpec(a.(*kops.TopologySpec), b.(*TopologySpec), scope) }); err != nil { @@ -3656,9 +3661,6 @@ func autoConvert_v1alpha1_KubeSchedulerConfig_To_kops_KubeSchedulerConfig(in *Ku out.UsePolicyConfigMap = in.UsePolicyConfigMap out.FeatureGates = in.FeatureGates out.MaxPersistentVolumes = in.MaxPersistentVolumes - out.Qps = in.Qps - out.Burst = in.Burst - out.KubeConfig = in.KubeConfig return nil } @@ -3683,17 +3685,11 @@ func autoConvert_kops_KubeSchedulerConfig_To_v1alpha1_KubeSchedulerConfig(in *ko out.UsePolicyConfigMap = in.UsePolicyConfigMap out.FeatureGates = in.FeatureGates out.MaxPersistentVolumes = in.MaxPersistentVolumes - out.Qps = in.Qps - out.Burst = in.Burst - out.KubeConfig = in.KubeConfig + // WARNING: in.Qps requires manual conversion: does not exist in peer-type + // WARNING: in.Burst requires manual conversion: does not exist in peer-type return nil } -// Convert_kops_KubeSchedulerConfig_To_v1alpha1_KubeSchedulerConfig is an autogenerated conversion function. 
-func Convert_kops_KubeSchedulerConfig_To_v1alpha1_KubeSchedulerConfig(in *kops.KubeSchedulerConfig, out *KubeSchedulerConfig, s conversion.Scope) error { - return autoConvert_kops_KubeSchedulerConfig_To_v1alpha1_KubeSchedulerConfig(in, out, s) -} - func autoConvert_v1alpha1_KubeletConfigSpec_To_kops_KubeletConfigSpec(in *KubeletConfigSpec, out *kops.KubeletConfigSpec, s conversion.Scope) error { out.APIServers = in.APIServers out.AnonymousAuth = in.AnonymousAuth diff --git a/pkg/apis/kops/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/kops/v1alpha1/zz_generated.deepcopy.go index 8cfcfb319b..c27185539a 100644 --- a/pkg/apis/kops/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/kops/v1alpha1/zz_generated.deepcopy.go @@ -2415,21 +2415,6 @@ func (in *KubeSchedulerConfig) DeepCopyInto(out *KubeSchedulerConfig) { *out = new(int32) **out = **in } - if in.Qps != nil { - in, out := &in.Qps, &out.Qps - x := (*in).DeepCopy() - *out = &x - } - if in.Burst != nil { - in, out := &in.Burst, &out.Burst - *out = new(int32) - **out = **in - } - if in.KubeConfig != nil { - in, out := &in.KubeConfig, &out.KubeConfig - *out = new(string) - **out = **in - } return } diff --git a/pkg/apis/kops/v1alpha2/BUILD.bazel b/pkg/apis/kops/v1alpha2/BUILD.bazel index 21341d46a6..d6651713ce 100644 --- a/pkg/apis/kops/v1alpha2/BUILD.bazel +++ b/pkg/apis/kops/v1alpha2/BUILD.bazel @@ -7,6 +7,7 @@ go_library( "cluster.go", "componentconfig.go", "containerdconfig.go", + "conversion.go", "defaults.go", "doc.go", "dockerconfig.go", diff --git a/pkg/apis/kops/v1alpha2/componentconfig.go b/pkg/apis/kops/v1alpha2/componentconfig.go index 77931026d2..25ef37748a 100644 --- a/pkg/apis/kops/v1alpha2/componentconfig.go +++ b/pkg/apis/kops/v1alpha2/componentconfig.go @@ -610,12 +610,6 @@ type KubeSchedulerConfig struct { // which has been supported as far back as Kubernetes 1.7. The default depends on the version and the cloud provider // as outlined: https://kubernetes.io/docs/concepts/storage/storage-limits/ MaxPersistentVolumes *int32 `json:"maxPersistentVolumes,omitempty"` - // Qps sets the maximum qps to send to apiserver after the burst quota is exhausted - Qps *resource.Quantity `json:"qps,omitempty" configfile:"ClientConnection.QPS"` - // Burst sets the maximum qps to send to apiserver after the burst quota is exhausted - Burst *int32 `json:"burst,omitempty" configfile:"ClientConnection.Burst"` - // KubeConfig overrides the default kubeconfig path. - KubeConfig *string `json:"kubeConfig,omitempty" configfile:"ClientConnection.Kubeconfig"` } // LeaderElectionConfiguration defines the configuration of leader election diff --git a/pkg/apis/kops/v1alpha2/conversion.go b/pkg/apis/kops/v1alpha2/conversion.go new file mode 100644 index 0000000000..9efc23e516 --- /dev/null +++ b/pkg/apis/kops/v1alpha2/conversion.go @@ -0,0 +1,26 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha2 + +import ( + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/kops/pkg/apis/kops" +) + +func Convert_kops_KubeSchedulerConfig_To_v1alpha2_KubeSchedulerConfig(in *kops.KubeSchedulerConfig, out *KubeSchedulerConfig, s conversion.Scope) error { + return autoConvert_kops_KubeSchedulerConfig_To_v1alpha2_KubeSchedulerConfig(in, out, s) +} diff --git a/pkg/apis/kops/v1alpha2/zz_generated.conversion.go b/pkg/apis/kops/v1alpha2/zz_generated.conversion.go index 44d08a6371..f80210bbf2 100644 --- a/pkg/apis/kops/v1alpha2/zz_generated.conversion.go +++ b/pkg/apis/kops/v1alpha2/zz_generated.conversion.go @@ -863,6 +863,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*kops.KubeSchedulerConfig)(nil), (*KubeSchedulerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_kops_KubeSchedulerConfig_To_v1alpha2_KubeSchedulerConfig(a.(*kops.KubeSchedulerConfig), b.(*KubeSchedulerConfig), scope) + }); err != nil { + return err + } return nil } @@ -3926,9 +3931,6 @@ func autoConvert_v1alpha2_KubeSchedulerConfig_To_kops_KubeSchedulerConfig(in *Ku out.UsePolicyConfigMap = in.UsePolicyConfigMap out.FeatureGates = in.FeatureGates out.MaxPersistentVolumes = in.MaxPersistentVolumes - out.Qps = in.Qps - out.Burst = in.Burst - out.KubeConfig = in.KubeConfig return nil } @@ -3953,17 +3955,11 @@ func autoConvert_kops_KubeSchedulerConfig_To_v1alpha2_KubeSchedulerConfig(in *ko out.UsePolicyConfigMap = in.UsePolicyConfigMap out.FeatureGates = in.FeatureGates out.MaxPersistentVolumes = in.MaxPersistentVolumes - out.Qps = in.Qps - out.Burst = in.Burst - out.KubeConfig = in.KubeConfig + // WARNING: in.Qps requires manual conversion: does not exist in peer-type + // WARNING: in.Burst requires manual conversion: does not exist in peer-type return nil } -// Convert_kops_KubeSchedulerConfig_To_v1alpha2_KubeSchedulerConfig is an autogenerated conversion function. 
-func Convert_kops_KubeSchedulerConfig_To_v1alpha2_KubeSchedulerConfig(in *kops.KubeSchedulerConfig, out *KubeSchedulerConfig, s conversion.Scope) error { - return autoConvert_kops_KubeSchedulerConfig_To_v1alpha2_KubeSchedulerConfig(in, out, s) -} - func autoConvert_v1alpha2_KubeletConfigSpec_To_kops_KubeletConfigSpec(in *KubeletConfigSpec, out *kops.KubeletConfigSpec, s conversion.Scope) error { out.APIServers = in.APIServers out.AnonymousAuth = in.AnonymousAuth diff --git a/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go b/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go index b20569e95b..2994fda089 100644 --- a/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go +++ b/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go @@ -2486,21 +2486,6 @@ func (in *KubeSchedulerConfig) DeepCopyInto(out *KubeSchedulerConfig) { *out = new(int32) **out = **in } - if in.Qps != nil { - in, out := &in.Qps, &out.Qps - x := (*in).DeepCopy() - *out = &x - } - if in.Burst != nil { - in, out := &in.Burst, &out.Burst - *out = new(int32) - **out = **in - } - if in.KubeConfig != nil { - in, out := &in.KubeConfig, &out.KubeConfig - *out = new(string) - **out = **in - } return } diff --git a/pkg/apis/kops/zz_generated.deepcopy.go b/pkg/apis/kops/zz_generated.deepcopy.go index d304a46484..916c6c4c76 100644 --- a/pkg/apis/kops/zz_generated.deepcopy.go +++ b/pkg/apis/kops/zz_generated.deepcopy.go @@ -2673,16 +2673,6 @@ func (in *KubeSchedulerConfig) DeepCopyInto(out *KubeSchedulerConfig) { x := (*in).DeepCopy() *out = &x } - if in.Burst != nil { - in, out := &in.Burst, &out.Burst - *out = new(int32) - **out = **in - } - if in.KubeConfig != nil { - in, out := &in.KubeConfig, &out.KubeConfig - *out = new(string) - **out = **in - } return } diff --git a/pkg/configbuilder/BUILD.bazel b/pkg/configbuilder/BUILD.bazel index 534fbbea45..9c18771803 100644 --- a/pkg/configbuilder/BUILD.bazel +++ b/pkg/configbuilder/BUILD.bazel @@ -6,6 +6,7 @@ go_library( importpath = "k8s.io/kops/pkg/configbuilder", visibility = ["//visibility:public"], deps = [ + "//pkg/apis/kops:go_default_library", "//util/pkg/reflectutils:go_default_library", "//vendor/gopkg.in/yaml.v2:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", @@ -17,8 +18,4 @@ go_test( name = "go_default_test", srcs = ["buildconfigfile_test.go"], embed = [":go_default_library"], - deps = [ - "//pkg/apis/kops:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - ], ) diff --git a/pkg/configbuilder/buildconfigfile.go b/pkg/configbuilder/buildconfigfile.go index e8986fab05..f65d47bb85 100644 --- a/pkg/configbuilder/buildconfigfile.go +++ b/pkg/configbuilder/buildconfigfile.go @@ -18,6 +18,7 @@ package configbuilder import ( "fmt" + "reflect" "strconv" "strings" @@ -25,32 +26,12 @@ import ( "gopkg.in/yaml.v2" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/klog" + "k8s.io/kops/pkg/apis/kops" "k8s.io/kops/util/pkg/reflectutils" ) -// ClientConnectionConfig is used by kube-scheduler to talk to the api server -type ClientConnectionConfig struct { - Burst *int32 `yaml:"burst,omitempty"` - Kubeconfig *string `yaml:"kubeconfig"` - QPS *float64 `yaml:"qps,omitempty"` -} - -// SchedulerConfig is used to generate the config file -type SchedulerConfig struct { - APIVersion string `yaml:"apiVersion"` - Kind string `yaml:"kind"` - BindTimeoutSeconds *int64 `yaml:"bindTimeoutSeconds,omitempty"` - ClientConnection *ClientConnectionConfig `yaml:"clientConnection,omitempty"` -} - // BuildConfigYaml reflects the options interface and 
extracts the parameters for the config file -func BuildConfigYaml(options interface{}) ([]byte, error) { - - schedConfig := new(SchedulerConfig) - schedConfig.APIVersion = "kubescheduler.config.k8s.io/v1alpha1" - schedConfig.Kind = "KubeSchedulerConfiguration" - schedConfig.ClientConnection = new(ClientConnectionConfig) - +func BuildConfigYaml(options *kops.KubeSchedulerConfig, target interface{}) ([]byte, error) { walker := func(path string, field *reflect.StructField, val reflect.Value) error { if field == nil { klog.V(8).Infof("ignoring non-field: %s", path) @@ -71,7 +52,7 @@ func BuildConfigYaml(options interface{}) ([]byte, error) { flagName := tokens[0] - targetValue, error := getValueFromStruct(flagName, schedConfig) + targetValue, error := getValueFromStruct(flagName, target) if error != nil { return fmt.Errorf("conversion error for field %s: %s", flagName, error) } @@ -102,7 +83,7 @@ func BuildConfigYaml(options interface{}) ([]byte, error) { return nil, fmt.Errorf("BuildFlagsList to reflect value: %s", err) } - configFile, err := yaml.Marshal(schedConfig) + configFile, err := yaml.Marshal(target) if err != nil { return nil, err } @@ -110,7 +91,7 @@ func BuildConfigYaml(options interface{}) ([]byte, error) { return configFile, nil } -func getValueFromStruct(keyWithDots string, object *SchedulerConfig) (*reflect.Value, error) { +func getValueFromStruct(keyWithDots string, object interface{}) (*reflect.Value, error) { keySlice := strings.Split(keyWithDots, ".") v := reflect.ValueOf(object) // iterate through field names, ignoring the first name as it might be the current instance name diff --git a/pkg/configbuilder/buildconfigfile_test.go b/pkg/configbuilder/buildconfigfile_test.go index 08dec4ae50..ff23af100f 100644 --- a/pkg/configbuilder/buildconfigfile_test.go +++ b/pkg/configbuilder/buildconfigfile_test.go @@ -17,43 +17,28 @@ limitations under the License. 
package configbuilder import ( - "bytes" "testing" - - "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/kops/pkg/apis/kops" ) -func TestParseBasic(t *testing.T) { - expect := []byte( - `apiVersion: kubescheduler.config.k8s.io/v1alpha1 -kind: KubeSchedulerConfiguration -clientConnection: - kubeconfig: null - qps: 3.1 -`) - qps, _ := resource.ParseQuantity("3.1") +// ClientConnectionConfig is used by kube-scheduler to talk to the api server +type DummyNestedStruct struct { + Name *string `yaml:"name,omitempty"` + QPS *float64 `yaml:"qps,omitempty"` +} - s := &kops.KubeSchedulerConfig{Qps: &qps} - - yaml, err := BuildConfigYaml(s) - if err != nil { - t.Errorf("unexpected error: %s", err) - } - - if !bytes.Equal(yaml, expect) { - t.Errorf("unexpected result: \n%s, expected: \n%s", expect, yaml) - } +// SchedulerConfig is used to generate the config file +type DummyStruct struct { + ClientConnection *DummyNestedStruct `yaml:"clientConnection,omitempty"` } func TestGetStructVal(t *testing.T) { str := "test" - s := &SchedulerConfig{ - ClientConnection: &ClientConnectionConfig{ - Kubeconfig: &str, + s := &DummyStruct{ + ClientConnection: &DummyNestedStruct{ + Name: &str, }, } - v, err := getValueFromStruct("ClientConnection.Kubeconfig", s) + v, err := getValueFromStruct("ClientConnection.Name", s) if err != nil { t.Errorf("unexpected error: %s", err) } @@ -66,9 +51,9 @@ func TestGetStructVal(t *testing.T) { func TestWrongStructField(t *testing.T) { str := "test" - s := &SchedulerConfig{ - ClientConnection: &ClientConnectionConfig{ - Kubeconfig: &str, + s := &DummyStruct{ + ClientConnection: &DummyNestedStruct{ + Name: &str, }, } v, err := getValueFromStruct("ClientConnection.NotExistent", s) From d56ad413343d26f0047bb908eb72424b6c91fcfe Mon Sep 17 00:00:00 2001 From: John Gardiner Myers Date: Sun, 26 Jan 2020 10:46:03 -0800 Subject: [PATCH 37/42] Address review comments --- pkg/instancegroups/instancegroups.go | 51 ++++++++++++++-------------- 1 file changed, 26 insertions(+), 25 deletions(-) diff --git a/pkg/instancegroups/instancegroups.go b/pkg/instancegroups/instancegroups.go index ae9b1317e7..26b695f8fc 100644 --- a/pkg/instancegroups/instancegroups.go +++ b/pkg/instancegroups/instancegroups.go @@ -151,7 +151,7 @@ func (r *RollingUpdateInstanceGroup) RollingUpdate(rollingUpdateData *RollingUpd settings := resolveSettings(cluster, r.CloudGroup.InstanceGroup, numInstances) - concurrency := 0 + runningDrains := 0 maxConcurrency := settings.MaxUnavailable.IntValue() if maxConcurrency == 0 { @@ -166,24 +166,26 @@ func (r *RollingUpdateInstanceGroup) RollingUpdate(rollingUpdateData *RollingUpd terminateChan := make(chan error, maxConcurrency) for uIdx, u := range update { - go r.drainTerminateAndWait(u, rollingUpdateData, terminateChan, isBastion, sleepAfterTerminate) - concurrency++ + go func(m *cloudinstances.CloudInstanceGroupMember) { + terminateChan <- r.drainTerminateAndWait(m, rollingUpdateData, isBastion, sleepAfterTerminate) + }(u) + runningDrains++ // Wait until after one node is deleted and its replacement validates before the concurrent draining // in case the current spec does not result in usable nodes. 
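		// (so when no nodes were initially ready, the first replacement must validate before any further drains start)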
- if concurrency < maxConcurrency && (!noneReady || uIdx > 0) { + if runningDrains < maxConcurrency && (!noneReady || uIdx > 0) { continue } err = <-terminateChan - concurrency-- + runningDrains-- if err != nil { - return waitForPendingBeforeReturningError(concurrency, terminateChan, err) + return waitForPendingBeforeReturningError(runningDrains, terminateChan, err) } err = r.maybeValidate(rollingUpdateData, validationTimeout) if err != nil { - return waitForPendingBeforeReturningError(concurrency, terminateChan, err) + return waitForPendingBeforeReturningError(runningDrains, terminateChan, err) } if rollingUpdateData.Interactive { @@ -202,13 +204,15 @@ func (r *RollingUpdateInstanceGroup) RollingUpdate(rollingUpdateData *RollingUpd } } + // Validation tends to return failures from the start of drain until the replacement is + // fully ready, so sweep up as many completions as we can before starting the next drain. sweep: - for concurrency > 0 { + for runningDrains > 0 { select { case err = <-terminateChan: - concurrency-- + runningDrains-- if err != nil { - return waitForPendingBeforeReturningError(concurrency, terminateChan, err) + return waitForPendingBeforeReturningError(runningDrains, terminateChan, err) } default: break sweep @@ -216,12 +220,12 @@ func (r *RollingUpdateInstanceGroup) RollingUpdate(rollingUpdateData *RollingUpd } } - if concurrency > 0 { - for concurrency > 0 { + if runningDrains > 0 { + for runningDrains > 0 { err = <-terminateChan - concurrency-- + runningDrains-- if err != nil { - return waitForPendingBeforeReturningError(concurrency, terminateChan, err) + return waitForPendingBeforeReturningError(runningDrains, terminateChan, err) } } @@ -234,10 +238,10 @@ func (r *RollingUpdateInstanceGroup) RollingUpdate(rollingUpdateData *RollingUpd return nil } -func waitForPendingBeforeReturningError(concurrency int, terminateChan chan error, err error) error { - for concurrency > 0 { +func waitForPendingBeforeReturningError(runningDrains int, terminateChan chan error, err error) error { + for runningDrains > 0 { <-terminateChan - concurrency-- + runningDrains-- } return err } @@ -300,7 +304,7 @@ func (r *RollingUpdateInstanceGroup) patchTaint(rollingUpdateData *RollingUpdate return err } -func (r *RollingUpdateInstanceGroup) drainTerminateAndWait(u *cloudinstances.CloudInstanceGroupMember, rollingUpdateData *RollingUpdateCluster, terminateChan chan error, isBastion bool, sleepAfterTerminate time.Duration) { +func (r *RollingUpdateInstanceGroup) drainTerminateAndWait(u *cloudinstances.CloudInstanceGroupMember, rollingUpdateData *RollingUpdateCluster, isBastion bool, sleepAfterTerminate time.Duration) error { instanceId := u.ID nodeName := "" @@ -321,8 +325,7 @@ func (r *RollingUpdateInstanceGroup) drainTerminateAndWait(u *cloudinstances.Clo if err := r.DrainNode(u, rollingUpdateData); err != nil { if rollingUpdateData.FailOnDrainError { - terminateChan <- fmt.Errorf("failed to drain node %q: %v", nodeName, err) - return + return fmt.Errorf("failed to drain node %q: %v", nodeName, err) } klog.Infof("Ignoring error draining node %q: %v", nodeName, err) } @@ -339,23 +342,21 @@ func (r *RollingUpdateInstanceGroup) drainTerminateAndWait(u *cloudinstances.Clo } else { klog.Infof("deleting node %q from kubernetes", nodeName) if err := r.deleteNode(u.Node, rollingUpdateData); err != nil { - terminateChan <- fmt.Errorf("error deleting node %q: %v", nodeName, err) - return + return fmt.Errorf("error deleting node %q: %v", nodeName, err) } } } if err := r.DeleteInstance(u); err != nil { 
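			// log here, then return the error; RollingUpdate receives it from terminateChan and waits out any drains still in flight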
klog.Errorf("error deleting instance %q, node %q: %v", instanceId, nodeName, err) - terminateChan <- err - return + return err } // Wait for the minimum interval klog.Infof("waiting for %v after terminating instance", sleepAfterTerminate) time.Sleep(sleepAfterTerminate) - terminateChan <- nil + return nil } func (r *RollingUpdateInstanceGroup) maybeValidate(rollingUpdateData *RollingUpdateCluster, validationTimeout time.Duration) error { From dcdf08c853fc9bf43b97ec335b36de509706acaa Mon Sep 17 00:00:00 2001 From: John Gardiner Myers Date: Sun, 26 Jan 2020 12:29:07 -0800 Subject: [PATCH 38/42] Add validation of RollingUpdate field --- pkg/apis/kops/validation/BUILD.bazel | 2 + pkg/apis/kops/validation/instancegroup.go | 6 +++ pkg/apis/kops/validation/validation.go | 21 +++++++++ pkg/apis/kops/validation/validation_test.go | 48 +++++++++++++++++++++ 4 files changed, 77 insertions(+) diff --git a/pkg/apis/kops/validation/BUILD.bazel b/pkg/apis/kops/validation/BUILD.bazel index 5686375814..13c1dccd5e 100644 --- a/pkg/apis/kops/validation/BUILD.bazel +++ b/pkg/apis/kops/validation/BUILD.bazel @@ -26,6 +26,7 @@ go_library( "//vendor/github.com/aws/aws-sdk-go/aws/arn:go_default_library", "//vendor/github.com/blang/semver:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/validation:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", @@ -46,6 +47,7 @@ go_test( "//pkg/apis/kops:go_default_library", "//upup/pkg/fi:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", diff --git a/pkg/apis/kops/validation/instancegroup.go b/pkg/apis/kops/validation/instancegroup.go index e61a25a9b6..67efee3ef0 100644 --- a/pkg/apis/kops/validation/instancegroup.go +++ b/pkg/apis/kops/validation/instancegroup.go @@ -133,6 +133,12 @@ func ValidateInstanceGroup(g *kops.InstanceGroup) error { return err } + if g.Spec.RollingUpdate != nil { + if errs := validateRollingUpdate(g.Spec.RollingUpdate, field.NewPath("rollingUpdate")); len(errs) > 0 { + return errs.ToAggregate() + } + } + return nil } diff --git a/pkg/apis/kops/validation/validation.go b/pkg/apis/kops/validation/validation.go index 6e03dfe9ac..a338f80b0c 100644 --- a/pkg/apis/kops/validation/validation.go +++ b/pkg/apis/kops/validation/validation.go @@ -22,6 +22,7 @@ import ( "strings" "github.com/blang/semver" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/api/validation" utilnet "k8s.io/apimachinery/pkg/util/net" @@ -121,6 +122,10 @@ func validateClusterSpec(spec *kops.ClusterSpec, fieldPath *field.Path) field.Er allErrs = append(allErrs, validateContainerRuntime(&spec.ContainerRuntime, fieldPath.Child("containerRuntime"))...) } + if spec.RollingUpdate != nil { + allErrs = append(allErrs, validateRollingUpdate(spec.RollingUpdate, fieldPath.Child("rollingUpdate"))...) 
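+		// the same helper backs the per-InstanceGroup rollingUpdate validation added in instancegroup.go above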
+ } + return allErrs } @@ -431,3 +436,19 @@ func validateContainerRuntime(runtime *string, fldPath *field.Path) field.ErrorL return allErrs } + +func validateRollingUpdate(rollingUpdate *kops.RollingUpdate, fldpath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if rollingUpdate.MaxUnavailable != nil { + unavailable, err := intstr.GetValueFromIntOrPercent(rollingUpdate.MaxUnavailable, 1, false) + if err != nil { + allErrs = append(allErrs, field.Invalid(fldpath.Child("MaxUnavailable"), rollingUpdate.MaxUnavailable, + fmt.Sprintf("Unable to parse: %v", err))) + } + if unavailable < 0 { + allErrs = append(allErrs, field.Invalid(fldpath.Child("MaxUnavailable"), rollingUpdate.MaxUnavailable, "Cannot be negative")) + } + } + + return allErrs +} diff --git a/pkg/apis/kops/validation/validation_test.go b/pkg/apis/kops/validation/validation_test.go index 61cb46fdbb..cbf28d3242 100644 --- a/pkg/apis/kops/validation/validation_test.go +++ b/pkg/apis/kops/validation/validation_test.go @@ -19,6 +19,7 @@ package validation import ( "testing" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation" "k8s.io/apimachinery/pkg/util/validation/field" @@ -384,3 +385,50 @@ func Test_Validate_Calico(t *testing.T) { testErrors(t, g.Input, errs, g.ExpectedErrors) } } + +func Test_Validate_RollingUpdate(t *testing.T) { + grid := []struct { + Input kops.RollingUpdate + ExpectedErrors []string + }{ + { + Input: kops.RollingUpdate{}, + }, + { + Input: kops.RollingUpdate{ + MaxUnavailable: intStr(intstr.FromInt(0)), + }, + }, + { + Input: kops.RollingUpdate{ + MaxUnavailable: intStr(intstr.FromString("0%")), + }, + }, + { + Input: kops.RollingUpdate{ + MaxUnavailable: intStr(intstr.FromString("nope")), + }, + ExpectedErrors: []string{"Invalid value::TestField.MaxUnavailable"}, + }, + { + Input: kops.RollingUpdate{ + MaxUnavailable: intStr(intstr.FromInt(-1)), + }, + ExpectedErrors: []string{"Invalid value::TestField.MaxUnavailable"}, + }, + { + Input: kops.RollingUpdate{ + MaxUnavailable: intStr(intstr.FromString("-1%")), + }, + ExpectedErrors: []string{"Invalid value::TestField.MaxUnavailable"}, + }, + } + for _, g := range grid { + errs := validateRollingUpdate(&g.Input, field.NewPath("TestField")) + testErrors(t, g.Input, errs, g.ExpectedErrors) + } +} + +func intStr(i intstr.IntOrString) *intstr.IntOrString { + return &i +} From 03af6b80b5262472c61fd35f7c4792fe687cdf7a Mon Sep 17 00:00:00 2001 From: Roberto Rodriguez Alcala Date: Mon, 27 Jan 2020 10:29:28 -0800 Subject: [PATCH 39/42] Adds test and fixes incorrect flag --- k8s/crds/kops.k8s.io_clusters.yaml | 9 ++++++ nodeup/pkg/model/kube_scheduler.go | 17 +++++------ nodeup/pkg/model/kube_scheduler_test.go | 29 ++++++++++++++++--- pkg/apis/kops/v1alpha1/componentconfig.go | 4 +++ pkg/apis/kops/v1alpha1/conversion.go | 4 --- .../kops/v1alpha1/zz_generated.conversion.go | 16 +++++----- .../kops/v1alpha1/zz_generated.deepcopy.go | 5 ++++ pkg/apis/kops/v1alpha2/BUILD.bazel | 1 - pkg/apis/kops/v1alpha2/componentconfig.go | 4 +++ pkg/apis/kops/v1alpha2/conversion.go | 26 ----------------- .../kops/v1alpha2/zz_generated.conversion.go | 16 +++++----- .../kops/v1alpha2/zz_generated.deepcopy.go | 5 ++++ 12 files changed, 78 insertions(+), 58 deletions(-) delete mode 100644 pkg/apis/kops/v1alpha2/conversion.go diff --git a/k8s/crds/kops.k8s.io_clusters.yaml b/k8s/crds/kops.k8s.io_clusters.yaml index 71f4093b3a..49bea005dc 100644 --- a/k8s/crds/kops.k8s.io_clusters.yaml +++ 
b/k8s/crds/kops.k8s.io_clusters.yaml
@@ -1609,6 +1609,11 @@ spec:
               kubeScheduler:
                 description: KubeSchedulerConfig is the configuration for the kube-scheduler
                 properties:
+                  burst:
+                    description: Burst allows extra queries to accumulate when a client
+                      is exceeding its rate
+                    format: int32
+                    type: integer
                   featureGates:
                     additionalProperties:
                       type: string
@@ -1677,6 +1682,10 @@ spec:
                       and the cloud provider as outlined: https://kubernetes.io/docs/concepts/storage/storage-limits/'
                     format: int32
                     type: integer
+                  qps:
+                    description: Qps sets the maximum qps to send to apiserver after
+                      the burst quota is exhausted
+                    type: string
                   usePolicyConfigMap:
                     description: UsePolicyConfigMap enable setting the scheduler policy
                       from a configmap
diff --git a/nodeup/pkg/model/kube_scheduler.go b/nodeup/pkg/model/kube_scheduler.go
index b8cff08983..2c7c454bfa 100644
--- a/nodeup/pkg/model/kube_scheduler.go
+++ b/nodeup/pkg/model/kube_scheduler.go
@@ -38,16 +38,15 @@ import (
 
 // ClientConnectionConfig is used by kube-scheduler to talk to the api server
 type ClientConnectionConfig struct {
 	Burst      int32    `yaml:"burst,omitempty"`
-	Kubeconfig *string  `yaml:"kubeconfig"`
+	Kubeconfig string   `yaml:"kubeconfig"`
 	QPS        *float64 `yaml:"qps,omitempty"`
 }
 
 // SchedulerConfig is used to generate the config file
 type SchedulerConfig struct {
-	APIVersion         string                  `yaml:"apiVersion"`
-	Kind               string                  `yaml:"kind"`
-	BindTimeoutSeconds *int64                  `yaml:"bindTimeoutSeconds,omitempty"`
-	ClientConnection   *ClientConnectionConfig `yaml:"clientConnection,omitempty"`
+	APIVersion       string                 `yaml:"apiVersion"`
+	Kind             string                 `yaml:"kind"`
+	ClientConnection ClientConnectionConfig `yaml:"clientConnection,omitempty"`
 }
 
 // KubeSchedulerBuilder install kube-scheduler
@@ -57,7 +56,7 @@ type KubeSchedulerBuilder struct {
 
 var _ fi.ModelBuilder = &KubeSchedulerBuilder{}
 
-var defaultKubeConfig = "/var/lib/kube-scheduler/kubeconfig"
+const defaultKubeConfig = "/var/lib/kube-scheduler/kubeconfig"
 
 // Build is responsible for building the manifest for the kube-scheduler
 func (b *KubeSchedulerBuilder) Build(c *fi.ModelBuilderContext) error {
@@ -128,8 +127,8 @@ func NewSchedulerConfig() *SchedulerConfig {
 	schedConfig := new(SchedulerConfig)
 	schedConfig.APIVersion = "kubescheduler.config.k8s.io/v1alpha1"
 	schedConfig.Kind = "KubeSchedulerConfiguration"
-	schedConfig.ClientConnection = new(ClientConnectionConfig)
-	schedConfig.ClientConnection.Kubeconfig = &defaultKubeConfig
+	schedConfig.ClientConnection = ClientConnectionConfig{}
+	schedConfig.ClientConnection.Kubeconfig = defaultKubeConfig
 
 	return schedConfig
 }
@@ -145,7 +144,7 @@ func (b *KubeSchedulerBuilder) buildPod(useConfigFile bool) (*v1.Pod, error) {
 		flags = append(flags, "--config="+"/var/lib/kube-scheduler/config.yaml")
 	} else {
 		// Add kubeconfig flag
-		flags = append(flags, "--config="+defaultKubeConfig)
+		flags = append(flags, "--kubeconfig="+defaultKubeConfig)
 	}
 
 	if c.UsePolicyConfigMap != nil {
diff --git a/nodeup/pkg/model/kube_scheduler_test.go b/nodeup/pkg/model/kube_scheduler_test.go
index 140da7eb81..d52936462d 100644
--- a/nodeup/pkg/model/kube_scheduler_test.go
+++ b/nodeup/pkg/model/kube_scheduler_test.go
@@ -25,17 +25,38 @@ import (
 	"k8s.io/kops/pkg/configbuilder"
 )
 
-func TestParseBasic(t *testing.T) {
+func TestParseDefault(t *testing.T) {
 	expect := []byte(
 		`apiVersion: kubescheduler.config.k8s.io/v1alpha1
 kind: KubeSchedulerConfiguration
 clientConnection:
   kubeconfig: /var/lib/kube-scheduler/kubeconfig
-  qps: 3.1
 `)
-	qps, _ := resource.ParseQuantity("3.1")
-	s := &kops.KubeSchedulerConfig{Qps: &qps}
+	s := &kops.KubeSchedulerConfig{}
+
+	yaml, err := configbuilder.BuildConfigYaml(s, NewSchedulerConfig())
+	if err != nil {
+		t.Errorf("unexpected error: %s", err)
+	}
+
+	if !bytes.Equal(yaml, expect) {
+		t.Errorf("unexpected result: \n%s, expected: \n%s", yaml, expect)
+	}
+}
+
+func TestParse(t *testing.T) {
+	expect := []byte(
+		`apiVersion: kubescheduler.config.k8s.io/v1alpha1
+kind: KubeSchedulerConfiguration
+clientConnection:
+  burst: 100
+  kubeconfig: /var/lib/kube-scheduler/kubeconfig
+  qps: 3.1
+`)
+	qps, _ := resource.ParseQuantity("3.1")
+
+	s := &kops.KubeSchedulerConfig{Qps: &qps, Burst: 100}
 
 	yaml, err := configbuilder.BuildConfigYaml(s, NewSchedulerConfig())
 	if err != nil {
diff --git a/pkg/apis/kops/v1alpha1/componentconfig.go b/pkg/apis/kops/v1alpha1/componentconfig.go
index 2b7fa7c587..dfcb28fe21 100644
--- a/pkg/apis/kops/v1alpha1/componentconfig.go
+++ b/pkg/apis/kops/v1alpha1/componentconfig.go
@@ -609,6 +609,10 @@ type KubeSchedulerConfig struct {
 	// which has been supported as far back as Kubernetes 1.7. The default depends on the version and the cloud provider
 	// as outlined: https://kubernetes.io/docs/concepts/storage/storage-limits/
 	MaxPersistentVolumes *int32 `json:"maxPersistentVolumes,omitempty"`
+	// Qps sets the maximum qps to send to apiserver after the burst quota is exhausted
+	Qps *resource.Quantity `json:"qps,omitempty" configfile:"ClientConnection.QPS"`
+	// Burst allows extra queries to accumulate when a client is exceeding its rate
+	Burst int32 `json:"burst,omitempty" configfile:"ClientConnection.Burst"`
 }
 
 // LeaderElectionConfiguration defines the configuration of leader election
diff --git a/pkg/apis/kops/v1alpha1/conversion.go b/pkg/apis/kops/v1alpha1/conversion.go
index 5e9ec20310..69f7d549e0 100644
--- a/pkg/apis/kops/v1alpha1/conversion.go
+++ b/pkg/apis/kops/v1alpha1/conversion.go
@@ -38,10 +38,6 @@ func Convert_v1alpha1_BastionSpec_To_kops_BastionSpec(in *BastionSpec, out *kops
 	return nil
 }
 
-func Convert_kops_KubeSchedulerConfig_To_v1alpha1_KubeSchedulerConfig(in *kops.KubeSchedulerConfig, out *KubeSchedulerConfig, s conversion.Scope) error {
-	return autoConvert_kops_KubeSchedulerConfig_To_v1alpha1_KubeSchedulerConfig(in, out, s)
-}
-
 func Convert_kops_BastionSpec_To_v1alpha1_BastionSpec(in *kops.BastionSpec, out *BastionSpec, s conversion.Scope) error {
 	out.PublicName = in.BastionPublicName
 	out.IdleTimeout = in.IdleTimeoutSeconds
diff --git a/pkg/apis/kops/v1alpha1/zz_generated.conversion.go b/pkg/apis/kops/v1alpha1/zz_generated.conversion.go
index c8ae1505f8..d1b21da2e5 100644
--- a/pkg/apis/kops/v1alpha1/zz_generated.conversion.go
+++ b/pkg/apis/kops/v1alpha1/zz_generated.conversion.go
@@ -813,11 +813,6 @@ func RegisterConversions(s *runtime.Scheme) error {
 	}); err != nil {
 		return err
 	}
-	if err := s.AddConversionFunc((*kops.KubeSchedulerConfig)(nil), (*KubeSchedulerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_kops_KubeSchedulerConfig_To_v1alpha1_KubeSchedulerConfig(a.(*kops.KubeSchedulerConfig), b.(*KubeSchedulerConfig), scope)
-	}); err != nil {
-		return err
-	}
 	if err := s.AddConversionFunc((*kops.TopologySpec)(nil), (*TopologySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
 		return Convert_kops_TopologySpec_To_v1alpha1_TopologySpec(a.(*kops.TopologySpec), b.(*TopologySpec), scope)
 	}); err != nil {
@@ -3661,6 +3656,8 @@ func autoConvert_v1alpha1_KubeSchedulerConfig_To_kops_KubeSchedulerConfig(in *Ku
 	out.UsePolicyConfigMap = in.UsePolicyConfigMap
 	out.FeatureGates = in.FeatureGates
 	out.MaxPersistentVolumes = in.MaxPersistentVolumes
+	out.Qps = in.Qps
+	out.Burst = in.Burst
 	return nil
 }
 
@@ -3685,11 +3682,16 @@ func autoConvert_kops_KubeSchedulerConfig_To_v1alpha1_KubeSchedulerConfig(in *ko
 	out.UsePolicyConfigMap = in.UsePolicyConfigMap
 	out.FeatureGates = in.FeatureGates
 	out.MaxPersistentVolumes = in.MaxPersistentVolumes
-	// WARNING: in.Qps requires manual conversion: does not exist in peer-type
-	// WARNING: in.Burst requires manual conversion: does not exist in peer-type
+	out.Qps = in.Qps
+	out.Burst = in.Burst
 	return nil
 }
 
+// Convert_kops_KubeSchedulerConfig_To_v1alpha1_KubeSchedulerConfig is an autogenerated conversion function.
+func Convert_kops_KubeSchedulerConfig_To_v1alpha1_KubeSchedulerConfig(in *kops.KubeSchedulerConfig, out *KubeSchedulerConfig, s conversion.Scope) error {
+	return autoConvert_kops_KubeSchedulerConfig_To_v1alpha1_KubeSchedulerConfig(in, out, s)
+}
+
 func autoConvert_v1alpha1_KubeletConfigSpec_To_kops_KubeletConfigSpec(in *KubeletConfigSpec, out *kops.KubeletConfigSpec, s conversion.Scope) error {
 	out.APIServers = in.APIServers
 	out.AnonymousAuth = in.AnonymousAuth
diff --git a/pkg/apis/kops/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/kops/v1alpha1/zz_generated.deepcopy.go
index c27185539a..ba8f86806e 100644
--- a/pkg/apis/kops/v1alpha1/zz_generated.deepcopy.go
+++ b/pkg/apis/kops/v1alpha1/zz_generated.deepcopy.go
@@ -2415,6 +2415,11 @@ func (in *KubeSchedulerConfig) DeepCopyInto(out *KubeSchedulerConfig) {
 		*out = new(int32)
 		**out = **in
 	}
+	if in.Qps != nil {
+		in, out := &in.Qps, &out.Qps
+		x := (*in).DeepCopy()
+		*out = &x
+	}
 	return
 }
 
diff --git a/pkg/apis/kops/v1alpha2/BUILD.bazel b/pkg/apis/kops/v1alpha2/BUILD.bazel
index d6651713ce..21341d46a6 100644
--- a/pkg/apis/kops/v1alpha2/BUILD.bazel
+++ b/pkg/apis/kops/v1alpha2/BUILD.bazel
@@ -7,7 +7,6 @@ go_library(
         "cluster.go",
         "componentconfig.go",
         "containerdconfig.go",
-        "conversion.go",
        "defaults.go",
         "doc.go",
         "dockerconfig.go",
diff --git a/pkg/apis/kops/v1alpha2/componentconfig.go b/pkg/apis/kops/v1alpha2/componentconfig.go
index 25ef37748a..091058a4a2 100644
--- a/pkg/apis/kops/v1alpha2/componentconfig.go
+++ b/pkg/apis/kops/v1alpha2/componentconfig.go
@@ -610,6 +610,10 @@ type KubeSchedulerConfig struct {
 	// which has been supported as far back as Kubernetes 1.7. The default depends on the version and the cloud provider
 	// as outlined: https://kubernetes.io/docs/concepts/storage/storage-limits/
 	MaxPersistentVolumes *int32 `json:"maxPersistentVolumes,omitempty"`
+	// Qps sets the maximum qps to send to apiserver after the burst quota is exhausted
+	Qps *resource.Quantity `json:"qps,omitempty"`
+	// Burst allows extra queries to accumulate when a client is exceeding its rate
+	Burst int32 `json:"burst,omitempty"`
 }
 
 // LeaderElectionConfiguration defines the configuration of leader election
diff --git a/pkg/apis/kops/v1alpha2/conversion.go b/pkg/apis/kops/v1alpha2/conversion.go
deleted file mode 100644
index 9efc23e516..0000000000
--- a/pkg/apis/kops/v1alpha2/conversion.go
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
-Copyright 2020 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha2
-
-import (
-	"k8s.io/apimachinery/pkg/conversion"
-	"k8s.io/kops/pkg/apis/kops"
-)
-
-func Convert_kops_KubeSchedulerConfig_To_v1alpha2_KubeSchedulerConfig(in *kops.KubeSchedulerConfig, out *KubeSchedulerConfig, s conversion.Scope) error {
-	return autoConvert_kops_KubeSchedulerConfig_To_v1alpha2_KubeSchedulerConfig(in, out, s)
-}
diff --git a/pkg/apis/kops/v1alpha2/zz_generated.conversion.go b/pkg/apis/kops/v1alpha2/zz_generated.conversion.go
index f80210bbf2..3f41ed8b0a 100644
--- a/pkg/apis/kops/v1alpha2/zz_generated.conversion.go
+++ b/pkg/apis/kops/v1alpha2/zz_generated.conversion.go
@@ -863,11 +863,6 @@ func RegisterConversions(s *runtime.Scheme) error {
 	}); err != nil {
 		return err
 	}
-	if err := s.AddConversionFunc((*kops.KubeSchedulerConfig)(nil), (*KubeSchedulerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_kops_KubeSchedulerConfig_To_v1alpha2_KubeSchedulerConfig(a.(*kops.KubeSchedulerConfig), b.(*KubeSchedulerConfig), scope)
-	}); err != nil {
-		return err
-	}
 	return nil
 }
 
@@ -3931,6 +3926,8 @@ func autoConvert_v1alpha2_KubeSchedulerConfig_To_kops_KubeSchedulerConfig(in *Ku
 	out.UsePolicyConfigMap = in.UsePolicyConfigMap
 	out.FeatureGates = in.FeatureGates
 	out.MaxPersistentVolumes = in.MaxPersistentVolumes
+	out.Qps = in.Qps
+	out.Burst = in.Burst
 	return nil
 }
 
@@ -3955,11 +3952,16 @@ func autoConvert_kops_KubeSchedulerConfig_To_v1alpha2_KubeSchedulerConfig(in *ko
 	out.UsePolicyConfigMap = in.UsePolicyConfigMap
 	out.FeatureGates = in.FeatureGates
 	out.MaxPersistentVolumes = in.MaxPersistentVolumes
-	// WARNING: in.Qps requires manual conversion: does not exist in peer-type
-	// WARNING: in.Burst requires manual conversion: does not exist in peer-type
+	out.Qps = in.Qps
+	out.Burst = in.Burst
 	return nil
 }
 
+// Convert_kops_KubeSchedulerConfig_To_v1alpha2_KubeSchedulerConfig is an autogenerated conversion function.
+func Convert_kops_KubeSchedulerConfig_To_v1alpha2_KubeSchedulerConfig(in *kops.KubeSchedulerConfig, out *KubeSchedulerConfig, s conversion.Scope) error {
+	return autoConvert_kops_KubeSchedulerConfig_To_v1alpha2_KubeSchedulerConfig(in, out, s)
+}
+
 func autoConvert_v1alpha2_KubeletConfigSpec_To_kops_KubeletConfigSpec(in *KubeletConfigSpec, out *kops.KubeletConfigSpec, s conversion.Scope) error {
 	out.APIServers = in.APIServers
 	out.AnonymousAuth = in.AnonymousAuth
diff --git a/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go b/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go
index 2994fda089..839dc262f9 100644
--- a/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go
+++ b/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go
@@ -2486,6 +2486,11 @@ func (in *KubeSchedulerConfig) DeepCopyInto(out *KubeSchedulerConfig) {
 		*out = new(int32)
 		**out = **in
 	}
+	if in.Qps != nil {
+		in, out := &in.Qps, &out.Qps
+		x := (*in).DeepCopy()
+		*out = &x
+	}
 	return
 }
 

From 5f930683ed0ec8ff52628f8ee33db5f9798db154 Mon Sep 17 00:00:00 2001
From: Ciprian Hacman
Date: Fri, 3 Jan 2020 18:25:16 +0200
Subject: [PATCH 40/42] Update support for Amazon Linux 2

---
 nodeup/pkg/distros/distribution.go     | 35 +++++++++++++------------
 nodeup/pkg/distros/identify.go         |  3 +--
 nodeup/pkg/model/containerd.go         |  2 +-
 nodeup/pkg/model/docker.go             | 24 ++++++++----------
 nodeup/pkg/model/packages.go           | 15 ++++++-----
 pkg/model/components/containerd.go     |  2 +-
 pkg/resources/aws/aws.go               | 12 ++++-----
 upup/pkg/fi/cloudup/awsup/aws_cloud.go | 36 ++++++++++++++++++--------
 8 files changed, 74 insertions(+), 55 deletions(-)

diff --git a/nodeup/pkg/distros/distribution.go b/nodeup/pkg/distros/distribution.go
index 35543f1699..4b01f60d05 100644
--- a/nodeup/pkg/distros/distribution.go
+++ b/nodeup/pkg/distros/distribution.go
@@ -24,18 +24,19 @@ import (
 type Distribution string
 
 var (
-	DistributionJessie      Distribution = "jessie"
-	DistributionDebian9     Distribution = "debian9"
-	DistributionDebian10    Distribution = "buster"
-	DistributionXenial      Distribution = "xenial"
-	DistributionBionic      Distribution = "bionic"
-	DistributionRhel7       Distribution = "rhel7"
-	DistributionCentos7     Distribution = "centos7"
-	DistributionRhel8       Distribution = "rhel8"
-	DistributionCentos8     Distribution = "centos8"
-	DistributionCoreOS      Distribution = "coreos"
-	DistributionFlatcar     Distribution = "flatcar"
-	DistributionContainerOS Distribution = "containeros"
+	DistributionJessie       Distribution = "jessie"
+	DistributionDebian9      Distribution = "debian9"
+	DistributionDebian10     Distribution = "buster"
+	DistributionXenial       Distribution = "xenial"
+	DistributionBionic       Distribution = "bionic"
+	DistributionAmazonLinux2 Distribution = "amazonlinux2"
+	DistributionRhel7        Distribution = "rhel7"
+	DistributionCentos7      Distribution = "centos7"
+	DistributionRhel8        Distribution = "rhel8"
+	DistributionCentos8      Distribution = "centos8"
+	DistributionCoreOS       Distribution = "coreos"
+	DistributionFlatcar      Distribution = "flatcar"
+	DistributionContainerOS  Distribution = "containeros"
 )
 
 func (d Distribution) BuildTags() []string {
@@ -50,6 +51,8 @@ func (d Distribution) BuildTags() []string {
 		t = []string{"_xenial"}
 	case DistributionBionic:
 		t = []string{"_bionic"}
+	case DistributionAmazonLinux2:
+		t = []string{"_amazonlinux2"}
 	case DistributionCentos7:
 		t = []string{"_centos7"}
 	case DistributionRhel7:
@@ -88,7 +91,7 @@ func (d Distribution) IsDebianFamily() bool {
 		return true
 	case DistributionXenial, DistributionBionic:
 		return true
-	case DistributionCentos7, DistributionRhel7, DistributionCentos8, DistributionRhel8:
+	case DistributionCentos7, DistributionRhel7, DistributionCentos8, DistributionRhel8, DistributionAmazonLinux2:
 		return false
 	case DistributionCoreOS, DistributionFlatcar, DistributionContainerOS:
 		return false
@@ -104,7 +107,7 @@ func (d Distribution) IsUbuntu() bool {
 		return false
 	case DistributionXenial, DistributionBionic:
 		return true
-	case DistributionCentos7, DistributionRhel7, DistributionCentos8, DistributionRhel8:
+	case DistributionCentos7, DistributionRhel7, DistributionCentos8, DistributionRhel8, DistributionAmazonLinux2:
 		return false
 	case DistributionCoreOS, DistributionFlatcar, DistributionContainerOS:
 		return false
@@ -116,7 +119,7 @@ func (d Distribution) IsUbuntu() bool {
 
 func (d Distribution) IsRHELFamily() bool {
 	switch d {
-	case DistributionCentos7, DistributionRhel7, DistributionCentos8, DistributionRhel8:
+	case DistributionCentos7, DistributionRhel7, DistributionCentos8, DistributionRhel8, DistributionAmazonLinux2:
 		return true
 	case DistributionJessie, DistributionXenial, DistributionBionic, DistributionDebian9, DistributionDebian10:
 		return false
@@ -132,7 +135,7 @@ func (d Distribution) IsSystemd() bool {
 	switch d {
 	case DistributionJessie, DistributionXenial, DistributionBionic, DistributionDebian9, DistributionDebian10:
 		return true
-	case DistributionCentos7, DistributionRhel7, DistributionCentos8, DistributionRhel8:
+	case DistributionCentos7, DistributionRhel7, DistributionCentos8, DistributionRhel8, DistributionAmazonLinux2:
 		return true
 	case DistributionCoreOS, DistributionFlatcar:
 		return true
diff --git a/nodeup/pkg/distros/identify.go b/nodeup/pkg/distros/identify.go
index f73f487a91..713dc3b886 100644
--- a/nodeup/pkg/distros/identify.go
+++ b/nodeup/pkg/distros/identify.go
@@ -113,8 +113,7 @@ func FindDistribution(rootfs string) (Distribution, error) {
 			return DistributionContainerOS, nil
 		}
 		if strings.HasPrefix(line, "PRETTY_NAME=\"Amazon Linux 2") {
-			// TODO: This is a hack. Amazon Linux is "special" and should get its own distro entry
-			return DistributionRhel7, nil
+			return DistributionAmazonLinux2, nil
 		}
 	}
 	klog.Warningf("unhandled /etc/os-release info %q", string(osRelease))
diff --git a/nodeup/pkg/model/containerd.go b/nodeup/pkg/model/containerd.go
index 63bf8b3c26..5941ad84ae 100644
--- a/nodeup/pkg/model/containerd.go
+++ b/nodeup/pkg/model/containerd.go
@@ -97,7 +97,7 @@ var containerdVersions = []packageVersion{
 	{
 		PackageVersion: "1.2.10",
 		Name:           "containerd.io",
-		Distros:        []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7},
+		Distros:        []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7, distros.DistributionAmazonLinux2},
 		Architectures:  []Architecture{ArchitectureAmd64},
 		Version:        "1.2.10",
 		Source:         "https://download.docker.com/linux/centos/7/x86_64/stable/Packages/containerd.io-1.2.10-3.2.el7.x86_64.rpm",
diff --git a/nodeup/pkg/model/docker.go b/nodeup/pkg/model/docker.go
index da54ac2210..d07796c79c 100644
--- a/nodeup/pkg/model/docker.go
+++ b/nodeup/pkg/model/docker.go
@@ -74,7 +74,7 @@ var dockerVersions = []packageVersion{
 	{
 		PackageVersion: "1.11.2",
 		Name:           "docker-engine",
-		Distros:        []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7},
+		Distros:        []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7, distros.DistributionAmazonLinux2},
 		Architectures:  []Architecture{ArchitectureAmd64},
 		Version:        "1.11.2",
 		Source:         "https://yum.dockerproject.org/repo/main/centos/7/Packages/docker-engine-1.11.2-1.el7.centos.x86_64.rpm",
@@ -117,7 +117,7 @@ var dockerVersions = []packageVersion{
 	{
 		PackageVersion: "1.12.1",
 		Name:           "docker-engine",
-		Distros:        []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7},
+		Distros:        []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7, distros.DistributionAmazonLinux2},
 		Architectures:  []Architecture{ArchitectureAmd64},
 		Version:        "1.12.1",
 		Source:         "https://yum.dockerproject.org/repo/main/centos/7/Packages/docker-engine-1.12.1-1.el7.centos.x86_64.rpm",
@@ -176,7 +176,7 @@ var dockerVersions = []packageVersion{
 	{
 		PackageVersion: "1.12.3",
 		Name:           "docker-engine",
-		Distros:        []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7},
+		Distros:        []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7, distros.DistributionAmazonLinux2},
 		Architectures:  []Architecture{ArchitectureAmd64},
 		Version:        "1.12.3",
 		Source:         "https://yum.dockerproject.org/repo/main/centos/7/Packages/docker-engine-1.12.3-1.el7.centos.x86_64.rpm",
@@ -250,7 +250,7 @@ var dockerVersions = []packageVersion{
 	{
 		PackageVersion: "1.12.6",
 		Name:           "docker-engine",
-		Distros:        []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7},
+		Distros:        []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7, distros.DistributionAmazonLinux2},
 		Architectures:  []Architecture{ArchitectureAmd64},
 		Version:        "1.12.6",
 		Source:         "https://yum.dockerproject.org/repo/main/centos/7/Packages/docker-engine-1.12.6-1.el7.centos.x86_64.rpm",
@@ -324,7 +324,7 @@ var dockerVersions = []packageVersion{
 	{
 		PackageVersion: "1.13.1",
 		Name:           "docker-engine",
-		Distros:        []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7},
+		Distros:        []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7, distros.DistributionAmazonLinux2},
 		Architectures:  []Architecture{ArchitectureAmd64},
 		Version:        "1.13.1",
 		Source:         "https://yum.dockerproject.org/repo/main/centos/7/Packages/docker-engine-1.13.1-1.el7.centos.x86_64.rpm",
@@ -409,7 +409,7 @@ var dockerVersions = []packageVersion{
 	{
 		PackageVersion: "17.03.2",
 		Name:           "docker-ce",
-		Distros:        []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7},
+		Distros:        []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7, distros.DistributionAmazonLinux2},
 		Architectures:  []Architecture{ArchitectureAmd64},
 		Version:        "17.03.2.ce",
 		Source:         "https://download.docker.com/linux/centos/7/x86_64/stable/Packages/docker-ce-17.03.2.ce-1.el7.centos.x86_64.rpm",
@@ -508,7 +508,7 @@ var dockerVersions = []packageVersion{
 	{
 		PackageVersion: "17.09.0",
 		Name:           "docker-ce",
-		Distros:        []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7},
+		Distros:        []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7, distros.DistributionAmazonLinux2},
 		Architectures:  []Architecture{ArchitectureAmd64},
 		Version:        "17.09.0.ce",
 		Source:         "https://download.docker.com/linux/centos/7/x86_64/stable/Packages/docker-ce-17.09.0.ce-1.el7.centos.x86_64.rpm",
@@ -598,7 +598,7 @@ var dockerVersions = []packageVersion{
 	{
 		PackageVersion: "18.06.1",
 		Name:           "docker-ce",
-		Distros:        []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7},
+		Distros:        []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7, distros.DistributionAmazonLinux2},
 		Architectures:  []Architecture{ArchitectureAmd64},
 		Version:        "18.06.1.ce",
 		Source:         "https://download.docker.com/linux/centos/7/x86_64/stable/Packages/docker-ce-18.06.1.ce-3.el7.x86_64.rpm",
@@ -628,7 +628,7 @@ var dockerVersions = []packageVersion{
 	{
 		PackageVersion: "18.06.2",
 		Name:           "docker-ce",
-		Distros:        []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7},
+		Distros:        []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7, distros.DistributionAmazonLinux2},
 		Architectures:  []Architecture{ArchitectureAmd64},
 		Version:        "18.06.2.ce",
 		Source:         "https://download.docker.com/linux/centos/7/x86_64/stable/Packages/docker-ce-18.06.2.ce-3.el7.x86_64.rpm",
@@ -681,7 +681,7 @@ var dockerVersions = []packageVersion{
 	{
 		PackageVersion: "18.06.3",
 		Name:           "docker-ce",
-		Distros:        []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7},
+		Distros:        []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7, distros.DistributionAmazonLinux2},
 		Architectures:  []Architecture{ArchitectureAmd64},
 		Version:        "18.06.3.ce",
 		Source:         "https://download.docker.com/linux/centos/7/x86_64/stable/Packages/docker-ce-18.06.3.ce-3.el7.x86_64.rpm",
@@ -782,7 +782,7 @@ var dockerVersions = []packageVersion{
 	{
 		PackageVersion: "18.09.9",
 		Name:           "docker-ce",
-		Distros:        []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7},
+		Distros:        []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7, distros.DistributionAmazonLinux2},
 		Architectures:  []Architecture{ArchitectureAmd64},
 		Version:        "18.09.9",
 		Source:         "https://download.docker.com/linux/centos/7/x86_64/stable/Packages/docker-ce-18.09.9-3.el7.x86_64.rpm",
@@ -898,7 +898,7 @@ var dockerVersions = []packageVersion{
 	{
 		PackageVersion: "19.03.4",
 		Name:           "docker-ce",
-		Distros:        []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7},
+		Distros:        []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7, distros.DistributionAmazonLinux2},
 		Architectures:  []Architecture{ArchitectureAmd64},
 		Version:        "19.03.4",
 		Source:         "https://download.docker.com/linux/centos/7/x86_64/stable/Packages/docker-ce-19.03.4-3.el7.x86_64.rpm",
diff --git a/nodeup/pkg/model/packages.go b/nodeup/pkg/model/packages.go
index 1f49cc92d6..1244b1eb93 100644
--- a/nodeup/pkg/model/packages.go
+++ b/nodeup/pkg/model/packages.go
@@ -56,17 +56,20 @@ func (b *PackagesBuilder) Build(c *fi.ModelBuilderContext) error {
 			c.AddTask(&nodetasks.Package{Name: "libseccomp"})
 			c.AddTask(&nodetasks.Package{Name: "socat"})
 			c.AddTask(&nodetasks.Package{Name: "util-linux"})
-
-			// Handle RHEL 7 and Amazon Linux 2 differently when installing "extras"
-			if b.Distribution != distros.DistributionRhel7 {
-				c.AddTask(&nodetasks.Package{Name: "container-selinux"})
-				c.AddTask(&nodetasks.Package{Name: "pigz"})
-			} else {
+			// Handle some packages differently for each distro
+			switch b.Distribution {
+			case distros.DistributionRhel7:
+				// Easier to install container-selinux from CentOS than extras
 				c.AddTask(&nodetasks.Package{
 					Name:   "container-selinux",
 					Source: s("http://vault.centos.org/7.6.1810/extras/x86_64/Packages/container-selinux-2.107-1.el7_6.noarch.rpm"),
 					Hash:   s("7de4211fa0dfd240d8827b93763e1eb5f0d56411"),
 				})
+			case distros.DistributionAmazonLinux2:
+				// Amazon Linux 2 doesn't have SELinux enabled by default
+			default:
+				c.AddTask(&nodetasks.Package{Name: "container-selinux"})
+				c.AddTask(&nodetasks.Package{Name: "pigz"})
 			}
 		} else {
 			// Hopefully they are already installed
diff --git a/pkg/model/components/containerd.go b/pkg/model/components/containerd.go
index 2775006880..564fa05881 100644
--- a/pkg/model/components/containerd.go
+++ b/pkg/model/components/containerd.go
@@ -54,7 +54,7 @@ func (b *ContainerdOptionsBuilder) BuildOptions(o interface{}) error {
 		// Set containerd based on Kubernetes version
 		if fi.StringValue(containerd.Version) == "" {
 			if b.IsKubernetesGTE("1.17") {
-				containerd.Version = fi.String("1.2.10")
+				containerd.Version = fi.String("1.3.2")
 			} else if b.IsKubernetesGTE("1.11") {
 				return fmt.Errorf("containerd version is required")
 			}
diff --git a/pkg/resources/aws/aws.go b/pkg/resources/aws/aws.go
index 5277c8d0db..535caa6e98 100644
--- a/pkg/resources/aws/aws.go
+++ b/pkg/resources/aws/aws.go
@@ -502,16 +502,16 @@ func (s *dumpState) getImageInfo(imageID string) (*imageInfo, error) {
 func guessSSHUser(image *ec2.Image) string {
 	owner := aws.StringValue(image.OwnerId)
 	switch owner {
-	case awsup.WellKnownAccountAmazonSystemLinux2:
+	case awsup.WellKnownAccountAmazonLinux2, awsup.WellKnownAccountRedhat:
 		return "ec2-user"
-	case awsup.WellKnownAccountRedhat:
-		return "ec2-user"
-	case awsup.WellKnownAccountCoreOS:
-		return "core"
-	case awsup.WellKnownAccountKopeio:
+	case awsup.WellKnownAccountCentOS:
+		return "centos"
+	case awsup.WellKnownAccountDebian9, awsup.WellKnownAccountDebian10, awsup.WellKnownAccountKopeio:
 		return "admin"
 	case awsup.WellKnownAccountUbuntu:
 		return "ubuntu"
+	case awsup.WellKnownAccountCoreOS, awsup.WellKnownAccountFlatcar:
+		return "core"
 	}
 
 	name := aws.StringValue(image.Name)
diff --git a/upup/pkg/fi/cloudup/awsup/aws_cloud.go b/upup/pkg/fi/cloudup/awsup/aws_cloud.go
index 7aa22516e2..99e854bc59 100644
--- a/upup/pkg/fi/cloudup/awsup/aws_cloud.go
+++ b/upup/pkg/fi/cloudup/awsup/aws_cloud.go
@@ -86,11 +86,15 @@ const TagNameKopsRole = "kubernetes.io/kops/role"
 const TagNameClusterOwnershipPrefix = "kubernetes.io/cluster/"
 
 const (
-	WellKnownAccountKopeio             = "383156758163"
-	WellKnownAccountRedhat             = "309956199498"
-	WellKnownAccountCoreOS             = "595879546273"
-	WellKnownAccountAmazonSystemLinux2 = "137112412989"
-	WellKnownAccountUbuntu             = "099720109477"
+	WellKnownAccountAmazonLinux2 = "137112412989"
+	WellKnownAccountCentOS       = "679593333241"
+	WellKnownAccountCoreOS       = "595879546273"
+	WellKnownAccountDebian9      = "379101102735"
+	WellKnownAccountDebian10     = "136693071363"
+	WellKnownAccountFlatcar      = "075585003325"
+	WellKnownAccountKopeio       = "383156758163"
+	WellKnownAccountRedhat       = "309956199498"
+	WellKnownAccountUbuntu       = "099720109477"
 )
 
 type AWSCloud interface {
@@ -1165,14 +1169,24 @@ func resolveImage(ec2Client ec2iface.EC2API, name string) (*ec2.Image, error) {
 
 		// Check for well known owner aliases
 		switch owner {
-		case "kope.io":
-			owner = WellKnownAccountKopeio
-		case "coreos.com":
+		case "amazon", "amazon.com":
+			owner = WellKnownAccountAmazonLinux2
+		case "centos":
+			owner = WellKnownAccountCentOS
+		case "coreos", "coreos.com":
 			owner = WellKnownAccountCoreOS
-		case "redhat.com":
+		case "debian9":
+			owner = WellKnownAccountDebian9
+		case "debian10":
+			owner = WellKnownAccountDebian10
+		case "flatcar":
+			owner = WellKnownAccountFlatcar
+		case "kopeio", "kope.io":
+			owner = WellKnownAccountKopeio
+		case "redhat", "redhat.com":
 			owner = WellKnownAccountRedhat
-		case "amazon.com":
-			owner = WellKnownAccountAmazonSystemLinux2
+		case "ubuntu":
+			owner = WellKnownAccountUbuntu
 		}
 
 		request.Owners = []*string{&owner}

From 504ac7545a5fb0421c22d51d9dbe6ffa15569ae1 Mon Sep 17 00:00:00 2001
From: Roberto Rodriguez Alcala
Date: Tue, 28 Jan 2020 22:26:38 -0800
Subject: [PATCH 41/42] Fixes regression in e2e tests

---
 nodeup/pkg/model/kube_scheduler.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/nodeup/pkg/model/kube_scheduler.go b/nodeup/pkg/model/kube_scheduler.go
index 2c7c454bfa..f59330f82f 100644
--- a/nodeup/pkg/model/kube_scheduler.go
+++ b/nodeup/pkg/model/kube_scheduler.go
@@ -63,7 +63,7 @@ func (b *KubeSchedulerBuilder) Build(c *fi.ModelBuilderContext) error {
 	if !b.IsMaster {
 		return nil
 	}
-	useConfigFile := b.IsKubernetesGTE("1.11")
+	useConfigFile := b.IsKubernetesGTE("1.12")
 	{
 		pod, err := b.buildPod(useConfigFile)
 		if err != nil {

From 002e6f3369ea7eae29c0ea3623d6c08aaf3a904b Mon Sep 17 00:00:00 2001
From: Justin SB
Date: Wed, 29 Jan 2020 10:58:40 -0500
Subject: [PATCH 42/42] Release notes for 1.17.0-alpha.2

---
 docs/releases/1.17-NOTES.md | 38 +++++++++++++++++++++++++++++++++++++
 1 file changed, 38 insertions(+)

diff --git a/docs/releases/1.17-NOTES.md b/docs/releases/1.17-NOTES.md
index 22730c1cda..04fdd4abe9 100644
--- a/docs/releases/1.17-NOTES.md
+++ b/docs/releases/1.17-NOTES.md
@@ -112,3 +112,41 @@ the notes prior to the release).
 * DOCS: fix simple typo in readme [@lpmi-13](https://github.com/lpmi-13) [#8005](https://github.com/kubernetes/kops/pull/8005)
 * Spotinst: Upgrade the Spotinst SDK to version 1.36 [@liranp](https://github.com/liranp) [#8003](https://github.com/kubernetes/kops/pull/8003)
 * Release 1.17.0-alpha.1 [@justinsb](https://github.com/justinsb) [#7985](https://github.com/kubernetes/kops/pull/7985)
+
+## 1.17.0-alpha.1 to 1.17.0-alpha.2
+
+* Fix mounting Calico "flexvol-driver-host" in CoreOS [@hakman](https://github.com/hakman) [#8062](https://github.com/kubernetes/kops/pull/8062)
+* Cherry-pick #8074 to release-1.17 [@johngmyers](https://github.com/johngmyers) [#8084](https://github.com/kubernetes/kops/pull/8084)
+* Bump cilium version to 1.6.4 [@olemarkus](https://github.com/olemarkus) [#8022](https://github.com/kubernetes/kops/pull/8022)
+* Complete support for Flatcar [@mazzy89](https://github.com/mazzy89) [#7545](https://github.com/kubernetes/kops/pull/7545)
+* Canal v3.10 manifest for k8s v1.15+ [@KashifSaadat](https://github.com/KashifSaadat),[@hakman](https://github.com/hakman) [#7917](https://github.com/kubernetes/kops/pull/7917)
+* Cherry pick #8095 [@zetaab](https://github.com/zetaab) [#8096](https://github.com/kubernetes/kops/pull/8096)
+* test validateCluster twice to make sure it does not flap [@zetaab](https://github.com/zetaab),[@johngmyers](https://github.com/johngmyers) [#8088](https://github.com/kubernetes/kops/pull/8088)
+* Add inf1 instances [@mikesplain](https://github.com/mikesplain) [#8128](https://github.com/kubernetes/kops/pull/8128)
+* Add CapacityOptimized to list of supported spot allocation strategies [@gjtempleton](https://github.com/gjtempleton) [#7406](https://github.com/kubernetes/kops/pull/7406)
+* Update Calico to v3.10.2 [@hakman](https://github.com/hakman) [#8104](https://github.com/kubernetes/kops/pull/8104)
+* Openstack: Fix cluster floating ips [@mitch000001](https://github.com/mitch000001) [#8115](https://github.com/kubernetes/kops/pull/8115)
+* cilium: don't try to mount sys/fs/bpf if already mounted [@justinsb](https://github.com/justinsb) [#7832](https://github.com/kubernetes/kops/pull/7832)
+* Update copyrights for 2020 [@johngmyers](https://github.com/johngmyers) [#8241](https://github.com/kubernetes/kops/pull/8241)
+* Fix protokube osx build [@mikesplain](https://github.com/mikesplain) [#8263](https://github.com/kubernetes/kops/pull/8263)
+* Set CLUSTER_NAME env var on amazon-vpc-cni pods [@rifelpet](https://github.com/rifelpet) [#8274](https://github.com/kubernetes/kops/pull/8274)
+* Add deprecation warning for older k8s versions [@rifelpet](https://github.com/rifelpet) [#8176](https://github.com/kubernetes/kops/pull/8176)
+* Remove kops-controller deployment [@rifelpet](https://github.com/rifelpet) [#8273](https://github.com/kubernetes/kops/pull/8273)
+* Don't output empty sections in the manifests [@justinsb](https://github.com/justinsb) [#8317](https://github.com/kubernetes/kops/pull/8317)
+* Cloud controller template function [@DavidSie](https://github.com/DavidSie) [#7992](https://github.com/kubernetes/kops/pull/7992)
+* Configuration to specify no SSH key [@austinmoore-](https://github.com/austinmoore-) [#7096](https://github.com/kubernetes/kops/pull/7096)
+* tests: increase timeout in rolling update tests [@justinsb](https://github.com/justinsb) [#8139](https://github.com/kubernetes/kops/pull/8139)
+* Fix crossbuild-nodeup-in-docker [@johngmyers](https://github.com/johngmyers) [#8343](https://github.com/kubernetes/kops/pull/8343)
+* update gophercloud dependency [@zetaab](https://github.com/zetaab) [#8347](https://github.com/kubernetes/kops/pull/8347)
+* Update Terraform resource names to be 0.12 compatible. [@rifelpet](https://github.com/rifelpet) [#7957](https://github.com/kubernetes/kops/pull/7957)
+* Allow local filesystem state stores (to aid CI pull-request workflows) [@ari-becker](https://github.com/ari-becker),[@rifelpet](https://github.com/rifelpet) [#6465](https://github.com/kubernetes/kops/pull/6465)
+* Fix issues with older versions of k8s for basic clusters [@hakman](https://github.com/hakman) [#8248](https://github.com/kubernetes/kops/pull/8248)
+* Use IAMPrefix() for hostedzone [@lazzarello](https://github.com/lazzarello) [#8366](https://github.com/kubernetes/kops/pull/8366)
+* Fix scheduler policy configmap args [@vvbogdanov87](https://github.com/vvbogdanov87) [#8386](https://github.com/kubernetes/kops/pull/8386)
+* Add Cilium.EnablePolicy back into templates [@olemarkus](https://github.com/olemarkus) [#8379](https://github.com/kubernetes/kops/pull/8379)
+* Bump etcd-manager to 3.0.20200116 (#8310) [@mmerrill3](https://github.com/mmerrill3) [#8399](https://github.com/kubernetes/kops/pull/8399)
+* CoreDNS default image bump to 1.6.6 to resolve CVE [@gjtempleton](https://github.com/gjtempleton) [#8333](https://github.com/kubernetes/kops/pull/8333)
+* Don't load nonexistent calico-client cert when CNI is Cilium [@johngmyers](https://github.com/johngmyers) [#8338](https://github.com/kubernetes/kops/pull/8338)
+* Kops releases - prefix git tags with v [@rifelpet](https://github.com/rifelpet) [#8373](https://github.com/kubernetes/kops/pull/8373)
+* EBS Root Volume Termination [@tioxy](https://github.com/tioxy) [#7865](https://github.com/kubernetes/kops/pull/7865)
+* Alicloud: etcd-manager support [@bittopaz](https://github.com/bittopaz) [#8016](https://github.com/kubernetes/kops/pull/8016)
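
Editor's note: to make the kube-scheduler Qps/Burst plumbing in the patches above concrete, here is a minimal, self-contained sketch (not part of the patch series) of how the new fields are rendered into the scheduler config file. It mirrors the updated TestParse test; the import paths follow the packages used in that test, with k8s.io/kops/nodeup/pkg/model assumed as the home of NewSchedulerConfig, and the main wrapper is illustrative only:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/kops/nodeup/pkg/model"
	"k8s.io/kops/pkg/apis/kops"
	"k8s.io/kops/pkg/configbuilder"
)

func main() {
	// Qps is a resource.Quantity so that fractional rates like "3.1"
	// survive the round trip through the API types.
	qps := resource.MustParse("3.1")

	// Per the componentconfig diffs above, the configfile struct tags
	// (e.g. "ClientConnection.QPS", "ClientConnection.Burst") indicate
	// where each field lands in the SchedulerConfig skeleton returned
	// by NewSchedulerConfig.
	cfg := &kops.KubeSchedulerConfig{Qps: &qps, Burst: 100}

	yamlBytes, err := configbuilder.BuildConfigYaml(cfg, model.NewSchedulerConfig())
	if err != nil {
		panic(err)
	}

	// Prints a KubeSchedulerConfiguration with clientConnection.burst: 100,
	// clientConnection.qps: 3.1, and the default kubeconfig path.
	fmt.Println(string(yamlBytes))
}

On a master, nodeup writes these rendered bytes to /var/lib/kube-scheduler/config.yaml, the file that the --config flag added in buildPod points at (used when running Kubernetes 1.12 or later, per the regression fix in PATCH 41).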