mirror of https://github.com/kubernetes/kops.git
Fixup kubelet and controlPlaneKubelet config building
We need to differentiate between the version of the control plane and the version of the nodes.
This commit is contained in:
parent 6dfbd4633b
commit 0f13759bdf
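The diff below threads two versions through the options builders instead of one. As a rough, self-contained illustration of that intent (this is not kops code; every name here is invented for the sketch), version-gated defaults for control-plane components follow the control-plane version, while kubelet defaults follow the version the nodes actually run, which may lag behind:

// Minimal sketch (not kops code): feature gates are decided per component,
// using the control-plane version for control-plane components and the
// node (kubelet) version for node components.
package main

import "fmt"

// featureGateDefault reports whether a legacy gate should still default to
// enabled for a component running the given minor version.
func featureGateDefault(minor int, removedInMinor int) bool {
    return minor < removedInMinor
}

func main() {
    controlPlaneMinor := 30 // version the control plane runs
    nodeMinor := 29         // version the kubelets run; may lag the control plane

    // kube-apiserver / kube-controller-manager / scheduler gate on the control-plane version.
    fmt.Println("apiserver InTreePluginAWSUnregister:", featureGateDefault(controlPlaneMinor, 31))

    // Kubelet config gates on the version the nodes actually run.
    fmt.Println("kubelet InTreePluginAWSUnregister:", featureGateDefault(nodeMinor, 31))
}

In the actual change, this split is carried by OptionsContext.ControlPlaneKubernetesVersion() and OptionsContext.NodeKubernetesVersion(), introduced in the diff below.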
@@ -72,7 +72,10 @@ type NodeupModelContext struct {
     // usesNoneDNS is true if the cluster runs with dns=none (which uses fixed IPs, for example a load balancer, instead of DNS)
     usesNoneDNS bool

-    kubernetesVersion *kopsmodel.KubernetesVersion
+    // Deprecated: This should be renamed to controlPlaneVersion / nodeVersion;
+    // controlPlaneVersion should probably/ideally only be populated on control plane nodes.
+    kubernetesVersion *kopsmodel.KubernetesVersion

     bootstrapCerts      map[string]*nodetasks.BootstrapCert
     bootstrapKeypairIDs map[string]string
@@ -50,7 +50,7 @@ func MustParseKubernetesVersion(versionString string) *KubernetesVersion {
     return kubernetesVersion
 }

-func (v *KubernetesVersion) String() string {
+func (v KubernetesVersion) String() string {
     return v.versionString
 }

@@ -62,13 +62,13 @@ func IsBaseURL(kubernetesVersion string) bool {

 // IsBaseURL checks if the version string is a URL, rather than a version identifier.
 // URLs are typically used for CI builds and during development.
-func (v *KubernetesVersion) IsBaseURL() bool {
+func (v KubernetesVersion) IsBaseURL() bool {
     return IsBaseURL(v.versionString)
 }

 // IsGTE checks if the version is greater than or equal (>=) to the specified version.
 // It panics if the kubernetes version in the cluster is invalid, or if the version is invalid.
-func (v *KubernetesVersion) IsGTE(version string) bool {
+func (v KubernetesVersion) IsGTE(version string) bool {
     parsedVersion, err := util.ParseKubernetesVersion(version)
     if err != nil || parsedVersion == nil {
         panic(fmt.Sprintf("error parsing version %q: %v", version, err))

@@ -84,6 +84,11 @@ func (v *KubernetesVersion) IsGTE(version string) bool {

 // IsLT checks if the version is strictly less (<) than the specified version.
 // It panics if the kubernetes version in the cluster is invalid, or if the version is invalid.
-func (v *KubernetesVersion) IsLT(version string) bool {
+func (v KubernetesVersion) IsLT(version string) bool {
     return !v.IsGTE(version)
 }
+
+// Minor returns the minor version of the Kubernetes version.
+func (v KubernetesVersion) Minor() int {
+    return int(v.version.Minor)
+}
@@ -185,11 +185,11 @@ func (b *KubeAPIServerOptionsBuilder) BuildOptions(cluster *kops.Cluster) error

     if clusterSpec.CloudProvider.AWS != nil {

-        if _, found := c.FeatureGates["InTreePluginAWSUnregister"]; !found && b.IsKubernetesLT("1.31") {
+        if _, found := c.FeatureGates["InTreePluginAWSUnregister"]; !found && b.ControlPlaneKubernetesVersion().IsLT("1.31") {
             c.FeatureGates["InTreePluginAWSUnregister"] = "true"
         }

-        if _, found := c.FeatureGates["CSIMigrationAWS"]; !found && b.IsKubernetesLT("1.27") {
+        if _, found := c.FeatureGates["CSIMigrationAWS"]; !found && b.ControlPlaneKubernetesVersion().IsLT("1.27") {
             c.FeatureGates["CSIMigrationAWS"] = "true"
         }
     }
@@ -74,7 +74,7 @@ func (b *AWSCloudControllerManagerOptionsBuilder) BuildOptions(cluster *kops.Clu

     if eccm.Image == "" {
         // See https://us.gcr.io/k8s-artifacts-prod/provider-aws/cloud-controller-manager
-        switch b.KubernetesVersion.Minor {
+        switch b.ControlPlaneKubernetesVersion().Minor() {
         case 25:
             eccm.Image = "registry.k8s.io/provider-aws/cloud-controller-manager:v1.25.15"
         case 26:

@@ -94,7 +94,7 @@ func (b *AWSCloudControllerManagerOptionsBuilder) BuildOptions(cluster *kops.Clu
         }
     }

-    if b.IsKubernetesLT("1.25") {
+    if b.ControlPlaneKubernetesVersion().IsLT("1.25") {
         eccm.EnableLeaderMigration = fi.PtrTo(true)
     }
@@ -20,7 +20,6 @@ import (
     "testing"

     kopsapi "k8s.io/kops/pkg/apis/kops"
-    "k8s.io/kops/pkg/apis/kops/util"
     "k8s.io/kops/pkg/assets"
     "k8s.io/kops/util/pkg/vfs"
 )

@@ -47,20 +46,15 @@ func Test_Build_Containerd_Supported_Version(t *testing.T) {
         c := buildContainerdCluster(v)
         b := assets.NewAssetBuilder(vfs.Context, c.Spec.Assets, c.Spec.KubernetesVersion, false)

-        version, err := util.ParseKubernetesVersion(v)
+        optionsContext, err := NewOptionsContext(c, b, b.KubeletSupportedVersion)
         if err != nil {
-            t.Fatalf("unexpected error from ParseKubernetesVersion %s: %v", v, err)
+            t.Fatalf("unexpected error from NewOptionsContext: %v", err)
         }

         ob := &ContainerdOptionsBuilder{
-            &OptionsContext{
-                AssetBuilder:      b,
-                KubernetesVersion: *version,
-            },
+            OptionsContext: optionsContext,
         }

-        err = ob.BuildOptions(c)
-        if err != nil {
+        if err := ob.BuildOptions(c); err != nil {
             t.Fatalf("unexpected error from BuildOptions: %v", err)
         }
@@ -39,15 +39,59 @@ import (
 type OptionsContext struct {
     ClusterName string

+    // Deprecated: Prefer using NodeKubernetesVersion() and ControlPlaneKubernetesVersion()
     KubernetesVersion semver.Version

     AssetBuilder *assets.AssetBuilder
+
+    nodeKubernetesVersion         kopsmodel.KubernetesVersion
+    controlPlaneKubernetesVersion kopsmodel.KubernetesVersion
 }

+func NewOptionsContext(cluster *kops.Cluster, assetBuilder *assets.AssetBuilder, maxKubeletSupportedVersion string) (*OptionsContext, error) {
+    optionsContext := &OptionsContext{
+        ClusterName:  cluster.ObjectMeta.Name,
+        AssetBuilder: assetBuilder,
+    }
+
+    sv, err := util.ParseKubernetesVersion(cluster.Spec.KubernetesVersion)
+    if err != nil {
+        return nil, fmt.Errorf("unable to determine kubernetes version from %q", cluster.Spec.KubernetesVersion)
+    }
+    optionsContext.KubernetesVersion = *sv
+
+    controlPlaneKubernetesVersion, err := kopsmodel.ParseKubernetesVersion(cluster.Spec.KubernetesVersion)
+    if err != nil {
+        return nil, fmt.Errorf("unable to determine kubernetes version from %q: %w", cluster.Spec.KubernetesVersion, err)
+    }
+    nodeKubernetesVersion := controlPlaneKubernetesVersion
+    if maxKubeletSupportedVersion != "" {
+        nodeKubernetesVersion, err = kopsmodel.ParseKubernetesVersion(maxKubeletSupportedVersion)
+        if err != nil {
+            return nil, fmt.Errorf("unable to determine kubernetes version from %q: %w", maxKubeletSupportedVersion, err)
+        }
+    }
+
+    optionsContext.nodeKubernetesVersion = *nodeKubernetesVersion
+    optionsContext.controlPlaneKubernetesVersion = *controlPlaneKubernetesVersion
+
+    return optionsContext, nil
+}
+
+func (c *OptionsContext) NodeKubernetesVersion() kopsmodel.KubernetesVersion {
+    return c.nodeKubernetesVersion
+}
+
+func (c *OptionsContext) ControlPlaneKubernetesVersion() kopsmodel.KubernetesVersion {
+    return c.controlPlaneKubernetesVersion
+}
+
+// Deprecated: prefer using NodeKubernetesVersion() and ControlPlaneKubernetesVersion()
 func (c *OptionsContext) IsKubernetesGTE(version string) bool {
     return util.IsKubernetesGTE(version, c.KubernetesVersion)
 }

+// Deprecated: prefer using NodeKubernetesVersion() and ControlPlaneKubernetesVersion()
 func (c *OptionsContext) IsKubernetesLT(version string) bool {
     return !c.IsKubernetesGTE(version)
 }
@@ -73,13 +73,13 @@ func (b *GCPCloudControllerManagerOptionsBuilder) BuildOptions(cluster *kops.Clu

     if ccmConfig.Image == "" {
         // TODO: Implement CCM image publishing
-        switch b.KubernetesVersion.Minor {
+        switch b.ControlPlaneKubernetesVersion().Minor() {
         default:
             ccmConfig.Image = "gcr.io/k8s-staging-cloud-provider-gcp/cloud-controller-manager:master"
         }
     }

-    if b.IsKubernetesLT("1.25") {
+    if b.ControlPlaneKubernetesVersion().IsLT("1.25") {
         ccmConfig.EnableLeaderMigration = fi.PtrTo(true)
     }
@@ -53,7 +53,7 @@ func (b *KubeControllerManagerOptionsBuilder) BuildOptions(o *kops.Cluster) erro
     // TLDR; set this too low, and have a few EBS Volumes, and you will spam AWS api

     {
-        klog.V(4).Infof("Kubernetes version %q supports AttachDetachReconcileSyncPeriod; will configure", b.KubernetesVersion)
+        klog.V(4).Infof("Kubernetes version %q supports AttachDetachReconcileSyncPeriod; will configure", b.ControlPlaneKubernetesVersion().String())
         // If not set ... or set to 0s ... which is stupid
         if kcm.AttachDetachReconcileSyncPeriod == nil ||
             kcm.AttachDetachReconcileSyncPeriod.Duration.String() == "0s" {

@@ -149,11 +149,11 @@ func (b *KubeControllerManagerOptionsBuilder) BuildOptions(o *kops.Cluster) erro
         kcm.FeatureGates = make(map[string]string)
     }

-    if _, found := kcm.FeatureGates["InTreePluginAWSUnregister"]; !found && b.IsKubernetesLT("1.31") {
+    if _, found := kcm.FeatureGates["InTreePluginAWSUnregister"]; !found && b.ControlPlaneKubernetesVersion().IsLT("1.31") {
         kcm.FeatureGates["InTreePluginAWSUnregister"] = "true"
     }

-    if _, found := kcm.FeatureGates["CSIMigrationAWS"]; !found && b.IsKubernetesLT("1.27") {
+    if _, found := kcm.FeatureGates["CSIMigrationAWS"]; !found && b.ControlPlaneKubernetesVersion().IsLT("1.27") {
         kcm.FeatureGates["CSIMigrationAWS"] = "true"
     }
 }
@@ -24,6 +24,7 @@ import (
     "k8s.io/klog/v2"

     "k8s.io/kops/pkg/apis/kops"
+    "k8s.io/kops/pkg/apis/kops/model"
     "k8s.io/kops/upup/pkg/fi"
     "k8s.io/kops/upup/pkg/fi/cloudup/gce"
     "k8s.io/kops/upup/pkg/fi/loader"

@@ -38,54 +39,68 @@ var _ loader.ClusterOptionsBuilder = &KubeletOptionsBuilder{}

 // BuildOptions is responsible for filling the defaults for the kubelet
 func (b *KubeletOptionsBuilder) BuildOptions(cluster *kops.Cluster) error {
-    clusterSpec := &cluster.Spec
-
-    if clusterSpec.Kubelet == nil {
-        clusterSpec.Kubelet = &kops.KubeletConfigSpec{}
+    if cluster.Spec.Kubelet == nil {
+        cluster.Spec.Kubelet = &kops.KubeletConfigSpec{}
     }
-    if clusterSpec.ControlPlaneKubelet == nil {
-        clusterSpec.ControlPlaneKubelet = &kops.KubeletConfigSpec{}
+    if cluster.Spec.ControlPlaneKubelet == nil {
+        cluster.Spec.ControlPlaneKubelet = &kops.KubeletConfigSpec{}
     }

+    if err := b.configureKubelet(cluster, cluster.Spec.Kubelet, b.NodeKubernetesVersion()); err != nil {
+        return err
+    }
+    if err := b.configureKubelet(cluster, cluster.Spec.ControlPlaneKubelet, b.ControlPlaneKubernetesVersion()); err != nil {
+        return err
+    }
+
+    // We _do_ allow debugging handlers, so we can do logs
+    // This does allow more access than we would like though
+    cluster.Spec.ControlPlaneKubelet.EnableDebuggingHandlers = fi.PtrTo(true)
+
+    // IsolateControlPlane enables the legacy behaviour, where master pods on a separate network
+    // In newer versions of kubernetes, most of that functionality has been removed though
+    if fi.ValueOf(cluster.Spec.Networking.IsolateControlPlane) {
+        cluster.Spec.ControlPlaneKubelet.EnableDebuggingHandlers = fi.PtrTo(false)
+        cluster.Spec.ControlPlaneKubelet.HairpinMode = "none"
+    }
+
+    return nil
+}
+
+func (b *KubeletOptionsBuilder) configureKubelet(cluster *kops.Cluster, kubelet *kops.KubeletConfigSpec, kubernetesVersion model.KubernetesVersion) error {
     // Standard options
-    clusterSpec.Kubelet.EnableDebuggingHandlers = fi.PtrTo(true)
-    clusterSpec.Kubelet.PodManifestPath = "/etc/kubernetes/manifests"
-    clusterSpec.Kubelet.LogLevel = fi.PtrTo(int32(2))
-    clusterSpec.Kubelet.ClusterDomain = clusterSpec.ClusterDNSDomain
+    kubelet.EnableDebuggingHandlers = fi.PtrTo(true)
+    kubelet.PodManifestPath = "/etc/kubernetes/manifests"
+    kubelet.LogLevel = fi.PtrTo(int32(2))
+    kubelet.ClusterDomain = cluster.Spec.ClusterDNSDomain

     // AllowPrivileged is deprecated and removed in v1.14.
     // See https://github.com/kubernetes/kubernetes/pull/71835
-    if clusterSpec.Kubelet.AllowPrivileged != nil {
+    if kubelet.AllowPrivileged != nil {
         // If it is explicitly set to false, return an error, because this
         // behavior is no longer supported in v1.14 (the default was true, prior).
-        if !*clusterSpec.Kubelet.AllowPrivileged {
+        if !*kubelet.AllowPrivileged {
             klog.Warningf("Kubelet's --allow-privileged flag is no longer supported in v1.14.")
         }
         // Explicitly set it to nil, so it won't be passed on the command line.
-        clusterSpec.Kubelet.AllowPrivileged = nil
+        kubelet.AllowPrivileged = nil
     }

-    if clusterSpec.Kubelet.ClusterDNS == "" {
-        if clusterSpec.KubeDNS != nil && clusterSpec.KubeDNS.NodeLocalDNS != nil && fi.ValueOf(clusterSpec.KubeDNS.NodeLocalDNS.Enabled) {
-            clusterSpec.Kubelet.ClusterDNS = clusterSpec.KubeDNS.NodeLocalDNS.LocalIP
+    if kubelet.ClusterDNS == "" {
+        if cluster.Spec.KubeDNS != nil && cluster.Spec.KubeDNS.NodeLocalDNS != nil && fi.ValueOf(cluster.Spec.KubeDNS.NodeLocalDNS.Enabled) {
+            kubelet.ClusterDNS = cluster.Spec.KubeDNS.NodeLocalDNS.LocalIP
         } else {
-            ip, err := WellKnownServiceIP(&clusterSpec.Networking, 10)
+            ip, err := WellKnownServiceIP(&cluster.Spec.Networking, 10)
             if err != nil {
                 return err
             }
-            clusterSpec.Kubelet.ClusterDNS = ip.String()
+            kubelet.ClusterDNS = ip.String()
         }
     }

-    clusterSpec.ControlPlaneKubelet.RegisterSchedulable = fi.PtrTo(false)
-    // Replace the CIDR with a CIDR allocated by KCM (the default, but included for clarity)
-    // We _do_ allow debugging handlers, so we can do logs
-    // This does allow more access than we would like though
-    clusterSpec.ControlPlaneKubelet.EnableDebuggingHandlers = fi.PtrTo(true)
-
     {
         // For pod eviction in low memory or empty disk situations
-        if clusterSpec.Kubelet.EvictionHard == nil {
+        if kubelet.EvictionHard == nil {
             evictionHard := []string{
                 // TODO: Some people recommend 250Mi, but this would hurt small machines
                 "memory.available<100Mi",

@@ -97,65 +112,56 @@ func (b *KubeletOptionsBuilder) BuildOptions(cluster *kops.Cluster) error {
                 "imagefs.available<10%",
                 "imagefs.inodesFree<5%",
             }
-            clusterSpec.Kubelet.EvictionHard = fi.PtrTo(strings.Join(evictionHard, ","))
+            kubelet.EvictionHard = fi.PtrTo(strings.Join(evictionHard, ","))
         }
     }

     // use kubeconfig instead of api-servers
     const kubeconfigPath = "/var/lib/kubelet/kubeconfig"
-    clusterSpec.Kubelet.KubeconfigPath = kubeconfigPath
-    clusterSpec.ControlPlaneKubelet.KubeconfigPath = kubeconfigPath
-
-    // IsolateControlPlane enables the legacy behaviour, where master pods on a separate network
-    // In newer versions of kubernetes, most of that functionality has been removed though
-    if fi.ValueOf(clusterSpec.Networking.IsolateControlPlane) {
-        clusterSpec.ControlPlaneKubelet.EnableDebuggingHandlers = fi.PtrTo(false)
-        clusterSpec.ControlPlaneKubelet.HairpinMode = "none"
-    }
+    kubelet.KubeconfigPath = kubeconfigPath

     cloudProvider := cluster.GetCloudProvider()

-    clusterSpec.Kubelet.CgroupRoot = "/"
+    kubelet.CgroupRoot = "/"

     klog.V(1).Infof("Cloud Provider: %s", cloudProvider)
     if cloudProvider == kops.CloudProviderAWS {
-        clusterSpec.Kubelet.CloudProvider = "aws"
+        kubelet.CloudProvider = "aws"
     }

     if cloudProvider == kops.CloudProviderDO {
-        clusterSpec.Kubelet.CloudProvider = "external"
+        kubelet.CloudProvider = "external"
     }

     if cloudProvider == kops.CloudProviderGCE {
-        clusterSpec.Kubelet.CloudProvider = "gce"
-        clusterSpec.Kubelet.HairpinMode = "promiscuous-bridge"
+        kubelet.CloudProvider = "gce"
+        kubelet.HairpinMode = "promiscuous-bridge"

-        if clusterSpec.CloudConfig == nil {
-            clusterSpec.CloudConfig = &kops.CloudConfiguration{}
+        if cluster.Spec.CloudConfig == nil {
+            cluster.Spec.CloudConfig = &kops.CloudConfiguration{}
         }
-        clusterSpec.CloudProvider.GCE.Multizone = fi.PtrTo(true)
-        clusterSpec.CloudProvider.GCE.NodeTags = fi.PtrTo(gce.TagForRole(b.ClusterName, kops.InstanceGroupRoleNode))
+
+        cluster.Spec.CloudProvider.GCE.Multizone = fi.PtrTo(true)
+        cluster.Spec.CloudProvider.GCE.NodeTags = fi.PtrTo(gce.TagForRole(b.ClusterName, kops.InstanceGroupRoleNode))
     }

     if cloudProvider == kops.CloudProviderHetzner {
-        clusterSpec.Kubelet.CloudProvider = "external"
+        kubelet.CloudProvider = "external"
     }

     if cloudProvider == kops.CloudProviderOpenstack {
-        clusterSpec.Kubelet.CloudProvider = "openstack"
+        kubelet.CloudProvider = "openstack"
     }

     if cloudProvider == kops.CloudProviderAzure {
-        clusterSpec.Kubelet.CloudProvider = "azure"
+        kubelet.CloudProvider = "azure"
     }

     if cloudProvider == kops.CloudProviderScaleway {
-        clusterSpec.Kubelet.CloudProvider = "external"
+        kubelet.CloudProvider = "external"
     }

-    if clusterSpec.ExternalCloudControllerManager != nil {
-        clusterSpec.Kubelet.CloudProvider = "external"
+    if cluster.Spec.ExternalCloudControllerManager != nil {
+        kubelet.CloudProvider = "external"
     }

     // Prevent image GC from pruning the pause image

@@ -165,43 +171,42 @@ func (b *KubeletOptionsBuilder) BuildOptions(cluster *kops.Cluster) error {
     if image, err = b.AssetBuilder.RemapImage(image); err != nil {
         return err
     }
-    clusterSpec.Kubelet.PodInfraContainerImage = image
+    kubelet.PodInfraContainerImage = image

-    if clusterSpec.Kubelet.FeatureGates == nil {
-        clusterSpec.Kubelet.FeatureGates = make(map[string]string)
+    if kubelet.FeatureGates == nil {
+        kubelet.FeatureGates = make(map[string]string)
     }

-    if clusterSpec.CloudProvider.AWS != nil {
-        if _, found := clusterSpec.Kubelet.FeatureGates["InTreePluginAWSUnregister"]; !found && b.IsKubernetesLT("1.31") {
-            clusterSpec.Kubelet.FeatureGates["InTreePluginAWSUnregister"] = "true"
+    if cluster.Spec.CloudProvider.AWS != nil {
+        if _, found := kubelet.FeatureGates["InTreePluginAWSUnregister"]; !found && kubernetesVersion.IsLT("1.31") {
+            kubelet.FeatureGates["InTreePluginAWSUnregister"] = "true"
         }

-        if _, found := clusterSpec.Kubelet.FeatureGates["CSIMigrationAWS"]; !found && b.IsKubernetesLT("1.27") {
-            clusterSpec.Kubelet.FeatureGates["CSIMigrationAWS"] = "true"
+        if _, found := kubelet.FeatureGates["CSIMigrationAWS"]; !found && kubernetesVersion.IsLT("1.27") {
+            kubelet.FeatureGates["CSIMigrationAWS"] = "true"
         }
     }

     // Set systemd as the default cgroup driver for kubelet
-    if clusterSpec.Kubelet.CgroupDriver == "" {
-        clusterSpec.Kubelet.CgroupDriver = "systemd"
+    if kubelet.CgroupDriver == "" {
+        kubelet.CgroupDriver = "systemd"
     }

-    if clusterSpec.Kubelet.ProtectKernelDefaults == nil {
-        clusterSpec.Kubelet.ProtectKernelDefaults = fi.PtrTo(true)
+    if kubelet.ProtectKernelDefaults == nil {
+        kubelet.ProtectKernelDefaults = fi.PtrTo(true)
     }

     // We do not enable graceful shutdown when using amazonaws due to leaking ENIs.
     // Graceful shutdown is also not available by default on k8s < 1.21
-    if clusterSpec.Kubelet.ShutdownGracePeriod == nil && clusterSpec.Networking.AmazonVPC == nil {
-        clusterSpec.Kubelet.ShutdownGracePeriod = &metav1.Duration{Duration: time.Duration(30 * time.Second)}
-        clusterSpec.Kubelet.ShutdownGracePeriodCriticalPods = &metav1.Duration{Duration: time.Duration(10 * time.Second)}
-    } else if clusterSpec.Networking.AmazonVPC != nil {
-        clusterSpec.Kubelet.ShutdownGracePeriod = &metav1.Duration{Duration: 0}
-        clusterSpec.Kubelet.ShutdownGracePeriodCriticalPods = &metav1.Duration{Duration: 0}
+    if kubelet.ShutdownGracePeriod == nil && cluster.Spec.Networking.AmazonVPC == nil {
+        kubelet.ShutdownGracePeriod = &metav1.Duration{Duration: time.Duration(30 * time.Second)}
+        kubelet.ShutdownGracePeriodCriticalPods = &metav1.Duration{Duration: time.Duration(10 * time.Second)}
+    } else if cluster.Spec.Networking.AmazonVPC != nil {
+        kubelet.ShutdownGracePeriod = &metav1.Duration{Duration: 0}
+        kubelet.ShutdownGracePeriodCriticalPods = &metav1.Duration{Duration: 0}
     }

-    clusterSpec.Kubelet.RegisterSchedulable = fi.PtrTo(true)
-    clusterSpec.ControlPlaneKubelet.RegisterSchedulable = fi.PtrTo(true)
+    kubelet.RegisterSchedulable = fi.PtrTo(true)

     return nil
 }
@@ -20,7 +20,6 @@ import (
     "testing"

     "k8s.io/kops/pkg/apis/kops"
-    "k8s.io/kops/pkg/apis/kops/util"
    "k8s.io/kops/pkg/assets"
     "k8s.io/kops/util/pkg/vfs"
 )

@@ -40,21 +39,16 @@ func buildKubeletTestCluster() *kops.Cluster {
 func buildOptions(cluster *kops.Cluster) error {
     ab := assets.NewAssetBuilder(vfs.Context, cluster.Spec.Assets, cluster.Spec.KubernetesVersion, false)

-    ver, err := util.ParseKubernetesVersion(cluster.Spec.KubernetesVersion)
+    optionsContext, err := NewOptionsContext(cluster, ab, ab.KubeletSupportedVersion)
     if err != nil {
         return err
     }

     builder := KubeletOptionsBuilder{
-        OptionsContext: &OptionsContext{
-            AssetBuilder:      ab,
-            KubernetesVersion: *ver,
-        },
+        OptionsContext: optionsContext,
     }

-    err = builder.BuildOptions(cluster)
-    if err != nil {
-        return nil
+    if err := builder.BuildOptions(cluster); err != nil {
+        return err
     }

     return nil
@@ -63,11 +63,11 @@ func (b *KubeSchedulerOptionsBuilder) BuildOptions(o *kops.Cluster) error {
         config.FeatureGates = make(map[string]string)
     }

-    if _, found := config.FeatureGates["InTreePluginAWSUnregister"]; !found && b.IsKubernetesLT("1.31") {
+    if _, found := config.FeatureGates["InTreePluginAWSUnregister"]; !found && b.ControlPlaneKubernetesVersion().IsLT("1.31") {
         config.FeatureGates["InTreePluginAWSUnregister"] = "true"
     }

-    if _, found := config.FeatureGates["CSIMigrationAWS"]; !found && b.IsKubernetesLT("1.27") {
+    if _, found := config.FeatureGates["CSIMigrationAWS"]; !found && b.ControlPlaneKubernetesVersion().IsLT("1.27") {
         config.FeatureGates["CSIMigrationAWS"] = "true"
     }
 }
@@ -19,7 +19,6 @@ package components
 import (
     "testing"

-    "k8s.io/kops/pkg/apis/kops/util"
     "k8s.io/kops/pkg/assets"
     "k8s.io/kops/util/pkg/vfs"
 )

@@ -33,20 +32,16 @@ func Test_Build_Scheduler(t *testing.T) {
     c.Spec.KubernetesVersion = v
     b := assets.NewAssetBuilder(vfs.Context, c.Spec.Assets, c.Spec.KubernetesVersion, false)

-    version, err := util.ParseKubernetesVersion(v)
+    optionsContext, err := NewOptionsContext(c, b, b.KubeletSupportedVersion)
     if err != nil {
-        t.Fatalf("unexpected error from ParseKubernetesVersion %s: %v", v, err)
+        t.Fatalf("error from NewOptionsContext: %v", err)
     }

     ks := &KubeSchedulerOptionsBuilder{
-        &OptionsContext{
-            AssetBuilder:      b,
-            KubernetesVersion: *version,
-        },
+        OptionsContext: optionsContext,
     }

     err = ks.BuildOptions(c)

     if err != nil {
         t.Fatalf("unexpected error from BuildOptions: %v", err)
     }
@@ -225,6 +225,11 @@ func (n *nodeUpConfigBuilder) BuildConfig(ig *kops.InstanceGroup, wellKnownAddre
     }

     if !hasAPIServer && n.assetBuilder.KubeletSupportedVersion != "" {
         // Set kubernetes version to avoid spurious rolling-update
         config.KubernetesVersion = n.assetBuilder.KubeletSupportedVersion
+
+        // TODO: Rename KubernetesVersion to ControlPlaneVersion
+        if err := igModel.ForceKubernetesVersion(n.assetBuilder.KubeletSupportedVersion); err != nil {
+            return nil, nil, err
+        }
     }
@@ -25,7 +25,6 @@ import (
     "k8s.io/klog/v2"

     kopsapi "k8s.io/kops/pkg/apis/kops"
-    "k8s.io/kops/pkg/apis/kops/util"
     "k8s.io/kops/pkg/apis/kops/validation"
     "k8s.io/kops/pkg/assets"
     "k8s.io/kops/pkg/client/simple"

@@ -295,15 +294,10 @@ func (c *populateClusterSpec) run(ctx context.Context, clientset simple.Clientse
     if cluster.Spec.KubernetesVersion == "" {
         return fmt.Errorf("KubernetesVersion is required")
     }
-    sv, err := util.ParseKubernetesVersion(cluster.Spec.KubernetesVersion)
-    if err != nil {
-        return fmt.Errorf("unable to determine kubernetes version from %q", cluster.Spec.KubernetesVersion)
-    }

-    optionsContext := &components.OptionsContext{
-        ClusterName:       cluster.ObjectMeta.Name,
-        KubernetesVersion: *sv,
-        AssetBuilder:      c.assetBuilder,
+    optionsContext, err := components.NewOptionsContext(cluster, c.assetBuilder, c.assetBuilder.KubeletSupportedVersion)
+    if err != nil {
+        return err
     }

     var codeModels []loader.ClusterOptionsBuilder
@@ -29,6 +29,16 @@ type SpecBuilder struct {
 }

 func (l *SpecBuilder) BuildCompleteSpec(cluster *kopsapi.Cluster) (*kopsapi.Cluster, error) {
+    // Control-plane kubelet config = (base kubelet config + control-plane kubelet config)
+    controlPlaneKubelet := &kopsapi.KubeletConfigSpec{}
+    if cluster.Spec.Kubelet != nil {
+        reflectutils.JSONMergeStruct(controlPlaneKubelet, cluster.Spec.Kubelet)
+    }
+    if cluster.Spec.ControlPlaneKubelet != nil {
+        reflectutils.JSONMergeStruct(controlPlaneKubelet, cluster.Spec.ControlPlaneKubelet)
+    }
+    cluster.Spec.ControlPlaneKubelet = controlPlaneKubelet
+
     loaded, err := l.OptionsLoader.Build(cluster)
     if err != nil {
         return nil, err

@@ -36,12 +46,6 @@ func (l *SpecBuilder) BuildCompleteSpec(cluster *kopsapi.Clus
     completed := &kopsapi.Cluster{}
     *completed = *loaded

-    // Control-plane kubelet config = (base kubelet config + control-plane kubelet config)
-    controlPlaneKubelet := &kopsapi.KubeletConfigSpec{}
-    reflectutils.JSONMergeStruct(controlPlaneKubelet, completed.Spec.Kubelet)
-    reflectutils.JSONMergeStruct(controlPlaneKubelet, completed.Spec.ControlPlaneKubelet)
-    completed.Spec.ControlPlaneKubelet = controlPlaneKubelet
-
     klog.V(1).Infof("options: %s", fi.DebugAsJsonStringIndent(completed))
     return completed, nil
 }
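For context on the SpecBuilder change above: the control-plane kubelet config is now assembled before the options builders run, by layering the control-plane overrides on top of the base kubelet config. The sketch below is a rough standalone approximation of that layering using encoding/json; it is not kops's reflectutils.JSONMergeStruct, and the field set is invented, but it shows the assumed semantics that fields set by the later (control-plane) config win:

// Rough approximation (not kops code) of "base kubelet config + control-plane
// kubelet config": later merges overwrite only the fields they actually set.
package main

import (
    "encoding/json"
    "fmt"
)

type KubeletConfig struct {
    LogLevel     *int32 `json:"logLevel,omitempty"`
    CgroupDriver string `json:"cgroupDriver,omitempty"`
    HairpinMode  string `json:"hairpinMode,omitempty"`
}

// mergeJSON overlays src onto dst: only fields present in src's JSON replace dst's.
func mergeJSON(dst, src *KubeletConfig) error {
    b, err := json.Marshal(src)
    if err != nil {
        return err
    }
    return json.Unmarshal(b, dst)
}

func main() {
    two := int32(2)
    base := &KubeletConfig{LogLevel: &two, CgroupDriver: "systemd"}
    controlPlaneOverrides := &KubeletConfig{HairpinMode: "none"}

    merged := &KubeletConfig{}
    _ = mergeJSON(merged, base)                  // base kubelet config
    _ = mergeJSON(merged, controlPlaneOverrides) // control-plane overrides win

    fmt.Printf("logLevel=%d cgroupDriver=%s hairpinMode=%s\n",
        *merged.LogLevel, merged.CgroupDriver, merged.HairpinMode)
}

Doing this merge up front means the options builders see a fully-populated ControlPlaneKubelet and can configure it with the control-plane version, while the base Kubelet config is configured with the node version.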