Fix json merge behavior so IG kubelet config takes precedence

Update upup/pkg/fi/cloudup/populate_instancegroup_spec.go

Co-authored-by: Ciprian Hacman <ciprian@hakman.dev>
Ole Markus With 2022-09-25 13:47:18 +02:00
parent 8ff240fb09
commit 788b9d7508
3 changed files with 158 additions and 26 deletions
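
The bug was one of merge direction: reflectutils.JSONMergeStruct overlays its second argument onto its first, and the old code merged the cluster-level kubelet config over the instance group's, so for any field set in both the cluster value silently won. Below is a minimal standalone sketch of the two merge orders; the KubeletConfig type and jsonMerge helper are illustrative stand-ins that emulate the src-wins overlay with encoding/json, not the actual kops reflectutils implementation.

package main

import (
	"encoding/json"
	"fmt"
)

// KubeletConfig stands in for kops.KubeletConfigSpec; only the field
// relevant to this commit is shown.
type KubeletConfig struct {
	EvictionHard *string `json:"evictionHard,omitempty"`
}

// jsonMerge overlays src onto dest: any field set in src wins, while
// fields omitted from src (nil pointers, via omitempty) leave dest alone.
func jsonMerge(dest, src interface{}) error {
	b, err := json.Marshal(src)
	if err != nil {
		return err
	}
	return json.Unmarshal(b, dest)
}

func main() {
	s := func(v string) *string { return &v }
	clusterKubelet := KubeletConfig{EvictionHard: s("memory.available<350Mi")}
	igKubelet := KubeletConfig{EvictionHard: s("memory.available<250Mi")}

	// Buggy order (before this fix): the cluster config was merged over
	// the IG config, so the cluster value clobbered the IG override.
	buggy := KubeletConfig{}
	_ = jsonMerge(&buggy, igKubelet)
	_ = jsonMerge(&buggy, clusterKubelet)
	fmt.Println(*buggy.EvictionHard) // memory.available<350Mi

	// Fixed order: start from the cluster config, then overlay the
	// instance group config so the IG value takes precedence.
	fixed := KubeletConfig{}
	_ = jsonMerge(&fixed, clusterKubelet)
	_ = jsonMerge(&fixed, igKubelet)
	fmt.Println(*fixed.EvictionHard) // memory.available<250Mi
}

The buggy order prints 350Mi, the fixed order 250Mi, which is the behavior the new TestPopulateInstanceGroup_EvictionHard below asserts.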

upup/pkg/fi/cloudup/populate_cluster_spec_test.go

@@ -177,6 +177,28 @@ func TestPopulateCluster_StorageDefault(t *testing.T) {
 	}
 }
 
+func TestPopulateCluster_EvictionHard(t *testing.T) {
+	cloud, c := buildMinimalCluster()
+
+	err := PerformAssignments(c, cloud)
+	if err != nil {
+		t.Fatalf("error from PerformAssignments: %v", err)
+	}
+
+	c.Spec.Kubelet = &kopsapi.KubeletConfigSpec{
+		EvictionHard: fi.String("memory.available<250Mi"),
+	}
+
+	full, err := mockedPopulateClusterSpec(c, cloud)
+	if err != nil {
+		t.Fatalf("Unexpected error from PopulateCluster: %v", err)
+	}
+
+	if fi.StringValue(full.Spec.Kubelet.EvictionHard) != "memory.available<250Mi" {
+		t.Fatalf("Unexpected EvictionHard: %v", *full.Spec.Kubelet.EvictionHard)
+	}
+}
+
 func build(c *kopsapi.Cluster) (*kopsapi.Cluster, error) {
 	cloud, err := BuildCloud(c)
 	if err != nil {

upup/pkg/fi/cloudup/populate_instancegroup_spec.go

@@ -20,6 +20,7 @@ import (
 	"fmt"
 	"strings"
 
+	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/klog/v2"
 	"k8s.io/kops/pkg/apis/kops"
@@ -74,7 +75,7 @@ func PopulateInstanceGroupSpec(cluster *kops.Cluster, input *kops.InstanceGroup,
 	ig := &kops.InstanceGroup{}
 	reflectutils.JSONMergeStruct(ig, input)
 
-	spec := &ig.Spec
+	igSpec := &ig.Spec
 
 	// TODO: Clean up
 	if ig.IsMaster() {
@@ -223,38 +224,56 @@ func PopulateInstanceGroupSpec(cluster *kops.Cluster, input *kops.InstanceGroup,
 		ig.Spec.Manager = kops.InstanceManagerCloudGroup
 	}
 
-	if spec.Kubelet == nil {
-		spec.Kubelet = &kops.KubeletConfigSpec{}
+	if igSpec.Kubelet == nil {
+		igSpec.Kubelet = &kops.KubeletConfigSpec{}
 	}
-	kubeletConfig := spec.Kubelet
+
+	var igKubeletConfig *kops.KubeletConfigSpec
+	// Start with the cluster kubelet config
+	if ig.IsMaster() {
+		if cluster.Spec.MasterKubelet != nil {
+			igKubeletConfig = cluster.Spec.MasterKubelet.DeepCopy()
+		} else {
+			igKubeletConfig = &kops.KubeletConfigSpec{}
+		}
+		// A few settings in Kubelet override those in MasterKubelet. I'm not sure why.
+		if cluster.Spec.Kubelet != nil && cluster.Spec.Kubelet.AnonymousAuth != nil && !*cluster.Spec.Kubelet.AnonymousAuth {
+			igKubeletConfig.AnonymousAuth = fi.Bool(false)
+		}
+	} else {
+		if cluster.Spec.Kubelet != nil {
+			igKubeletConfig = cluster.Spec.Kubelet.DeepCopy()
+		} else {
+			igKubeletConfig = &kops.KubeletConfigSpec{}
+		}
+	}
 
 	// We include the NodeLabels in the userdata even for Kubernetes 1.16 and later so that
 	// rolling update will still replace nodes when they change.
-	kubeletConfig.NodeLabels = nodelabels.BuildNodeLabels(cluster, ig)
-
-	kubeletConfig.Taints = append(kubeletConfig.Taints, spec.Taints...)
-
-	if ig.IsMaster() {
-		reflectutils.JSONMergeStruct(kubeletConfig, cluster.Spec.MasterKubelet)
-		// A few settings in Kubelet override those in MasterKubelet. I'm not sure why.
-		if cluster.Spec.Kubelet != nil && cluster.Spec.Kubelet.AnonymousAuth != nil && !*cluster.Spec.Kubelet.AnonymousAuth {
-			kubeletConfig.AnonymousAuth = fi.Bool(false)
-		}
-	} else {
-		reflectutils.JSONMergeStruct(kubeletConfig, cluster.Spec.Kubelet)
-	}
+	igKubeletConfig.NodeLabels = nodelabels.BuildNodeLabels(cluster, ig)
+
+	useSecureKubelet := fi.BoolValue(igKubeletConfig.AnonymousAuth)
+
+	// While slices are overridden in most cases, taints are explicitly merged
+	taints := sets.NewString(igKubeletConfig.Taints...)
+	taints.Insert(igSpec.Taints...)
 	if ig.Spec.Kubelet != nil {
-		useSecureKubelet := fi.BoolValue(kubeletConfig.AnonymousAuth)
-		reflectutils.JSONMergeStruct(kubeletConfig, spec.Kubelet)
-		if useSecureKubelet {
-			kubeletConfig.AnonymousAuth = fi.Bool(false)
-		}
+		taints.Insert(igSpec.Kubelet.Taints...)
 	}
+	if cluster.Spec.Kubelet != nil {
+		taints.Insert(cluster.Spec.Kubelet.Taints...)
+	}
+
+	if ig.Spec.Kubelet != nil {
+		reflectutils.JSONMergeStruct(igKubeletConfig, ig.Spec.Kubelet)
+	}
+	igKubeletConfig.Taints = taints.List()
+
+	if useSecureKubelet {
+		igKubeletConfig.AnonymousAuth = fi.Bool(false)
+	}
+
+	ig.Spec.Kubelet = igKubeletConfig
 
 	return ig, nil
 }
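
One subtlety in the rewrite: a plain JSON merge would replace the taints slice outright rather than combine it, so taints from the merged kubelet config, the IG spec, and the cluster kubelet config are collected into a sets.String and written back after the merge. A small sketch of that union behavior; the taint values here are made up for illustration.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// Start from the taints already on the merged kubelet config...
	taints := sets.NewString("node-role.kubernetes.io/control-plane=:NoSchedule")

	// ...then union in taints from the other sources. Re-inserting an
	// existing taint is a no-op, so shared taints never duplicate.
	taints.Insert("dedicated=gpu:NoSchedule")
	taints.Insert("node-role.kubernetes.io/control-plane=:NoSchedule")

	// List returns the union sorted, keeping the result deterministic.
	fmt.Println(taints.List())
	// [dedicated=gpu:NoSchedule node-role.kubernetes.io/control-plane=:NoSchedule]
}

The duplicate-collapsing is what the AddTaintsCollision tests in the test file below depend on when they expect exactly 2 taints.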

upup/pkg/fi/cloudup/populate_instancegroup_spec_test.go

@@ -109,7 +109,6 @@ func TestPopulateInstanceGroup_AddTaintsCollision3(t *testing.T) {
 	}
 	channel := &kopsapi.Channel{}
-
 	cloud, err := BuildCloud(cluster)
 	if err != nil {
 		t.Fatalf("error from BuildCloud: %v", err)
@@ -119,7 +118,99 @@ func TestPopulateInstanceGroup_AddTaintsCollision3(t *testing.T) {
 		t.Fatalf("error from PopulateInstanceGroupSpec: %v", err)
 	}
 	if len(output.Spec.Kubelet.Taints) != 2 {
-		t.Errorf("Expected only 2 taints, got %d", len(output.Spec.Kubelet.Taints))
+		t.Errorf("Expected 2 taints, got %d", len(output.Spec.Kubelet.Taints))
 	}
 }
 
+func TestPopulateInstanceGroup_EvictionHard(t *testing.T) {
+	_, cluster := buildMinimalCluster()
+	cluster.Spec.Kubelet = &kopsapi.KubeletConfigSpec{
+		EvictionHard: fi.String("memory.available<350Mi"),
+	}
+	input := buildMinimalNodeInstanceGroup()
+	input.Spec.Kubelet = &kopsapi.KubeletConfigSpec{
+		EvictionHard: fi.String("memory.available<250Mi"),
+	}
+	channel := &kopsapi.Channel{}
+	cloud, err := BuildCloud(cluster)
+	if err != nil {
+		t.Fatalf("error from BuildCloud: %v", err)
+	}
+	output, err := PopulateInstanceGroupSpec(cluster, input, cloud, channel)
+	if err != nil {
+		t.Fatalf("error from PopulateInstanceGroupSpec: %v", err)
+	}
+	if fi.StringValue(output.Spec.Kubelet.EvictionHard) != "memory.available<250Mi" {
+		t.Errorf("Unexpected value %v", fi.StringValue(output.Spec.Kubelet.EvictionHard))
+	}
+}
+
+func TestPopulateInstanceGroup_EvictionHard3(t *testing.T) {
+	_, cluster := buildMinimalCluster()
+	cluster.Spec.Kubelet = &kopsapi.KubeletConfigSpec{
+		EvictionHard: fi.String("memory.available<350Mi"),
+	}
+	input := buildMinimalMasterInstanceGroup("us-test-1")
+	channel := &kopsapi.Channel{}
+	cloud, err := BuildCloud(cluster)
+	if err != nil {
+		t.Fatalf("error from BuildCloud: %v", err)
+	}
+	output, err := PopulateInstanceGroupSpec(cluster, input, cloud, channel)
+	if err != nil {
+		t.Fatalf("error from PopulateInstanceGroupSpec: %v", err)
+	}
+	// There is no default EvictionHard
+	if fi.StringValue(output.Spec.Kubelet.EvictionHard) != "" {
+		t.Errorf("Unexpected value %v", fi.StringValue(output.Spec.Kubelet.EvictionHard))
+	}
+}
+
+func TestPopulateInstanceGroup_EvictionHard4(t *testing.T) {
+	_, cluster := buildMinimalCluster()
+	cluster.Spec.MasterKubelet = &kopsapi.KubeletConfigSpec{
+		EvictionHard: fi.String("memory.available<350Mi"),
+	}
+	input := buildMinimalMasterInstanceGroup("us-test-1")
+	channel := &kopsapi.Channel{}
+	cloud, err := BuildCloud(cluster)
+	if err != nil {
+		t.Fatalf("error from BuildCloud: %v", err)
+	}
+	output, err := PopulateInstanceGroupSpec(cluster, input, cloud, channel)
+	if err != nil {
+		t.Fatalf("error from PopulateInstanceGroupSpec: %v", err)
+	}
+	if fi.StringValue(output.Spec.Kubelet.EvictionHard) != "memory.available<350Mi" {
+		t.Errorf("Unexpected value %v", fi.StringValue(output.Spec.Kubelet.EvictionHard))
+	}
+}
+
+func TestPopulateInstanceGroup_EvictionHard2(t *testing.T) {
+	_, cluster := buildMinimalCluster()
+	input := buildMinimalNodeInstanceGroup()
+	input.Spec.Kubelet = &kopsapi.KubeletConfigSpec{
+		EvictionHard: fi.String("memory.available<250Mi"),
+	}
+	channel := &kopsapi.Channel{}
+	cloud, err := BuildCloud(cluster)
+	if err != nil {
+		t.Fatalf("error from BuildCloud: %v", err)
+	}
+	output, err := PopulateInstanceGroupSpec(cluster, input, cloud, channel)
+	if err != nil {
+		t.Fatalf("error from PopulateInstanceGroupSpec: %v", err)
+	}
+	if fi.StringValue(output.Spec.Kubelet.EvictionHard) != "memory.available<250Mi" {
+		t.Errorf("Unexpected value %v", fi.StringValue(output.Spec.Kubelet.EvictionHard))
+	}
+}