mirror of https://github.com/kubernetes/kops.git
Move NodeLabels into the NodeupConfig
This commit is contained in:
parent 35645b49c4
commit 44fb283e3f
@@ -114,10 +114,7 @@ func (r *NodeReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
 		return ctrl.Result{}, fmt.Errorf("unable to load instance group object for node %s: %v", node.Name, err)
 	}
 
-	labels, err := nodelabels.BuildNodeLabels(cluster, ig)
-	if err != nil {
-		return ctrl.Result{}, fmt.Errorf("unable to build config for node %s: %v", node.Name, err)
-	}
+	labels := nodelabels.BuildNodeLabels(cluster, ig)
 
 	lifecycle, err := r.getInstanceLifecycle(ctx, node)
 	if err != nil {
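
With BuildNodeLabels no longer returning an error, the reconciler's four-line error branch collapses to a single assignment. A minimal, self-contained sketch of the new calling convention, with stub types standing in for the kops API objects (not the actual kops-controller code):

package main

import "fmt"

// Stub types standing in for *kops.Cluster and *kops.InstanceGroup.
type cluster struct{ name string }
type instanceGroup struct{ nodeLabels map[string]string }

// buildNodeLabels mirrors the new infallible signature of
// nodelabels.BuildNodeLabels: it returns only the label map.
func buildNodeLabels(c *cluster, ig *instanceGroup) map[string]string {
	labels := map[string]string{}
	for k, v := range ig.nodeLabels {
		labels[k] = v
	}
	return labels
}

func main() {
	ig := &instanceGroup{nodeLabels: map[string]string{"kops.k8s.io/instancegroup": "nodes"}}
	labels := buildNodeLabels(&cluster{name: "example"}, ig) // no err to check
	fmt.Println(labels)
}
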
@@ -521,12 +521,6 @@ func (b *KubeletBuilder) buildKubeletConfigSpec() (*kops.KubeletConfigSpec, error) {
 	// For bootstrapping reasons, protokube sets the critical labels for kops-controller to run.
 	if b.Cluster.IsKubernetesGTE("1.16") {
 		c.NodeLabels = nil
-	} else {
-		nodeLabels, err := nodelabels.BuildNodeLabels(b.Cluster, b.InstanceGroup)
-		if err != nil {
-			return nil, err
-		}
-		c.NodeLabels = nodeLabels
 	}
 
 	return &c, nil
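
The else branch disappears because the labels now reach the kubelet config through the NodeupConfig (see the NewConfig hunk below), so the builder only has to clear them when kops-controller will apply labels centrally. A schematic of that gate, with a naive stand-in for b.Cluster.IsKubernetesGTE (illustration only, not the kops helper):

package main

import "fmt"

type kubeletConfigSpec struct {
	NodeLabels map[string]string
}

// isKubernetesGTE is a naive stand-in for kops' semver helper; the real
// implementation parses versions properly.
func isKubernetesGTE(version, minimum string) bool {
	return version >= minimum // lexical compare; fine for this example only
}

func main() {
	// Labels seeded from the NodeupConfig's KubeletConfig.
	c := kubeletConfigSpec{NodeLabels: map[string]string{"node-role": "node"}}
	if isKubernetesGTE("1.17", "1.16") {
		// On 1.16+ kops-controller labels nodes centrally, so the
		// kubelet's node-labels setting is cleared.
		c.NodeLabels = nil
	}
	fmt.Println(c.NodeLabels == nil) // true on 1.16+
}
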
@@ -7,6 +7,7 @@ go_library(
     visibility = ["//visibility:public"],
     deps = [
         "//pkg/apis/kops:go_default_library",
+        "//pkg/nodelabels:go_default_library",
         "//upup/pkg/fi:go_default_library",
         "//util/pkg/architectures:go_default_library",
         "//util/pkg/reflectutils:go_default_library",

@@ -18,6 +18,7 @@ package nodeup
 
 import (
 	"k8s.io/kops/pkg/apis/kops"
+	"k8s.io/kops/pkg/nodelabels"
 	"k8s.io/kops/upup/pkg/fi"
 	"k8s.io/kops/util/pkg/architectures"
 	"k8s.io/kops/util/pkg/reflectutils"

@@ -112,6 +113,10 @@ func NewConfig(cluster *kops.Cluster, instanceGroup *kops.InstanceGroup) *Config
 		}
 	}
 
+	// We include the NodeLabels in the userdata even for Kubernetes 1.16 and later so that
+	// rolling update will still replace nodes when they change.
+	config.KubeletConfig.NodeLabels = nodelabels.BuildNodeLabels(cluster, instanceGroup)
+
 	config.KubeletConfig.Taints = append(config.KubeletConfig.Taints, instanceGroup.Spec.Taints...)
 
 	return &config
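
The added comment is the crux of the commit: the labels stay in the userdata so that editing them still alters what rolling update compares. A toy demonstration of that principle (the hashing here is illustrative; kops' actual change detection differs in detail):

package main

import (
	"crypto/sha256"
	"encoding/json"
	"fmt"
)

// A cut-down stand-in for the nodeup Config; only the field that matters here.
type Config struct {
	NodeLabels map[string]string `json:"nodeLabels,omitempty"`
}

// hash renders the config the way userdata embeds it, then digests it.
func hash(c Config) string {
	b, _ := json.Marshal(c) // map keys are sorted, so output is deterministic
	return fmt.Sprintf("%x", sha256.Sum256(b))[:12]
}

func main() {
	before := Config{NodeLabels: map[string]string{"role": "node"}}
	after := Config{NodeLabels: map[string]string{"role": "node", "gpu": "true"}}
	// Because the labels are part of the rendered config, changing them
	// changes the userdata, which is what triggers node replacement.
	fmt.Println(hash(before) != hash(after)) // true
}
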
@@ -281,7 +281,6 @@ func (b *BootstrapScript) Run(c *fi.Context) error {
 
 		"IGSpec": func() (string, error) {
 			spec := make(map[string]interface{})
-			spec["nodeLabels"] = b.ig.Spec.NodeLabels
 
 			hooks, err := b.getRelevantHooks(b.ig.Spec.Hooks, b.ig.Spec.Role)
 			if err != nil {

@@ -33,7 +33,7 @@ const (
 
 // BuildNodeLabels returns the node labels for the specified instance group
 // This moved from the kubelet to a central controller in kubernetes 1.16
-func BuildNodeLabels(cluster *kops.Cluster, instanceGroup *kops.InstanceGroup) (map[string]string, error) {
+func BuildNodeLabels(cluster *kops.Cluster, instanceGroup *kops.InstanceGroup) map[string]string {
 	isMaster := instanceGroup.Spec.Role == kops.InstanceGroupRoleMaster
 
 	// Merge KubeletConfig for NodeLabels
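
BuildNodeLabels merges label sources (the cluster-wide kubelet config and the instance group's settings), with later assignments winning, per the loop visible in the next hunk. A generic sketch of that overlay pattern; the exact precedence order lives in the real function body:

package main

import "fmt"

// mergeLabels overlays label maps left to right; later maps win on conflict.
func mergeLabels(sources ...map[string]string) map[string]string {
	out := map[string]string{}
	for _, src := range sources {
		for k, v := range src {
			out[k] = v
		}
	}
	return out
}

func main() {
	clusterLabels := map[string]string{"env": "prod", "tier": "node"}
	igLabels := map[string]string{"tier": "gpu"} // instance group overrides
	fmt.Println(mergeLabels(clusterLabels, igLabels)) // map[env:prod tier:gpu]
}
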
@@ -71,5 +71,5 @@ func BuildNodeLabels(cluster *kops.Cluster, instanceGroup *kops.InstanceGroup) (map[string]string, error) {
 		nodeLabels[k] = v
 	}
 
-	return nodeLabels, nil
+	return nodeLabels
 }

@@ -111,7 +111,7 @@ func TestBuildNodeLabels(t *testing.T) {
 
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			out, _ := BuildNodeLabels(test.cluster, test.ig)
+			out := BuildNodeLabels(test.cluster, test.ig)
 			if !reflect.DeepEqual(out, test.expected) {
 				t.Fatalf("Actual result:\n%v\nExpect:\n%v", out, test.expected)
 			}