From d96296f66b0c1b7d0a660da7b9047168dc07bf08 Mon Sep 17 00:00:00 2001
From: changzhen
Date: Sat, 25 Feb 2023 18:51:24 +0800
Subject: [PATCH] modify judgement of placement changed

Signed-off-by: changzhen
---
 pkg/scheduler/helper.go      |  73 +++++++
 pkg/scheduler/helper_test.go | 374 +++++++++++++++++++++++++++++++++++
 pkg/scheduler/scheduler.go   |   6 +-
 3 files changed, 451 insertions(+), 2 deletions(-)
 create mode 100644 pkg/scheduler/helper.go
 create mode 100644 pkg/scheduler/helper_test.go

diff --git a/pkg/scheduler/helper.go b/pkg/scheduler/helper.go
new file mode 100644
index 000000000..39e9ca269
--- /dev/null
+++ b/pkg/scheduler/helper.go
@@ -0,0 +1,73 @@
+package scheduler
+
+import (
+	"encoding/json"
+	"reflect"
+
+	"k8s.io/klog/v2"
+
+	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
+)
+
+func placementChanged(
+	placement policyv1alpha1.Placement,
+	appliedPlacementStr string,
+	schedulerObservingAffinityName string,
+) bool {
+	if appliedPlacementStr == "" {
+		return true
+	}
+
+	appliedPlacement := policyv1alpha1.Placement{}
+	err := json.Unmarshal([]byte(appliedPlacementStr), &appliedPlacement)
+	if err != nil {
+		klog.Errorf("Failed to unmarshal applied placement string: %v", err)
+		return false
+	}
+
+	// first check: the entire placement is unchanged
+	if reflect.DeepEqual(placement, appliedPlacement) {
+		return false
+	}
+
+	// second check: whether any field other than ClusterAffinities has changed
+	if !reflect.DeepEqual(placement.ClusterAffinity, appliedPlacement.ClusterAffinity) ||
+		!reflect.DeepEqual(placement.ClusterTolerations, appliedPlacement.ClusterTolerations) ||
+		!reflect.DeepEqual(placement.SpreadConstraints, appliedPlacement.SpreadConstraints) ||
+		!reflect.DeepEqual(placement.ReplicaScheduling, appliedPlacement.ReplicaScheduling) {
+		return true
+	}
+
+	// third check: whether ClusterAffinities has changed
+	return clusterAffinitiesChanged(placement.ClusterAffinities, appliedPlacement.ClusterAffinities, schedulerObservingAffinityName)
+}
+
+func clusterAffinitiesChanged(
+	clusterAffinities, appliedClusterAffinities []policyv1alpha1.ClusterAffinityTerm,
+	schedulerObservingAffinityName string,
+) bool {
+	if schedulerObservingAffinityName == "" {
+		return true
+	}
+
+	var clusterAffinityTerm, appliedClusterAffinityTerm *policyv1alpha1.ClusterAffinityTerm
+	for index := range clusterAffinities {
+		if clusterAffinities[index].AffinityName == schedulerObservingAffinityName {
+			clusterAffinityTerm = &clusterAffinities[index]
+			break
+		}
+	}
+	for index := range appliedClusterAffinities {
+		if appliedClusterAffinities[index].AffinityName == schedulerObservingAffinityName {
+			appliedClusterAffinityTerm = &appliedClusterAffinities[index]
+			break
+		}
+	}
+	if clusterAffinityTerm == nil || appliedClusterAffinityTerm == nil {
+		return true
+	}
+	if !reflect.DeepEqual(clusterAffinityTerm, appliedClusterAffinityTerm) {
+		return true
+	}
+	return false
+}
diff --git a/pkg/scheduler/helper_test.go b/pkg/scheduler/helper_test.go
new file mode 100644
index 000000000..8a6d118ab
--- /dev/null
+++ b/pkg/scheduler/helper_test.go
@@ -0,0 +1,374 @@
+package scheduler
+
+import (
+	"encoding/json"
+	"testing"
+
+	corev1 "k8s.io/api/core/v1"
+
+	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
+)
+
+func Test_placementChanged(t *testing.T) {
+	type args struct {
+		placement                      policyv1alpha1.Placement
+		appliedPlacement               *policyv1alpha1.Placement
+		schedulerObservingAffinityName string
+	}
+	tests := []struct {
+		name string
+		args args
+		want bool
+	}{
+		{
+			name: "nil appliedPlacement",
+			args: args{
+				placement: policyv1alpha1.Placement{
+					ClusterAffinity: &policyv1alpha1.ClusterAffinity{ClusterNames: []string{"m1"}},
+				},
+				appliedPlacement:               nil,
+				schedulerObservingAffinityName: "",
+			},
+			want: true,
+		},
+		{
+			name: "same placement and appliedPlacement",
+			args: args{
+				placement: policyv1alpha1.Placement{
+					ClusterAffinity: &policyv1alpha1.ClusterAffinity{ClusterNames: []string{"m1"}},
+					ClusterTolerations: []corev1.Toleration{
+						{Key: "foo", Operator: corev1.TolerationOpEqual, Value: "foo", Effect: corev1.TaintEffectNoSchedule},
+					},
+					SpreadConstraints: []policyv1alpha1.SpreadConstraint{
+						{MinGroups: 1, MaxGroups: 1, SpreadByField: policyv1alpha1.SpreadByFieldCluster},
+					},
+					ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{
+						ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDuplicated,
+					},
+				},
+				appliedPlacement: &policyv1alpha1.Placement{
+					ClusterAffinity: &policyv1alpha1.ClusterAffinity{ClusterNames: []string{"m1"}},
+					ClusterTolerations: []corev1.Toleration{
+						{Key: "foo", Operator: corev1.TolerationOpEqual, Value: "foo", Effect: corev1.TaintEffectNoSchedule},
+					},
+					SpreadConstraints: []policyv1alpha1.SpreadConstraint{
+						{MinGroups: 1, MaxGroups: 1, SpreadByField: policyv1alpha1.SpreadByFieldCluster},
+					},
+					ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{
+						ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDuplicated,
+					},
+				},
+				schedulerObservingAffinityName: "",
+			},
+			want: false,
+		},
+		{
+			name: "different ClusterAffinity field in placement and appliedPlacement",
+			args: args{
+				placement: policyv1alpha1.Placement{
+					ClusterAffinity: nil,
+					ClusterTolerations: []corev1.Toleration{
+						{Key: "foo", Operator: corev1.TolerationOpEqual, Value: "foo", Effect: corev1.TaintEffectNoSchedule},
+					},
+					SpreadConstraints: []policyv1alpha1.SpreadConstraint{
+						{MinGroups: 1, MaxGroups: 1, SpreadByField: policyv1alpha1.SpreadByFieldCluster},
+					},
+					ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{
+						ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDuplicated,
+					},
+				},
+				appliedPlacement: &policyv1alpha1.Placement{
+					ClusterAffinity: &policyv1alpha1.ClusterAffinity{ClusterNames: []string{"m2"}},
+					ClusterTolerations: []corev1.Toleration{
+						{Key: "foo", Operator: corev1.TolerationOpEqual, Value: "foo", Effect: corev1.TaintEffectNoSchedule},
+					},
+					SpreadConstraints: []policyv1alpha1.SpreadConstraint{
+						{MinGroups: 1, MaxGroups: 1, SpreadByField: policyv1alpha1.SpreadByFieldCluster},
+					},
+					ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{
+						ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDuplicated,
+					},
+				},
+				schedulerObservingAffinityName: "",
+			},
+			want: true,
+		},
+		{
+			name: "different ClusterTolerations field in placement and appliedPlacement",
+			args: args{
+				placement: policyv1alpha1.Placement{
+					ClusterAffinity: &policyv1alpha1.ClusterAffinity{ClusterNames: []string{"m1"}},
+					ClusterTolerations: []corev1.Toleration{
+						{Key: "foo", Operator: corev1.TolerationOpEqual, Value: "foo", Effect: corev1.TaintEffectNoSchedule},
+					},
+					SpreadConstraints: []policyv1alpha1.SpreadConstraint{
+						{MinGroups: 1, MaxGroups: 1, SpreadByField: policyv1alpha1.SpreadByFieldCluster},
+					},
+					ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{
+						ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDuplicated,
+					},
+				},
+				appliedPlacement: &policyv1alpha1.Placement{
+					ClusterAffinity: &policyv1alpha1.ClusterAffinity{ClusterNames: []string{"m1"}},
+					ClusterTolerations: []corev1.Toleration{
+						{Key: "foo", Operator: corev1.TolerationOpExists, Value: "foo", Effect: corev1.TaintEffectNoSchedule},
+					},
+					SpreadConstraints: []policyv1alpha1.SpreadConstraint{
+						{MinGroups: 1, MaxGroups: 1, SpreadByField: policyv1alpha1.SpreadByFieldCluster},
+					},
+					ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{
+						ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDuplicated,
+					},
+				},
+				schedulerObservingAffinityName: "",
+			},
+			want: true,
+		},
+		{
+			name: "different SpreadConstraints field in placement and appliedPlacement",
+			args: args{
+				placement: policyv1alpha1.Placement{
+					ClusterAffinity: &policyv1alpha1.ClusterAffinity{ClusterNames: []string{"m1"}},
+					ClusterTolerations: []corev1.Toleration{
+						{Key: "foo", Operator: corev1.TolerationOpEqual, Value: "foo", Effect: corev1.TaintEffectNoSchedule},
+					},
+					SpreadConstraints: []policyv1alpha1.SpreadConstraint{
+						{MinGroups: 1, MaxGroups: 1, SpreadByField: policyv1alpha1.SpreadByFieldCluster},
+					},
+					ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{
+						ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDuplicated,
+					},
+				},
+				appliedPlacement: &policyv1alpha1.Placement{
+					ClusterAffinity: &policyv1alpha1.ClusterAffinity{ClusterNames: []string{"m1"}},
+					ClusterTolerations: []corev1.Toleration{
+						{Key: "foo", Operator: corev1.TolerationOpEqual, Value: "foo", Effect: corev1.TaintEffectNoSchedule},
+					},
+					SpreadConstraints: []policyv1alpha1.SpreadConstraint{
+						{MinGroups: 1, MaxGroups: 2, SpreadByField: policyv1alpha1.SpreadByFieldCluster},
+					},
+					ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{
+						ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDuplicated,
+					},
+				},
+				schedulerObservingAffinityName: "",
+			},
+			want: true,
+		},
+		{
+			name: "different ReplicaScheduling field in placement and appliedPlacement",
+			args: args{
+				placement: policyv1alpha1.Placement{
+					ClusterAffinity: &policyv1alpha1.ClusterAffinity{ClusterNames: []string{"m1"}},
+					ClusterTolerations: []corev1.Toleration{
+						{Key: "foo", Operator: corev1.TolerationOpEqual, Value: "foo", Effect: corev1.TaintEffectNoSchedule},
+					},
+					SpreadConstraints: []policyv1alpha1.SpreadConstraint{
+						{MinGroups: 1, MaxGroups: 1, SpreadByField: policyv1alpha1.SpreadByFieldCluster},
+					},
+					ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{
+						ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDuplicated,
+					},
+				},
+				appliedPlacement: &policyv1alpha1.Placement{
+					ClusterAffinity: &policyv1alpha1.ClusterAffinity{ClusterNames: []string{"m1"}},
+					ClusterTolerations: []corev1.Toleration{
+						{Key: "foo", Operator: corev1.TolerationOpEqual, Value: "foo", Effect: corev1.TaintEffectNoSchedule},
+					},
+					SpreadConstraints: []policyv1alpha1.SpreadConstraint{
+						{MinGroups: 1, MaxGroups: 1, SpreadByField: policyv1alpha1.SpreadByFieldCluster},
+					},
+					ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{
+						ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided,
+					},
+				},
+				schedulerObservingAffinityName: "",
+			},
+			want: true,
+		},
+		{
+			name: "different ClusterAffinities field with empty schedulerObservingAffinityName",
+			args: args{
+				placement: policyv1alpha1.Placement{
+					ClusterAffinities: []policyv1alpha1.ClusterAffinityTerm{
+						{
+							AffinityName:    "group1",
+							ClusterAffinity: policyv1alpha1.ClusterAffinity{ClusterNames: []string{"m1"}},
+						},
+					},
+				},
+				appliedPlacement: &policyv1alpha1.Placement{},
+				schedulerObservingAffinityName: "",
+			},
+			want: true,
+		},
+		{
+			name: "update the target ClusterAffinityTerm",
+			args: args{
+				placement: policyv1alpha1.Placement{
+					ClusterAffinities: []policyv1alpha1.ClusterAffinityTerm{
+						{
+							AffinityName:    "group1",
+							ClusterAffinity: policyv1alpha1.ClusterAffinity{ClusterNames: []string{"m1", "m2"}},
+						},
+					},
+				},
+				appliedPlacement: &policyv1alpha1.Placement{
+					ClusterAffinities: []policyv1alpha1.ClusterAffinityTerm{
+						{
+							AffinityName:    "group1",
+							ClusterAffinity: policyv1alpha1.ClusterAffinity{ClusterNames: []string{"m1"}},
+						},
+					},
+				},
+				schedulerObservingAffinityName: "group1",
+			},
+			want: true,
+		},
+		{
+			name: "delete the target ClusterAffinityTerm",
+			args: args{
+				placement: policyv1alpha1.Placement{
+					ClusterAffinities: []policyv1alpha1.ClusterAffinityTerm{
+						{
+							AffinityName:    "group2",
+							ClusterAffinity: policyv1alpha1.ClusterAffinity{ClusterNames: []string{"m2"}},
+						},
+					},
+				},
+				appliedPlacement: &policyv1alpha1.Placement{
+					ClusterAffinities: []policyv1alpha1.ClusterAffinityTerm{
+						{
+							AffinityName:    "group1",
+							ClusterAffinity: policyv1alpha1.ClusterAffinity{ClusterNames: []string{"m1"}},
+						},
+						{
+							AffinityName:    "group2",
+							ClusterAffinity: policyv1alpha1.ClusterAffinity{ClusterNames: []string{"m2"}},
+						},
+					},
+				},
+				schedulerObservingAffinityName: "group1",
+			},
+			want: true,
+		},
+		{
+			name: "delete the whole ClusterAffinities",
+			args: args{
+				placement: policyv1alpha1.Placement{},
+				appliedPlacement: &policyv1alpha1.Placement{
+					ClusterAffinities: []policyv1alpha1.ClusterAffinityTerm{
+						{
+							AffinityName:    "group1",
+							ClusterAffinity: policyv1alpha1.ClusterAffinity{ClusterNames: []string{"m1"}},
+						},
+					},
+				},
+				schedulerObservingAffinityName: "group1",
+			},
+			want: true,
+		},
+		{
+			name: "add a new ClusterAffinityTerm and don't update the target ClusterAffinityTerm",
+			args: args{
+				placement: policyv1alpha1.Placement{
+					ClusterAffinities: []policyv1alpha1.ClusterAffinityTerm{
+						{
+							AffinityName:    "group1",
+							ClusterAffinity: policyv1alpha1.ClusterAffinity{ClusterNames: []string{"m1"}},
+						},
+						{
+							AffinityName:    "group2",
+							ClusterAffinity: policyv1alpha1.ClusterAffinity{ClusterNames: []string{"m1", "m2"}},
+						},
+					},
+				},
+				appliedPlacement: &policyv1alpha1.Placement{
+					ClusterAffinities: []policyv1alpha1.ClusterAffinityTerm{
+						{
+							AffinityName:    "group1",
+							ClusterAffinity: policyv1alpha1.ClusterAffinity{ClusterNames: []string{"m1"}},
+						},
+					},
+				},
+				schedulerObservingAffinityName: "group1",
+			},
+			want: false,
+		},
+		{
+			name: "update a ClusterAffinityTerm and don't update the target ClusterAffinityTerm",
+			args: args{
+				placement: policyv1alpha1.Placement{
+					ClusterAffinities: []policyv1alpha1.ClusterAffinityTerm{
+						{
+							AffinityName:    "group1",
+							ClusterAffinity: policyv1alpha1.ClusterAffinity{ClusterNames: []string{"m1"}},
+						},
+						{
+							AffinityName:    "group2",
+							ClusterAffinity: policyv1alpha1.ClusterAffinity{ClusterNames: []string{"m1", "m2"}},
+						},
+					},
+				},
+				appliedPlacement: &policyv1alpha1.Placement{
+					ClusterAffinities: []policyv1alpha1.ClusterAffinityTerm{
+						{
+							AffinityName:    "group1",
+							ClusterAffinity: policyv1alpha1.ClusterAffinity{ClusterNames: []string{"m1"}},
+						},
+						{
+							AffinityName:    "group2",
+							ClusterAffinity: policyv1alpha1.ClusterAffinity{ClusterNames: []string{"m2"}},
+						},
+					},
+				},
+				schedulerObservingAffinityName: "group1",
+			},
+			want: false,
+		},
+		{
+			name: "delete a ClusterAffinityTerm and don't update the target ClusterAffinityTerm",
+			args: args{
+				placement: policyv1alpha1.Placement{
+					ClusterAffinities: []policyv1alpha1.ClusterAffinityTerm{
+						{
+							AffinityName:    "group1",
+							ClusterAffinity: policyv1alpha1.ClusterAffinity{ClusterNames: []string{"m1"}},
+						},
+					},
+				},
+				appliedPlacement: &policyv1alpha1.Placement{
+					ClusterAffinities: []policyv1alpha1.ClusterAffinityTerm{
+						{
+							AffinityName:    "group1",
+							ClusterAffinity: policyv1alpha1.ClusterAffinity{ClusterNames: []string{"m1"}},
+						},
+						{
+							AffinityName:    "group2",
+							ClusterAffinity: policyv1alpha1.ClusterAffinity{ClusterNames: []string{"m2"}},
+						},
+					},
+				},
+				schedulerObservingAffinityName: "group1",
+			},
+			want: false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			var appliedPlacementStr string
+			if tt.args.appliedPlacement == nil {
+				appliedPlacementStr = ""
+			} else {
+				placementBytes, err := json.Marshal(*tt.args.appliedPlacement)
+				if err != nil {
+					t.Errorf("json marshal failed: %v", err)
+				}
+				appliedPlacementStr = string(placementBytes)
+			}
+			if got := placementChanged(tt.args.placement, appliedPlacementStr, tt.args.schedulerObservingAffinityName); got != tt.want {
+				t.Errorf("placementChanged() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go
index a19d7c52f..8602b46c1 100644
--- a/pkg/scheduler/scheduler.go
+++ b/pkg/scheduler/scheduler.go
@@ -365,7 +365,8 @@ func (s *Scheduler) doScheduleBinding(namespace, name string) (err error) {
 	if err != nil {
 		return err
 	}
-	if appliedPlacement := util.GetLabelValue(rb.Annotations, util.PolicyPlacementAnnotation); policyPlacementStr != appliedPlacement {
+	appliedPlacementStr := util.GetLabelValue(rb.Annotations, util.PolicyPlacementAnnotation)
+	if placementChanged(*rb.Spec.Placement, appliedPlacementStr, rb.Status.SchedulerObservedAffinityName) {
 		// policy placement changed, need schedule
 		klog.Infof("Start to schedule ResourceBinding(%s/%s) as placement changed", namespace, name)
 		err = s.scheduleResourceBinding(rb, policyPlacementStr)
@@ -427,7 +428,8 @@ func (s *Scheduler) doScheduleClusterBinding(name string) (err error) {
 	if err != nil {
 		return err
 	}
-	if appliedPlacement := util.GetLabelValue(crb.Annotations, util.PolicyPlacementAnnotation); policyPlacementStr != appliedPlacement {
+	appliedPlacementStr := util.GetLabelValue(crb.Annotations, util.PolicyPlacementAnnotation)
+	if placementChanged(*crb.Spec.Placement, appliedPlacementStr, crb.Status.SchedulerObservedAffinityName) {
 		// policy placement changed, need schedule
 		klog.Infof("Start to schedule ClusterResourceBinding(%s) as placement changed", name)
 		err = s.scheduleClusterResourceBinding(crb, policyPlacementStr)
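
For context only, a minimal sketch (not part of the change set) of the behavior this patch introduces: editing an affinity term the scheduler is not currently observing should not be treated as a placement change. It assumes the placementChanged helper and policyv1alpha1 types added above; the function name ExamplePlacementChangedBehavior and the group/cluster names are illustrative, and the snippet would live in a _test.go file inside the scheduler package.

package scheduler

import (
	"encoding/json"
	"fmt"

	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
)

// ExamplePlacementChangedBehavior: only the term the scheduler observed last
// time ("group1") is compared; a change limited to "group2" is ignored.
func ExamplePlacementChangedBehavior() {
	applied := policyv1alpha1.Placement{
		ClusterAffinities: []policyv1alpha1.ClusterAffinityTerm{
			{AffinityName: "group1", ClusterAffinity: policyv1alpha1.ClusterAffinity{ClusterNames: []string{"m1"}}},
			{AffinityName: "group2", ClusterAffinity: policyv1alpha1.ClusterAffinity{ClusterNames: []string{"m2"}}},
		},
	}
	// The applied placement is carried as a JSON string (the placement annotation).
	appliedBytes, _ := json.Marshal(applied)

	// The desired placement only touches "group2"; "group1" stays as observed.
	desired := applied
	desired.ClusterAffinities = []policyv1alpha1.ClusterAffinityTerm{
		{AffinityName: "group1", ClusterAffinity: policyv1alpha1.ClusterAffinity{ClusterNames: []string{"m1"}}},
		{AffinityName: "group2", ClusterAffinity: policyv1alpha1.ClusterAffinity{ClusterNames: []string{"m2", "m3"}}},
	}

	fmt.Println(placementChanged(desired, string(appliedBytes), "group1"))
	// Output: false
}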