Merge pull request #3321 from Poor12/fix-duplicated

Do not revise replicas when strategy is duplicated
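With a Duplicated replica scheduling strategy (or a resource template that declares no replicas), every member cluster should receive the template's replicas unchanged, so the ReviseReplica hook should not be invoked at all. Replicas only need to be rewritten per cluster when the template has replicas and the strategy is Divided, which is exactly what the new needReviseReplicas helper checks. A minimal sketch of the gate (it mirrors the helper added below and assumes the Placement.ReplicaSchedulingType accessor available in this tree; the placements and counts are made up for illustration):

package main

import (
    "fmt"

    policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
)

// needReviseReplicas mirrors the helper added in this PR: revise replicas only
// when the template declares replicas and the policy divides them across clusters.
func needReviseReplicas(replicas int32, placement *policyv1alpha1.Placement) bool {
    return replicas > 0 && placement != nil && placement.ReplicaSchedulingType() == policyv1alpha1.ReplicaSchedulingTypeDivided
}

func main() {
    divided := &policyv1alpha1.Placement{
        ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{
            ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided,
        },
    }
    duplicated := &policyv1alpha1.Placement{
        ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{
            ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDuplicated,
        },
    }

    fmt.Println(needReviseReplicas(3, divided))    // true:  write the scheduled per-cluster count into each Work
    fmt.Println(needReviseReplicas(3, duplicated)) // false: propagate the template's replicas as-is
    fmt.Println(needReviseReplicas(0, divided))    // false: nothing to revise
}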
karmada-bot 2023-04-07 08:11:55 +08:00 committed by GitHub
commit 7589eaec60
4 changed files with 15 additions and 185 deletions


@@ -8,6 +8,7 @@ import (
    "sigs.k8s.io/controller-runtime/pkg/client"

    configv1alpha1 "github.com/karmada-io/karmada/pkg/apis/config/v1alpha1"
+   policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
    workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
    workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
    "github.com/karmada-io/karmada/pkg/resourceinterpreter"
@@ -23,16 +24,22 @@ func ensureWork(
    overrideManager overridemanager.OverrideManager, binding metav1.Object, scope apiextensionsv1.ResourceScope,
) error {
    var targetClusters []workv1alpha2.TargetCluster
+   var placement *policyv1alpha1.Placement
    var requiredByBindingSnapshot []workv1alpha2.BindingSnapshot
+   var replicas int32
    switch scope {
    case apiextensionsv1.NamespaceScoped:
        bindingObj := binding.(*workv1alpha2.ResourceBinding)
        targetClusters = bindingObj.Spec.Clusters
        requiredByBindingSnapshot = bindingObj.Spec.RequiredBy
+       placement = bindingObj.Spec.Placement
+       replicas = bindingObj.Spec.Replicas
    case apiextensionsv1.ClusterScoped:
        bindingObj := binding.(*workv1alpha2.ClusterResourceBinding)
        targetClusters = bindingObj.Spec.Clusters
        requiredByBindingSnapshot = bindingObj.Spec.RequiredBy
+       placement = bindingObj.Spec.Placement
+       replicas = bindingObj.Spec.Replicas
    }

    targetClusters = mergeTargetClusters(targetClusters, requiredByBindingSnapshot)
@@ -45,7 +52,6 @@ func ensureWork(
            return err
        }
    }
-   hasScheduledReplica, desireReplicaInfos := getReplicaInfos(targetClusters)

    for i := range targetClusters {
        targetCluster := targetClusters[i]
@@ -53,9 +59,11 @@
        workNamespace := names.GenerateExecutionSpaceName(targetCluster.Name)

-       if hasScheduledReplica {
+       // If and only if the resource template has replicas, and the replica scheduling policy is divided,
+       // we need to revise replicas.
+       if needReviseReplicas(replicas, placement) {
            if resourceInterpreter.HookEnabled(clonedWorkload.GroupVersionKind(), configv1alpha1.InterpreterOperationReviseReplica) {
-               clonedWorkload, err = resourceInterpreter.ReviseReplica(clonedWorkload, desireReplicaInfos[targetCluster.Name])
+               clonedWorkload, err = resourceInterpreter.ReviseReplica(clonedWorkload, int64(targetCluster.Replicas))
                if err != nil {
                    klog.Errorf("Failed to revise replica for %s/%s/%s in cluster %s, err is: %v",
                        workload.GetKind(), workload.GetNamespace(), workload.GetName(), targetCluster.Name, err)
@@ -125,21 +133,6 @@ func mergeTargetClusters(targetClusters []workv1alpha2.TargetCluster, requiredBy
    return targetClusters
}

-func getReplicaInfos(targetClusters []workv1alpha2.TargetCluster) (bool, map[string]int64) {
-   if helper.HasScheduledReplica(targetClusters) {
-       return true, transScheduleResultToMap(targetClusters)
-   }
-   return false, nil
-}
-
-func transScheduleResultToMap(scheduleResult []workv1alpha2.TargetCluster) map[string]int64 {
-   var desireReplicaInfos = make(map[string]int64, len(scheduleResult))
-   for _, clusterInfo := range scheduleResult {
-       desireReplicaInfos[clusterInfo.Name] = int64(clusterInfo.Replicas)
-   }
-   return desireReplicaInfos
-}
-
func mergeLabel(workload *unstructured.Unstructured, workNamespace string, binding metav1.Object, scope apiextensionsv1.ResourceScope) map[string]string {
    var workLabel = make(map[string]string)
    util.MergeLabel(workload, workv1alpha1.WorkNamespaceLabel, workNamespace)
@@ -212,3 +205,7 @@ func divideReplicasByJobCompletions(workload *unstructured.Unstructured, cluster
    return targetClusters, nil
}
+
+func needReviseReplicas(replicas int32, placement *policyv1alpha1.Placement) bool {
+   return replicas > 0 && placement != nil && placement.ReplicaSchedulingType() == policyv1alpha1.ReplicaSchedulingTypeDivided
+}
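Because ensureWork already iterates the scheduled targetClusters, the per-cluster count can be read straight from targetCluster.Replicas (converted to int64 where ReviseReplica expects it), which is what makes the getReplicaInfos and transScheduleResultToMap helpers above removable. A minimal sketch of that direct lookup, using made-up cluster names and replica counts:

package main

import (
    "fmt"

    workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
)

func main() {
    // Hypothetical scheduling result: the scheduler has already divided the
    // template's replicas across two member clusters.
    targetClusters := []workv1alpha2.TargetCluster{
        {Name: "member1", Replicas: 2},
        {Name: "member2", Replicas: 1},
    }

    for _, tc := range targetClusters {
        // Previously this value was looked up in a map built by transScheduleResultToMap;
        // now it is read directly off the scheduling result.
        fmt.Printf("%s -> %d replicas\n", tc.Name, int64(tc.Replicas))
    }
}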


@@ -83,95 +83,6 @@ func Test_mergeTargetClusters(t *testing.T) {
    }
}

-func Test_transScheduleResultToMap(t *testing.T) {
-   tests := []struct {
-       name string
-       scheduleResult []workv1alpha2.TargetCluster
-       want map[string]int64
-   }{
-       {
-           name: "one cluster",
-           scheduleResult: []workv1alpha2.TargetCluster{
-               {
-                   Name: "foo",
-                   Replicas: 1,
-               },
-           },
-           want: map[string]int64{
-               "foo": 1,
-           },
-       },
-       {
-           name: "different clusters",
-           scheduleResult: []workv1alpha2.TargetCluster{
-               {
-                   Name: "foo",
-                   Replicas: 1,
-               },
-               {
-                   Name: "bar",
-                   Replicas: 2,
-               },
-           },
-           want: map[string]int64{
-               "foo": 1,
-               "bar": 2,
-           },
-       },
-   }
-   for _, tt := range tests {
-       t.Run(tt.name, func(t *testing.T) {
-           if got := transScheduleResultToMap(tt.scheduleResult); !reflect.DeepEqual(got, tt.want) {
-               t.Errorf("transScheduleResultToMap() = %v, want %v", got, tt.want)
-           }
-       })
-   }
-}
-
-func Test_getReplicaInfos(t *testing.T) {
-   tests := []struct {
-       name string
-       targetClusters []workv1alpha2.TargetCluster
-       wantBool bool
-       wantRes map[string]int64
-   }{
-       {
-           name: "a cluster with replicas",
-           targetClusters: []workv1alpha2.TargetCluster{
-               {
-                   Name: "foo",
-                   Replicas: 1,
-               },
-           },
-           wantBool: true,
-           wantRes: map[string]int64{
-               "foo": 1,
-           },
-       },
-       {
-           name: "none of the clusters have replicas",
-           targetClusters: []workv1alpha2.TargetCluster{
-               {
-                   Name: "foo",
-               },
-           },
-           wantBool: false,
-           wantRes: nil,
-       },
-   }
-   for _, tt := range tests {
-       t.Run(tt.name, func(t *testing.T) {
-           got, got1 := getReplicaInfos(tt.targetClusters)
-           if got != tt.wantBool {
-               t.Errorf("getReplicaInfos() got = %v, want %v", got, tt.wantBool)
-           }
-           if !reflect.DeepEqual(got1, tt.wantRes) {
-               t.Errorf("getReplicaInfos() got1 = %v, want %v", got1, tt.wantRes)
-           }
-       })
-   }
-}
-
func Test_mergeLabel(t *testing.T) {
    namespace := "fake-ns"
    bindingName := "fake-bindingName"


@@ -148,16 +148,6 @@ func IsBindingScheduled(status *workv1alpha2.ResourceBindingStatus) bool {
    return meta.IsStatusConditionTrue(status.Conditions, workv1alpha2.Scheduled)
}

-// HasScheduledReplica checks if the scheduler has assigned replicas for a cluster.
-func HasScheduledReplica(scheduleResult []workv1alpha2.TargetCluster) bool {
-   for _, clusterResult := range scheduleResult {
-       if clusterResult.Replicas > 0 {
-           return true
-       }
-   }
-   return false
-}
-
// ObtainBindingSpecExistingClusters will obtain the cluster slice existing in the binding's spec field.
func ObtainBindingSpecExistingClusters(bindingSpec workv1alpha2.ResourceBindingSpec) sets.Set[string] {
    clusterNames := util.ConvertToClusterNames(bindingSpec.Clusters)


@@ -181,74 +181,6 @@ func TestDispenseReplicasByTargetClusters(t *testing.T) {
    }
}

-func TestHasScheduledReplica(t *testing.T) {
-   tests := []struct {
-       name string
-       scheduleResult []workv1alpha2.TargetCluster
-       want bool
-   }{
-       {
-           name: "all targetCluster have replicas",
-           scheduleResult: []workv1alpha2.TargetCluster{
-               {
-                   Name: "foo",
-                   Replicas: 1,
-               },
-               {
-                   Name: "bar",
-                   Replicas: 2,
-               },
-           },
-           want: true,
-       },
-       {
-           name: "a targetCluster has replicas",
-           scheduleResult: []workv1alpha2.TargetCluster{
-               {
-                   Name: "foo",
-                   Replicas: 1,
-               },
-               {
-                   Name: "bar",
-               },
-           },
-           want: true,
-       },
-       {
-           name: "another targetCluster has replicas",
-           scheduleResult: []workv1alpha2.TargetCluster{
-               {
-                   Name: "foo",
-               },
-               {
-                   Name: "bar",
-                   Replicas: 1,
-               },
-           },
-           want: true,
-       },
-       {
-           name: "not assigned replicas for a cluster",
-           scheduleResult: []workv1alpha2.TargetCluster{
-               {
-                   Name: "foo",
-               },
-               {
-                   Name: "bar",
-               },
-           },
-           want: false,
-       },
-   }
-   for _, tt := range tests {
-       t.Run(tt.name, func(t *testing.T) {
-           if got := HasScheduledReplica(tt.scheduleResult); got != tt.want {
-               t.Errorf("HasScheduledReplica() = %v, want %v", got, tt.want)
-           }
-       })
-   }
-}
-
func TestObtainBindingSpecExistingClusters(t *testing.T) {
    tests := []struct {
        name string