Merge pull request #2946 from Garrybest/pr_division

refactor dynamic schedule
commit 73a1b8236e
karmada-bot 2022-12-16 10:15:42 +08:00, committed via GitHub
7 changed files with 684 additions and 939 deletions

View File

@ -1,27 +1,25 @@
package core
import (
"fmt"
"k8s.io/apimachinery/pkg/util/sets"
clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
"github.com/karmada-io/karmada/pkg/util"
)
var (
assignFuncMap = map[string]func(*assignState) ([]workv1alpha2.TargetCluster, error){
DuplicatedStrategy: assignByDuplicatedStrategy,
- AggregatedStrategy: assignByAggregatedStrategy,
+ AggregatedStrategy: assignByDynamicStrategy,
StaticWeightStrategy: assignByStaticWeightStrategy,
- DynamicWeightStrategy: assignByDynamicWeightStrategy,
+ DynamicWeightStrategy: assignByDynamicStrategy,
}
)
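Both AggregatedStrategy and DynamicWeightStrategy now route to the same assignByDynamicStrategy entry point. A minimal sketch of how a caller might consume this map (hypothetical caller code, not part of this diff; the actual call site is elsewhere in the package):

// Hypothetical sketch: dispatch on the resolved strategy type and run the
// registered assign function.
assignFunc, ok := assignFuncMap[state.strategyType]
if !ok {
	return nil, fmt.Errorf("unsupported replica scheduling strategy: %s", state.strategyType)
}
targetClusters, err := assignFunc(state)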
- // assignState is a wrapper of the input for assigning function.
- type assignState struct {
- candidates []*clusterv1alpha1.Cluster
- strategy *policyv1alpha1.ReplicaSchedulingStrategy
- object *workv1alpha2.ResourceBindingSpec
- }
const (
// DuplicatedStrategy indicates each candidate member cluster will directly apply the original replicas.
DuplicatedStrategy = "Duplicated"
@ -34,20 +32,96 @@ const (
DynamicWeightStrategy = "DynamicWeight"
)
// assignState is a wrapper of the input for assigning function.
type assignState struct {
candidates []*clusterv1alpha1.Cluster
strategy *policyv1alpha1.ReplicaSchedulingStrategy
spec *workv1alpha2.ResourceBindingSpec
// fields below are indirect results
strategyType string
scheduledClusters []workv1alpha2.TargetCluster
assignedReplicas int32
availableClusters []workv1alpha2.TargetCluster
availableReplicas int32
// targetReplicas is the replicas that we need to schedule in this round
targetReplicas int32
}
func newAssignState(candidates []*clusterv1alpha1.Cluster, strategy *policyv1alpha1.ReplicaSchedulingStrategy, obj *workv1alpha2.ResourceBindingSpec) *assignState {
var strategyType string
switch strategy.ReplicaSchedulingType {
case policyv1alpha1.ReplicaSchedulingTypeDuplicated:
strategyType = DuplicatedStrategy
case policyv1alpha1.ReplicaSchedulingTypeDivided:
switch strategy.ReplicaDivisionPreference {
case policyv1alpha1.ReplicaDivisionPreferenceAggregated:
strategyType = AggregatedStrategy
case policyv1alpha1.ReplicaDivisionPreferenceWeighted:
if strategy.WeightPreference != nil && len(strategy.WeightPreference.DynamicWeight) != 0 {
strategyType = DynamicWeightStrategy
} else {
strategyType = StaticWeightStrategy
}
}
}
return &assignState{candidates: candidates, strategy: strategy, spec: obj, strategyType: strategyType}
}
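A short usage sketch of newAssignState (hypothetical caller code; candidates and spec are assumed to be in scope): a Divided policy with a Weighted preference and a non-empty DynamicWeight resolves to DynamicWeightStrategy.

// Hypothetical sketch, not part of this diff:
strategy := &policyv1alpha1.ReplicaSchedulingStrategy{
	ReplicaSchedulingType:     policyv1alpha1.ReplicaSchedulingTypeDivided,
	ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceWeighted,
	WeightPreference: &policyv1alpha1.ClusterPreferences{
		DynamicWeight: policyv1alpha1.DynamicWeightByAvailableReplicas,
	},
}
state := newAssignState(candidates, strategy, spec)
// state.strategyType == DynamicWeightStrategy; with a nil WeightPreference or
// an empty DynamicWeight it would resolve to StaticWeightStrategy instead.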
func (as *assignState) buildScheduledClusters() {
as.scheduledClusters = as.spec.Clusters
as.assignedReplicas = util.GetSumOfReplicas(as.scheduledClusters)
}
func (as *assignState) buildAvailableClusters(c calculator) {
as.availableClusters = c(as.candidates, as.spec)
as.availableReplicas = util.GetSumOfReplicas(as.availableClusters)
}
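The calculator parameter makes the source of "available replicas" pluggable. Its definition is not part of this diff; judging from the call sites in dynamicScaleDown and dynamicScaleUp below, it is presumably a function type of this shape:

// Assumed declaration (defined elsewhere in the package, not shown here):
type calculator func([]*clusterv1alpha1.Cluster, *workv1alpha2.ResourceBindingSpec) []workv1alpha2.TargetCluster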
// resortAvailableClusters is used to make sure scheduledClusters are at the front of the availableClusters
// list so that we can assign new replicas to them preferentially when scaling up.
func (as *assignState) resortAvailableClusters() []workv1alpha2.TargetCluster {
// get the previous scheduled clusters
prior := sets.NewString()
for _, cluster := range as.scheduledClusters {
if cluster.Replicas > 0 {
prior.Insert(cluster.Name)
}
}
if len(prior) == 0 {
return as.availableClusters
}
var (
prev = make([]workv1alpha2.TargetCluster, 0, len(prior))
left = make([]workv1alpha2.TargetCluster, 0, len(as.scheduledClusters)-len(prior))
)
for _, cluster := range as.availableClusters {
if prior.Has(cluster.Name) {
prev = append(prev, cluster)
} else {
left = append(left, cluster)
}
}
as.availableClusters = append(prev, left...)
return as.availableClusters
}
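A worked example of the resort, with hypothetical values:

// resortAvailableClusters walk-through (made-up numbers):
//   scheduledClusters: [{B, 2}]                 -> prior = {B}
//   availableClusters: [{A, 5}, {B, 3}, {C, 1}] -> prev = [{B, 3}], left = [{A, 5}, {C, 1}]
//   result:            [{B, 3}, {A, 5}, {C, 1}]
// B moves to the head of the list, so new replicas land on it first and stay
// aggregated with the replicas it already runs.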
// assignByDuplicatedStrategy assigns replicas by DuplicatedStrategy.
func assignByDuplicatedStrategy(state *assignState) ([]workv1alpha2.TargetCluster, error) {
targetClusters := make([]workv1alpha2.TargetCluster, len(state.candidates))
for i, cluster := range state.candidates {
- targetClusters[i] = workv1alpha2.TargetCluster{Name: cluster.Name, Replicas: state.object.Replicas}
+ targetClusters[i] = workv1alpha2.TargetCluster{Name: cluster.Name, Replicas: state.spec.Replicas}
}
return targetClusters, nil
}
- // assignByAggregatedStrategy assigns replicas by AggregatedStrategy.
- func assignByAggregatedStrategy(state *assignState) ([]workv1alpha2.TargetCluster, error) {
- return divideReplicasByResource(state.candidates, state.object, policyv1alpha1.ReplicaDivisionPreferenceAggregated)
- }
/*
* assignByStaticWeightStrategy assigns a total number of replicas to the selected clusters by the weight list.
* For example, we want to assign replicas to two clusters named A and B.
@ -66,13 +140,30 @@ func assignByStaticWeightStrategy(state *assignState) ([]workv1alpha2.TargetClus
}
weightList := getStaticWeightInfoList(state.candidates, state.strategy.WeightPreference.StaticWeightList)
- acc := newDispenser(state.object.Replicas, nil)
- acc.takeByWeight(weightList)
+ disp := newDispenser(state.spec.Replicas, nil)
+ disp.takeByWeight(weightList)
- return acc.result, nil
+ return disp.result, nil
}
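A small sketch of the dispenser semantics relied on here (the numbers are made up for illustration): a dispenser seeded with 12 replicas and no prior result, fed a 1:2 weight list, hands out 4 and 8.

// Illustration of the assumed dispenser behavior, with made-up numbers:
disp := newDispenser(12, nil) // 12 replicas to hand out, no initial result
disp.takeByWeight(helper.ClusterWeightInfoList{
	{ClusterName: "member1", Weight: 1},
	{ClusterName: "member2", Weight: 2},
})
// disp.result is now roughly [{member1, 4}, {member2, 8}]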
- // assignByDynamicWeightStrategy assigns replicas by assignByDynamicWeightStrategy.
- func assignByDynamicWeightStrategy(state *assignState) ([]workv1alpha2.TargetCluster, error) {
- return divideReplicasByDynamicWeight(state.candidates, state.strategy.WeightPreference.DynamicWeight, state.object)
+ func assignByDynamicStrategy(state *assignState) ([]workv1alpha2.TargetCluster, error) {
state.buildScheduledClusters()
if state.assignedReplicas > state.spec.Replicas {
// We need to reduce the replicas in terms of the previous result.
result, err := dynamicScaleDown(state)
if err != nil {
return nil, fmt.Errorf("failed to scale down: %v", err)
}
return result, nil
} else if state.assignedReplicas < state.spec.Replicas {
// We need to enlarge the replicas in terms of the previous result (if exists).
// First scheduling is considered as a special kind of scaling up.
result, err := dynamicScaleUp(state)
if err != nil {
return nil, fmt.Errorf("failed to scale up: %v", err)
}
return result, nil
} else {
return state.scheduledClusters, nil
}
}
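Concretely, using the previous result {member1: 2, member2: 4, member3: 6} (12 assigned replicas, as in the tests below), the dispatch works out as follows:

// assignByDynamicStrategy, by example:
//   spec.Replicas == 6  -> 12 > 6:  dynamicScaleDown shrinks the previous result
//   spec.Replicas == 24 -> 12 < 24: dynamicScaleUp schedules the 12 extra replicas
//   spec.Replicas == 12 -> equal:   the previous result is returned unchanged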

View File

@ -3,12 +3,30 @@ package core
import (
"testing"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
"github.com/karmada-io/karmada/pkg/util"
"github.com/karmada-io/karmada/test/helper"
)
var (
dynamicWeightStrategy = &policyv1alpha1.ReplicaSchedulingStrategy{
ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided,
ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceWeighted,
WeightPreference: &policyv1alpha1.ClusterPreferences{
DynamicWeight: policyv1alpha1.DynamicWeightByAvailableReplicas,
},
}
aggregatedStrategy = &policyv1alpha1.ReplicaSchedulingStrategy{
ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided,
ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceAggregated,
}
)
func Test_assignByStaticWeightStrategy(t *testing.T) {
tests := []struct {
name string
@ -245,7 +263,7 @@ func Test_assignByStaticWeightStrategy(t *testing.T) {
ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceWeighted,
WeightPreference: tt.weightPreference,
},
- object: &workv1alpha2.ResourceBindingSpec{Replicas: tt.replicas},
+ spec: &workv1alpha2.ResourceBindingSpec{Replicas: tt.replicas},
})
if (err != nil) != tt.wantErr {
t.Errorf("divideReplicasByStaticWeight() error = %v, wantErr %v", err, tt.wantErr)
@ -257,3 +275,467 @@ func Test_assignByStaticWeightStrategy(t *testing.T) {
})
}
}
func Test_dynamicScale(t *testing.T) {
tests := []struct {
name string
candidates []*clusterv1alpha1.Cluster
object *workv1alpha2.ResourceBindingSpec
strategy *policyv1alpha1.ReplicaSchedulingStrategy
want []workv1alpha2.TargetCluster
wantErr bool
}{
{
name: "replica 12 -> 6, dynamic weighted 1:1:1",
candidates: []*clusterv1alpha1.Cluster{
helper.NewClusterWithResource(ClusterMember1, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(1, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember2, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(1, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember3, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(1, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
},
object: &workv1alpha2.ResourceBindingSpec{
ReplicaRequirements: &workv1alpha2.ReplicaRequirements{
ResourceRequest: util.EmptyResource().ResourceList(),
},
Replicas: 6,
Clusters: []workv1alpha2.TargetCluster{
{Name: ClusterMember1, Replicas: 2},
{Name: ClusterMember2, Replicas: 4},
{Name: ClusterMember3, Replicas: 6},
},
},
strategy: dynamicWeightStrategy,
want: []workv1alpha2.TargetCluster{
{Name: ClusterMember1, Replicas: 1},
{Name: ClusterMember2, Replicas: 2},
{Name: ClusterMember3, Replicas: 3},
},
wantErr: false,
},
{
name: "replica 12 -> 24, dynamic weighted 10:10:10",
candidates: []*clusterv1alpha1.Cluster{
helper.NewClusterWithResource(ClusterMember1, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(10, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember2, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(10, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember3, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(10, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
},
object: &workv1alpha2.ResourceBindingSpec{
ReplicaRequirements: &workv1alpha2.ReplicaRequirements{
ResourceRequest: util.EmptyResource().ResourceList(),
},
Replicas: 24,
Clusters: []workv1alpha2.TargetCluster{
{Name: ClusterMember1, Replicas: 2},
{Name: ClusterMember2, Replicas: 4},
{Name: ClusterMember3, Replicas: 6},
},
},
strategy: dynamicWeightStrategy,
want: []workv1alpha2.TargetCluster{
{Name: ClusterMember1, Replicas: 6},
{Name: ClusterMember2, Replicas: 8},
{Name: ClusterMember3, Replicas: 10},
},
wantErr: false,
},
{
name: "replica 12 -> 24, dynamic weighted 1:1:1",
candidates: []*clusterv1alpha1.Cluster{
helper.NewClusterWithResource(ClusterMember1, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(1, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember2, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(1, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember3, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(1, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
},
object: &workv1alpha2.ResourceBindingSpec{
ReplicaRequirements: &workv1alpha2.ReplicaRequirements{
ResourceRequest: util.EmptyResource().ResourceList(),
},
Replicas: 24,
Clusters: []workv1alpha2.TargetCluster{
{Name: ClusterMember1, Replicas: 2},
{Name: ClusterMember2, Replicas: 4},
{Name: ClusterMember3, Replicas: 6},
},
},
strategy: dynamicWeightStrategy,
wantErr: true,
},
{
name: "replica 12 -> 6, aggregated 1:1:1",
candidates: []*clusterv1alpha1.Cluster{
helper.NewClusterWithResource(ClusterMember1, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(1, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember2, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(1, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember3, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(1, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
},
object: &workv1alpha2.ResourceBindingSpec{
ReplicaRequirements: &workv1alpha2.ReplicaRequirements{
ResourceRequest: util.EmptyResource().ResourceList(),
},
Replicas: 6,
Clusters: []workv1alpha2.TargetCluster{
{Name: ClusterMember1, Replicas: 4},
{Name: ClusterMember2, Replicas: 8},
},
},
strategy: aggregatedStrategy,
want: []workv1alpha2.TargetCluster{
{Name: ClusterMember2, Replicas: 6},
},
wantErr: false,
},
{
name: "replica 12 -> 8, aggregated 100:100",
candidates: []*clusterv1alpha1.Cluster{
helper.NewClusterWithResource(ClusterMember1, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(100, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember2, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(100, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
},
object: &workv1alpha2.ResourceBindingSpec{
ReplicaRequirements: &workv1alpha2.ReplicaRequirements{
ResourceRequest: util.EmptyResource().ResourceList(),
},
Replicas: 8,
Clusters: []workv1alpha2.TargetCluster{
{Name: ClusterMember1, Replicas: 4},
{Name: ClusterMember2, Replicas: 8},
},
},
strategy: aggregatedStrategy,
want: []workv1alpha2.TargetCluster{
{Name: ClusterMember2, Replicas: 8},
},
wantErr: false,
},
{
name: "replica 12 -> 24, aggregated 4:6:8",
candidates: []*clusterv1alpha1.Cluster{
helper.NewClusterWithResource(ClusterMember1, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(4, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember2, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(6, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember3, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(8, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
},
object: &workv1alpha2.ResourceBindingSpec{
ReplicaRequirements: &workv1alpha2.ReplicaRequirements{
ResourceRequest: util.EmptyResource().ResourceList(),
},
Replicas: 24,
Clusters: []workv1alpha2.TargetCluster{
{Name: ClusterMember1, Replicas: 4},
{Name: ClusterMember2, Replicas: 8},
},
},
strategy: aggregatedStrategy,
want: []workv1alpha2.TargetCluster{
{Name: ClusterMember1, Replicas: 6},
{Name: ClusterMember2, Replicas: 12},
{Name: ClusterMember3, Replicas: 6},
},
wantErr: false,
},
{
name: "replica 12 -> 24, aggregated 6:6:20",
candidates: []*clusterv1alpha1.Cluster{
helper.NewClusterWithResource(ClusterMember1, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(6, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember2, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(6, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember3, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(20, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
},
object: &workv1alpha2.ResourceBindingSpec{
ReplicaRequirements: &workv1alpha2.ReplicaRequirements{
ResourceRequest: util.EmptyResource().ResourceList(),
},
Replicas: 24,
Clusters: []workv1alpha2.TargetCluster{
{Name: ClusterMember1, Replicas: 4},
{Name: ClusterMember2, Replicas: 8},
},
},
strategy: aggregatedStrategy,
want: []workv1alpha2.TargetCluster{
{Name: ClusterMember1, Replicas: 10},
{Name: ClusterMember2, Replicas: 14},
},
wantErr: false,
},
{
name: "replica 12 -> 24, aggregated 1:1:1",
candidates: []*clusterv1alpha1.Cluster{
helper.NewClusterWithResource(ClusterMember1, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(1, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember2, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(1, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember3, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(1, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
},
object: &workv1alpha2.ResourceBindingSpec{
ReplicaRequirements: &workv1alpha2.ReplicaRequirements{
ResourceRequest: util.EmptyResource().ResourceList(),
},
Replicas: 24,
Clusters: []workv1alpha2.TargetCluster{
{Name: ClusterMember1, Replicas: 4},
{Name: ClusterMember2, Replicas: 8},
},
},
strategy: aggregatedStrategy,
wantErr: true,
},
{
name: "replica 12 -> 24, aggregated 4:8:12, with cluster2 disappeared and cluster4 appeared",
candidates: []*clusterv1alpha1.Cluster{
helper.NewClusterWithResource(ClusterMember1, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(4, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember3, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(8, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember4, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(12, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
},
object: &workv1alpha2.ResourceBindingSpec{
ReplicaRequirements: &workv1alpha2.ReplicaRequirements{
ResourceRequest: util.EmptyResource().ResourceList(),
},
Replicas: 24,
Clusters: []workv1alpha2.TargetCluster{
{Name: ClusterMember1, Replicas: 4},
{Name: ClusterMember2, Replicas: 8},
},
},
strategy: aggregatedStrategy,
want: []workv1alpha2.TargetCluster{
{Name: ClusterMember1, Replicas: 7},
{Name: ClusterMember2, Replicas: 8},
{Name: ClusterMember4, Replicas: 9},
},
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
state := newAssignState(tt.candidates, tt.strategy, tt.object)
got, err := assignByDynamicStrategy(state)
if (err != nil) != tt.wantErr {
t.Errorf("assignByDynamicStrategy() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !helper.IsScheduleResultEqual(got, tt.want) {
t.Errorf("assignByDynamicStrategy() got = %v, want %v", got, tt.want)
}
})
}
}
func Test_dynamicScaleUp(t *testing.T) {
tests := []struct {
name string
candidates []*clusterv1alpha1.Cluster
object *workv1alpha2.ResourceBindingSpec
strategy *policyv1alpha1.ReplicaSchedulingStrategy
want []workv1alpha2.TargetCluster
wantErr bool
}{
{
name: "replica 12, dynamic weight 6:8:10",
candidates: []*clusterv1alpha1.Cluster{
helper.NewClusterWithResource(ClusterMember1, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(6, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember2, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(8, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember3, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(10, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
},
object: &workv1alpha2.ResourceBindingSpec{
ReplicaRequirements: &workv1alpha2.ReplicaRequirements{
ResourceRequest: util.EmptyResource().ResourceList(),
},
Replicas: 12,
},
strategy: dynamicWeightStrategy,
want: []workv1alpha2.TargetCluster{
{Name: ClusterMember1, Replicas: 3},
{Name: ClusterMember2, Replicas: 4},
{Name: ClusterMember3, Replicas: 5},
},
wantErr: false,
},
{
name: "replica 12, dynamic weight 8:8:10",
candidates: []*clusterv1alpha1.Cluster{
helper.NewClusterWithResource(ClusterMember1, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(8, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember2, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(8, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember3, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(10, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
},
object: &workv1alpha2.ResourceBindingSpec{
ReplicaRequirements: &workv1alpha2.ReplicaRequirements{
ResourceRequest: util.EmptyResource().ResourceList(),
},
Replicas: 12,
},
strategy: dynamicWeightStrategy,
want: []workv1alpha2.TargetCluster{
{Name: ClusterMember1, Replicas: 4},
{Name: ClusterMember2, Replicas: 3},
{Name: ClusterMember3, Replicas: 5},
},
wantErr: false,
},
{
name: "replica 12, dynamic weight 3:3:3",
candidates: []*clusterv1alpha1.Cluster{
helper.NewClusterWithResource(ClusterMember1, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(3, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember2, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(3, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember3, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(3, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
},
object: &workv1alpha2.ResourceBindingSpec{
ReplicaRequirements: &workv1alpha2.ReplicaRequirements{
ResourceRequest: util.EmptyResource().ResourceList(),
},
Replicas: 12,
},
strategy: dynamicWeightStrategy,
wantErr: true,
},
{
name: "replica 12, aggregated 6:8:10",
candidates: []*clusterv1alpha1.Cluster{
helper.NewClusterWithResource(ClusterMember1, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(6, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember2, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(8, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember3, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(10, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
},
object: &workv1alpha2.ResourceBindingSpec{
ReplicaRequirements: &workv1alpha2.ReplicaRequirements{
ResourceRequest: util.EmptyResource().ResourceList(),
},
Replicas: 12,
},
strategy: aggregatedStrategy,
want: []workv1alpha2.TargetCluster{
{Name: ClusterMember2, Replicas: 5},
{Name: ClusterMember3, Replicas: 7},
},
wantErr: false,
},
{
name: "replica 12, aggregated 12:8:10",
candidates: []*clusterv1alpha1.Cluster{
helper.NewClusterWithResource(ClusterMember1, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(12, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember2, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(8, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember3, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(10, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
},
object: &workv1alpha2.ResourceBindingSpec{
ReplicaRequirements: &workv1alpha2.ReplicaRequirements{
ResourceRequest: util.EmptyResource().ResourceList(),
},
Replicas: 12,
},
strategy: aggregatedStrategy,
want: []workv1alpha2.TargetCluster{
{Name: ClusterMember1, Replicas: 12},
},
wantErr: false,
},
{
name: "replica 12, aggregated 3:3:3",
candidates: []*clusterv1alpha1.Cluster{
helper.NewClusterWithResource(ClusterMember1, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(3, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember2, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(3, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember3, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(3, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
},
object: &workv1alpha2.ResourceBindingSpec{
ReplicaRequirements: &workv1alpha2.ReplicaRequirements{
ResourceRequest: util.EmptyResource().ResourceList(),
},
Replicas: 12,
},
strategy: aggregatedStrategy,
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
state := newAssignState(tt.candidates, tt.strategy, tt.object)
state.buildScheduledClusters()
got, err := dynamicScaleUp(state)
if (err != nil) != tt.wantErr {
t.Errorf("dynamicScaleUp() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !helper.IsScheduleResultEqual(got, tt.want) {
t.Errorf("dynamicScaleUp() got = %v, want %v", got, tt.want)
}
})
}
}

View File

@ -4,8 +4,6 @@ import (
"fmt"
"sort"
"k8s.io/apimachinery/pkg/util/sets"
clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
@ -96,170 +94,72 @@ func getStaticWeightInfoList(clusters []*clusterv1alpha1.Cluster, weightList []p
return list
}
- // divideReplicasByDynamicWeight assigns a total number of replicas to the selected clusters by the dynamic weight list.
- func divideReplicasByDynamicWeight(clusters []*clusterv1alpha1.Cluster, dynamicWeight policyv1alpha1.DynamicWeightFactor, spec *workv1alpha2.ResourceBindingSpec) ([]workv1alpha2.TargetCluster, error) {
- switch dynamicWeight {
- case policyv1alpha1.DynamicWeightByAvailableReplicas:
- return divideReplicasByResource(clusters, spec, policyv1alpha1.ReplicaDivisionPreferenceWeighted)
- default:
- return nil, fmt.Errorf("undefined replica dynamic weight factor: %s", dynamicWeight)
- }
- }
+ func getStaticWeightInfoListByTargetClusters(tcs []workv1alpha2.TargetCluster) helper.ClusterWeightInfoList {
+ weightList := make(helper.ClusterWeightInfoList, 0, len(tcs))
+ for _, result := range tcs {
+ weightList = append(weightList, helper.ClusterWeightInfo{
+ ClusterName: result.Name,
+ Weight: int64(result.Replicas),
+ })
+ }
+ return weightList
+ }
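By example: each target cluster's replica count simply becomes its weight.

// getStaticWeightInfoListByTargetClusters, by example:
//   in:  [{Name: member1, Replicas: 6}, {Name: member2, Replicas: 12}]
//   out: [{ClusterName: member1, Weight: 6}, {ClusterName: member2, Weight: 12}]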
func divideReplicasByResource(
clusters []*clusterv1alpha1.Cluster,
spec *workv1alpha2.ResourceBindingSpec,
preference policyv1alpha1.ReplicaDivisionPreference,
) ([]workv1alpha2.TargetCluster, error) {
// Step 1: Find the ready clusters that have old replicas
scheduledClusters := findOutScheduledCluster(spec.Clusters, clusters)
// Step 2: calculate the assigned Replicas in scheduledClusters
assignedReplicas := util.GetSumOfReplicas(scheduledClusters)
// Step 3: Check the scale type (up or down).
if assignedReplicas > spec.Replicas {
// We need to reduce the replicas in terms of the previous result.
newTargetClusters, err := scaleDownScheduleByReplicaDivisionPreference(spec, preference)
if err != nil {
return nil, fmt.Errorf("failed to scale down: %v", err)
}
return newTargetClusters, nil
} else if assignedReplicas < spec.Replicas {
// We need to enlarge the replicas in terms of the previous result (if exists).
// First scheduling is considered as a special kind of scaling up.
newTargetClusters, err := scaleUpScheduleByReplicaDivisionPreference(clusters, spec, preference, scheduledClusters, assignedReplicas)
if err != nil {
return nil, fmt.Errorf("failed to scaleUp: %v", err)
}
return newTargetClusters, nil
} else {
return scheduledClusters, nil
}
}
- // divideReplicasByPreference assigns a total number of replicas to the selected clusters by preference according to the resource.
- func divideReplicasByPreference(
- clusterAvailableReplicas []workv1alpha2.TargetCluster,
- replicas int32,
- preference policyv1alpha1.ReplicaDivisionPreference,
- scheduledClusterNames sets.String,
- ) ([]workv1alpha2.TargetCluster, error) {
- clustersMaxReplicas := util.GetSumOfReplicas(clusterAvailableReplicas)
- if clustersMaxReplicas < replicas {
- return nil, fmt.Errorf("clusters resources are not enough to schedule, max %d replicas are support", clustersMaxReplicas)
+ // dynamicDivideReplicas assigns a total number of replicas to the selected clusters by preference according to the resource.
+ func dynamicDivideReplicas(state *assignState) ([]workv1alpha2.TargetCluster, error) {
+ if state.availableReplicas < state.targetReplicas {
+ return nil, fmt.Errorf("clusters resources are not enough to schedule, max %d replicas are support", state.availableReplicas)
}
- switch preference {
- case policyv1alpha1.ReplicaDivisionPreferenceAggregated:
- return divideReplicasByAggregation(clusterAvailableReplicas, replicas, scheduledClusterNames), nil
- case policyv1alpha1.ReplicaDivisionPreferenceWeighted:
- return divideReplicasByAvailableReplica(clusterAvailableReplicas, replicas, clustersMaxReplicas), nil
- default:
- return nil, fmt.Errorf("undefined replicaSchedulingType %v", preference)
- }
- }
- func divideReplicasByAggregation(clusterAvailableReplicas []workv1alpha2.TargetCluster,
- replicas int32, scheduledClusterNames sets.String) []workv1alpha2.TargetCluster {
- clusterAvailableReplicas = resortClusterList(clusterAvailableReplicas, scheduledClusterNames)
- clustersNum, clustersMaxReplicas := 0, int32(0)
- for _, clusterInfo := range clusterAvailableReplicas {
- clustersNum++
- clustersMaxReplicas += clusterInfo.Replicas
- if clustersMaxReplicas >= replicas {
- break
- }
- }
- return divideReplicasByAvailableReplica(clusterAvailableReplicas[0:clustersNum], replicas, clustersMaxReplicas)
- }
- func divideReplicasByAvailableReplica(clusterAvailableReplicas []workv1alpha2.TargetCluster, replicas int32,
- clustersMaxReplicas int32) []workv1alpha2.TargetCluster {
- desireReplicaInfos := make(map[string]int64)
- allocatedReplicas := int32(0)
- for _, clusterInfo := range clusterAvailableReplicas {
- desireReplicaInfos[clusterInfo.Name] = int64(clusterInfo.Replicas * replicas / clustersMaxReplicas)
- allocatedReplicas += int32(desireReplicaInfos[clusterInfo.Name])
- }
- var clusterNames []string
- for _, targetCluster := range clusterAvailableReplicas {
- clusterNames = append(clusterNames, targetCluster.Name)
- }
- divideRemainingReplicas(int(replicas-allocatedReplicas), desireReplicaInfos, clusterNames)
- targetClusters := make([]workv1alpha2.TargetCluster, len(desireReplicaInfos))
- i := 0
- for key, value := range desireReplicaInfos {
- targetClusters[i] = workv1alpha2.TargetCluster{Name: key, Replicas: int32(value)}
- i++
- }
- return targetClusters
- }
- // divideRemainingReplicas divide remaining Replicas to clusters and calculate desiredReplicaInfos
- func divideRemainingReplicas(remainingReplicas int, desiredReplicaInfos map[string]int64, clusterNames []string) {
- if remainingReplicas <= 0 {
- return
- }
- clusterSize := len(clusterNames)
- if remainingReplicas < clusterSize {
- for i := 0; i < remainingReplicas; i++ {
- desiredReplicaInfos[clusterNames[i]]++
- }
- } else {
- avg, residue := remainingReplicas/clusterSize, remainingReplicas%clusterSize
- for i := 0; i < clusterSize; i++ {
- if i < residue {
- desiredReplicaInfos[clusterNames[i]] += int64(avg) + 1
- } else {
- desiredReplicaInfos[clusterNames[i]] += int64(avg)
- }
- }
- }
- }
+ switch state.strategyType {
+ case AggregatedStrategy:
+ state.availableClusters = state.resortAvailableClusters()
+ var sum int32
+ for i := range state.availableClusters {
+ if sum += state.availableClusters[i].Replicas; sum >= state.targetReplicas {
+ state.availableClusters = state.availableClusters[:i+1]
+ break
+ }
+ }
+ fallthrough
+ case DynamicWeightStrategy:
+ // Set the availableClusters as the weight, scheduledClusters as init result, target as the dispenser object.
+ // After dispensing, the target cluster will be the combination of init result and weighted result for target replicas.
+ weightList := getStaticWeightInfoListByTargetClusters(state.availableClusters)
+ disp := newDispenser(state.targetReplicas, state.scheduledClusters)
+ disp.takeByWeight(weightList)
+ return disp.result, nil
+ default:
+ // should never happen
+ return nil, fmt.Errorf("undefined strategy type: %s", state.strategyType)
+ }
+ }
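A worked example of the aggregated path, taken from the Test_dynamicDivideReplicas case "replica 12, aggregated 12:6:6" below:

// Aggregated path, step by step:
//   availableClusters (sorted): [{member2, 12}, {member1, 6}, {member3, 6}], targetReplicas = 12
//   prefix scan: sum = 12 >= 12 after the first entry -> truncate to [{member2, 12}]
//   fallthrough to the weighted path: weight list {member2: 12}, and the
//   dispenser hands all 12 replicas to member2 -> [{member2, 12}]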
- func scaleDownScheduleByReplicaDivisionPreference(
- spec *workv1alpha2.ResourceBindingSpec,
- preference policyv1alpha1.ReplicaDivisionPreference,
- ) ([]workv1alpha2.TargetCluster, error) {
+ func dynamicScaleDown(state *assignState) ([]workv1alpha2.TargetCluster, error) {
// The previous scheduling result will be the weight reference of scaling down.
// In other words, we scale down the replicas proportionally by their scheduled replicas.
- return divideReplicasByPreference(spec.Clusters, spec.Replicas, preference, sets.NewString())
+ // Now:
+ // 1. targetReplicas is set to desired replicas.
+ // 2. availableClusters is set to the former schedule result.
+ // 3. scheduledClusters and assignedReplicas are not set, which implicates we consider this action as a first schedule.
+ state.targetReplicas = state.spec.Replicas
+ state.scheduledClusters = nil
+ state.buildAvailableClusters(func(_ []*clusterv1alpha1.Cluster, spec *workv1alpha2.ResourceBindingSpec) []workv1alpha2.TargetCluster {
+ availableClusters := make(TargetClustersList, len(spec.Clusters))
+ copy(availableClusters, spec.Clusters)
+ sort.Sort(availableClusters)
+ return availableClusters
+ })
+ return dynamicDivideReplicas(state)
}
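A worked example, taken from the Test_dynamicScale case "replica 12 -> 6, dynamic weighted 1:1:1" above:

// dynamicScaleDown, by example:
//   previous result: [{member1, 2}, {member2, 4}, {member3, 6}], desired replicas = 6
//   availableClusters becomes the previous result itself, so the old replica
//   counts act as weights: 6 replicas split 2:4:6 -> [{member1, 1}, {member2, 2}, {member3, 3}]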
- func scaleUpScheduleByReplicaDivisionPreference(
- clusters []*clusterv1alpha1.Cluster,
- spec *workv1alpha2.ResourceBindingSpec,
- preference policyv1alpha1.ReplicaDivisionPreference,
- scheduledClusters []workv1alpha2.TargetCluster,
- assignedReplicas int32,
- ) ([]workv1alpha2.TargetCluster, error) {
- // Step 1: Get how many replicas should be scheduled in this cycle and construct a new object if necessary
- newSpec := spec
- if assignedReplicas > 0 {
- newSpec = spec.DeepCopy()
- newSpec.Replicas = spec.Replicas - assignedReplicas
- }
- // Step 2: Calculate available replicas of all candidates
- clusterAvailableReplicas := calAvailableReplicas(clusters, newSpec)
- sort.Sort(TargetClustersList(clusterAvailableReplicas))
- // Step 3: Begin dividing.
- // Only the new replicas are considered during this scheduler, the old replicas will not be moved.
- // If not, the old replicas may be recreated which is not expected during scaling up.
- // The parameter `scheduledClusterNames` is used to make sure that we assign new replicas to them preferentially
- // so that all the replicas are aggregated.
- result, err := divideReplicasByPreference(clusterAvailableReplicas, newSpec.Replicas,
- preference, util.ConvertToClusterNames(scheduledClusters))
- if err != nil {
- return result, err
- }
- // Step 4: Merge the result of previous and new results.
- return util.MergeTargetClusters(scheduledClusters, result), nil
+ func dynamicScaleUp(state *assignState) ([]workv1alpha2.TargetCluster, error) {
+ // Target is the extra ones.
+ state.targetReplicas = state.spec.Replicas - state.assignedReplicas
+ state.buildAvailableClusters(func(clusters []*clusterv1alpha1.Cluster, spec *workv1alpha2.ResourceBindingSpec) []workv1alpha2.TargetCluster {
+ clusterAvailableReplicas := calAvailableReplicas(clusters, spec)
+ sort.Sort(TargetClustersList(clusterAvailableReplicas))
+ return clusterAvailableReplicas
+ })
+ return dynamicDivideReplicas(state)
}
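A worked example, taken from the Test_dynamicScale case "replica 12 -> 24, dynamic weighted 10:10:10" above:

// dynamicScaleUp, by example:
//   previous result: [{member1, 2}, {member2, 4}, {member3, 6}] -> assignedReplicas = 12
//   targetReplicas = 24 - 12 = 12 extra replicas to place
//   available replicas are 10:10:10, so the extra 12 split 4:4:4 and are merged
//   with the old result -> [{member1, 6}, {member2, 8}, {member3, 10}]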

View File

@ -3,14 +3,7 @@ package core
import (
"testing"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/sets"
clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
"github.com/karmada-io/karmada/pkg/util"
utilhelper "github.com/karmada-io/karmada/pkg/util/helper"
"github.com/karmada-io/karmada/test/helper"
)
@ -106,133 +99,24 @@ func Test_dispenser_takeByWeight(t *testing.T) {
}
}
func Test_divideRemainingReplicas(t *testing.T) {
type args struct {
remainingReplicas int
desiredReplicaInfos map[string]int64
clusterNames []string
}
tests := []struct {
name string
args args
want map[string]int64
}{
{
name: "remainingReplicas 13",
args: args{
remainingReplicas: 13,
desiredReplicaInfos: map[string]int64{
ClusterMember1: 2,
ClusterMember2: 3,
ClusterMember3: 4,
},
clusterNames: []string{
ClusterMember1, ClusterMember2, ClusterMember3,
},
},
want: map[string]int64{
ClusterMember1: 7,
ClusterMember2: 7,
ClusterMember3: 8,
},
},
{
name: "remainingReplicas 17",
args: args{
remainingReplicas: 17,
desiredReplicaInfos: map[string]int64{
ClusterMember1: 4,
ClusterMember2: 3,
ClusterMember3: 2,
},
clusterNames: []string{
ClusterMember1, ClusterMember2, ClusterMember3,
},
},
want: map[string]int64{
ClusterMember1: 10,
ClusterMember2: 9,
ClusterMember3: 7,
},
},
{
name: "remainingReplicas 2",
args: args{
remainingReplicas: 2,
desiredReplicaInfos: map[string]int64{
ClusterMember1: 1,
ClusterMember2: 1,
ClusterMember3: 1,
},
clusterNames: []string{
ClusterMember1, ClusterMember2, ClusterMember3,
},
},
want: map[string]int64{
ClusterMember1: 2,
ClusterMember2: 2,
ClusterMember3: 1,
},
},
{
name: "remainingReplicas 0",
args: args{
remainingReplicas: 0,
desiredReplicaInfos: map[string]int64{
ClusterMember1: 3,
ClusterMember2: 3,
ClusterMember3: 3,
},
clusterNames: []string{
ClusterMember1, ClusterMember2, ClusterMember3,
},
},
want: map[string]int64{
ClusterMember1: 3,
ClusterMember2: 3,
ClusterMember3: 3,
},
},
}
IsTwoMapEqual := func(a, b map[string]int64) bool {
return a[ClusterMember1] == b[ClusterMember1] && a[ClusterMember2] == b[ClusterMember2] && a[ClusterMember3] == b[ClusterMember3]
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
divideRemainingReplicas(tt.args.remainingReplicas, tt.args.desiredReplicaInfos, tt.args.clusterNames)
if !IsTwoMapEqual(tt.args.desiredReplicaInfos, tt.want) {
t.Errorf("divideRemainingReplicas() got = %v, want %v", tt.args.desiredReplicaInfos, tt.want)
}
})
}
}
- func Test_divideReplicasByPreference(t *testing.T) {
- type args struct {
- clusterAvailableReplicas []workv1alpha2.TargetCluster
- replicas int32
- clustersMaxReplicas int32
- preference policyv1alpha1.ReplicaDivisionPreference
- scheduledClusterNames sets.String
- }
+ func Test_dynamicDivideReplicas(t *testing.T) {
tests := []struct {
name string
- args args
+ state *assignState
want []workv1alpha2.TargetCluster
wantErr bool
}{
{
name: "replica 12, dynamic weight 18:12:6",
- args: args{
- clusterAvailableReplicas: TargetClustersList{
+ state: &assignState{
+ availableClusters: TargetClustersList{
workv1alpha2.TargetCluster{Name: ClusterMember1, Replicas: 18},
workv1alpha2.TargetCluster{Name: ClusterMember2, Replicas: 12},
workv1alpha2.TargetCluster{Name: ClusterMember3, Replicas: 6},
},
- replicas: 12,
- clustersMaxReplicas: 36,
- preference: policyv1alpha1.ReplicaDivisionPreferenceWeighted,
- scheduledClusterNames: sets.NewString(),
+ targetReplicas: 12,
+ availableReplicas: 36,
+ strategyType: DynamicWeightStrategy,
},
want: []workv1alpha2.TargetCluster{
{Name: ClusterMember1, Replicas: 6},
@ -243,16 +127,15 @@ func Test_divideReplicasByPreference(t *testing.T) {
},
{
name: "replica 12, dynamic weight 20:12:6",
- args: args{
- clusterAvailableReplicas: TargetClustersList{
+ state: &assignState{
+ availableClusters: TargetClustersList{
workv1alpha2.TargetCluster{Name: ClusterMember1, Replicas: 20},
workv1alpha2.TargetCluster{Name: ClusterMember2, Replicas: 12},
workv1alpha2.TargetCluster{Name: ClusterMember3, Replicas: 6},
},
- replicas: 12,
- clustersMaxReplicas: 38,
- preference: policyv1alpha1.ReplicaDivisionPreferenceWeighted,
- scheduledClusterNames: sets.NewString(),
+ targetReplicas: 12,
+ availableReplicas: 38,
+ strategyType: DynamicWeightStrategy,
},
want: []workv1alpha2.TargetCluster{
{Name: ClusterMember1, Replicas: 7},
@ -263,16 +146,15 @@ func Test_divideReplicasByPreference(t *testing.T) {
},
{
name: "replica 12, dynamic weight 6:12:6",
- args: args{
- clusterAvailableReplicas: TargetClustersList{
+ state: &assignState{
+ availableClusters: TargetClustersList{
workv1alpha2.TargetCluster{Name: ClusterMember1, Replicas: 6},
workv1alpha2.TargetCluster{Name: ClusterMember2, Replicas: 12},
workv1alpha2.TargetCluster{Name: ClusterMember3, Replicas: 6},
},
- replicas: 12,
- clustersMaxReplicas: 24,
- preference: policyv1alpha1.ReplicaDivisionPreferenceWeighted,
- scheduledClusterNames: sets.NewString(),
+ targetReplicas: 12,
+ availableReplicas: 24,
+ strategyType: DynamicWeightStrategy,
},
want: []workv1alpha2.TargetCluster{
{Name: ClusterMember1, Replicas: 3},
@ -283,16 +165,15 @@ func Test_divideReplicasByPreference(t *testing.T) {
},
{
name: "replica 12, aggregated 12:6:6",
- args: args{
- clusterAvailableReplicas: TargetClustersList{
+ state: &assignState{
+ availableClusters: TargetClustersList{
workv1alpha2.TargetCluster{Name: ClusterMember2, Replicas: 12},
workv1alpha2.TargetCluster{Name: ClusterMember1, Replicas: 6},
workv1alpha2.TargetCluster{Name: ClusterMember3, Replicas: 6},
},
- replicas: 12,
- clustersMaxReplicas: 24,
- preference: policyv1alpha1.ReplicaDivisionPreferenceAggregated,
- scheduledClusterNames: sets.NewString(),
+ targetReplicas: 12,
+ availableReplicas: 24,
+ strategyType: AggregatedStrategy,
},
want: []workv1alpha2.TargetCluster{
{Name: ClusterMember2, Replicas: 12},
@ -301,16 +182,15 @@ func Test_divideReplicasByPreference(t *testing.T) {
},
{
name: "replica 12, aggregated 6:6:6",
- args: args{
- clusterAvailableReplicas: TargetClustersList{
+ state: &assignState{
+ availableClusters: TargetClustersList{
workv1alpha2.TargetCluster{Name: ClusterMember1, Replicas: 6},
workv1alpha2.TargetCluster{Name: ClusterMember2, Replicas: 6},
workv1alpha2.TargetCluster{Name: ClusterMember3, Replicas: 6},
},
- replicas: 12,
- clustersMaxReplicas: 18,
- preference: policyv1alpha1.ReplicaDivisionPreferenceAggregated,
- scheduledClusterNames: sets.NewString(),
+ targetReplicas: 12,
+ availableReplicas: 18,
+ strategyType: AggregatedStrategy,
},
want: []workv1alpha2.TargetCluster{
{Name: ClusterMember1, Replicas: 6},
@ -321,451 +201,13 @@ func Test_divideReplicasByPreference(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- got, err := divideReplicasByPreference(tt.args.clusterAvailableReplicas, tt.args.replicas, tt.args.preference, tt.args.scheduledClusterNames)
+ got, err := dynamicDivideReplicas(tt.state)
if (err != nil) != tt.wantErr {
- t.Errorf("divideReplicasByPreference() error = %v, wantErr %v", err, tt.wantErr)
+ t.Errorf("dynamicDivideReplicas() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !helper.IsScheduleResultEqual(got, tt.want) {
t.Errorf("divideReplicasByPreference() got = %v, want %v", got, tt.want)
}
})
}
}
func Test_divideReplicasByResource(t *testing.T) {
type args struct {
clusters []*clusterv1alpha1.Cluster
spec *workv1alpha2.ResourceBindingSpec
preference policyv1alpha1.ReplicaDivisionPreference
}
tests := []struct {
name string
args args
want []workv1alpha2.TargetCluster
wantErr bool
}{
{
name: "replica 12, dynamic weight 6:8:10",
args: args{
clusters: []*clusterv1alpha1.Cluster{
helper.NewClusterWithResource(ClusterMember1, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(6, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember2, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(8, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember3, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(10, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
},
spec: &workv1alpha2.ResourceBindingSpec{
ReplicaRequirements: &workv1alpha2.ReplicaRequirements{
ResourceRequest: util.EmptyResource().ResourceList(),
},
Replicas: 12,
},
preference: policyv1alpha1.ReplicaDivisionPreferenceWeighted,
},
want: []workv1alpha2.TargetCluster{
{Name: ClusterMember1, Replicas: 3},
{Name: ClusterMember2, Replicas: 4},
{Name: ClusterMember3, Replicas: 5},
},
wantErr: false,
},
{
name: "replica 12, dynamic weight 8:8:10",
args: args{
clusters: []*clusterv1alpha1.Cluster{
helper.NewClusterWithResource(ClusterMember1, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(8, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember2, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(8, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember3, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(10, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
},
spec: &workv1alpha2.ResourceBindingSpec{
ReplicaRequirements: &workv1alpha2.ReplicaRequirements{
ResourceRequest: util.EmptyResource().ResourceList(),
},
Replicas: 12,
},
preference: policyv1alpha1.ReplicaDivisionPreferenceWeighted,
},
want: []workv1alpha2.TargetCluster{
{Name: ClusterMember1, Replicas: 4},
{Name: ClusterMember2, Replicas: 3},
{Name: ClusterMember3, Replicas: 5},
},
wantErr: false,
},
{
name: "replica 12, dynamic weight 3:3:3",
args: args{
clusters: []*clusterv1alpha1.Cluster{
helper.NewClusterWithResource(ClusterMember1, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(3, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember2, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(3, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember3, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(3, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
},
spec: &workv1alpha2.ResourceBindingSpec{
ReplicaRequirements: &workv1alpha2.ReplicaRequirements{
ResourceRequest: util.EmptyResource().ResourceList(),
},
Replicas: 12,
},
preference: policyv1alpha1.ReplicaDivisionPreferenceWeighted,
},
wantErr: true,
},
{
name: "replica 12, aggregated 6:8:10",
args: args{
clusters: []*clusterv1alpha1.Cluster{
helper.NewClusterWithResource(ClusterMember1, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(6, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember2, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(8, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember3, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(10, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
},
spec: &workv1alpha2.ResourceBindingSpec{
ReplicaRequirements: &workv1alpha2.ReplicaRequirements{
ResourceRequest: util.EmptyResource().ResourceList(),
},
Replicas: 12,
},
preference: policyv1alpha1.ReplicaDivisionPreferenceAggregated,
},
want: []workv1alpha2.TargetCluster{
{Name: ClusterMember2, Replicas: 5},
{Name: ClusterMember3, Replicas: 7},
},
wantErr: false,
},
{
name: "replica 12, aggregated 12:8:10",
args: args{
clusters: []*clusterv1alpha1.Cluster{
helper.NewClusterWithResource(ClusterMember1, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(12, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember2, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(8, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember3, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(10, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
},
spec: &workv1alpha2.ResourceBindingSpec{
ReplicaRequirements: &workv1alpha2.ReplicaRequirements{
ResourceRequest: util.EmptyResource().ResourceList(),
},
Replicas: 12,
},
preference: policyv1alpha1.ReplicaDivisionPreferenceAggregated,
},
want: []workv1alpha2.TargetCluster{
{Name: ClusterMember1, Replicas: 12},
},
wantErr: false,
},
{
name: "replica 12, aggregated 3:3:3",
args: args{
clusters: []*clusterv1alpha1.Cluster{
helper.NewClusterWithResource(ClusterMember1, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(3, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember2, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(3, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember3, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(3, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
},
spec: &workv1alpha2.ResourceBindingSpec{
ReplicaRequirements: &workv1alpha2.ReplicaRequirements{
ResourceRequest: util.EmptyResource().ResourceList(),
},
Replicas: 12,
},
preference: policyv1alpha1.ReplicaDivisionPreferenceAggregated,
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := divideReplicasByResource(tt.args.clusters, tt.args.spec, tt.args.preference)
if (err != nil) != tt.wantErr {
t.Errorf("divideReplicasByResource() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !helper.IsScheduleResultEqual(got, tt.want) {
t.Errorf("divideReplicasByResource() got = %v, want %v", got, tt.want)
}
})
}
}
func Test_scaleScheduling(t *testing.T) {
type args struct {
clusters []*clusterv1alpha1.Cluster
spec *workv1alpha2.ResourceBindingSpec
preference policyv1alpha1.ReplicaDivisionPreference
}
tests := []struct {
name string
args args
want []workv1alpha2.TargetCluster
wantErr bool
}{
{
name: "replica 12 -> 6, dynamic weighted 1:1:1",
args: args{
clusters: []*clusterv1alpha1.Cluster{
helper.NewClusterWithResource(ClusterMember1, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(1, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember2, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(1, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember3, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(1, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
},
spec: &workv1alpha2.ResourceBindingSpec{
ReplicaRequirements: &workv1alpha2.ReplicaRequirements{
ResourceRequest: util.EmptyResource().ResourceList(),
},
Replicas: 6,
Clusters: []workv1alpha2.TargetCluster{
{Name: ClusterMember1, Replicas: 2},
{Name: ClusterMember2, Replicas: 4},
{Name: ClusterMember3, Replicas: 6},
},
},
preference: policyv1alpha1.ReplicaDivisionPreferenceWeighted,
},
want: []workv1alpha2.TargetCluster{
{Name: ClusterMember1, Replicas: 1},
{Name: ClusterMember2, Replicas: 2},
{Name: ClusterMember3, Replicas: 3},
},
wantErr: false,
},
{
name: "replica 12 -> 24, dynamic weighted 10:10:10",
args: args{
clusters: []*clusterv1alpha1.Cluster{
helper.NewClusterWithResource(ClusterMember1, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(10, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember2, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(10, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember3, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(10, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
},
spec: &workv1alpha2.ResourceBindingSpec{
ReplicaRequirements: &workv1alpha2.ReplicaRequirements{
ResourceRequest: util.EmptyResource().ResourceList(),
},
Replicas: 24,
Clusters: []workv1alpha2.TargetCluster{
{Name: ClusterMember1, Replicas: 2},
{Name: ClusterMember2, Replicas: 4},
{Name: ClusterMember3, Replicas: 6},
},
},
preference: policyv1alpha1.ReplicaDivisionPreferenceWeighted,
},
want: []workv1alpha2.TargetCluster{
{Name: ClusterMember1, Replicas: 6},
{Name: ClusterMember2, Replicas: 8},
{Name: ClusterMember3, Replicas: 10},
},
wantErr: false,
},
{
name: "replica 12 -> 24, dynamic weighted 1:1:1",
args: args{
clusters: []*clusterv1alpha1.Cluster{
helper.NewClusterWithResource(ClusterMember1, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(1, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember2, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(1, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember3, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(1, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
},
spec: &workv1alpha2.ResourceBindingSpec{
ReplicaRequirements: &workv1alpha2.ReplicaRequirements{
ResourceRequest: util.EmptyResource().ResourceList(),
},
Replicas: 24,
Clusters: []workv1alpha2.TargetCluster{
{Name: ClusterMember1, Replicas: 2},
{Name: ClusterMember2, Replicas: 4},
{Name: ClusterMember3, Replicas: 6},
},
},
preference: policyv1alpha1.ReplicaDivisionPreferenceWeighted,
},
wantErr: true,
},
{
name: "replica 12 -> 6, aggregated 1:1:1",
args: args{
clusters: []*clusterv1alpha1.Cluster{
helper.NewClusterWithResource(ClusterMember1, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(1, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember2, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(1, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember3, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(1, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
},
spec: &workv1alpha2.ResourceBindingSpec{
ReplicaRequirements: &workv1alpha2.ReplicaRequirements{
ResourceRequest: util.EmptyResource().ResourceList(),
},
Replicas: 6,
Clusters: []workv1alpha2.TargetCluster{
{Name: ClusterMember1, Replicas: 4},
{Name: ClusterMember2, Replicas: 8},
},
},
preference: policyv1alpha1.ReplicaDivisionPreferenceAggregated,
},
want: []workv1alpha2.TargetCluster{
{Name: ClusterMember1, Replicas: 2},
{Name: ClusterMember2, Replicas: 4},
},
wantErr: false,
},
{
name: "replica 12 -> 24, aggregated 4:6:8",
args: args{
clusters: []*clusterv1alpha1.Cluster{
helper.NewClusterWithResource(ClusterMember1, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(4, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember2, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(6, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember3, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(8, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
},
spec: &workv1alpha2.ResourceBindingSpec{
ReplicaRequirements: &workv1alpha2.ReplicaRequirements{
ResourceRequest: util.EmptyResource().ResourceList(),
},
Replicas: 24,
Clusters: []workv1alpha2.TargetCluster{
{Name: ClusterMember1, Replicas: 4},
{Name: ClusterMember2, Replicas: 8},
},
},
preference: policyv1alpha1.ReplicaDivisionPreferenceAggregated,
},
want: []workv1alpha2.TargetCluster{
{Name: ClusterMember1, Replicas: 6},
{Name: ClusterMember2, Replicas: 13},
{Name: ClusterMember3, Replicas: 5},
},
wantErr: false,
},
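// Reading the expected result above: 12 replicas are already assigned (4 on
// member1, 8 on member2), so reaching 24 means placing 12 new ones. The
// increments in `want` are +2/+5/+5 against spare capacities of 4/6/8 pods:
// each increment fits its cluster's capacity, and the previously scheduled
// clusters keep their existing replicas instead of being rescheduled.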
{
name: "replica 12 -> 24, dynamic weighted 1:1:1",
args: args{
clusters: []*clusterv1alpha1.Cluster{
helper.NewClusterWithResource(ClusterMember1, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(1, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember2, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(1, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember3, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(1, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
},
spec: &workv1alpha2.ResourceBindingSpec{
ReplicaRequirements: &workv1alpha2.ReplicaRequirements{
ResourceRequest: util.EmptyResource().ResourceList(),
},
Replicas: 24,
Clusters: []workv1alpha2.TargetCluster{
{Name: ClusterMember1, Replicas: 4},
{Name: ClusterMember2, Replicas: 8},
},
},
preference: policyv1alpha1.ReplicaDivisionPreferenceAggregated,
},
wantErr: true,
},
{
name: "replica 12 -> 24, aggregated 4:8:12, with cluster2 disappeared and cluster4 appeared",
args: args{
clusters: []*clusterv1alpha1.Cluster{
helper.NewClusterWithResource(ClusterMember1, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(4, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember3, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(8, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
helper.NewClusterWithResource(ClusterMember4, corev1.ResourceList{
corev1.ResourcePods: *resource.NewQuantity(12, resource.DecimalSI),
}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
},
spec: &workv1alpha2.ResourceBindingSpec{
ReplicaRequirements: &workv1alpha2.ReplicaRequirements{
ResourceRequest: util.EmptyResource().ResourceList(),
},
Replicas: 24,
Clusters: []workv1alpha2.TargetCluster{
{Name: ClusterMember1, Replicas: 4},
{Name: ClusterMember2, Replicas: 8},
},
},
preference: policyv1alpha1.ReplicaDivisionPreferenceAggregated,
},
want: []workv1alpha2.TargetCluster{
{Name: ClusterMember1, Replicas: 8},
{Name: ClusterMember3, Replicas: 6},
{Name: ClusterMember4, Replicas: 10},
},
wantErr: false,
},
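// Reading the expected result above: member2 is no longer a candidate, so its
// 8 previously assigned replicas cannot be kept; only member1's 4 survive.
// The remaining 20 replicas fit within the advertised capacities (+4 of 4 on
// member1, +6 of 8 on member3, +10 of 12 on member4), with member1, the sole
// surviving scheduled cluster, retaining its existing replicas.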
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := divideReplicasByResource(tt.args.clusters, tt.args.spec, tt.args.preference)
if (err != nil) != tt.wantErr {
t.Errorf("scaleScheduleByReplicaDivisionPreference() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !helper.IsScheduleResultEqual(got, tt.want) {
t.Errorf("scaleScheduling() got = %v, want %v", got, tt.want)
t.Errorf("dynamicDivideReplicas() got = %v, want %v", got, tt.want)
}
})
}


@ -180,35 +180,14 @@ func (g *genericScheduler) assignReplicas(
}
if object.Replicas > 0 && replicaSchedulingStrategy != nil {
var strategy string
switch replicaSchedulingStrategy.ReplicaSchedulingType {
case policyv1alpha1.ReplicaSchedulingTypeDuplicated:
strategy = DuplicatedStrategy
case policyv1alpha1.ReplicaSchedulingTypeDivided:
switch replicaSchedulingStrategy.ReplicaDivisionPreference {
case policyv1alpha1.ReplicaDivisionPreferenceAggregated:
strategy = AggregatedStrategy
case policyv1alpha1.ReplicaDivisionPreferenceWeighted:
if replicaSchedulingStrategy.WeightPreference != nil && len(replicaSchedulingStrategy.WeightPreference.DynamicWeight) != 0 {
strategy = DynamicWeightStrategy
} else {
strategy = StaticWeightStrategy
}
}
}
assign, ok := assignFuncMap[strategy]
state := newAssignState(clusters, replicaSchedulingStrategy, object)
assignFunc, ok := assignFuncMap[state.strategyType]
if !ok {
// should never happen at present
return nil, fmt.Errorf("unsupported replica scheduling strategy, replicaSchedulingType: %s, replicaDivisionPreference: %s, "+
"please try another scheduling strategy", replicaSchedulingStrategy.ReplicaSchedulingType, replicaSchedulingStrategy.ReplicaDivisionPreference)
}
return assign(&assignState{
candidates: clusters,
strategy: replicaSchedulingStrategy,
object: object,
})
return assignFunc(state)
}
// If not workload, assign all clusters without considering replicas.
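// The map lookup above replaces the nested switch removed in this hunk:
// resolving the strategy type once in newAssignState keeps assignReplicas
// itself strategy-agnostic. A hypothetical new strategy would only need a
// resolver branch plus a table entry, e.g. (illustrative name only):
//
//	assignFuncMap["Hypothetical"] = func(state *assignState) ([]workv1alpha2.TargetCluster, error) { ... }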


@ -15,6 +15,8 @@ import (
"github.com/karmada-io/karmada/pkg/util"
)
type calculator func([]*clusterv1alpha1.Cluster, *workv1alpha2.ResourceBindingSpec) []workv1alpha2.TargetCluster
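// Any function matching this signature can drive assignState.buildAvailableClusters;
// calAvailableReplicas below is one such function. A minimal hypothetical
// stand-in (for illustration only) that reports one spare replica per candidate:
var _ calculator = func(clusters []*clusterv1alpha1.Cluster, spec *workv1alpha2.ResourceBindingSpec) []workv1alpha2.TargetCluster {
	targets := make([]workv1alpha2.TargetCluster, 0, len(clusters))
	for _, cluster := range clusters {
		// Pretend every candidate can host exactly one more replica.
		targets = append(targets, workv1alpha2.TargetCluster{Name: cluster.Name, Replicas: 1})
	}
	return targets
}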
func getDefaultWeightPreference(clusters []*clusterv1alpha1.Cluster) *policyv1alpha1.ClusterPreferences {
staticWeightLists := make([]policyv1alpha1.StaticClusterWeight, 0)
for _, cluster := range clusters {
@ -73,52 +75,6 @@ func calAvailableReplicas(clusters []*clusterv1alpha1.Cluster, spec *workv1alpha
return availableTargetClusters
}
// findOutScheduledCluster will return a slice of clusters
// which are a part of `TargetClusters` and have non-zero replicas.
func findOutScheduledCluster(tcs []workv1alpha2.TargetCluster, candidates []*clusterv1alpha1.Cluster) []workv1alpha2.TargetCluster {
validTarget := make([]workv1alpha2.TargetCluster, 0)
if len(tcs) == 0 {
return validTarget
}
for _, targetCluster := range tcs {
// must have non-zero replicas
if targetCluster.Replicas <= 0 {
continue
}
// must in `candidates`
for _, cluster := range candidates {
if targetCluster.Name == cluster.Name {
validTarget = append(validTarget, targetCluster)
break
}
}
}
return validTarget
}
// resortClusterList is used to make sure scheduledClusterNames are in front of the other clusters in the list of
// clusterAvailableReplicas so that we can assign new replicas to them preferentially when scaling up.
// Note that scheduledClusterNames has no items during the first scheduling pass.
func resortClusterList(clusterAvailableReplicas []workv1alpha2.TargetCluster, scheduledClusterNames sets.String) []workv1alpha2.TargetCluster {
if scheduledClusterNames.Len() == 0 {
return clusterAvailableReplicas
}
var preUsedCluster []workv1alpha2.TargetCluster
var unUsedCluster []workv1alpha2.TargetCluster
for i := range clusterAvailableReplicas {
if scheduledClusterNames.Has(clusterAvailableReplicas[i].Name) {
preUsedCluster = append(preUsedCluster, clusterAvailableReplicas[i])
} else {
unUsedCluster = append(unUsedCluster, clusterAvailableReplicas[i])
}
}
clusterAvailableReplicas = append(preUsedCluster, unUsedCluster...)
klog.V(4).Infof("Resorted target cluster: %v", clusterAvailableReplicas)
return clusterAvailableReplicas
}
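// For example, given clusterAvailableReplicas [member1:5, member2:3, member3:2]
// and scheduledClusterNames {member2}, the resorted list is
// [member2:3, member1:5, member3:2]: previously scheduled clusters move to the
// front while the relative order of the remaining clusters is preserved.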
// attachZeroReplicasCluster attaches clusters in `clusters` into `targetClusters` with zero replicas.
// The purpose is to avoid the workload not appearing in the rb's spec.clusters field.
func attachZeroReplicasCluster(clusters []*clusterv1alpha1.Cluster, targetClusters []workv1alpha2.TargetCluster) []workv1alpha2.TargetCluster {


@ -4,116 +4,11 @@ import (
"reflect"
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
"github.com/karmada-io/karmada/test/helper"
)
func Test_findOutScheduledCluster(t *testing.T) {
type args struct {
tcs []workv1alpha2.TargetCluster
candidates []*clusterv1alpha1.Cluster
}
tests := []struct {
name string
args args
want []workv1alpha2.TargetCluster
}{
{
name: "tcs: member1,member2,member3, candidates: member1,member2",
args: args{
tcs: []workv1alpha2.TargetCluster{
{
Name: ClusterMember1,
Replicas: 1,
},
{
Name: ClusterMember2,
Replicas: 1,
},
{
Name: ClusterMember3,
Replicas: 1,
},
},
candidates: []*clusterv1alpha1.Cluster{
{
ObjectMeta: metav1.ObjectMeta{
Name: ClusterMember1,
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: ClusterMember2,
},
},
},
},
want: []workv1alpha2.TargetCluster{
{
Name: ClusterMember1,
Replicas: 1,
},
{
Name: ClusterMember2,
Replicas: 1,
},
},
},
{
name: "tcs: member1,member2, candidates: member1,member2,member3",
args: args{
tcs: []workv1alpha2.TargetCluster{
{
Name: ClusterMember1,
Replicas: 1,
},
{
Name: ClusterMember2,
Replicas: 1,
},
},
candidates: []*clusterv1alpha1.Cluster{
{
ObjectMeta: metav1.ObjectMeta{
Name: ClusterMember1,
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: ClusterMember2,
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: ClusterMember3,
},
},
},
},
want: []workv1alpha2.TargetCluster{
{
Name: ClusterMember1,
Replicas: 1,
},
{
Name: ClusterMember2,
Replicas: 1,
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := findOutScheduledCluster(tt.args.tcs, tt.args.candidates); !reflect.DeepEqual(got, tt.want) {
t.Errorf("findOutScheduledCluster() = %v, want %v", got, tt.want)
}
})
}
}
func Test_attachZeroReplicasCluster(t *testing.T) {
type args struct {
clusters []*clusterv1alpha1.Cluster