Merge pull request #506 from qianjun1993/scheduler

scheduler with weighted ReplicaDivisionPreference
This commit is contained in:
karmada-bot 2021-07-09 10:29:28 +08:00 committed by GitHub
commit e1dfb7707d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 67 additions and 2 deletions

View File

@ -14,6 +14,7 @@ import (
"github.com/karmada-io/karmada/pkg/scheduler/framework"
"github.com/karmada-io/karmada/pkg/scheduler/framework/runtime"
"github.com/karmada-io/karmada/pkg/util"
"github.com/karmada-io/karmada/pkg/util/helper"
)
// ScheduleAlgorithm is the interface that should be implemented to schedule a resource to the target clusters.
@ -205,6 +206,14 @@ func (g *genericScheduler) assignReplicas(clusters []*clusterv1alpha1.Cluster, r
}
return targetClusters, nil
}
if replicaSchedulingStrategy.ReplicaSchedulingType == policyv1alpha1.ReplicaSchedulingTypeDivided {
if replicaSchedulingStrategy.ReplicaDivisionPreference == policyv1alpha1.ReplicaDivisionPreferenceWeighted {
if replicaSchedulingStrategy.WeightPreference == nil {
return nil, fmt.Errorf("no WeightPreference find to divide replicas")
}
return g.divideReplicasByStaticWeight(clusters, replicaSchedulingStrategy.WeightPreference.StaticWeightList, object.Replicas)
}
}
}
for i, cluster := range clusters {
@ -212,3 +221,58 @@ func (g *genericScheduler) assignReplicas(clusters []*clusterv1alpha1.Cluster, r
}
return targetClusters, nil
}
// divideReplicasByStaticWeight assigns a total number of replicas to the selected clusters
// according to the static weight list. Clusters matched by a weight rule receive replicas
// proportional to their weight (floor division); any remainder is handed out one replica at
// a time to the highest-weighted clusters first. Clusters that match no rule get 0 replicas.
// If no cluster matches any rule, all clusters are treated as having weight 1.
func (g *genericScheduler) divideReplicasByStaticWeight(clusters []*clusterv1alpha1.Cluster, staticWeightList []policyv1alpha1.StaticClusterWeight, replicas int32) ([]workv1alpha1.TargetCluster, error) {
	weightSum := int64(0)
	matchClusters := make(map[string]int64)
	desireReplicaInfos := make(map[string]int64)

	// Collect the weight of each cluster; the first matching rule wins.
	for _, cluster := range clusters {
		for _, staticWeightRule := range staticWeightList {
			if util.ClusterMatches(cluster, staticWeightRule.TargetCluster) {
				weightSum += staticWeightRule.Weight
				matchClusters[cluster.Name] = staticWeightRule.Weight
				break
			}
		}
	}

	// Fallback: no rule matched any cluster — spread evenly with weight 1.
	if weightSum == 0 {
		for _, cluster := range clusters {
			matchClusters[cluster.Name] = 1
		}
		weightSum = int64(len(matchClusters))
	}

	// First pass: floor(weight * replicas / weightSum) per cluster.
	allocatedReplicas := int32(0)
	for clusterName, weight := range matchClusters {
		desireReplicaInfos[clusterName] = weight * int64(replicas) / weightSum
		allocatedReplicas += int32(desireReplicaInfos[clusterName])
	}

	// Second pass: distribute the remainder (always < len(matchClusters)) round-robin,
	// starting from the highest-weighted cluster. The modulo in the post-statement keeps
	// the index in range by construction — the original trailing "reset to 0" check ran
	// only after the slice had already been indexed out of bounds.
	if remainReplicas := replicas - allocatedReplicas; remainReplicas > 0 {
		sortedClusters := helper.SortClusterByWeight(matchClusters)
		for i := 0; remainReplicas > 0; i = (i + 1) % len(sortedClusters) {
			desireReplicaInfos[sortedClusters[i].ClusterName]++
			remainReplicas--
		}
	}

	// Unmatched clusters are still reported, with an explicit 0 assignment.
	for _, cluster := range clusters {
		if _, exist := matchClusters[cluster.Name]; !exist {
			desireReplicaInfos[cluster.Name] = 0
		}
	}

	targetClusters := make([]workv1alpha1.TargetCluster, len(desireReplicaInfos))
	i := 0
	for key, value := range desireReplicaInfos {
		targetClusters[i] = workv1alpha1.TargetCluster{Name: key, Replicas: int32(value)}
		i++
	}
	return targetClusters, nil
}

View File

@ -53,7 +53,8 @@ func (p ClusterWeightInfoList) Less(i, j int) bool {
return p[i].ClusterName < p[j].ClusterName
}
func sortClusterByWeight(m map[string]int64) ClusterWeightInfoList {
// SortClusterByWeight sort clusters by the weight
func SortClusterByWeight(m map[string]int64) ClusterWeightInfoList {
p := make(ClusterWeightInfoList, len(m))
i := 0
for k, v := range m {
@ -393,7 +394,7 @@ func calculateReplicas(c client.Client, policy *v1alpha1.ReplicaSchedulingPolicy
}
if remainReplicas := policy.Spec.TotalReplicas - allocatedReplicas; remainReplicas > 0 {
sortedClusters := sortClusterByWeight(matchClusters)
sortedClusters := SortClusterByWeight(matchClusters)
for i := 0; remainReplicas > 0; i++ {
desireReplicaInfos[sortedClusters[i].ClusterName]++
remainReplicas--