logic patch for calculating desireReplicaInfos
Signed-off-by: xinjie xu <1912747539@qq.com>
commit 804bbed87c
parent 5a025ea3f1

@@ -65,17 +65,15 @@ func divideReplicasByStaticWeight(clusters []*clusterv1alpha1.Cluster, weightList
 		allocatedReplicas += int32(desireReplicaInfos[clusterName])
 	}
 
-	if remainReplicas := replicas - allocatedReplicas; remainReplicas > 0 {
-		sortedClusters := helper.SortClusterByWeight(matchClusters)
-		for i := 0; remainReplicas > 0; i++ {
-			desireReplicaInfos[sortedClusters[i].ClusterName]++
-			remainReplicas--
-			if i == len(desireReplicaInfos) {
-				i = 0
-			}
-		}
+	clusterWeights := helper.SortClusterByWeight(matchClusters)
+
+	var clusterNames []string
+	for _, clusterWeightInfo := range clusterWeights {
+		clusterNames = append(clusterNames, clusterWeightInfo.ClusterName)
 	}
 
+	divideRemainingReplicas(int(replicas-allocatedReplicas), desireReplicaInfos, clusterNames)
+
 	for _, cluster := range clusters {
		if _, exist := matchClusters[cluster.Name]; !exist {
			desireReplicaInfos[cluster.Name] = 0
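The loop removed above appears to be the logic error the commit title refers to: its wrap-around check, if i == len(desireReplicaInfos), runs only after sortedClusters[i] has already been indexed, so the reset can never fire before i walks past the end of the slice. A minimal standalone reproduction of the removed loop (illustrative names, not part of this patch):

package main

import "fmt"

func main() {
	// Three clusters, five remaining replicas, handled the way the
	// removed loop would have. The map is pre-populated so len() stays 3.
	sortedClusters := []string{"a", "b", "c"}
	desireReplicaInfos := map[string]int64{"a": 0, "b": 0, "c": 0}
	remainReplicas := 5
	for i := 0; remainReplicas > 0; i++ {
		desireReplicaInfos[sortedClusters[i]]++ // panics once i reaches 3
		remainReplicas--
		if i == len(desireReplicaInfos) { // checked after the index is used,
			i = 0 // so the reset is unreachable before the panic
		}
	}
	fmt.Println(desireReplicaInfos)
}

divideRemainingReplicas avoids the problem entirely by iterating over the cluster list at most once.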
@@ -129,22 +127,18 @@ func divideReplicasByAggregation(clusterAvailableReplicas []workv1alpha2.TargetCluster
 
 func divideReplicasByAvailableReplica(clusterAvailableReplicas []workv1alpha2.TargetCluster, replicas int32,
 	clustersMaxReplicas int32, unusedClusters ...string) []workv1alpha2.TargetCluster {
-	desireReplicaInfos := make(map[string]int32)
+	desireReplicaInfos := make(map[string]int64)
 	allocatedReplicas := int32(0)
 	for _, clusterInfo := range clusterAvailableReplicas {
-		desireReplicaInfos[clusterInfo.Name] = clusterInfo.Replicas * replicas / clustersMaxReplicas
-		allocatedReplicas += desireReplicaInfos[clusterInfo.Name]
+		desireReplicaInfos[clusterInfo.Name] = int64(clusterInfo.Replicas * replicas / clustersMaxReplicas)
+		allocatedReplicas += int32(desireReplicaInfos[clusterInfo.Name])
 	}
 
-	if remainReplicas := replicas - allocatedReplicas; remainReplicas > 0 {
-		for i := 0; remainReplicas > 0; i++ {
-			desireReplicaInfos[clusterAvailableReplicas[i].Name]++
-			remainReplicas--
-			if i == len(desireReplicaInfos) {
-				i = 0
-			}
-		}
+	var clusterNames []string
+	for _, targetCluster := range clusterAvailableReplicas {
+		clusterNames = append(clusterNames, targetCluster.Name)
 	}
+	divideRemainingReplicas(int(replicas-allocatedReplicas), desireReplicaInfos, clusterNames)
 
 	// For scaling up
 	for _, cluster := range unusedClusters {
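Besides delegating the remainder to the shared helper, this hunk widens the map's value type from int32 to int64 to match the map[string]int64 parameter of divideRemainingReplicas; note that the per-cluster product is still computed in int32 arithmetic before the conversion. A sketch of the proportional split that feeds the helper, using made-up numbers:

package main

import "fmt"

func main() {
	// Hypothetical inputs: clusters with 6, 3, and 1 available replicas
	// (clustersMaxReplicas = 10) and 7 replicas to place.
	available := []struct {
		name     string
		replicas int32
	}{{"member1", 6}, {"member2", 3}, {"member3", 1}}
	var replicas, clustersMaxReplicas int32 = 7, 10

	desireReplicaInfos := make(map[string]int64)
	allocatedReplicas := int32(0)
	for _, c := range available {
		desireReplicaInfos[c.name] = int64(c.replicas * replicas / clustersMaxReplicas)
		allocatedReplicas += int32(desireReplicaInfos[c.name])
	}
	// member1: 42/10 = 4, member2: 21/10 = 2, member3: 7/10 = 0.
	// 6 of 7 replicas are placed; the last one is left for the helper.
	fmt.Println(desireReplicaInfos, "remaining:", replicas-allocatedReplicas)
}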
@@ -156,12 +150,35 @@ func divideReplicasByAvailableReplica(clusterAvailableReplicas []workv1alpha2.TargetCluster
 	targetClusters := make([]workv1alpha2.TargetCluster, len(desireReplicaInfos))
 	i := 0
 	for key, value := range desireReplicaInfos {
-		targetClusters[i] = workv1alpha2.TargetCluster{Name: key, Replicas: value}
+		targetClusters[i] = workv1alpha2.TargetCluster{Name: key, Replicas: int32(value)}
 		i++
 	}
 	return targetClusters
 }
 
+// divideRemainingReplicas divides the remaining replicas among clusters and updates desiredReplicaInfos.
+func divideRemainingReplicas(remainingReplicas int, desiredReplicaInfos map[string]int64, clusterNames []string) {
+	if remainingReplicas <= 0 {
+		return
+	}
+
+	clusterSize := len(clusterNames)
+	if remainingReplicas < clusterSize {
+		for i := 0; i < remainingReplicas; i++ {
+			desiredReplicaInfos[clusterNames[i]]++
+		}
+	} else {
+		avg, residue := remainingReplicas/clusterSize, remainingReplicas%clusterSize
+		for i := 0; i < clusterSize; i++ {
+			if i < residue {
+				desiredReplicaInfos[clusterNames[i]] += int64(avg) + 1
+			} else {
+				desiredReplicaInfos[clusterNames[i]] += int64(avg)
+			}
+		}
+	}
+}
+
 func scaleScheduleByReplicaDivisionPreference(spec *workv1alpha2.ResourceBindingSpec, preference policyv1alpha1.ReplicaDivisionPreference,
 	preSelectedClusters []*clusterv1alpha1.Cluster) ([]workv1alpha2.TargetCluster, error) {
 	assignedReplicas := util.GetSumOfReplicas(spec.Clusters)
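To exercise the helper's arithmetic in isolation, here is a standalone sketch that copies divideRemainingReplicas from the hunk above (the package wrapper and sample data are illustrative):

package main

import "fmt"

// Copied from the patch so the remainder distribution can run standalone.
func divideRemainingReplicas(remainingReplicas int, desiredReplicaInfos map[string]int64, clusterNames []string) {
	if remainingReplicas <= 0 {
		return
	}
	clusterSize := len(clusterNames)
	if remainingReplicas < clusterSize {
		for i := 0; i < remainingReplicas; i++ {
			desiredReplicaInfos[clusterNames[i]]++
		}
	} else {
		avg, residue := remainingReplicas/clusterSize, remainingReplicas%clusterSize
		for i := 0; i < clusterSize; i++ {
			if i < residue {
				desiredReplicaInfos[clusterNames[i]] += int64(avg) + 1
			} else {
				desiredReplicaInfos[clusterNames[i]] += int64(avg)
			}
		}
	}
}

func main() {
	infos := map[string]int64{"member1": 2, "member2": 3, "member3": 4}
	names := []string{"member1", "member2", "member3"} // heaviest first
	divideRemainingReplicas(13, infos, names)          // avg 4, residue 1
	fmt.Println(infos) // map[member1:7 member2:7 member3:8]
}

The remainingReplicas < clusterSize branch is only a fast path: with avg equal to 0, the general branch would add the same +1 to the first residue names. Because callers pass clusterNames sorted by descending weight or availability, the residue always lands on the largest clusters first.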
@@ -172,6 +172,107 @@ func Test_divideReplicasByStaticWeight(t *testing.T) {
 	}
 }
 
+func Test_divideRemainingReplicas(t *testing.T) {
+	type args struct {
+		remainingReplicas   int
+		desiredReplicaInfos map[string]int64
+		clusterNames        []string
+	}
+	tests := []struct {
+		name string
+		args args
+		want map[string]int64
+	}{
+		{
+			name: "remainingReplicas 13",
+			args: args{
+				remainingReplicas: 13,
+				desiredReplicaInfos: map[string]int64{
+					ClusterMember1: 2,
+					ClusterMember2: 3,
+					ClusterMember3: 4,
+				},
+				clusterNames: []string{
+					ClusterMember1, ClusterMember2, ClusterMember3,
+				},
+			},
+			want: map[string]int64{
+				ClusterMember1: 7,
+				ClusterMember2: 7,
+				ClusterMember3: 8,
+			},
+		},
+		{
+			name: "remainingReplicas 17",
+			args: args{
+				remainingReplicas: 17,
+				desiredReplicaInfos: map[string]int64{
+					ClusterMember1: 4,
+					ClusterMember2: 3,
+					ClusterMember3: 2,
+				},
+				clusterNames: []string{
+					ClusterMember1, ClusterMember2, ClusterMember3,
+				},
+			},
+			want: map[string]int64{
+				ClusterMember1: 10,
+				ClusterMember2: 9,
+				ClusterMember3: 7,
+			},
+		},
+		{
+			name: "remainingReplicas 2",
+			args: args{
+				remainingReplicas: 2,
+				desiredReplicaInfos: map[string]int64{
+					ClusterMember1: 1,
+					ClusterMember2: 1,
+					ClusterMember3: 1,
+				},
+				clusterNames: []string{
+					ClusterMember1, ClusterMember2, ClusterMember3,
+				},
+			},
+			want: map[string]int64{
+				ClusterMember1: 2,
+				ClusterMember2: 2,
+				ClusterMember3: 1,
+			},
+		},
+		{
+			name: "remainingReplicas 0",
+			args: args{
+				remainingReplicas: 0,
+				desiredReplicaInfos: map[string]int64{
+					ClusterMember1: 3,
+					ClusterMember2: 3,
+					ClusterMember3: 3,
+				},
+				clusterNames: []string{
+					ClusterMember1, ClusterMember2, ClusterMember3,
+				},
+			},
+			want: map[string]int64{
+				ClusterMember1: 3,
+				ClusterMember2: 3,
+				ClusterMember3: 3,
+			},
+		},
+	}
+	IsTwoMapEqual := func(a, b map[string]int64) bool {
+		return a[ClusterMember1] == b[ClusterMember1] && a[ClusterMember2] == b[ClusterMember2] && a[ClusterMember3] == b[ClusterMember3]
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			divideRemainingReplicas(tt.args.remainingReplicas, tt.args.desiredReplicaInfos, tt.args.clusterNames)
+			if !IsTwoMapEqual(tt.args.desiredReplicaInfos, tt.want) {
+				t.Errorf("divideRemainingReplicas() got = %v, want %v", tt.args.desiredReplicaInfos, tt.want)
+			}
+		})
+	}
+}
+
 func Test_divideReplicasByPreference(t *testing.T) {
 	type args struct {
 		clusterAvailableReplicas []workv1alpha2.TargetCluster
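IsTwoMapEqual compares only the three fixture keys, which suffices here because divideRemainingReplicas never adds keys beyond those in clusterNames. Assuming these tests live in Karmada's scheduler core package (the file paths are not shown in this view), the new cases can be run on their own with:

go test ./pkg/scheduler/core/ -run Test_divideRemainingReplicas -v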