fix spreadconstraints[i].MaxGroups invalidation when scaling up replicas
Signed-off-by: huone1 <huwanxing@huawei.com>
parent e0e7a60c95
commit 75aa4ce0e6
@@ -48,7 +48,7 @@ func divideReplicasByResource(
 	} else if assignedReplicas < spec.Replicas {
 		// We need to enlarge the replicas in terms of the previous result (if exists).
 		// First scheduling is considered as a special kind of scaling up.
-		newTargetClusters, err := scaleUpScheduleByReplicaDivisionPreference(clusters, spec, preference, assignedReplicas)
+		newTargetClusters, err := scaleUpScheduleByReplicaDivisionPreference(clusters, spec, preference)
 		if err != nil {
 			return nil, fmt.Errorf("failed to scaleUp: %v", err)
 		}
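The assignedReplicas argument can be dropped at this call site because, as the next hunk shows, scaleUpScheduleByReplicaDivisionPreference now recomputes it from the clusters that already hold replicas via util.GetSumOfReplicas. Below is a minimal sketch of the summing behaviour that helper is assumed to have, using a local stand-in type instead of importing workv1alpha2.TargetCluster; the real helper lives in karmada's util package and may differ in detail.

package sketch

// TargetCluster is a local stand-in for workv1alpha2.TargetCluster.
type TargetCluster struct {
	Name     string
	Replicas int32
}

// sumOfReplicas illustrates what util.GetSumOfReplicas is assumed to do:
// add up the replicas already assigned across the scheduled clusters.
func sumOfReplicas(targets []TargetCluster) int32 {
	var sum int32
	for _, t := range targets {
		sum += t.Replicas
	}
	return sum
}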
@@ -208,31 +208,34 @@ func scaleUpScheduleByReplicaDivisionPreference(
 	clusters []*clusterv1alpha1.Cluster,
 	spec *workv1alpha2.ResourceBindingSpec,
 	preference policyv1alpha1.ReplicaDivisionPreference,
-	assignedReplicas int32,
 ) ([]workv1alpha2.TargetCluster, error) {
 	// Step 1: Find the clusters that have old replicas, so we can prefer to assign new replicas towards them.
-	scheduledClusterNames := findOutScheduledCluster(spec.Clusters, clusters)
+	scheduledClusters := findOutScheduledCluster(spec.Clusters, clusters)
 
-	// Step 2: Get how many replicas should be scheduled in this cycle and construct a new object if necessary
+	// Step 2: calculate the assigned Replicas in scheduledClusters
+	assignedReplicas := util.GetSumOfReplicas(scheduledClusters)
+
+	// Step 3: Get how many replicas should be scheduled in this cycle and construct a new object if necessary
 	newSpec := spec
 	if assignedReplicas > 0 {
 		newSpec = spec.DeepCopy()
 		newSpec.Replicas = spec.Replicas - assignedReplicas
 	}
 
-	// Step 3: Calculate available replicas of all candidates
+	// Step 4: Calculate available replicas of all candidates
 	clusterAvailableReplicas := calAvailableReplicas(clusters, newSpec)
 
-	// Step 4: Begin dividing.
+	// Step 5: Begin dividing.
 	// Only the new replicas are considered during this scheduler, the old replicas will not be moved.
 	// If not, the old replicas may be recreated which is not expected during scaling up.
 	// The parameter `scheduledClusterNames` is used to make sure that we assign new replicas to them preferentially
 	// so that all the replicas are aggregated.
-	result, err := divideReplicasByPreference(clusterAvailableReplicas, newSpec.Replicas, preference, scheduledClusterNames)
+	result, err := divideReplicasByPreference(clusterAvailableReplicas, newSpec.Replicas,
+		preference, util.ConvertToClusterNames(scheduledClusters))
 	if err != nil {
 		return result, err
 	}
 
-	// Step 5: Merge the result of previous and new results.
-	return util.MergeTargetClusters(spec.Clusters, result), nil
+	// Step 6: Merge the result of previous and new results.
+	return util.MergeTargetClusters(scheduledClusters, result), nil
 }
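Two helpers carry the rest of this hunk: util.ConvertToClusterNames turns the scheduled target list back into the name collection that divideReplicasByPreference expects, and util.MergeTargetClusters now merges the new division into scheduledClusters (the filtered, still-feasible clusters) rather than the raw spec.Clusters. The following is a rough sketch of the behaviour both helpers are assumed to have, again with a local TargetCluster stand-in and a plain map where the real code presumably uses sets.String; it is not the karmada implementation.

package sketch

// TargetCluster is a local stand-in for workv1alpha2.TargetCluster.
type TargetCluster struct {
	Name     string
	Replicas int32
}

// convertToClusterNames collects the names of the scheduled clusters so the
// divider can prefer them when placing new replicas.
func convertToClusterNames(targets []TargetCluster) map[string]struct{} {
	names := make(map[string]struct{}, len(targets))
	for _, t := range targets {
		names[t.Name] = struct{}{}
	}
	return names
}

// mergeTargetClusters adds the newly divided replicas onto the replicas each
// cluster already holds, producing the final old-plus-new assignment.
func mergeTargetClusters(scheduled, added []TargetCluster) []TargetCluster {
	totals := make(map[string]int32, len(scheduled)+len(added))
	for _, t := range scheduled {
		totals[t.Name] += t.Replicas
	}
	for _, t := range added {
		totals[t.Name] += t.Replicas
	}
	merged := make([]TargetCluster, 0, len(totals))
	for name, replicas := range totals {
		merged = append(merged, TargetCluster{Name: name, Replicas: replicas})
	}
	return merged
}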
@@ -818,9 +818,9 @@ func Test_scaleScheduling(t *testing.T) {
 				preference: policyv1alpha1.ReplicaDivisionPreferenceAggregated,
 			},
 			want: []workv1alpha2.TargetCluster{
-				{Name: ClusterMember1, Replicas: 7},
-				{Name: ClusterMember2, Replicas: 8},
-				{Name: ClusterMember4, Replicas: 9},
+				{Name: ClusterMember1, Replicas: 8},
+				{Name: ClusterMember3, Replicas: 6},
+				{Name: ClusterMember4, Replicas: 10},
 			},
 			wantErr: false,
 		},
@@ -75,13 +75,14 @@ func calAvailableReplicas(clusters []*clusterv1alpha1.Cluster, spec *workv1alpha
 	return availableTargetClusters
 }
 
-// findOutScheduledCluster will return a name set of clusters
-// which are a part of `feasibleClusters` and have non-zero replicas.
-func findOutScheduledCluster(tcs []workv1alpha2.TargetCluster, candidates []*clusterv1alpha1.Cluster) sets.String {
-	res := sets.NewString()
+// findOutScheduledCluster will return a slice of clusters
+// which are a part of `TargetClusters` and have non-zero replicas.
+func findOutScheduledCluster(tcs []workv1alpha2.TargetCluster, candidates []*clusterv1alpha1.Cluster) []workv1alpha2.TargetCluster {
+	validTarget := make([]workv1alpha2.TargetCluster, 0)
 	if len(tcs) == 0 {
-		return res
+		return validTarget
 	}
+
 	for _, targetCluster := range tcs {
 		// must have non-zero replicas
 		if targetCluster.Replicas <= 0 {
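The signature change above is the core of the fix as this diff presents it: a sets.String only records which clusters hold replicas, while []workv1alpha2.TargetCluster also keeps how many, which is what lets the caller recompute assignedReplicas and merge against the filtered targets. A tiny illustration of the information the old name set discards, using k8s.io/apimachinery's sets.String only to contrast with the new return type:

package sketch

import "k8s.io/apimachinery/pkg/util/sets"

// nameSetKeepsMembershipOnly shows why the old sets.String return value was
// not enough on its own: the set records names, so per-cluster replica
// counts had to be carried separately by the caller.
func nameSetKeepsMembershipOnly() bool {
	names := sets.NewString()
	names.Insert("member1") // the Replicas value is lost at this point
	return names.Has("member1")
}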
@@ -90,12 +91,13 @@ func findOutScheduledCluster(tcs []workv1alpha2.TargetCluster, candidates []*clu
 		// must in `candidates`
 		for _, cluster := range candidates {
 			if targetCluster.Name == cluster.Name {
-				res.Insert(targetCluster.Name)
+				validTarget = append(validTarget, targetCluster)
 				break
 			}
 		}
 	}
-	return res
+
+	return validTarget
 }
 
 // resortClusterList is used to make sure scheduledClusterNames are in front of the other clusters in the list of
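For context, here is a small self-contained example of how the reworked findOutScheduledCluster is expected to behave: entries with zero replicas, or whose cluster is no longer among the candidates, are dropped, and the surviving entries keep their replica counts for the later sum and merge steps. The types and names below are illustrative stand-ins, not the karmada implementation.

package sketch

// Minimal stand-ins for the karmada types used by findOutScheduledCluster.
type TargetCluster struct {
	Name     string
	Replicas int32
}

type Cluster struct {
	Name string
}

// filterScheduled mirrors the reworked behaviour: keep only targets with
// non-zero replicas whose cluster is still a candidate, preserving their
// replica counts.
func filterScheduled(targets []TargetCluster, candidates []Cluster) []TargetCluster {
	valid := make([]TargetCluster, 0)
	for _, t := range targets {
		if t.Replicas <= 0 {
			continue
		}
		for _, c := range candidates {
			if t.Name == c.Name {
				valid = append(valid, t)
				break
			}
		}
	}
	return valid
}

Given previous targets member1=2, member2=0 and member3=3, with candidates member1 and member2, this returns just member1 with 2 replicas: member2 is dropped for having zero replicas and member3 for no longer being a candidate.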