Merge pull request #4586 from jwcesign/dev
fix: reschedule the replicas of disappeared clusters in PP
commit ddfe903fb7
@@ -90,7 +90,21 @@ func newAssignState(candidates []*clusterv1alpha1.Cluster, placement *policyv1al
 }
 
 func (as *assignState) buildScheduledClusters() {
-	as.scheduledClusters = as.spec.Clusters
+	candidateClusterSet := sets.Set[string]{}
+	for _, c := range as.candidates {
+		candidateClusterSet.Insert(c.Name)
+	}
+	as.scheduledClusters = []workv1alpha2.TargetCluster{}
+	for _, c := range as.spec.Clusters {
+		// Ignore clusters that are no longer candidates, to ensure we can get real
+		// 'assigned' replicas from the previous schedule result. The ignored replicas
+		// will be treated as scaled-up replicas that will be assigned to other
+		// candidate clusters.
+		if !candidateClusterSet.Has(c.Name) {
+			continue
+		}
+		as.scheduledClusters = append(as.scheduledClusters, c)
+	}
 	as.assignedReplicas = util.GetSumOfReplicas(as.scheduledClusters)
 }
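The heart of the fix is the filtering above: replicas recorded for clusters that are no longer scheduling candidates are excluded from scheduledClusters, so assignedReplicas counts only replicas that can actually stay put, and the excluded ones fall out as scale-up to be redistributed. A minimal standalone sketch of the same idea, using a plain map in place of sets.Set[string] and a stripped-down stand-in for workv1alpha2.TargetCluster (targetCluster and filterScheduled below are illustrative names, not karmada code):

package main

import "fmt"

// targetCluster is an illustrative stand-in for the Name/Replicas pair
// carried by workv1alpha2.TargetCluster.
type targetCluster struct {
	Name     string
	Replicas int32
}

// filterScheduled keeps only the previously scheduled clusters that are
// still candidates, returning them plus the sum of their replicas.
func filterScheduled(scheduled []targetCluster, candidates []string) ([]targetCluster, int32) {
	candidateSet := make(map[string]struct{}, len(candidates))
	for _, name := range candidates {
		candidateSet[name] = struct{}{}
	}
	kept := make([]targetCluster, 0, len(scheduled))
	var assigned int32
	for _, c := range scheduled {
		// Replicas on disappeared clusters are dropped here on purpose;
		// the scale-up path later reassigns them to surviving candidates.
		if _, ok := candidateSet[c.Name]; !ok {
			continue
		}
		kept = append(kept, c)
		assigned += c.Replicas
	}
	return kept, assigned
}

func main() {
	scheduled := []targetCluster{{Name: "member1", Replicas: 6}, {Name: "member3", Replicas: 6}}
	candidates := []string{"member1", "member2"} // member3 has disappeared
	kept, assigned := filterScheduled(scheduled, candidates)
	fmt.Println(kept, assigned) // [{member1 6}] 6
}

With a desired total of 12, the 6 replicas previously on member3 are no longer counted as assigned, which is exactly what the new test case below exercises.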
@@ -580,8 +580,8 @@ func Test_dynamicScale(t *testing.T) {
 			},
 			want: []workv1alpha2.TargetCluster{
 				{Name: ClusterMember1, Replicas: 7},
 				{Name: ClusterMember2, Replicas: 8},
-				{Name: ClusterMember4, Replicas: 9},
 				{Name: ClusterMember3, Replicas: 6},
+				{Name: ClusterMember4, Replicas: 11},
 			},
 			wantErr: false,
 		},
@@ -785,6 +785,37 @@ func Test_dynamicScaleUp(t *testing.T) {
 			},
 			wantErr: true,
 		},
+		{
+			name: "replica 12, dynamic weight 3:3, with cluster3 disappeared and cluster2 appeared",
+			candidates: []*clusterv1alpha1.Cluster{
+				helper.NewClusterWithResource(ClusterMember1, corev1.ResourceList{
+					corev1.ResourcePods: *resource.NewQuantity(3, resource.DecimalSI),
+				}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
+				helper.NewClusterWithResource(ClusterMember2, corev1.ResourceList{
+					corev1.ResourcePods: *resource.NewQuantity(3, resource.DecimalSI),
+				}, util.EmptyResource().ResourceList(), util.EmptyResource().ResourceList()),
+			},
+			object: &workv1alpha2.ResourceBindingSpec{
+				ReplicaRequirements: &workv1alpha2.ReplicaRequirements{
+					ResourceRequest: util.EmptyResource().ResourceList(),
+				},
+				Clusters: []workv1alpha2.TargetCluster{
+					{Name: ClusterMember1, Replicas: 6},
+					{Name: ClusterMember3, Replicas: 6},
+				},
+				Replicas: 12,
+			},
+			placement: &policyv1alpha1.Placement{
+				ReplicaScheduling: dynamicWeightStrategy,
+			},
+			wants: [][]workv1alpha2.TargetCluster{
+				{
+					{Name: ClusterMember1, Replicas: 9},
+					{Name: ClusterMember2, Replicas: 3},
+				},
+			},
+			wantErr: false,
+		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
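The wants in the new case can be checked by hand: after filtering, only ClusterMember1's 6 previously assigned replicas survive (ClusterMember3 disappeared), leaving 12 - 6 = 6 scale-up replicas to split across the dynamic weights derived from available pods (3:3), i.e. 3 apiece, so ClusterMember1 ends at 6 + 3 = 9 and ClusterMember2 at 0 + 3 = 3. A toy version of that proportional split (splitByWeight is a hypothetical helper for illustration, not karmada's actual divider):

package main

import "fmt"

// splitByWeight divides total replicas across weights proportionally,
// handing any rounding remainder out one replica at a time. A toy
// stand-in for dynamic-weight division, not karmada's implementation.
func splitByWeight(total int32, weights []int32) []int32 {
	var sum int32
	for _, w := range weights {
		sum += w
	}
	out := make([]int32, len(weights))
	var given int32
	for i, w := range weights {
		out[i] = total * w / sum
		given += out[i]
	}
	for i := int32(0); i < total-given; i++ {
		out[i%int32(len(out))]++
	}
	return out
}

func main() {
	assigned := int32(6)                           // kept on ClusterMember1 after filtering
	scaleUp := int32(12) - assigned                // freed by the disappeared ClusterMember3
	extra := splitByWeight(scaleUp, []int32{3, 3}) // available pods on member1 and member2
	fmt.Println(assigned+extra[0], extra[1])       // 9 3, matching the wants above
}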