add test for static weight random remainder.
Signed-off-by: chaosi-zju <chaosi@zju.edu.cn>
parent e2c6ece93a
commit 1ebdf006e0
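Background for the change: when replicas are divided by static weights and the division leaves a remainder, the leftover replicas are handed out to randomly chosen clusters, so several distributions are equally valid (3 replicas weighted 1:1 may come out as 1:2 or 2:1). The sketch below illustrates the idea only; divideByStaticWeight and its rounding scheme are illustrative assumptions, not Karmada's actual implementation:

package main

import (
    "fmt"
    "math/rand"
)

// divideByStaticWeight is a hypothetical sketch of weighted division with a
// random remainder: each cluster first gets floor(replicas*weight/sum), then
// the leftover replicas go to clusters picked at random.
func divideByStaticWeight(replicas int, weights []int) []int {
    sum := 0
    for _, w := range weights {
        sum += w
    }
    res := make([]int, len(weights))
    assigned := 0
    for i, w := range weights {
        res[i] = replicas * w / sum
        assigned += res[i]
    }
    // Hand out the remainder one replica at a time to random clusters,
    // so 3 replicas weighted 1:1 can become 1:2 or 2:1.
    for _, i := range rand.Perm(len(weights))[:replicas-assigned] {
        res[i]++
    }
    return res
}

func main() {
    fmt.Println(divideByStaticWeight(3, []int{1, 1}))    // [1 2] or [2 1]
    fmt.Println(divideByStaticWeight(5, []int{1, 1, 1})) // e.g. [2 2 1], [2 1 2] or [1 2 2]
}

This randomness is why the test below keys its expectations on a set of acceptable results rather than a single one.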
@@ -17,6 +17,8 @@ limitations under the License.
 package core
 
 import (
+    "strconv"
+    "strings"
     "testing"
 
     clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
@@ -25,19 +27,44 @@ import (
     "github.com/karmada-io/karmada/test/helper"
 )
 
-// Test Case of even distribution of replicas, case 1
-// 1. create deployment (replicas=3), weight=1:1
-// 2. check two member cluster replicas, should be 2:1 or 1:2
-func Test_genericScheduler_AssignReplicas(t *testing.T) {
-    tests := []struct {
-        name      string
-        clusters  []*clusterv1alpha1.Cluster
-        placement *policyv1alpha1.Placement
-        object    *workv1alpha2.ResourceBindingSpec
-        wants     [][]workv1alpha2.TargetCluster
-        wantErr   bool
-    }{
+type testcase struct {
+    name     string
+    clusters []*clusterv1alpha1.Cluster
+    object   *workv1alpha2.ResourceBindingSpec
+    placement *policyv1alpha1.Placement
+    // changeCondForNextSchedule is set by cases that schedule twice; it changes the schedule condition before the second schedule
+    changeCondForNextSchedule func(tt *testcase)
+    // wants maps each possible first-schedule result to the acceptable second-schedule results
+    wants   map[string][]string
+    wantErr bool
+}
+
+var clusterToIndex = map[string]int{
+    ClusterMember1: 0,
+    ClusterMember2: 1,
+    ClusterMember3: 2,
+    ClusterMember4: 3,
+}
+
+// isScheduleResultEqual formats []workv1alpha2.TargetCluster as a string like "1:1:1" and compares it to the expected string
+func isScheduleResultEqual(tcs []workv1alpha2.TargetCluster, expect string) bool {
+    res := make([]string, len(tcs))
+    for _, cluster := range tcs {
+        idx := clusterToIndex[cluster.Name]
+        res[idx] = strconv.Itoa(int(cluster.Replicas))
+    }
+    actual := strings.Join(res, ":")
+    return actual == expect
+}
+
+// These are acceptance test cases given by QA for the requirement: dividing replicas evenly by static weight
+// https://github.com/karmada-io/karmada/issues/4220
+func Test_EvenDistributionOfReplicas(t *testing.T) {
+    tests := []*testcase{
         {
+            // Test Case No.1 of even distribution of replicas
+            // 1. create deployment (replicas=3), weight=1:1
+            // 2. check two member cluster replicas, should be 2:1 or 1:2
             name: "replica 3, static weighted 1:1",
             clusters: []*clusterv1alpha1.Cluster{
                 helper.NewCluster(ClusterMember1),
@@ -58,32 +85,85 @@ func Test_genericScheduler_AssignReplicas(t *testing.T) {
                     },
                 },
             },
-            wants: [][]workv1alpha2.TargetCluster{
-                {
-                    {Name: ClusterMember1, Replicas: 1},
-                    {Name: ClusterMember2, Replicas: 2},
-                },
-                {
-                    {Name: ClusterMember1, Replicas: 2},
-                    {Name: ClusterMember2, Replicas: 1},
-                },
-            },
+            changeCondForNextSchedule: nil,
+            wants: map[string][]string{
+                "1:2": {},
+                "2:1": {},
+            },
+            wantErr: false,
+        },
+        {
+            // Test Case No.2 of even distribution of replicas
+            // 1. create deployment (replicas=3), weight=1:1:1
+            // 2. check three member cluster replicas, should be 1:1:1
+            // 3. update replicas from 3 to 5
+            // 4. check three member cluster replicas, should be 2:2:1 or 2:1:2 or 1:2:2
+            name: "replica 3, static weighted 1:1:1, change replicas from 3 to 5",
+            clusters: []*clusterv1alpha1.Cluster{
+                helper.NewCluster(ClusterMember1),
+                helper.NewCluster(ClusterMember2),
+                helper.NewCluster(ClusterMember3),
+            },
+            object: &workv1alpha2.ResourceBindingSpec{
+                Replicas: 3,
+            },
+            placement: &policyv1alpha1.Placement{
+                ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{
+                    ReplicaSchedulingType:     policyv1alpha1.ReplicaSchedulingTypeDivided,
+                    ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceWeighted,
+                    WeightPreference: &policyv1alpha1.ClusterPreferences{
+                        StaticWeightList: []policyv1alpha1.StaticClusterWeight{
+                            {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember1}}, Weight: 1},
+                            {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember2}}, Weight: 1},
+                            {TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{ClusterMember3}}, Weight: 1},
+                        },
+                    },
+                },
+            },
+            changeCondForNextSchedule: func(tt *testcase) {
+                tt.object.Replicas = 5
+            },
+            wants: map[string][]string{
+                "1:1:1": {"2:2:1", "2:1:2", "1:2:2"},
+            },
             wantErr: false,
         },
     }
     for _, tt := range tests {
         t.Run(tt.name, func(t *testing.T) {
-            g := &genericScheduler{}
+            var g = &genericScheduler{}
+            var firstScheduleResult, secondScheduleResult string
+
+            // 1. schedule for the first time and check that the result is within tt.wants
             got, err := g.assignReplicas(tt.clusters, tt.placement, tt.object)
             if (err != nil) != tt.wantErr {
                 t.Errorf("AssignReplicas() error = %v, wantErr %v", err, tt.wantErr)
                 return
             }
-            if tt.wantErr {
+            for firstScheduleResult = range tt.wants {
+                if isScheduleResultEqual(got, firstScheduleResult) {
+                    break
+                }
+            }
+            if !isScheduleResultEqual(got, firstScheduleResult) {
+                t.Errorf("AssignReplicas() got = %v, wants %v", got, tt.wants)
                 return
             }
-            for _, want := range tt.wants {
-                if helper.IsScheduleResultEqual(got, want) {
+
+            // 2. change the schedule condition between the two schedules
+            if tt.changeCondForNextSchedule == nil {
+                return
+            }
+            tt.changeCondForNextSchedule(tt)
+
+            // 3. schedule for the second time and check that the result is within tt.wants[firstScheduleResult]
+            got, err = g.assignReplicas(tt.clusters, tt.placement, tt.object)
+            if (err != nil) != tt.wantErr {
+                t.Errorf("AssignReplicas() error = %v, wantErr %v", err, tt.wantErr)
+                return
+            }
+            for _, secondScheduleResult = range tt.wants[firstScheduleResult] {
+                if isScheduleResultEqual(got, secondScheduleResult) {
                     return
                 }
             }
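A note on the matching helper introduced above: isScheduleResultEqual canonicalizes a schedule result by cluster index before comparing, so an expectation like "1:2" matches regardless of the order in which target clusters are returned. A self-contained sketch of that logic (the TargetCluster stub stands in for workv1alpha2.TargetCluster, an assumption made only to keep the example runnable):

package main

import (
    "fmt"
    "strconv"
    "strings"
)

// TargetCluster is a stub of workv1alpha2.TargetCluster for illustration.
type TargetCluster struct {
    Name     string
    Replicas int32
}

var clusterToIndex = map[string]int{"member1": 0, "member2": 1}

// isScheduleResultEqual mirrors the helper from the diff: it renders the
// result as a weight-ordered string like "1:2" and compares it to expect.
func isScheduleResultEqual(tcs []TargetCluster, expect string) bool {
    res := make([]string, len(tcs))
    for _, cluster := range tcs {
        res[clusterToIndex[cluster.Name]] = strconv.Itoa(int(cluster.Replicas))
    }
    return strings.Join(res, ":") == expect
}

func main() {
    got := []TargetCluster{{Name: "member2", Replicas: 2}, {Name: "member1", Replicas: 1}}
    // Order-independent: the string is keyed by cluster index, so "1:2" matches.
    fmt.Println(isScheduleResultEqual(got, "1:2")) // true
    fmt.Println(isScheduleResultEqual(got, "2:1")) // false
}

Because tt.wants is a map, the test's `for firstScheduleResult = range tt.wants` probes the acceptable first results in arbitrary order; the break keeps whichever key matched, so the second schedule can then be checked against tt.wants[firstScheduleResult].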