add karmada-descheduler test
Signed-off-by: Garrybest <garrybest@foxmail.com>
parent 85d8a6ccf4
commit 513071d9eb
@@ -0,0 +1,169 @@
package options

import (
	"testing"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/validation/field"
	componentbaseconfig "k8s.io/component-base/config"
)

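// TestValidateKarmadaDescheduler exercises Options.Validate: first with option
// sets that should pass, then with table-driven cases where a single field is invalid.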
func TestValidateKarmadaDescheduler(t *testing.T) {
	successCases := []Options{
		{
			LeaderElection: componentbaseconfig.LeaderElectionConfiguration{
				LeaderElect: false,
			},
			BindAddress:               "127.0.0.1",
			SecurePort:                9000,
			KubeAPIQPS:                40,
			KubeAPIBurst:              30,
			SchedulerEstimatorTimeout: metav1.Duration{Duration: 1 * time.Second},
			SchedulerEstimatorPort:    9001,
			DeschedulingInterval:      metav1.Duration{Duration: 1 * time.Second},
			UnschedulableThreshold:    metav1.Duration{Duration: 1 * time.Second},
		},
		{
			LeaderElection: componentbaseconfig.LeaderElectionConfiguration{
				LeaderElect: true,
			},
			BindAddress:               "127.0.0.1",
			SecurePort:                9000,
			KubeAPIQPS:                40,
			KubeAPIBurst:              30,
			SchedulerEstimatorTimeout: metav1.Duration{Duration: 1 * time.Second},
			SchedulerEstimatorPort:    9001,
			DeschedulingInterval:      metav1.Duration{Duration: 1 * time.Second},
			UnschedulableThreshold:    metav1.Duration{Duration: 1 * time.Second},
		},
		{
			LeaderElection: componentbaseconfig.LeaderElectionConfiguration{
				LeaderElect: false,
			},
			BindAddress:  "127.0.0.1",
			SecurePort:   9000,
			KubeAPIQPS:   40,
			KubeAPIBurst: 30,
		},
	}

	for _, successCase := range successCases {
		if errs := successCase.Validate(); len(errs) != 0 {
			t.Errorf("expected success: %v", errs)
		}
	}

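	// Each case below invalidates exactly one field and expects the matching
	// error from Validate().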
	newPath := field.NewPath("Options")
	testCases := map[string]struct {
		opt          Options
		expectedErrs field.ErrorList
	}{
		"invalid BindAddress": {
			opt: Options{
				LeaderElection: componentbaseconfig.LeaderElectionConfiguration{
					LeaderElect: false,
				},
				BindAddress:               "127.0.0.1:8080",
				SecurePort:                9000,
				KubeAPIQPS:                40,
				KubeAPIBurst:              30,
				SchedulerEstimatorTimeout: metav1.Duration{Duration: 1 * time.Second},
				SchedulerEstimatorPort:    9001,
				DeschedulingInterval:      metav1.Duration{Duration: 1 * time.Second},
				UnschedulableThreshold:    metav1.Duration{Duration: 1 * time.Second},
			},
			expectedErrs: field.ErrorList{field.Invalid(newPath.Child("BindAddress"), "127.0.0.1:8080", "not a valid textual representation of an IP address")},
		},
		"invalid SecurePort": {
			opt: Options{
				LeaderElection: componentbaseconfig.LeaderElectionConfiguration{
					LeaderElect: false,
				},
				BindAddress:               "127.0.0.1",
				SecurePort:                90000,
				KubeAPIQPS:                40,
				KubeAPIBurst:              30,
				SchedulerEstimatorTimeout: metav1.Duration{Duration: 1 * time.Second},
				SchedulerEstimatorPort:    9001,
				DeschedulingInterval:      metav1.Duration{Duration: 1 * time.Second},
				UnschedulableThreshold:    metav1.Duration{Duration: 1 * time.Second},
			},
			expectedErrs: field.ErrorList{field.Invalid(newPath.Child("SecurePort"), 90000, "must be a valid port between 0 and 65535 inclusive")},
		},
		"invalid SchedulerEstimatorPort": {
			opt: Options{
				LeaderElection: componentbaseconfig.LeaderElectionConfiguration{
					LeaderElect: false,
				},
				BindAddress:               "127.0.0.1",
				SecurePort:                9000,
				KubeAPIQPS:                40,
				KubeAPIBurst:              30,
				SchedulerEstimatorTimeout: metav1.Duration{Duration: 1 * time.Second},
				SchedulerEstimatorPort:    90000,
				DeschedulingInterval:      metav1.Duration{Duration: 1 * time.Second},
				UnschedulableThreshold:    metav1.Duration{Duration: 1 * time.Second},
			},
			expectedErrs: field.ErrorList{field.Invalid(newPath.Child("SchedulerEstimatorPort"), 90000, "must be a valid port between 0 and 65535 inclusive")},
		},
		"invalid SchedulerEstimatorTimeout": {
			opt: Options{
				LeaderElection: componentbaseconfig.LeaderElectionConfiguration{
					LeaderElect: false,
				},
				BindAddress:               "127.0.0.1",
				SecurePort:                9000,
				KubeAPIQPS:                40,
				KubeAPIBurst:              30,
				SchedulerEstimatorTimeout: metav1.Duration{Duration: -1 * time.Second},
				SchedulerEstimatorPort:    9000,
				DeschedulingInterval:      metav1.Duration{Duration: 1 * time.Second},
				UnschedulableThreshold:    metav1.Duration{Duration: 1 * time.Second},
			},
			expectedErrs: field.ErrorList{field.Invalid(newPath.Child("SchedulerEstimatorTimeout"), metav1.Duration{Duration: -1 * time.Second}, "must be greater than or equal to 0")},
		},
		"invalid DeschedulingInterval": {
			opt: Options{
				LeaderElection: componentbaseconfig.LeaderElectionConfiguration{
					LeaderElect: false,
				},
				BindAddress:               "127.0.0.1",
				SecurePort:                9000,
				KubeAPIQPS:                40,
				KubeAPIBurst:              30,
				SchedulerEstimatorTimeout: metav1.Duration{Duration: 1 * time.Second},
				SchedulerEstimatorPort:    9000,
				DeschedulingInterval:      metav1.Duration{Duration: -1 * time.Second},
				UnschedulableThreshold:    metav1.Duration{Duration: 1 * time.Second},
			},
			expectedErrs: field.ErrorList{field.Invalid(newPath.Child("DeschedulingInterval"), metav1.Duration{Duration: -1 * time.Second}, "must be greater than or equal to 0")},
		},
		"invalid UnschedulableThreshold": {
			opt: Options{
				LeaderElection: componentbaseconfig.LeaderElectionConfiguration{
					LeaderElect: false,
				},
				BindAddress:               "127.0.0.1",
				SecurePort:                9000,
				KubeAPIQPS:                40,
				KubeAPIBurst:              30,
				SchedulerEstimatorTimeout: metav1.Duration{Duration: 1 * time.Second},
				SchedulerEstimatorPort:    9000,
				DeschedulingInterval:      metav1.Duration{Duration: 1 * time.Second},
				UnschedulableThreshold:    metav1.Duration{Duration: -1 * time.Second},
			},
			expectedErrs: field.ErrorList{field.Invalid(newPath.Child("UnschedulableThreshold"), metav1.Duration{Duration: -1 * time.Second}, "must be greater than or equal to 0")},
		},
	}

	for _, testCase := range testCases {
		errs := testCase.opt.Validate()
		if len(testCase.expectedErrs) != len(errs) {
			t.Fatalf("Expected %d errors, got %d errors: %v", len(testCase.expectedErrs), len(errs), errs)
		}
		for i, err := range errs {
			if err.Error() != testCase.expectedErrs[i].Error() {
				t.Fatalf("Expected error: %s, got %s", testCase.expectedErrs[i], err.Error())
			}
		}
	}
}

@@ -0,0 +1,520 @@
package descheduler

import (
	"context"
	"reflect"
	"testing"
	"time"

	"github.com/stretchr/testify/mock"
	"google.golang.org/grpc"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/record"

	workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
	estimatorclient "github.com/karmada-io/karmada/pkg/estimator/client"
	"github.com/karmada-io/karmada/pkg/estimator/pb"
	estimatorservice "github.com/karmada-io/karmada/pkg/estimator/service"
	fakekarmadaclient "github.com/karmada-io/karmada/pkg/generated/clientset/versioned/fake"
	informerfactory "github.com/karmada-io/karmada/pkg/generated/informers/externalversions"
	"github.com/karmada-io/karmada/pkg/util"
	"github.com/karmada-io/karmada/pkg/util/helper"
)

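// buildBinding returns a ResourceBinding with the given name and namespace whose
// spec targets the given clusters and whose aggregated status reports each
// status entry's Replicas as ready replicas.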
func buildBinding(name, ns string, target, status []workv1alpha2.TargetCluster) (*workv1alpha2.ResourceBinding, error) {
	bindingStatus := workv1alpha2.ResourceBindingStatus{}
	for _, cluster := range status {
		statusMap := map[string]interface{}{
			util.ReadyReplicasField: cluster.Replicas,
		}
		raw, err := helper.BuildStatusRawExtension(statusMap)
		if err != nil {
			return nil, err
		}
		bindingStatus.AggregatedStatus = append(bindingStatus.AggregatedStatus, workv1alpha2.AggregatedStatusItem{
			ClusterName: cluster.Name,
			Status:      raw,
			Applied:     true,
		})
	}
	return &workv1alpha2.ResourceBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: ns,
		},
		Spec: workv1alpha2.ResourceBindingSpec{
			Clusters: target,
		},
		Status: bindingStatus,
	}, nil
}

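// TestDescheduler_worker feeds a ResourceBinding and per-cluster unschedulable
// replica counts (served by mocked estimator clients) to the descheduler worker
// and checks the scheduled replicas written back to the binding spec.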
func TestDescheduler_worker(t *testing.T) {
	type args struct {
		target        []workv1alpha2.TargetCluster
		status        []workv1alpha2.TargetCluster
		unschedulable []workv1alpha2.TargetCluster
		name          string
		namespace     string
	}
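	// In each case, target is the scheduled replicas per cluster, status is the
	// ready replicas reported in the binding status, and unschedulable is what
	// the mocked estimator returns for that cluster.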
	tests := []struct {
		name         string
		args         args
		wantResponse []workv1alpha2.TargetCluster
		wantErr      bool
	}{
		{
			name: "1 cluster without unschedulable replicas",
			args: args{
				target: []workv1alpha2.TargetCluster{
					{
						Name:     "member1",
						Replicas: 5,
					},
				},
				status: []workv1alpha2.TargetCluster{
					{
						Name:     "member1",
						Replicas: 5,
					},
				},
				unschedulable: []workv1alpha2.TargetCluster{
					{
						Name:     "member1",
						Replicas: 0,
					},
				},
				name:      "foo",
				namespace: "default",
			},
			wantResponse: []workv1alpha2.TargetCluster{
				{
					Name:     "member1",
					Replicas: 5,
				},
			},
			wantErr: false,
		},
		{
			name: "1 cluster with 1 unschedulable replica",
			args: args{
				target: []workv1alpha2.TargetCluster{
					{
						Name:     "member1",
						Replicas: 5,
					},
				},
				status: []workv1alpha2.TargetCluster{
					{
						Name:     "member1",
						Replicas: 4,
					},
				},
				unschedulable: []workv1alpha2.TargetCluster{
					{
						Name:     "member1",
						Replicas: 1,
					},
				},
				name:      "foo",
				namespace: "default",
			},
			wantResponse: []workv1alpha2.TargetCluster{
				{
					Name:     "member1",
					Replicas: 4,
				},
			},
			wantErr: false,
		},
		{
			name: "1 cluster with all unschedulable replicas",
			args: args{
				target: []workv1alpha2.TargetCluster{
					{
						Name:     "member1",
						Replicas: 5,
					},
				},
				status: []workv1alpha2.TargetCluster{
					{
						Name:     "member1",
						Replicas: 0,
					},
				},
				unschedulable: []workv1alpha2.TargetCluster{
					{
						Name:     "member1",
						Replicas: 5,
					},
				},
				name:      "foo",
				namespace: "default",
			},
			wantResponse: []workv1alpha2.TargetCluster{
				{
					Name:     "member1",
					Replicas: 0,
				},
			},
			wantErr: false,
		},
		{
			name: "1 cluster with 4 ready replicas and 2 unschedulable replicas",
			args: args{
				target: []workv1alpha2.TargetCluster{
					{
						Name:     "member1",
						Replicas: 5,
					},
				},
				status: []workv1alpha2.TargetCluster{
					{
						Name:     "member1",
						Replicas: 4,
					},
				},
				unschedulable: []workv1alpha2.TargetCluster{
					{
						Name:     "member1",
						Replicas: 2,
					},
				},
				name:      "foo",
				namespace: "default",
			},
			wantResponse: []workv1alpha2.TargetCluster{
				{
					Name:     "member1",
					Replicas: 4,
				},
			},
			wantErr: false,
		},
		{
			name: "1 cluster with 0 ready replicas and 2 unschedulable replicas",
			args: args{
				target: []workv1alpha2.TargetCluster{
					{
						Name:     "member1",
						Replicas: 5,
					},
				},
				status: []workv1alpha2.TargetCluster{
					{
						Name:     "member1",
						Replicas: 0,
					},
				},
				unschedulable: []workv1alpha2.TargetCluster{
					{
						Name:     "member1",
						Replicas: 2,
					},
				},
				name:      "foo",
				namespace: "default",
			},
			wantResponse: []workv1alpha2.TargetCluster{
				{
					Name:     "member1",
					Replicas: 3,
				},
			},
			wantErr: false,
		},
		{
			name: "1 cluster with 6 ready replicas and 2 unschedulable replicas",
			args: args{
				target: []workv1alpha2.TargetCluster{
					{
						Name:     "member1",
						Replicas: 5,
					},
				},
				status: []workv1alpha2.TargetCluster{
					{
						Name:     "member1",
						Replicas: 6,
					},
				},
				unschedulable: []workv1alpha2.TargetCluster{
					{
						Name:     "member1",
						Replicas: 2,
					},
				},
				name:      "foo",
				namespace: "default",
			},
			wantResponse: []workv1alpha2.TargetCluster{
				{
					Name:     "member1",
					Replicas: 5,
				},
			},
			wantErr: false,
		},
		{
			name: "2 clusters without unschedulable replicas",
			args: args{
				target: []workv1alpha2.TargetCluster{
					{
						Name:     "member1",
						Replicas: 5,
					},
					{
						Name:     "member2",
						Replicas: 10,
					},
				},
				status: []workv1alpha2.TargetCluster{
					{
						Name:     "member1",
						Replicas: 5,
					},
					{
						Name:     "member2",
						Replicas: 10,
					},
				},
				unschedulable: []workv1alpha2.TargetCluster{
					{
						Name:     "member1",
						Replicas: 0,
					},
					{
						Name:     "member2",
						Replicas: 0,
					},
				},
				name:      "foo",
				namespace: "default",
			},
			wantResponse: []workv1alpha2.TargetCluster{
				{
					Name:     "member1",
					Replicas: 5,
				},
				{
					Name:     "member2",
					Replicas: 10,
				},
			},
			wantErr: false,
		},
		{
			name: "2 clusters with 1 unschedulable replica",
			args: args{
				target: []workv1alpha2.TargetCluster{
					{
						Name:     "member1",
						Replicas: 5,
					},
					{
						Name:     "member2",
						Replicas: 10,
					},
				},
				status: []workv1alpha2.TargetCluster{
					{
						Name:     "member1",
						Replicas: 5,
					},
					{
						Name:     "member2",
						Replicas: 9,
					},
				},
				unschedulable: []workv1alpha2.TargetCluster{
					{
						Name:     "member1",
						Replicas: 0,
					},
					{
						Name:     "member2",
						Replicas: 1,
					},
				},
				name:      "foo",
				namespace: "default",
			},
			wantResponse: []workv1alpha2.TargetCluster{
				{
					Name:     "member1",
					Replicas: 5,
				},
				{
					Name:     "member2",
					Replicas: 9,
				},
			},
			wantErr: false,
		},
		{
			name: "2 clusters with unschedulable replicas in every cluster",
			args: args{
				target: []workv1alpha2.TargetCluster{
					{
						Name:     "member1",
						Replicas: 5,
					},
					{
						Name:     "member2",
						Replicas: 10,
					},
				},
				status: []workv1alpha2.TargetCluster{
					{
						Name:     "member1",
						Replicas: 2,
					},
					{
						Name:     "member2",
						Replicas: 3,
					},
				},
				unschedulable: []workv1alpha2.TargetCluster{
					{
						Name:     "member1",
						Replicas: 3,
					},
					{
						Name:     "member2",
						Replicas: 7,
					},
				},
				name:      "foo",
				namespace: "default",
			},
			wantResponse: []workv1alpha2.TargetCluster{
				{
					Name:     "member1",
					Replicas: 2,
				},
				{
					Name:     "member2",
					Replicas: 3,
				},
			},
			wantErr: false,
		},
		{
			name: "2 clusters with 1 cluster status missing",
			args: args{
				target: []workv1alpha2.TargetCluster{
					{
						Name:     "member1",
						Replicas: 5,
					},
					{
						Name:     "member2",
						Replicas: 10,
					},
				},
				status: []workv1alpha2.TargetCluster{
					{
						Name:     "member1",
						Replicas: 2,
					},
				},
				unschedulable: []workv1alpha2.TargetCluster{
					{
						Name:     "member1",
						Replicas: 3,
					},
					{
						Name:     "member2",
						Replicas: 7,
					},
				},
				name:      "foo",
				namespace: "default",
			},
			wantResponse: []workv1alpha2.TargetCluster{
				{
					Name:     "member1",
					Replicas: 2,
				},
				{
					Name:     "member2",
					Replicas: 3,
				},
			},
			wantErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ctx, cancel := context.WithCancel(context.TODO())
			defer cancel()

			binding, err := buildBinding(tt.args.name, tt.args.namespace, tt.args.target, tt.args.status)
			if err != nil {
				t.Fatalf("build binding error: %v", err)
			}

			karmadaClient := fakekarmadaclient.NewSimpleClientset(binding)
			factory := informerfactory.NewSharedInformerFactory(karmadaClient, 0)

			desched := &Descheduler{
				KarmadaClient:           karmadaClient,
				informerFactory:         factory,
				bindingInformer:         factory.Work().V1alpha2().ResourceBindings().Informer(),
				bindingLister:           factory.Work().V1alpha2().ResourceBindings().Lister(),
				clusterInformer:         factory.Cluster().V1alpha1().Clusters().Informer(),
				clusterLister:           factory.Cluster().V1alpha1().Clusters().Lister(),
				schedulerEstimatorCache: estimatorclient.NewSchedulerEstimatorCache(),
				unschedulableThreshold:  5 * time.Minute,
				eventRecorder:           record.NewFakeRecorder(1024),
			}
			schedulerEstimator := estimatorclient.NewSchedulerEstimator(desched.schedulerEstimatorCache, 5*time.Second)
			estimatorclient.RegisterSchedulerEstimator(schedulerEstimator)

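			// Register a mock estimator client for each cluster that reports the
			// unschedulable replica count defined in the test case.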
			for _, c := range tt.args.unschedulable {
				cluster := c
				mockClient := &estimatorservice.MockEstimatorClient{}
				mockResultFn := func(
					ctx context.Context,
					in *pb.UnschedulableReplicasRequest,
					opts ...grpc.CallOption,
				) *pb.UnschedulableReplicasResponse {
					return &pb.UnschedulableReplicasResponse{
						UnschedulableReplicas: cluster.Replicas,
					}
				}
				mockClient.On(
					"GetUnschedulableReplicas",
					mock.MatchedBy(func(context.Context) bool { return true }),
					mock.MatchedBy(func(in *pb.UnschedulableReplicasRequest) bool { return in.Cluster == cluster.Name }),
				).Return(mockResultFn, nil)
				desched.schedulerEstimatorCache.AddCluster(cluster.Name, nil, mockClient)
			}

			desched.informerFactory.Start(ctx.Done())
			if !cache.WaitForCacheSync(ctx.Done(), desched.bindingInformer.HasSynced) {
				t.Fatalf("Failed to wait for cache sync")
			}

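			// Run the worker once against the binding key, then compare the
			// clusters written back to the binding spec with the expectation.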
			key, err := cache.MetaNamespaceKeyFunc(binding)
			if err != nil {
				t.Fatalf("Failed to get key of binding: %v", err)
			}
			if err := desched.worker(key); (err != nil) != tt.wantErr {
				t.Errorf("worker() error = %v, wantErr %v", err, tt.wantErr)
				return
			}

			binding, err = desched.KarmadaClient.WorkV1alpha2().ResourceBindings(tt.args.namespace).Get(ctx, tt.args.name, metav1.GetOptions{})
			if err != nil {
				t.Errorf("Failed to get binding: %v", err)
				return
			}
			gotResponse := binding.Spec.Clusters
			if !reflect.DeepEqual(gotResponse, tt.wantResponse) {
				t.Errorf("descheduler worker() gotResponse = %v, want %v", gotResponse, tt.wantResponse)
			}
		})
	}
}