/*
Copyright 2021 The Karmada Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package server

import (
	"context"
	"reflect"
	"testing"

	"google.golang.org/grpc/metadata"
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	discoveryfake "k8s.io/client-go/discovery/fake"
	dynamicfake "k8s.io/client-go/dynamic/fake"
	"k8s.io/client-go/kubernetes/fake"
	coretesting "k8s.io/client-go/testing"

	"github.com/karmada-io/karmada/cmd/scheduler-estimator/app/options"
	"github.com/karmada-io/karmada/pkg/estimator/pb"
	"github.com/karmada-io/karmada/pkg/util"
	testhelper "github.com/karmada-io/karmada/test/helper"
)

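// TestAccurateSchedulerEstimatorServer_MaxAvailableReplicas verifies the
// estimator's MaxAvailableReplicas computation against fake nodes and pods,
// covering plain resource requests as well as node selectors, node affinity,
// and tolerations.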
func TestAccurateSchedulerEstimatorServer_MaxAvailableReplicas(t *testing.T) {
	opt := &options.Options{
		ClusterName: "fake",
	}
	type args struct {
		request *pb.MaxAvailableReplicasRequest
	}
	tests := []struct {
		name         string
		objs         []runtime.Object
		args         args
		wantResponse *pb.MaxAvailableReplicasResponse
		wantErr      bool
	}{
		{
			name: "normal",
			// node 1 left: 2 cpu, 6 mem, 8 pod, 14 storage
			// node 2 left: 3 cpu, 5 mem, 9 pod, 12 storage
			// node 3 left: 8 cpu, 16 mem, 11 pod, 16 storage
			objs: []runtime.Object{
				testhelper.NewNode("machine1", 8*testhelper.ResourceUnitCPU, 16*testhelper.ResourceUnitMem, 11*testhelper.ResourceUnitPod, 16*testhelper.ResourceUnitEphemeralStorage),
				testhelper.NewNode("machine2", 8*testhelper.ResourceUnitCPU, 16*testhelper.ResourceUnitMem, 11*testhelper.ResourceUnitPod, 16*testhelper.ResourceUnitEphemeralStorage),
				testhelper.NewNode("machine3", 8*testhelper.ResourceUnitCPU, 16*testhelper.ResourceUnitMem, 11*testhelper.ResourceUnitPod, 16*testhelper.ResourceUnitEphemeralStorage),
				testhelper.NewPodWithRequest("pod1", "machine1", 1*testhelper.ResourceUnitCPU, 3*testhelper.ResourceUnitMem, testhelper.ResourceUnitZero),
				testhelper.NewPodWithRequest("pod2", "machine1", 3*testhelper.ResourceUnitCPU, 3*testhelper.ResourceUnitMem, testhelper.ResourceUnitZero),
				testhelper.NewPodWithRequest("pod3", "machine1", 2*testhelper.ResourceUnitCPU, 4*testhelper.ResourceUnitMem, 2*testhelper.ResourceUnitEphemeralStorage),
				testhelper.NewPodWithRequest("pod4", "machine2", 4*testhelper.ResourceUnitCPU, 8*testhelper.ResourceUnitMem, 2*testhelper.ResourceUnitEphemeralStorage),
				testhelper.NewPodWithRequest("pod5", "machine2", 1*testhelper.ResourceUnitCPU, 3*testhelper.ResourceUnitMem, 2*testhelper.ResourceUnitEphemeralStorage),
			},
			// request 1 cpu, 2 mem
			args: args{
				request: &pb.MaxAvailableReplicasRequest{
					Cluster: "fake",
					ReplicaRequirements: pb.ReplicaRequirements{
						ResourceRequest: testhelper.NewResourceList(1*testhelper.ResourceUnitCPU, 2*testhelper.ResourceUnitMem, testhelper.ResourceUnitZero),
					},
				},
			},
			wantResponse: &pb.MaxAvailableReplicasResponse{
				MaxReplicas: 12,
			},
			wantErr: false,
		},
		{
			name: "pod resource strict",
			// node 1 left: 2 cpu, 6 mem, 1 pod, 14 storage
			// node 2 left: 3 cpu, 5 mem, 1 pod, 12 storage
			// node 3 left: 8 cpu, 16 mem, 11 pod, 16 storage
			objs: []runtime.Object{
				testhelper.NewNode("machine1", 8*testhelper.ResourceUnitCPU, 16*testhelper.ResourceUnitMem, 4*testhelper.ResourceUnitPod, 16*testhelper.ResourceUnitEphemeralStorage),
				testhelper.NewNode("machine2", 8*testhelper.ResourceUnitCPU, 16*testhelper.ResourceUnitMem, 3*testhelper.ResourceUnitPod, 16*testhelper.ResourceUnitEphemeralStorage),
				testhelper.NewNode("machine3", 8*testhelper.ResourceUnitCPU, 16*testhelper.ResourceUnitMem, 11*testhelper.ResourceUnitPod, 16*testhelper.ResourceUnitEphemeralStorage),
				testhelper.NewPodWithRequest("pod1", "machine1", 1*testhelper.ResourceUnitCPU, 3*testhelper.ResourceUnitMem, testhelper.ResourceUnitZero),
				testhelper.NewPodWithRequest("pod2", "machine1", 3*testhelper.ResourceUnitCPU, 3*testhelper.ResourceUnitMem, testhelper.ResourceUnitZero),
				testhelper.NewPodWithRequest("pod3", "machine1", 2*testhelper.ResourceUnitCPU, 4*testhelper.ResourceUnitMem, 2*testhelper.ResourceUnitEphemeralStorage),
				testhelper.NewPodWithRequest("pod4", "machine2", 4*testhelper.ResourceUnitCPU, 8*testhelper.ResourceUnitMem, 2*testhelper.ResourceUnitEphemeralStorage),
				testhelper.NewPodWithRequest("pod5", "machine2", 1*testhelper.ResourceUnitCPU, 3*testhelper.ResourceUnitMem, 2*testhelper.ResourceUnitEphemeralStorage),
			},
			// request 1 cpu, 2 mem
			args: args{
				request: &pb.MaxAvailableReplicasRequest{
					Cluster: "fake",
					ReplicaRequirements: pb.ReplicaRequirements{
						ResourceRequest: testhelper.NewResourceList(1*testhelper.ResourceUnitCPU, 2*testhelper.ResourceUnitMem, testhelper.ResourceUnitZero),
					},
				},
			},
			wantResponse: &pb.MaxAvailableReplicasResponse{
				MaxReplicas: 10,
			},
			wantErr: false,
		},
		{
			name: "request with node selector",
			// node 1(with label: a = 1) left: 2 cpu, 6 mem, 8 pod, 14 storage
			// node 2(with label: a = 3; b = 2) left: 3 cpu, 5 mem, 9 pod, 12 storage
			// node 3(without labels) left: 8 cpu, 16 mem, 11 pod, 16 storage
			objs: []runtime.Object{
				testhelper.MakeNodeWithLabels("machine1", 8*testhelper.ResourceUnitCPU, 16*testhelper.ResourceUnitMem, 11*testhelper.ResourceUnitPod, 16*testhelper.ResourceUnitEphemeralStorage, map[string]string{"a": "1"}),
				testhelper.MakeNodeWithLabels("machine2", 8*testhelper.ResourceUnitCPU, 16*testhelper.ResourceUnitMem, 11*testhelper.ResourceUnitPod, 16*testhelper.ResourceUnitEphemeralStorage, map[string]string{"a": "3", "b": "2"}),
				testhelper.NewNode("machine3", 8*testhelper.ResourceUnitCPU, 16*testhelper.ResourceUnitMem, 11*testhelper.ResourceUnitPod, 16*testhelper.ResourceUnitEphemeralStorage),
				testhelper.NewPodWithRequest("pod1", "machine1", 1*testhelper.ResourceUnitCPU, 3*testhelper.ResourceUnitMem, testhelper.ResourceUnitZero),
				testhelper.NewPodWithRequest("pod2", "machine1", 3*testhelper.ResourceUnitCPU, 3*testhelper.ResourceUnitMem, testhelper.ResourceUnitZero),
				testhelper.NewPodWithRequest("pod3", "machine1", 2*testhelper.ResourceUnitCPU, 4*testhelper.ResourceUnitMem, 2*testhelper.ResourceUnitEphemeralStorage),
				testhelper.NewPodWithRequest("pod4", "machine2", 4*testhelper.ResourceUnitCPU, 8*testhelper.ResourceUnitMem, 2*testhelper.ResourceUnitEphemeralStorage),
				testhelper.NewPodWithRequest("pod5", "machine2", 1*testhelper.ResourceUnitCPU, 3*testhelper.ResourceUnitMem, 2*testhelper.ResourceUnitEphemeralStorage),
			},
			// request 1 cpu, 2 mem and with node label a = 3
			args: args{
				request: &pb.MaxAvailableReplicasRequest{
					Cluster: "fake",
					ReplicaRequirements: pb.ReplicaRequirements{
						NodeClaim: &pb.NodeClaim{
							NodeSelector: map[string]string{
								"a": "3",
							},
						},
						ResourceRequest: testhelper.NewResourceList(1*testhelper.ResourceUnitCPU, 2*testhelper.ResourceUnitMem, testhelper.ResourceUnitZero),
					},
				},
			},
			wantResponse: &pb.MaxAvailableReplicasResponse{
				MaxReplicas: 2,
			},
			wantErr: false,
		},
		{
			name: "request with node affinity",
			// node 1(with label: a = 1) left: 2 cpu, 6 mem, 8 pod, 14 storage
			// node 2(with label: a = 3; b = 2) left: 3 cpu, 5 mem, 9 pod, 12 storage
			// node 3(without labels) left: 8 cpu, 16 mem, 11 pod, 16 storage
			objs: []runtime.Object{
				testhelper.MakeNodeWithLabels("machine1", 8*testhelper.ResourceUnitCPU, 16*testhelper.ResourceUnitMem, 11*testhelper.ResourceUnitPod, 16*testhelper.ResourceUnitEphemeralStorage, map[string]string{"a": "1"}),
				testhelper.MakeNodeWithLabels("machine2", 8*testhelper.ResourceUnitCPU, 16*testhelper.ResourceUnitMem, 11*testhelper.ResourceUnitPod, 16*testhelper.ResourceUnitEphemeralStorage, map[string]string{"a": "3", "b": "2"}),
				testhelper.NewNode("machine3", 8*testhelper.ResourceUnitCPU, 16*testhelper.ResourceUnitMem, 11*testhelper.ResourceUnitPod, 16*testhelper.ResourceUnitEphemeralStorage),
				testhelper.NewPodWithRequest("pod1", "machine1", 1*testhelper.ResourceUnitCPU, 3*testhelper.ResourceUnitMem, testhelper.ResourceUnitZero),
				testhelper.NewPodWithRequest("pod2", "machine1", 3*testhelper.ResourceUnitCPU, 3*testhelper.ResourceUnitMem, testhelper.ResourceUnitZero),
				testhelper.NewPodWithRequest("pod3", "machine1", 2*testhelper.ResourceUnitCPU, 4*testhelper.ResourceUnitMem, 2*testhelper.ResourceUnitEphemeralStorage),
				testhelper.NewPodWithRequest("pod4", "machine2", 4*testhelper.ResourceUnitCPU, 8*testhelper.ResourceUnitMem, 2*testhelper.ResourceUnitEphemeralStorage),
				testhelper.NewPodWithRequest("pod5", "machine2", 1*testhelper.ResourceUnitCPU, 3*testhelper.ResourceUnitMem, 2*testhelper.ResourceUnitEphemeralStorage),
			},
			// request 1 cpu, 2 mem and with node label a > 0
			args: args{
				request: &pb.MaxAvailableReplicasRequest{
					Cluster: "fake",
					ReplicaRequirements: pb.ReplicaRequirements{
						NodeClaim: &pb.NodeClaim{
							NodeAffinity: &corev1.NodeSelector{
								NodeSelectorTerms: []corev1.NodeSelectorTerm{
									{
										MatchExpressions: []corev1.NodeSelectorRequirement{
											{
												Key:      "a",
												Operator: corev1.NodeSelectorOpGt,
												Values:   []string{"0"},
											},
										},
									},
								},
							},
						},
						ResourceRequest: testhelper.NewResourceList(1*testhelper.ResourceUnitCPU, 2*testhelper.ResourceUnitMem, testhelper.ResourceUnitZero),
					},
				},
			},
			wantResponse: &pb.MaxAvailableReplicasResponse{
				MaxReplicas: 4,
			},
			wantErr: false,
		},
		{
			name: "request with tolerations",
			// node 1(with taint: key1 = value1) left: 2 cpu, 6 mem, 8 pod, 14 storage
			// node 2(with taint: key2 = value2) left: 3 cpu, 5 mem, 9 pod, 12 storage
			// node 3(without taints) left: 8 cpu, 16 mem, 11 pod, 16 storage
			objs: []runtime.Object{
				testhelper.MakeNodeWithTaints("machine1", 8*testhelper.ResourceUnitCPU, 16*testhelper.ResourceUnitMem, 11*testhelper.ResourceUnitPod, 16*testhelper.ResourceUnitEphemeralStorage, []corev1.Taint{{Key: "key1", Value: "value1", Effect: corev1.TaintEffectNoSchedule}}),
				testhelper.MakeNodeWithTaints("machine2", 8*testhelper.ResourceUnitCPU, 16*testhelper.ResourceUnitMem, 11*testhelper.ResourceUnitPod, 16*testhelper.ResourceUnitEphemeralStorage, []corev1.Taint{{Key: "key2", Value: "value2", Effect: corev1.TaintEffectNoSchedule}}),
				testhelper.NewNode("machine3", 8*testhelper.ResourceUnitCPU, 16*testhelper.ResourceUnitMem, 11*testhelper.ResourceUnitPod, 16*testhelper.ResourceUnitEphemeralStorage),
				testhelper.NewPodWithRequest("pod1", "machine1", 1*testhelper.ResourceUnitCPU, 3*testhelper.ResourceUnitMem, testhelper.ResourceUnitZero),
				testhelper.NewPodWithRequest("pod2", "machine1", 3*testhelper.ResourceUnitCPU, 3*testhelper.ResourceUnitMem, testhelper.ResourceUnitZero),
				testhelper.NewPodWithRequest("pod3", "machine1", 2*testhelper.ResourceUnitCPU, 4*testhelper.ResourceUnitMem, 2*testhelper.ResourceUnitEphemeralStorage),
				testhelper.NewPodWithRequest("pod4", "machine2", 4*testhelper.ResourceUnitCPU, 8*testhelper.ResourceUnitMem, 2*testhelper.ResourceUnitEphemeralStorage),
				testhelper.NewPodWithRequest("pod5", "machine2", 1*testhelper.ResourceUnitCPU, 3*testhelper.ResourceUnitMem, 2*testhelper.ResourceUnitEphemeralStorage),
			},
			// request 1 cpu, 2 mem and tolerate the taint key1 = value1
			args: args{
				request: &pb.MaxAvailableReplicasRequest{
					Cluster: "fake",
					ReplicaRequirements: pb.ReplicaRequirements{
						NodeClaim: &pb.NodeClaim{
							Tolerations: []corev1.Toleration{
								{Key: "key1", Operator: corev1.TolerationOpEqual, Value: "value1"},
							},
						},
						ResourceRequest: testhelper.NewResourceList(1*testhelper.ResourceUnitCPU, 2*testhelper.ResourceUnitMem, testhelper.ResourceUnitZero),
					},
				},
			},
			wantResponse: &pb.MaxAvailableReplicasResponse{
				MaxReplicas: 10,
			},
			wantErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()

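			// Wire up fake dynamic and discovery clients so the estimator can
			// resolve the apps/v1 deployments resource without a real API server.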
			gvrToListKind := map[schema.GroupVersionResource]string{
				{Group: "apps", Version: "v1", Resource: "deployments"}: "DeploymentList",
			}
			dynamicClient := dynamicfake.NewSimpleDynamicClientWithCustomListKinds(runtime.NewScheme(), gvrToListKind)
			discoveryClient := &discoveryfake.FakeDiscovery{
				Fake: &coretesting.Fake{},
			}
			discoveryClient.Resources = []*metav1.APIResourceList{
				{
					GroupVersion: appsv1.SchemeGroupVersion.String(),
					APIResources: []metav1.APIResource{
						{Name: "deployments", Namespaced: true, Kind: "Deployment"},
					},
				},
			}

			es, _ := NewEstimatorServer(fake.NewSimpleClientset(tt.objs...), dynamicClient, discoveryClient, opt, ctx.Done())

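			// Start the informers and block until caches sync so the estimator's
			// listers observe every fixture object before the RPC is issued.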
			es.informerFactory.Start(ctx.Done())
			es.informerFactory.WaitForCacheSync(ctx.Done())
			es.informerManager.WaitForCacheSync()

			gotResponse, err := es.MaxAvailableReplicas(ctx, tt.args.request)
			if (err != nil) != tt.wantErr {
				t.Errorf("MaxAvailableReplicas() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(gotResponse, tt.wantResponse) {
				t.Errorf("MaxAvailableReplicas() gotResponse = %v, want %v", gotResponse, tt.wantResponse)
			}
		})
	}
}

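// BenchmarkAccurateSchedulerEstimatorServer_MaxAvailableReplicas measures
// MaxAvailableReplicas on synthetic clusters of up to 5000 nodes and 100,000
// pods, with and without affinity and toleration constraints.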
func BenchmarkAccurateSchedulerEstimatorServer_MaxAvailableReplicas(b *testing.B) {
	opt := &options.Options{
		ClusterName: "fake",
	}
	type args struct {
		request *pb.MaxAvailableReplicasRequest
	}
	tests := []struct {
		name         string
		allNodesNum  int
		allPodsNum   int
		nodeTemplate *corev1.Node
		podTemplate  *corev1.Pod
		args         args
	}{
		{
			name:         "500 nodes and 10,000 pods without affinity and tolerations",
			allNodesNum:  500,
			allPodsNum:   10000,
			nodeTemplate: testhelper.NewNode("", 100*testhelper.ResourceUnitCPU, 200*testhelper.ResourceUnitMem, 110*testhelper.ResourceUnitPod, 200*testhelper.ResourceUnitEphemeralStorage),
			podTemplate:  testhelper.NewPodWithRequest("", "", 2*testhelper.ResourceUnitCPU, 3*testhelper.ResourceUnitMem, 4*testhelper.ResourceUnitEphemeralStorage),
			// request 1 cpu, 2 mem
			args: args{
				request: &pb.MaxAvailableReplicasRequest{
					Cluster: "fake",
					ReplicaRequirements: pb.ReplicaRequirements{
						ResourceRequest: testhelper.NewResourceList(1*testhelper.ResourceUnitCPU, 2*testhelper.ResourceUnitMem, testhelper.ResourceUnitZero),
					},
				},
			},
		},
		{
			name:         "5000 nodes and 100,000 pods without affinity and tolerations",
			allNodesNum:  5000,
			allPodsNum:   100000,
			nodeTemplate: testhelper.NewNode("", 100*testhelper.ResourceUnitCPU, 200*testhelper.ResourceUnitMem, 110*testhelper.ResourceUnitPod, 200*testhelper.ResourceUnitEphemeralStorage),
			podTemplate:  testhelper.NewPodWithRequest("", "", 2*testhelper.ResourceUnitCPU, 3*testhelper.ResourceUnitMem, 4*testhelper.ResourceUnitEphemeralStorage),
			// request 1 cpu, 2 mem
			args: args{
				request: &pb.MaxAvailableReplicasRequest{
					Cluster: "fake",
					ReplicaRequirements: pb.ReplicaRequirements{
						ResourceRequest: testhelper.NewResourceList(1*testhelper.ResourceUnitCPU, 2*testhelper.ResourceUnitMem, testhelper.ResourceUnitZero),
					},
				},
			},
		},
		{
			name:         "5000 nodes and 100,000 pods with taint and tolerations",
			allNodesNum:  5000,
			allPodsNum:   100000,
			nodeTemplate: testhelper.MakeNodeWithTaints("", 100*testhelper.ResourceUnitCPU, 200*testhelper.ResourceUnitMem, 110*testhelper.ResourceUnitPod, 200*testhelper.ResourceUnitEphemeralStorage, []corev1.Taint{{Key: "key1", Value: "value1", Effect: corev1.TaintEffectNoSchedule}}),
			podTemplate:  testhelper.NewPodWithRequest("", "", 2*testhelper.ResourceUnitCPU, 3*testhelper.ResourceUnitMem, 4*testhelper.ResourceUnitEphemeralStorage),
			// request 1 cpu, 2 mem
			args: args{
				request: &pb.MaxAvailableReplicasRequest{
					Cluster: "fake",
					ReplicaRequirements: pb.ReplicaRequirements{
						NodeClaim: &pb.NodeClaim{
							Tolerations: []corev1.Toleration{
								{Key: "key1", Operator: corev1.TolerationOpEqual, Value: "value1"},
							},
						},
						ResourceRequest: testhelper.NewResourceList(1*testhelper.ResourceUnitCPU, 2*testhelper.ResourceUnitMem, testhelper.ResourceUnitZero),
					},
				},
			},
		},
		{
			name:         "5000 nodes and 100,000 pods with node affinity and tolerations",
			allNodesNum:  5000,
			allPodsNum:   100000,
			nodeTemplate: testhelper.MakeNodeWithLabels("", 100*testhelper.ResourceUnitCPU, 200*testhelper.ResourceUnitMem, 110*testhelper.ResourceUnitPod, 200*testhelper.ResourceUnitEphemeralStorage, map[string]string{"a": "1"}),
			podTemplate:  testhelper.NewPodWithRequest("", "", 2*testhelper.ResourceUnitCPU, 3*testhelper.ResourceUnitMem, 4*testhelper.ResourceUnitEphemeralStorage),
			// request 1 cpu, 2 mem
			args: args{
				request: &pb.MaxAvailableReplicasRequest{
					Cluster: "fake",
					ReplicaRequirements: pb.ReplicaRequirements{
						NodeClaim: &pb.NodeClaim{
							NodeAffinity: &corev1.NodeSelector{
								NodeSelectorTerms: []corev1.NodeSelectorTerm{
									{
										MatchExpressions: []corev1.NodeSelectorRequirement{
											{
												Key:      "a",
												Operator: corev1.NodeSelectorOpGt,
												Values:   []string{"0"},
											},
										},
									},
								},
							},
							Tolerations: []corev1.Toleration{
								{Key: "key1", Operator: corev1.TolerationOpEqual, Value: "value1"},
							},
						},
						ResourceRequest: testhelper.NewResourceList(1*testhelper.ResourceUnitCPU, 2*testhelper.ResourceUnitMem, testhelper.ResourceUnitZero),
					},
				},
			},
		},
	}
	for _, tt := range tests {
		b.Run(tt.name, func(b *testing.B) {
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()

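			// Attach the object name as incoming gRPC metadata, mimicking what
			// the estimator would receive over a real gRPC connection.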
			ctx = metadata.NewIncomingContext(ctx, metadata.Pairs(string(util.ContextKeyObject), "fake"))

			gvrToListKind := map[schema.GroupVersionResource]string{
				{Group: "apps", Version: "v1", Resource: "deployments"}: "DeploymentList",
			}
			dynamicClient := dynamicfake.NewSimpleDynamicClientWithCustomListKinds(runtime.NewScheme(), gvrToListKind)
			discoveryClient := &discoveryfake.FakeDiscovery{
				Fake: &coretesting.Fake{},
			}
			discoveryClient.Resources = []*metav1.APIResourceList{
				{
					GroupVersion: appsv1.SchemeGroupVersion.String(),
					APIResources: []metav1.APIResource{
						{Name: "deployments", Namespaced: true, Kind: "Deployment"},
					},
				},
			}
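			// Expand the templates into tt.allNodesNum nodes and tt.allPodsNum
			// pods to seed the fake cluster at benchmark scale.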
			nodes, pods := testhelper.MakeNodesAndPods(tt.allNodesNum, tt.allPodsNum, tt.nodeTemplate, tt.podTemplate)
			objs := make([]runtime.Object, 0, len(nodes)+len(pods))
			for _, node := range nodes {
				objs = append(objs, node)
			}
			for _, pod := range pods {
				objs = append(objs, pod)
			}

			es, _ := NewEstimatorServer(fake.NewSimpleClientset(objs...), dynamicClient, discoveryClient, opt, ctx.Done())

			es.informerFactory.Start(ctx.Done())
			es.informerFactory.WaitForCacheSync(ctx.Done())
			es.informerManager.WaitForCacheSync()

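			// Exclude fixture construction and cache sync from the measured time.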
			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				_, err := es.MaxAvailableReplicas(ctx, tt.args.request)
				if err != nil {
					b.Fatalf("MaxAvailableReplicas() error = %v", err)
					return
				}
			}
		})
	}
}