add benchmark in estimator
Signed-off-by: Garrybest <garrybest@foxmail.com>
parent 989f3fe8fb
commit b86461e82f
@@ -291,7 +291,7 @@ func traceMaxAvailableReplicas(object string, start time.Time, request *pb.MaxAv
 			klog.Errorf("Failed to calculate cluster available replicas: %v", *err)
 			return
 		}
-		klog.Infof("Finish calculating cluster available replicas of resource(%s), max replicas: %d, time elapsed: %s", object, (*response).MaxReplicas, time.Since(start))
+		klog.V(2).Infof("Finish calculating cluster available replicas of resource(%s), max replicas: %d, time elapsed: %s", object, (*response).MaxReplicas, time.Since(start))
 	}
 }
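Note on the shape of this helper: it returns a closure, and response and err are passed as pointers so the closure reads their final values only when it runs, which fits the usual defer trace(...)() pattern at the top of an RPC handler. A minimal runnable sketch of that pattern (simplified signature and a hypothetical caller, not the estimator's actual code):

package main

import (
	"fmt"
	"time"
)

// trace mirrors the helper's shape: capture the start time now, then read
// the (possibly updated) result and error when the returned closure runs.
func trace(object string, start time.Time, result *int32, err *error) func() {
	return func() {
		if *err != nil {
			fmt.Printf("Failed for %s: %v\n", object, *err)
			return
		}
		fmt.Printf("Finished %s, result: %d, time elapsed: %s\n", object, *result, time.Since(start))
	}
}

func handle(object string) (result int32, err error) {
	// The closure is built immediately (fixing start) but runs on return,
	// so it observes the final values of result and err.
	defer trace(object, time.Now(), &result, &err)()
	result = 42
	return result, nil
}

func main() {
	handle("fake/deployment")
}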
|
@@ -304,6 +304,6 @@ func traceGetUnschedulableReplicas(object string, start time.Time, request *pb.U
 			klog.Errorf("Failed to detect cluster unschedulable replicas: %v", *err)
 			return
 		}
-		klog.Infof("Finish detecting cluster unschedulable replicas of resource(%s), unschedulable replicas: %d, time elapsed: %s", object, (*response).UnschedulableReplicas, time.Since(start))
+		klog.V(2).Infof("Finish detecting cluster unschedulable replicas of resource(%s), unschedulable replicas: %d, time elapsed: %s", object, (*response).UnschedulableReplicas, time.Since(start))
 	}
 }
|
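The change in both hunks above is the same: the per-request "Finish ..." logs move from klog.Infof to klog.V(2).Infof, so they are emitted only at verbosity 2 or higher — presumably so a benchmark driving many requests does not flood the default log output. A minimal sketch of klog's verbosity gating, assuming k8s.io/klog/v2:

package main

import (
	"flag"

	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil) // registers -v and friends on the default flag set
	_ = flag.Set("v", "1")
	flag.Parse()

	klog.Infof("always emitted")                 // unconditional
	klog.V(2).Infof("emitted only when -v >= 2") // suppressed at -v=1
	klog.Flush()
}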
@@ -6,6 +6,7 @@ import (
 	"reflect"
 	"testing"
 
+	"google.golang.org/grpc/metadata"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -18,6 +19,7 @@ import (
 
 	"github.com/karmada-io/karmada/cmd/scheduler-estimator/app/options"
 	"github.com/karmada-io/karmada/pkg/estimator/pb"
+	"github.com/karmada-io/karmada/pkg/util"
 	testhelper "github.com/karmada-io/karmada/test/helper"
 )
|
@@ -245,3 +247,156 @@ func TestAccurateSchedulerEstimatorServer_MaxAvailableReplicas(t *testing.T) {
 		})
 	}
 }
+
+func BenchmarkAccurateSchedulerEstimatorServer_MaxAvailableReplicas(b *testing.B) {
+	opt := &options.Options{
+		ClusterName: "fake",
+	}
+	type args struct {
+		request *pb.MaxAvailableReplicasRequest
+	}
+	tests := []struct {
+		name         string
+		allNodesNum  int
+		allPodsNum   int
+		nodeTemplate *corev1.Node
+		podTemplate  *corev1.Pod
+		args         args
+	}{
+		{
+			name:         "500 nodes and 10,000 pods without affinity and tolerations",
+			allNodesNum:  500,
+			allPodsNum:   10000,
+			nodeTemplate: testhelper.NewNode("", 100*testhelper.ResourceUnitCPU, 200*testhelper.ResourceUnitMem, 110*testhelper.ResourceUnitPod, 200*testhelper.ResourceUnitEphemeralStorage),
+			podTemplate:  testhelper.NewPodWithRequest("", "", 2*testhelper.ResourceUnitCPU, 3*testhelper.ResourceUnitMem, 4*testhelper.ResourceUnitEphemeralStorage),
+			// request 1 cpu, 2 mem
+			args: args{
+				request: &pb.MaxAvailableReplicasRequest{
+					Cluster: "fake",
+					ReplicaRequirements: pb.ReplicaRequirements{
+						ResourceRequest: testhelper.NewResourceList(1*testhelper.ResourceUnitCPU, 2*testhelper.ResourceUnitMem, testhelper.ResourceUnitZero),
+					},
+				},
+			},
+		},
+		{
+			name:         "5000 nodes and 100,000 pods without affinity and tolerations",
+			allNodesNum:  5000,
+			allPodsNum:   100000,
+			nodeTemplate: testhelper.NewNode("", 100*testhelper.ResourceUnitCPU, 200*testhelper.ResourceUnitMem, 110*testhelper.ResourceUnitPod, 200*testhelper.ResourceUnitEphemeralStorage),
+			podTemplate:  testhelper.NewPodWithRequest("", "", 2*testhelper.ResourceUnitCPU, 3*testhelper.ResourceUnitMem, 4*testhelper.ResourceUnitEphemeralStorage),
+			// request 1 cpu, 2 mem
+			args: args{
+				request: &pb.MaxAvailableReplicasRequest{
+					Cluster: "fake",
+					ReplicaRequirements: pb.ReplicaRequirements{
+						ResourceRequest: testhelper.NewResourceList(1*testhelper.ResourceUnitCPU, 2*testhelper.ResourceUnitMem, testhelper.ResourceUnitZero),
+					},
+				},
+			},
+		},
+		{
+			name:         "5000 nodes and 100,000 pods with taint and tolerations",
+			allNodesNum:  5000,
+			allPodsNum:   100000,
+			nodeTemplate: testhelper.MakeNodeWithTaints("", 100*testhelper.ResourceUnitCPU, 200*testhelper.ResourceUnitMem, 110*testhelper.ResourceUnitPod, 200*testhelper.ResourceUnitEphemeralStorage, []corev1.Taint{{Key: "key1", Value: "value1", Effect: corev1.TaintEffectNoSchedule}}),
+			podTemplate:  testhelper.NewPodWithRequest("", "", 2*testhelper.ResourceUnitCPU, 3*testhelper.ResourceUnitMem, 4*testhelper.ResourceUnitEphemeralStorage),
+			// request 1 cpu, 2 mem
+			args: args{
+				request: &pb.MaxAvailableReplicasRequest{
+					Cluster: "fake",
+					ReplicaRequirements: pb.ReplicaRequirements{
+						NodeClaim: &pb.NodeClaim{
+							Tolerations: []corev1.Toleration{
+								{Key: "key1", Operator: corev1.TolerationOpEqual, Value: "value1"},
+							},
+						},
+						ResourceRequest: testhelper.NewResourceList(1*testhelper.ResourceUnitCPU, 2*testhelper.ResourceUnitMem, testhelper.ResourceUnitZero),
+					},
+				},
+			},
+		},
+		{
+			name:         "5000 nodes and 100,000 pods with node affinity and tolerations",
+			allNodesNum:  5000,
+			allPodsNum:   100000,
+			nodeTemplate: testhelper.MakeNodeWithLabels("", 100*testhelper.ResourceUnitCPU, 200*testhelper.ResourceUnitMem, 110*testhelper.ResourceUnitPod, 200*testhelper.ResourceUnitEphemeralStorage, map[string]string{"a": "1"}),
+			podTemplate:  testhelper.NewPodWithRequest("", "", 2*testhelper.ResourceUnitCPU, 3*testhelper.ResourceUnitMem, 4*testhelper.ResourceUnitEphemeralStorage),
+			// request 1 cpu, 2 mem
+			args: args{
+				request: &pb.MaxAvailableReplicasRequest{
+					Cluster: "fake",
+					ReplicaRequirements: pb.ReplicaRequirements{
+						NodeClaim: &pb.NodeClaim{
+							NodeAffinity: &corev1.NodeSelector{
+								NodeSelectorTerms: []corev1.NodeSelectorTerm{
+									{
+										MatchExpressions: []corev1.NodeSelectorRequirement{
+											{
+												Key:      "a",
+												Operator: corev1.NodeSelectorOpGt,
+												Values:   []string{"0"},
+											},
+										},
+									},
+								},
+							},
+							Tolerations: []corev1.Toleration{
+								{Key: "key1", Operator: corev1.TolerationOpEqual, Value: "value1"},
+							},
+						},
+						ResourceRequest: testhelper.NewResourceList(1*testhelper.ResourceUnitCPU, 2*testhelper.ResourceUnitMem, testhelper.ResourceUnitZero),
+					},
+				},
+			},
+		},
+	}
+	for _, tt := range tests {
+		b.Run(tt.name, func(b *testing.B) {
+			ctx, cancel := context.WithCancel(context.Background())
+			defer cancel()
+
+			ctx = metadata.NewIncomingContext(ctx, metadata.Pairs(string(util.ContextKeyObject), "fake"))
+
+			gvrToListKind := map[schema.GroupVersionResource]string{
+				{Group: "apps", Version: "v1", Resource: "deployments"}: "DeploymentList",
+			}
+			dynamicClient := dynamicfake.NewSimpleDynamicClientWithCustomListKinds(runtime.NewScheme(), gvrToListKind)
+			discoveryClient := &discoveryfake.FakeDiscovery{
+				Fake: &coretesting.Fake{},
+			}
+			discoveryClient.Resources = []*metav1.APIResourceList{
+				{
+					GroupVersion: appsv1.SchemeGroupVersion.String(),
+					APIResources: []metav1.APIResource{
+						{Name: "deployments", Namespaced: true, Kind: "Deployment"},
+					},
+				},
+			}
+			nodes, pods := testhelper.MakeNodesAndPods(tt.allNodesNum, tt.allPodsNum, tt.nodeTemplate, tt.podTemplate)
+			objs := make([]runtime.Object, 0, len(nodes)+len(pods))
+			for _, node := range nodes {
+				objs = append(objs, node)
+			}
+			for _, pod := range pods {
+				objs = append(objs, pod)
+			}
+
+			es := NewEstimatorServer(fake.NewSimpleClientset(objs...), dynamicClient, discoveryClient, opt, ctx.Done())
+
+			es.informerFactory.Start(ctx.Done())
+			if !es.waitForCacheSync(ctx.Done()) {
+				b.Fatalf("MaxAvailableReplicas() error = %v", fmt.Errorf("failed to wait for cache sync"))
+			}
+
+			b.ResetTimer()
+			for i := 0; i < b.N; i++ {
+				_, err := es.MaxAvailableReplicas(ctx, tt.args.request)
+				if err != nil {
+					b.Fatalf("MaxAvailableReplicas() error = %v", err)
+					return
+				}
+			}
+		})
+	}
+}
|
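To run only the new benchmark, the standard go test flags apply; the package path below is an assumption, since the diff does not name the test file's directory:

go test -run='^$' -bench=BenchmarkAccurateSchedulerEstimatorServer_MaxAvailableReplicas -benchmem ./pkg/estimator/server/

-run='^$' skips the unit tests so only the benchmark executes, and -benchmem reports allocations alongside ns/op. Note that b.ResetTimer() in the benchmark excludes the fake-cluster setup and cache sync from the measured time, so the numbers reflect only MaxAvailableReplicas itself.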
@@ -321,6 +321,30 @@ func NewNode(node string, milliCPU, memory, pods, ephemeralStorage int64) *corev
 	}
 }
 
+// MakeNodesAndPods will make batch of nodes and pods based on template.
+func MakeNodesAndPods(allNodesNum, allPodsNum int, nodeTemplate *corev1.Node, podTemplate *corev1.Pod) ([]*corev1.Node, []*corev1.Pod) {
+	nodes := make([]*corev1.Node, 0, allNodesNum)
+	pods := make([]*corev1.Pod, 0, allPodsNum)
+
+	avg, residue := allPodsNum/allNodesNum, allPodsNum%allNodesNum
+	for i := 0; i < allNodesNum; i++ {
+		node := nodeTemplate.DeepCopy()
+		node.Name = fmt.Sprintf("node-%d", i)
+		nodes = append(nodes, node)
+		num := avg
+		if i < residue {
+			num++
+		}
+		for j := 0; j < num; j++ {
+			pod := podTemplate.DeepCopy()
+			pod.Name = fmt.Sprintf("node-%d-%d", i, j)
+			pod.Spec.NodeName = node.Name
+			pods = append(pods, pod)
+		}
+	}
+	return nodes, pods
+}
+
 // MakeNodeWithLabels will build a ready node with resource and labels.
 func MakeNodeWithLabels(node string, milliCPU, memory, pods, ephemeralStorage int64, labels map[string]string) *corev1.Node {
 	return &corev1.Node{
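A small usage sketch for the new helper (a hypothetical standalone program; the templates are built with the same helpers the benchmark uses). MakeNodesAndPods spreads pods as evenly as possible: with 10 pods over 3 nodes, avg=3 and residue=1, so node-0 receives one extra pod:

package main

import (
	"fmt"

	testhelper "github.com/karmada-io/karmada/test/helper"
)

func main() {
	// Name fields are left empty; MakeNodesAndPods fills them in
	// (node-<i> for nodes, node-<i>-<j> for pods) and sets Spec.NodeName.
	nodeTemplate := testhelper.NewNode("", 100*testhelper.ResourceUnitCPU, 200*testhelper.ResourceUnitMem, 110*testhelper.ResourceUnitPod, 200*testhelper.ResourceUnitEphemeralStorage)
	podTemplate := testhelper.NewPodWithRequest("", "", 2*testhelper.ResourceUnitCPU, 3*testhelper.ResourceUnitMem, 4*testhelper.ResourceUnitEphemeralStorage)

	nodes, pods := testhelper.MakeNodesAndPods(3, 10, nodeTemplate, podTemplate)
	fmt.Println(len(nodes), len(pods))               // 3 10
	fmt.Println(pods[3].Name, pods[3].Spec.NodeName) // node-0-3 node-0 (node-0 gets 4 pods)
	fmt.Println(pods[4].Name, pods[4].Spec.NodeName) // node-1-0 node-1 (the rest get 3)
}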
|