add scheduler estimator test
Signed-off-by: Garrybest <garrybest@foxmail.com>
parent a87fbb98ac
commit 35b02bc497
@@ -4,7 +4,6 @@ import (
	"context"
	"fmt"
	"net"
	"net/http"
	"time"

	"github.com/kr/pretty"

@@ -39,7 +38,6 @@ type AccurateSchedulerEstimatorServer struct {
	nodeLister listv1.NodeLister
	podLister  listv1.PodLister
	getPodFunc func(nodeName string) ([]*corev1.Pod, error)
	httpServer *http.Server
}

// NewEstimatorServer creates an instance of AccurateSchedulerEstimatorServer.
@@ -120,9 +118,6 @@ func (es *AccurateSchedulerEstimatorServer) Start(ctx context.Context) error {
	go func() {
		<-stopCh
		s.GracefulStop()
		if err := es.httpServer.Shutdown(context.Background()); nil != err {
			klog.Fatalf("server shutdown failed, err: %v\n", err)
		}
	}()

	// Start the gRPC server.
@@ -0,0 +1,225 @@
package server

import (
	"context"
	"fmt"
	"reflect"
	"testing"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/fake"

	"github.com/karmada-io/karmada/cmd/scheduler-estimator/app/options"
	"github.com/karmada-io/karmada/pkg/estimator/pb"
	testhelper "github.com/karmada-io/karmada/test/helper"
)

func TestAccurateSchedulerEstimatorServer_MaxAvailableReplicas(t *testing.T) {
	opt := &options.Options{
		ClusterName: "fake",
	}
	type args struct {
		request *pb.MaxAvailableReplicasRequest
	}
	tests := []struct {
		name         string
		objs         []runtime.Object
		args         args
		wantResponse *pb.MaxAvailableReplicasResponse
		wantErr      bool
	}{
		{
			name: "normal",
			// node 1 left: 2 cpu, 6 mem, 8 pod, 14 storage
			// node 2 left: 3 cpu, 5 mem, 9 pod, 12 storage
			// node 3 left: 8 cpu, 16 mem, 11 pod, 16 storage
			objs: []runtime.Object{
				testhelper.NewNode("machine1", 8*testhelper.ResourceUnitCPU, 16*testhelper.ResourceUnitMem, 11*testhelper.ResourceUnitPod, 16*testhelper.ResourceUnitEphemeralStorage),
				testhelper.NewNode("machine2", 8*testhelper.ResourceUnitCPU, 16*testhelper.ResourceUnitMem, 11*testhelper.ResourceUnitPod, 16*testhelper.ResourceUnitEphemeralStorage),
				testhelper.NewNode("machine3", 8*testhelper.ResourceUnitCPU, 16*testhelper.ResourceUnitMem, 11*testhelper.ResourceUnitPod, 16*testhelper.ResourceUnitEphemeralStorage),
				testhelper.NewPodWithRequest("pod1", "machine1", 1*testhelper.ResourceUnitCPU, 3*testhelper.ResourceUnitMem, testhelper.ResourceUnitZero),
				testhelper.NewPodWithRequest("pod2", "machine1", 3*testhelper.ResourceUnitCPU, 3*testhelper.ResourceUnitMem, testhelper.ResourceUnitZero),
				testhelper.NewPodWithRequest("pod3", "machine1", 2*testhelper.ResourceUnitCPU, 4*testhelper.ResourceUnitMem, 2*testhelper.ResourceUnitEphemeralStorage),
				testhelper.NewPodWithRequest("pod4", "machine2", 4*testhelper.ResourceUnitCPU, 8*testhelper.ResourceUnitMem, 2*testhelper.ResourceUnitEphemeralStorage),
				testhelper.NewPodWithRequest("pod5", "machine2", 1*testhelper.ResourceUnitCPU, 3*testhelper.ResourceUnitMem, 2*testhelper.ResourceUnitEphemeralStorage),
			},
			// request 1 cpu, 2 mem
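			// per node: min(cpu left/1, mem left/2) => node1: min(2, 3) = 2,
			// node2: min(3, 2) = 2, node3: min(8, 8) = 8, so 2 + 2 + 8 = 12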
			args: args{
				request: &pb.MaxAvailableReplicasRequest{
					Cluster: "fake",
					ReplicaRequirements: pb.ReplicaRequirements{
						ResourceRequest: testhelper.NewResourceList(1*testhelper.ResourceUnitCPU, 2*testhelper.ResourceUnitMem, testhelper.ResourceUnitZero),
					},
				},
			},
			wantResponse: &pb.MaxAvailableReplicasResponse{
				MaxReplicas: 12,
			},
			wantErr: false,
		},
		{
			name: "pod resource strict",
			// node 1 left: 2 cpu, 6 mem, 1 pod, 14 storage
			// node 2 left: 3 cpu, 5 mem, 1 pod, 12 storage
			// node 3 left: 8 cpu, 16 mem, 11 pod, 16 storage
			objs: []runtime.Object{
				testhelper.NewNode("machine1", 8*testhelper.ResourceUnitCPU, 16*testhelper.ResourceUnitMem, 4*testhelper.ResourceUnitPod, 16*testhelper.ResourceUnitEphemeralStorage),
				testhelper.NewNode("machine2", 8*testhelper.ResourceUnitCPU, 16*testhelper.ResourceUnitMem, 3*testhelper.ResourceUnitPod, 16*testhelper.ResourceUnitEphemeralStorage),
				testhelper.NewNode("machine3", 8*testhelper.ResourceUnitCPU, 16*testhelper.ResourceUnitMem, 11*testhelper.ResourceUnitPod, 16*testhelper.ResourceUnitEphemeralStorage),
				testhelper.NewPodWithRequest("pod1", "machine1", 1*testhelper.ResourceUnitCPU, 3*testhelper.ResourceUnitMem, testhelper.ResourceUnitZero),
				testhelper.NewPodWithRequest("pod2", "machine1", 3*testhelper.ResourceUnitCPU, 3*testhelper.ResourceUnitMem, testhelper.ResourceUnitZero),
				testhelper.NewPodWithRequest("pod3", "machine1", 2*testhelper.ResourceUnitCPU, 4*testhelper.ResourceUnitMem, 2*testhelper.ResourceUnitEphemeralStorage),
				testhelper.NewPodWithRequest("pod4", "machine2", 4*testhelper.ResourceUnitCPU, 8*testhelper.ResourceUnitMem, 2*testhelper.ResourceUnitEphemeralStorage),
				testhelper.NewPodWithRequest("pod5", "machine2", 1*testhelper.ResourceUnitCPU, 3*testhelper.ResourceUnitMem, 2*testhelper.ResourceUnitEphemeralStorage),
			},
			// request 1 cpu, 2 mem
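			// the pod count now binds node1 and node2: node1: min(2, 3, 1 pod) = 1,
			// node2: min(3, 2, 1 pod) = 1, node3: min(8, 8, 11 pods) = 8, so 1 + 1 + 8 = 10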
			args: args{
				request: &pb.MaxAvailableReplicasRequest{
					Cluster: "fake",
					ReplicaRequirements: pb.ReplicaRequirements{
						ResourceRequest: testhelper.NewResourceList(1*testhelper.ResourceUnitCPU, 2*testhelper.ResourceUnitMem, testhelper.ResourceUnitZero),
					},
				},
			},
			wantResponse: &pb.MaxAvailableReplicasResponse{
				MaxReplicas: 10,
			},
			wantErr: false,
		},
		{
			name: "request with node selector",
			// node 1(with label: a = 1) left: 2 cpu, 6 mem, 8 pod, 14 storage
			// node 2(with label: a = 3; b = 2) left: 3 cpu, 5 mem, 9 pod, 12 storage
			// node 3(without labels) left: 8 cpu, 16 mem, 11 pod, 16 storage
			objs: []runtime.Object{
				testhelper.MakeNodeWithLabels("machine1", 8*testhelper.ResourceUnitCPU, 16*testhelper.ResourceUnitMem, 11*testhelper.ResourceUnitPod, 16*testhelper.ResourceUnitEphemeralStorage, map[string]string{"a": "1"}),
				testhelper.MakeNodeWithLabels("machine2", 8*testhelper.ResourceUnitCPU, 16*testhelper.ResourceUnitMem, 11*testhelper.ResourceUnitPod, 16*testhelper.ResourceUnitEphemeralStorage, map[string]string{"a": "3", "b": "2"}),
				testhelper.NewNode("machine3", 8*testhelper.ResourceUnitCPU, 16*testhelper.ResourceUnitMem, 11*testhelper.ResourceUnitPod, 16*testhelper.ResourceUnitEphemeralStorage),
				testhelper.NewPodWithRequest("pod1", "machine1", 1*testhelper.ResourceUnitCPU, 3*testhelper.ResourceUnitMem, testhelper.ResourceUnitZero),
				testhelper.NewPodWithRequest("pod2", "machine1", 3*testhelper.ResourceUnitCPU, 3*testhelper.ResourceUnitMem, testhelper.ResourceUnitZero),
				testhelper.NewPodWithRequest("pod3", "machine1", 2*testhelper.ResourceUnitCPU, 4*testhelper.ResourceUnitMem, 2*testhelper.ResourceUnitEphemeralStorage),
				testhelper.NewPodWithRequest("pod4", "machine2", 4*testhelper.ResourceUnitCPU, 8*testhelper.ResourceUnitMem, 2*testhelper.ResourceUnitEphemeralStorage),
				testhelper.NewPodWithRequest("pod5", "machine2", 1*testhelper.ResourceUnitCPU, 3*testhelper.ResourceUnitMem, 2*testhelper.ResourceUnitEphemeralStorage),
			},
			// request 1 cpu, 2 mem and with node label a = 3
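			// only node2 carries label a = 3, so node1 and node3 are filtered out;
			// node2: min(3, 5/2) = 2, so 2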
			args: args{
				request: &pb.MaxAvailableReplicasRequest{
					Cluster: "fake",
					ReplicaRequirements: pb.ReplicaRequirements{
						NodeClaim: &pb.NodeClaim{
							NodeSelector: map[string]string{
								"a": "3",
							},
						},
						ResourceRequest: testhelper.NewResourceList(1*testhelper.ResourceUnitCPU, 2*testhelper.ResourceUnitMem, testhelper.ResourceUnitZero),
					},
				},
			},
			wantResponse: &pb.MaxAvailableReplicasResponse{
				MaxReplicas: 2,
			},
			wantErr: false,
		},
		{
			name: "request with node affinity",
			// node 1(with label: a = 1) left: 2 cpu, 6 mem, 8 pod, 14 storage
			// node 2(with label: a = 3; b = 2) left: 3 cpu, 5 mem, 9 pod, 12 storage
			// node 3(without labels) left: 8 cpu, 16 mem, 11 pod, 16 storage
			objs: []runtime.Object{
				testhelper.MakeNodeWithLabels("machine1", 8*testhelper.ResourceUnitCPU, 16*testhelper.ResourceUnitMem, 11*testhelper.ResourceUnitPod, 16*testhelper.ResourceUnitEphemeralStorage, map[string]string{"a": "1"}),
				testhelper.MakeNodeWithLabels("machine2", 8*testhelper.ResourceUnitCPU, 16*testhelper.ResourceUnitMem, 11*testhelper.ResourceUnitPod, 16*testhelper.ResourceUnitEphemeralStorage, map[string]string{"a": "3", "b": "2"}),
				testhelper.NewNode("machine3", 8*testhelper.ResourceUnitCPU, 16*testhelper.ResourceUnitMem, 11*testhelper.ResourceUnitPod, 16*testhelper.ResourceUnitEphemeralStorage),
				testhelper.NewPodWithRequest("pod1", "machine1", 1*testhelper.ResourceUnitCPU, 3*testhelper.ResourceUnitMem, testhelper.ResourceUnitZero),
				testhelper.NewPodWithRequest("pod2", "machine1", 3*testhelper.ResourceUnitCPU, 3*testhelper.ResourceUnitMem, testhelper.ResourceUnitZero),
				testhelper.NewPodWithRequest("pod3", "machine1", 2*testhelper.ResourceUnitCPU, 4*testhelper.ResourceUnitMem, 2*testhelper.ResourceUnitEphemeralStorage),
				testhelper.NewPodWithRequest("pod4", "machine2", 4*testhelper.ResourceUnitCPU, 8*testhelper.ResourceUnitMem, 2*testhelper.ResourceUnitEphemeralStorage),
				testhelper.NewPodWithRequest("pod5", "machine2", 1*testhelper.ResourceUnitCPU, 3*testhelper.ResourceUnitMem, 2*testhelper.ResourceUnitEphemeralStorage),
			},
			// request 1 cpu, 2 mem and with node label a > 0
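			// the Gt expression matches node1 (a = 1) and node2 (a = 3) but not node3, which has no label a;
			// node1: min(2, 3) = 2, node2: min(3, 2) = 2, so 2 + 2 = 4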
			args: args{
				request: &pb.MaxAvailableReplicasRequest{
					Cluster: "fake",
					ReplicaRequirements: pb.ReplicaRequirements{
						NodeClaim: &pb.NodeClaim{
							NodeAffinity: &corev1.NodeSelector{
								NodeSelectorTerms: []corev1.NodeSelectorTerm{
									{
										MatchExpressions: []corev1.NodeSelectorRequirement{
											{
												Key:      "a",
												Operator: corev1.NodeSelectorOpGt,
												Values:   []string{"0"},
											},
										},
									},
								},
							},
						},
						ResourceRequest: testhelper.NewResourceList(1*testhelper.ResourceUnitCPU, 2*testhelper.ResourceUnitMem, testhelper.ResourceUnitZero),
					},
				},
			},
			wantResponse: &pb.MaxAvailableReplicasResponse{
				MaxReplicas: 4,
			},
			wantErr: false,
		},
		{
			name: "request with tolerations",
			// node 1(with taint: key1 = value1) left: 2 cpu, 6 mem, 8 pod, 14 storage
			// node 2(with taint: key2 = value2) left: 3 cpu, 5 mem, 9 pod, 12 storage
			// node 3(without taints) left: 8 cpu, 16 mem, 11 pod, 16 storage
			objs: []runtime.Object{
				testhelper.MakeNodeWithTaints("machine1", 8*testhelper.ResourceUnitCPU, 16*testhelper.ResourceUnitMem, 11*testhelper.ResourceUnitPod, 16*testhelper.ResourceUnitEphemeralStorage, []corev1.Taint{{Key: "key1", Value: "value1", Effect: corev1.TaintEffectNoSchedule}}),
				testhelper.MakeNodeWithTaints("machine2", 8*testhelper.ResourceUnitCPU, 16*testhelper.ResourceUnitMem, 11*testhelper.ResourceUnitPod, 16*testhelper.ResourceUnitEphemeralStorage, []corev1.Taint{{Key: "key2", Value: "value2", Effect: corev1.TaintEffectNoSchedule}}),
				testhelper.NewNode("machine3", 8*testhelper.ResourceUnitCPU, 16*testhelper.ResourceUnitMem, 11*testhelper.ResourceUnitPod, 16*testhelper.ResourceUnitEphemeralStorage),
				testhelper.NewPodWithRequest("pod1", "machine1", 1*testhelper.ResourceUnitCPU, 3*testhelper.ResourceUnitMem, testhelper.ResourceUnitZero),
				testhelper.NewPodWithRequest("pod2", "machine1", 3*testhelper.ResourceUnitCPU, 3*testhelper.ResourceUnitMem, testhelper.ResourceUnitZero),
				testhelper.NewPodWithRequest("pod3", "machine1", 2*testhelper.ResourceUnitCPU, 4*testhelper.ResourceUnitMem, 2*testhelper.ResourceUnitEphemeralStorage),
				testhelper.NewPodWithRequest("pod4", "machine2", 4*testhelper.ResourceUnitCPU, 8*testhelper.ResourceUnitMem, 2*testhelper.ResourceUnitEphemeralStorage),
				testhelper.NewPodWithRequest("pod5", "machine2", 1*testhelper.ResourceUnitCPU, 3*testhelper.ResourceUnitMem, 2*testhelper.ResourceUnitEphemeralStorage),
			},
			// request 1 cpu, 2 mem and tolerate the taint key1 = value1
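			// node1's taint is tolerated and node3 is untainted, while node2's taint is not tolerated;
			// node1: min(2, 3) = 2, node3: min(8, 8) = 8, so 2 + 8 = 10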
			args: args{
				request: &pb.MaxAvailableReplicasRequest{
					Cluster: "fake",
					ReplicaRequirements: pb.ReplicaRequirements{
						NodeClaim: &pb.NodeClaim{
							Tolerations: []corev1.Toleration{
								{Key: "key1", Operator: corev1.TolerationOpEqual, Value: "value1"},
							},
						},
						ResourceRequest: testhelper.NewResourceList(1*testhelper.ResourceUnitCPU, 2*testhelper.ResourceUnitMem, testhelper.ResourceUnitZero),
					},
				},
			},
			wantResponse: &pb.MaxAvailableReplicasResponse{
				MaxReplicas: 10,
			},
			wantErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()

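			// Build the estimator against a fake clientset seeded with the test
			// objects, start its informers, and wait for the caches to sync
			// before issuing the estimation request.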
			es := NewEstimatorServer(fake.NewSimpleClientset(tt.objs...), opt)

			es.informerFactory.Start(ctx.Done())
			if !es.waitForCacheSync(ctx.Done()) {
				t.Errorf("MaxAvailableReplicas() error = %v, wantErr %v", fmt.Errorf("failed to wait for cache sync"), tt.wantErr)
			}

			gotResponse, err := es.MaxAvailableReplicas(ctx, tt.args.request)
			if (err != nil) != tt.wantErr {
				t.Errorf("MaxAvailableReplicas() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(gotResponse, tt.wantResponse) {
				t.Errorf("MaxAvailableReplicas() gotResponse = %v, want %v", gotResponse, tt.wantResponse)
			}
		})
	}
}
@@ -7,12 +7,23 @@ import (
	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/utils/pointer"
)

// These are different resource units.
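// CPU is expressed in millicores (1000 = one core), memory and ephemeral
// storage in bytes (1024 * 1024 * 1024 = 1Gi), pods and GPUs as plain counts.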
const (
	ResourceUnitZero             int64 = 0
	ResourceUnitCPU              int64 = 1000
	ResourceUnitMem              int64 = 1024 * 1024 * 1024
	ResourceUnitPod              int64 = 1
	ResourceUnitEphemeralStorage int64 = 1024 * 1024 * 1024
	ResourceUnitGPU              int64 = 1
)

// NewDeployment will build a deployment object.
func NewDeployment(namespace string, name string) *appsv1.Deployment {
	podLabels := map[string]string{"app": "nginx"}
@@ -242,3 +253,129 @@ func NewJob(namespace string, name string) *batchv1.Job {
		},
	}
}

// NewResourceList will build a ResourceList.
func NewResourceList(milliCPU, memory, ephemeralStorage int64) corev1.ResourceList {
	return corev1.ResourceList{
		corev1.ResourceCPU:              *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
		corev1.ResourceMemory:           *resource.NewQuantity(memory, resource.DecimalSI),
		corev1.ResourceEphemeralStorage: *resource.NewQuantity(ephemeralStorage, resource.BinarySI),
	}
}

// NewPodWithRequest will build a Pod with resource request.
func NewPodWithRequest(pod, node string, milliCPU, memory, ephemeralStorage int64) *corev1.Pod {
	return &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: pod},
		Spec: corev1.PodSpec{
			NodeName: node,
			Containers: []corev1.Container{
				{
					Resources: corev1.ResourceRequirements{
						Requests: corev1.ResourceList{
							corev1.ResourceCPU:              *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
							corev1.ResourceMemory:           *resource.NewQuantity(memory, resource.DecimalSI),
							corev1.ResourceEphemeralStorage: *resource.NewQuantity(ephemeralStorage, resource.BinarySI),
						},
					},
				},
			},
		},
		Status: corev1.PodStatus{
			Phase: corev1.PodRunning,
		},
	}
}

// NewNode will build a ready node with resource.
func NewNode(node string, milliCPU, memory, pods, ephemeralStorage int64) *corev1.Node {
	return &corev1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: node},
		Status: corev1.NodeStatus{
			Capacity: corev1.ResourceList{
				corev1.ResourceCPU:              *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
				corev1.ResourceMemory:           *resource.NewQuantity(memory, resource.BinarySI),
				corev1.ResourcePods:             *resource.NewQuantity(pods, resource.DecimalSI),
				corev1.ResourceEphemeralStorage: *resource.NewQuantity(ephemeralStorage, resource.BinarySI),
			},
			Allocatable: corev1.ResourceList{
				corev1.ResourceCPU:              *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
				corev1.ResourceMemory:           *resource.NewQuantity(memory, resource.BinarySI),
				corev1.ResourcePods:             *resource.NewQuantity(pods, resource.DecimalSI),
				corev1.ResourceEphemeralStorage: *resource.NewQuantity(ephemeralStorage, resource.BinarySI),
			},
			Conditions: []corev1.NodeCondition{
				{
					Type:              corev1.NodeReady,
					Status:            corev1.ConditionTrue,
					Reason:            "KubeletReady",
					Message:           "kubelet is posting ready status",
					LastHeartbeatTime: metav1.Now(),
				},
			},
		},
	}
}

// MakeNodeWithLabels will build a ready node with resource and labels.
func MakeNodeWithLabels(node string, milliCPU, memory, pods, ephemeralStorage int64, labels map[string]string) *corev1.Node {
	return &corev1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: node, Labels: labels},
		Status: corev1.NodeStatus{
			Capacity: corev1.ResourceList{
				corev1.ResourceCPU:              *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
				corev1.ResourceMemory:           *resource.NewQuantity(memory, resource.BinarySI),
				corev1.ResourcePods:             *resource.NewQuantity(pods, resource.DecimalSI),
				corev1.ResourceEphemeralStorage: *resource.NewQuantity(ephemeralStorage, resource.BinarySI),
			},
			Allocatable: corev1.ResourceList{
				corev1.ResourceCPU:              *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
				corev1.ResourceMemory:           *resource.NewQuantity(memory, resource.BinarySI),
				corev1.ResourcePods:             *resource.NewQuantity(pods, resource.DecimalSI),
				corev1.ResourceEphemeralStorage: *resource.NewQuantity(ephemeralStorage, resource.BinarySI),
			},
			Conditions: []corev1.NodeCondition{
				{
					Type:              corev1.NodeReady,
					Status:            corev1.ConditionTrue,
					Reason:            "KubeletReady",
					Message:           "kubelet is posting ready status",
					LastHeartbeatTime: metav1.Now(),
				},
			},
		},
	}
}

// MakeNodeWithTaints will build a ready node with resource and taints.
func MakeNodeWithTaints(node string, milliCPU, memory, pods, ephemeralStorage int64, taints []corev1.Taint) *corev1.Node {
	return &corev1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: node},
		Spec: corev1.NodeSpec{
			Taints: taints,
		},
		Status: corev1.NodeStatus{
			Capacity: corev1.ResourceList{
				corev1.ResourceCPU:              *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
				corev1.ResourceMemory:           *resource.NewQuantity(memory, resource.BinarySI),
				corev1.ResourcePods:             *resource.NewQuantity(pods, resource.DecimalSI),
				corev1.ResourceEphemeralStorage: *resource.NewQuantity(ephemeralStorage, resource.BinarySI),
			},
			Allocatable: corev1.ResourceList{
				corev1.ResourceCPU:              *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
				corev1.ResourceMemory:           *resource.NewQuantity(memory, resource.BinarySI),
				corev1.ResourcePods:             *resource.NewQuantity(pods, resource.DecimalSI),
				corev1.ResourceEphemeralStorage: *resource.NewQuantity(ephemeralStorage, resource.BinarySI),
			},
			Conditions: []corev1.NodeCondition{
				{
					Type:              corev1.NodeReady,
					Status:            corev1.ConditionTrue,
					Reason:            "KubeletReady",
					Message:           "kubelet is posting ready status",
					LastHeartbeatTime: metav1.Now(),
				},
			},
		},
	}
}