Implemented a unit test for the FilterOutSchedulable function
This commit is contained in:
parent
795696ff45
commit
bdb8987db6
|
|
@ -19,78 +19,42 @@ package simulator
|
|||
import (
|
||||
"testing"
|
||||
|
||||
. "k8s.io/contrib/cluster-autoscaler/utils/test"
|
||||
|
||||
kube_api "k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func NewTestPredicateChecker() *PredicateChecker {
|
||||
return &PredicateChecker{
|
||||
predicates: map[string]algorithm.FitPredicate{
|
||||
"default": predicates.GeneralPredicates,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestReservation(t *testing.T) {
|
||||
pod := buildPod("p1", 100, 200000)
|
||||
pod2 := &kube_api.Pod{
|
||||
Spec: kube_api.PodSpec{
|
||||
Containers: []kube_api.Container{
|
||||
{
|
||||
Resources: kube_api.ResourceRequirements{
|
||||
Requests: kube_api.ResourceList{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
nodeInfo := schedulercache.NewNodeInfo(pod, pod, pod2)
|
||||
pod := BuildTestPod("p1", 100, 200000)
|
||||
pod2 := BuildTestPod("p2", -1, -1)
|
||||
|
||||
nodeInfo := schedulercache.NewNodeInfo(pod, pod, pod2)
|
||||
node := BuildTestNode("node1", 2000, 2000000)
|
||||
|
||||
node := &kube_api.Node{
|
||||
ObjectMeta: kube_api.ObjectMeta{
|
||||
Name: "node1",
|
||||
},
|
||||
Status: kube_api.NodeStatus{
|
||||
Capacity: kube_api.ResourceList{
|
||||
kube_api.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
|
||||
kube_api.ResourceMemory: *resource.NewQuantity(2000000, resource.DecimalSI),
|
||||
},
|
||||
},
|
||||
}
|
||||
reservation, err := CalculateReservation(node, nodeInfo)
|
||||
assert.NoError(t, err)
|
||||
assert.InEpsilon(t, 2.0/10, reservation, 0.01)
|
||||
|
||||
node2 := &kube_api.Node{
|
||||
ObjectMeta: kube_api.ObjectMeta{
|
||||
Name: "node2",
|
||||
},
|
||||
Status: kube_api.NodeStatus{
|
||||
Capacity: kube_api.ResourceList{
|
||||
kube_api.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
|
||||
},
|
||||
},
|
||||
}
|
||||
node2 := BuildTestNode("node1", 2000, -1)
|
||||
|
||||
_, err = CalculateReservation(node2, nodeInfo)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestFindPlaceAllOk(t *testing.T) {
|
||||
pod1 := buildPod("p1", 300, 500000)
|
||||
new1 := buildPod("p2", 600, 500000)
|
||||
new2 := buildPod("p3", 500, 500000)
|
||||
pod1 := BuildTestPod("p1", 300, 500000)
|
||||
new1 := BuildTestPod("p2", 600, 500000)
|
||||
new2 := BuildTestPod("p3", 500, 500000)
|
||||
|
||||
nodeInfos := map[string]*schedulercache.NodeInfo{
|
||||
"n1": schedulercache.NewNodeInfo(pod1),
|
||||
"n2": schedulercache.NewNodeInfo(),
|
||||
}
|
||||
node1 := buildNode("n1", 1000, 2000000)
|
||||
node2 := buildNode("n2", 1000, 2000000)
|
||||
node1 := BuildTestNode("n1", 1000, 2000000)
|
||||
node2 := BuildTestNode("n2", 1000, 2000000)
|
||||
nodeInfos["n1"].SetNode(node1)
|
||||
nodeInfos["n2"].SetNode(node2)
|
||||
|
||||
|
|
@ -103,17 +67,17 @@ func TestFindPlaceAllOk(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestFindPlaceAllBas(t *testing.T) {
|
||||
pod1 := buildPod("p1", 300, 500000)
|
||||
new1 := buildPod("p2", 600, 500000)
|
||||
new2 := buildPod("p3", 500, 500000)
|
||||
new3 := buildPod("p4", 700, 500000)
|
||||
pod1 := BuildTestPod("p1", 300, 500000)
|
||||
new1 := BuildTestPod("p2", 600, 500000)
|
||||
new2 := BuildTestPod("p3", 500, 500000)
|
||||
new3 := BuildTestPod("p4", 700, 500000)
|
||||
|
||||
nodeInfos := map[string]*schedulercache.NodeInfo{
|
||||
"n1": schedulercache.NewNodeInfo(pod1),
|
||||
"n2": schedulercache.NewNodeInfo(),
|
||||
}
|
||||
node1 := buildNode("n1", 1000, 2000000)
|
||||
node2 := buildNode("n2", 1000, 2000000)
|
||||
node1 := BuildTestNode("n1", 1000, 2000000)
|
||||
node2 := BuildTestNode("n2", 1000, 2000000)
|
||||
nodeInfos["n1"].SetNode(node1)
|
||||
nodeInfos["n2"].SetNode(node2)
|
||||
|
||||
|
|
@ -126,14 +90,14 @@ func TestFindPlaceAllBas(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestFindNone(t *testing.T) {
|
||||
pod1 := buildPod("p1", 300, 500000)
|
||||
pod1 := BuildTestPod("p1", 300, 500000)
|
||||
|
||||
nodeInfos := map[string]*schedulercache.NodeInfo{
|
||||
"n1": schedulercache.NewNodeInfo(pod1),
|
||||
"n2": schedulercache.NewNodeInfo(),
|
||||
}
|
||||
node1 := buildNode("n1", 1000, 2000000)
|
||||
node2 := buildNode("n2", 1000, 2000000)
|
||||
node1 := BuildTestNode("n1", 1000, 2000000)
|
||||
node2 := BuildTestNode("n2", 1000, 2000000)
|
||||
nodeInfos["n1"].SetNode(node1)
|
||||
nodeInfos["n2"].SetNode(node2)
|
||||
|
||||
|
|
@ -144,39 +108,3 @@ func TestFindNone(t *testing.T) {
|
|||
nodeInfos, NewTestPredicateChecker())
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func buildPod(name string, cpu int64, mem int64) *kube_api.Pod {
|
||||
return &kube_api.Pod{
|
||||
ObjectMeta: kube_api.ObjectMeta{
|
||||
Namespace: "default",
|
||||
Name: name,
|
||||
},
|
||||
Spec: kube_api.PodSpec{
|
||||
Containers: []kube_api.Container{
|
||||
{
|
||||
Resources: kube_api.ResourceRequirements{
|
||||
Requests: kube_api.ResourceList{
|
||||
kube_api.ResourceCPU: *resource.NewMilliQuantity(cpu, resource.DecimalSI),
|
||||
kube_api.ResourceMemory: *resource.NewQuantity(mem, resource.DecimalSI),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func buildNode(name string, cpu int64, mem int64) *kube_api.Node {
|
||||
return &kube_api.Node{
|
||||
ObjectMeta: kube_api.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Status: kube_api.NodeStatus{
|
||||
Capacity: kube_api.ResourceList{
|
||||
kube_api.ResourceCPU: *resource.NewMilliQuantity(cpu, resource.DecimalSI),
|
||||
kube_api.ResourceMemory: *resource.NewQuantity(mem, resource.DecimalSI),
|
||||
kube_api.ResourcePods: *resource.NewQuantity(100, resource.DecimalSI),
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -22,6 +22,7 @@ import (
|
|||
kube_api "k8s.io/kubernetes/pkg/api"
|
||||
kube_client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
|
||||
// We need to import provider to intialize default scheduler.
|
||||
_ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/factory"
|
||||
|
|
@ -50,6 +51,15 @@ func NewPredicateChecker(kubeClient *kube_client.Client) (*PredicateChecker, err
|
|||
}, nil
|
||||
}
|
||||
|
||||
// NewTestPredicateChecker builds test version of PredicateChecker.
|
||||
func NewTestPredicateChecker() *PredicateChecker {
|
||||
return &PredicateChecker{
|
||||
predicates: map[string]algorithm.FitPredicate{
|
||||
"default": predicates.GeneralPredicates,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// FitsAny checks if the given pod can be place on any of the given nodes.
|
||||
func (p *PredicateChecker) FitsAny(pod *kube_api.Pod, nodeInfos map[string]*schedulercache.NodeInfo) (string, error) {
|
||||
for name, nodeInfo := range nodeInfos {
|
||||
|
|
|
|||
|
|
@ -0,0 +1,75 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package test
|
||||
|
||||
import (
|
||||
kube_api "k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
)
|
||||
|
||||
// BuildTestPod creates a pod with specified resources.
|
||||
func BuildTestPod(name string, cpu int64, mem int64) *kube_api.Pod {
|
||||
pod := &kube_api.Pod{
|
||||
ObjectMeta: kube_api.ObjectMeta{
|
||||
Namespace: "default",
|
||||
Name: name,
|
||||
},
|
||||
Spec: kube_api.PodSpec{
|
||||
Containers: []kube_api.Container{
|
||||
{
|
||||
Resources: kube_api.ResourceRequirements{
|
||||
Requests: kube_api.ResourceList{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
if cpu >= 0 {
|
||||
pod.Spec.Containers[0].Resources.Requests[kube_api.ResourceCPU] = *resource.NewMilliQuantity(cpu, resource.DecimalSI)
|
||||
}
|
||||
if mem >= 0 {
|
||||
pod.Spec.Containers[0].Resources.Requests[kube_api.ResourceMemory] = *resource.NewQuantity(mem, resource.DecimalSI)
|
||||
}
|
||||
|
||||
return pod
|
||||
}
|
||||
|
||||
// BuildTestNode creates a node with specified capacity.
|
||||
func BuildTestNode(name string, cpu int64, mem int64) *kube_api.Node {
|
||||
node := &kube_api.Node{
|
||||
ObjectMeta: kube_api.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Status: kube_api.NodeStatus{
|
||||
Capacity: kube_api.ResourceList{
|
||||
kube_api.ResourcePods: *resource.NewQuantity(100, resource.DecimalSI),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
if cpu >= 0 {
|
||||
node.Status.Capacity[kube_api.ResourceCPU] = *resource.NewMilliQuantity(cpu, resource.DecimalSI)
|
||||
}
|
||||
if mem >= 0 {
|
||||
node.Status.Capacity[kube_api.ResourceMemory] = *resource.NewQuantity(mem, resource.DecimalSI)
|
||||
}
|
||||
|
||||
node.Status.Allocatable = node.Status.Capacity
|
||||
|
||||
return node
|
||||
}
|
||||
|
|
@ -0,0 +1,53 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"k8s.io/contrib/cluster-autoscaler/simulator"
|
||||
. "k8s.io/contrib/cluster-autoscaler/utils/test"
|
||||
|
||||
kube_api "k8s.io/kubernetes/pkg/api"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestFilterOutSchedulable(t *testing.T) {
|
||||
p1 := BuildTestPod("p1", 1500, 200000)
|
||||
p2 := BuildTestPod("p2", 3000, 200000)
|
||||
p3 := BuildTestPod("p3", 100, 200000)
|
||||
unschedulablePods := []*kube_api.Pod{p1, p2, p3}
|
||||
|
||||
scheduledPod1 := BuildTestPod("s1", 100, 200000)
|
||||
scheduledPod2 := BuildTestPod("s2", 1500, 200000)
|
||||
scheduledPod1.Spec.NodeName = "node1"
|
||||
scheduledPod2.Spec.NodeName = "node1"
|
||||
|
||||
node := BuildTestNode("node1", 2000, 2000000)
|
||||
|
||||
predicateChecker := simulator.NewTestPredicateChecker()
|
||||
|
||||
res := FilterOutSchedulable(unschedulablePods, []*kube_api.Node{node}, []*kube_api.Pod{scheduledPod1}, predicateChecker)
|
||||
assert.Equal(t, 1, len(res))
|
||||
assert.Equal(t, p2, res[0])
|
||||
|
||||
res2 := FilterOutSchedulable(unschedulablePods, []*kube_api.Node{node}, []*kube_api.Pod{scheduledPod1, scheduledPod2}, predicateChecker)
|
||||
assert.Equal(t, 2, len(res2))
|
||||
assert.Equal(t, p1, res2[0])
|
||||
assert.Equal(t, p2, res2[1])
|
||||
}
|
||||
Loading…
Reference in New Issue