pass node infos to simulator

Aleksandra Malinowska 2020-02-25 15:48:16 +01:00
parent ef9d895463
commit d11b39603d
3 changed files with 171 additions and 105 deletions
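The change below threads scheduler NodeInfo objects through the scale-down path, so the simulator receives []*schedulernodeinfo.NodeInfo directly instead of rebuilding node information from []*apiv1.Node slices. As a rough orientation before the hunks, here is a minimal sketch of the wrapping pattern the new code relies on; the package name and the helper name wrapNodes are illustrative, the import aliases follow the ones used in the diff, and the schedulernodeinfo import path is assumed to be the scheduler nodeinfo package vendored by the autoscaler at the time. It mirrors the nodeInfos() test helper added in the last file of this commit.

package example

import (
	apiv1 "k8s.io/api/core/v1"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// wrapNodes builds one NodeInfo per node, the shape now expected by
// simulator.FindNodesToRemove and FindEmptyNodesToRemove. Pods running on a
// node would additionally be attached via AddPod, as the updated tests do.
func wrapNodes(nodes []*apiv1.Node) []*schedulernodeinfo.NodeInfo {
	result := make([]*schedulernodeinfo.NodeInfo, 0, len(nodes))
	for _, node := range nodes {
		ni := schedulernodeinfo.NewNodeInfo()
		ni.SetNode(node)
		result = append(result, ni)
	}
	return result
}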

View File

@@ -436,7 +436,17 @@ func (sd *ScaleDown) UpdateUnneededNodes(
}
candidateNames[candidate.Name] = true
}
destinationNames := make(map[string]bool, len(destinationNodes))
for _, destination := range destinationNodes {
if destination == nil {
// Do we need to check this?
klog.Errorf("Unexpected nil node in node info")
continue
}
destinationNames[destination.Name] = true
}
candidateNodeInfos := make([]*schedulernodeinfo.NodeInfo, 0, len(candidateNames))
destinationNodeInfos := make([]*schedulernodeinfo.NodeInfo, 0, len(destinationNames))
for _, nodeInfo := range allNodeInfos {
if nodeInfo.Node() == nil {
// Do we need to check this?
@@ -446,6 +456,9 @@ func (sd *ScaleDown) UpdateUnneededNodes(
if candidateNames[nodeInfo.Node().Name] {
candidateNodeInfos = append(candidateNodeInfos, nodeInfo)
}
if destinationNames[nodeInfo.Node().Name] {
destinationNodeInfos = append(destinationNodeInfos, nodeInfo)
}
}
sd.updateUnremovableNodes(allNodeInfos)
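The two hunks above boil down to a filter-by-name-set pattern: candidate and destination names are collected into maps, and a single pass over allNodeInfos keeps the matching NodeInfos. A compact sketch of that pattern, reusing the imports from the earlier sketch; the helper name filterByName is hypothetical:

// filterByName keeps the NodeInfos whose node name appears in names.
// UpdateUnneededNodes applies this idea once for candidateNames and once for
// destinationNames while walking allNodeInfos a single time.
func filterByName(all []*schedulernodeinfo.NodeInfo, names map[string]bool) []*schedulernodeinfo.NodeInfo {
	out := make([]*schedulernodeinfo.NodeInfo, 0, len(names))
	for _, ni := range all {
		if ni.Node() == nil {
			continue // defensive nil check, mirroring the one in the diff
		}
		if names[ni.Node().Name] {
			out = append(out, ni)
		}
	}
	return out
}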
@@ -523,7 +536,7 @@ func (sd *ScaleDown) UpdateUnneededNodes(
// Look for nodes to remove in the current candidates
nodesToRemove, unremovable, newHints, simulatorErr := simulator.FindNodesToRemove(
currentCandidates,
destinationNodes,
destinationNodeInfos,
pods,
nil,
sd.context.ClusterSnapshot,
@@ -556,7 +569,7 @@ func (sd *ScaleDown) UpdateUnneededNodes(
additionalNodesToRemove, additionalUnremovable, additionalNewHints, simulatorErr :=
simulator.FindNodesToRemove(
currentNonCandidates[:additionalCandidatesPoolSize],
destinationNodes,
destinationNodeInfos,
pods,
nil,
sd.context.ClusterSnapshot,
@@ -687,24 +700,20 @@ func (sd *ScaleDown) markSimulationError(simulatorErr errors.AutoscalerError,
// chooseCandidates splits nodes into current candidates for scale-down and the
// rest. Current candidates are unneeded nodes from the previous run that are
// still in the nodes list.
func (sd *ScaleDown) chooseCandidates(nodes []*schedulernodeinfo.NodeInfo) ([]*apiv1.Node, []*apiv1.Node) {
func (sd *ScaleDown) chooseCandidates(nodes []*schedulernodeinfo.NodeInfo) ([]*schedulernodeinfo.NodeInfo, []*schedulernodeinfo.NodeInfo) {
// Number of candidates should not be capped. We will look for nodes to remove
// from the whole set of nodes.
if sd.context.ScaleDownNonEmptyCandidatesCount <= 0 {
candidates := make([]*apiv1.Node, len(nodes))
for i, node := range nodes {
candidates[i] = node.Node()
}
return candidates, []*apiv1.Node{}
return nodes, nil
}
currentCandidates := make([]*apiv1.Node, 0, len(sd.unneededNodesList))
currentNonCandidates := make([]*apiv1.Node, 0, len(nodes))
currentCandidates := make([]*schedulernodeinfo.NodeInfo, 0, len(sd.unneededNodesList))
currentNonCandidates := make([]*schedulernodeinfo.NodeInfo, 0, len(nodes))
for _, nodeInfo := range nodes {
node := nodeInfo.Node()
if _, found := sd.unneededNodes[node.Name]; found {
currentCandidates = append(currentCandidates, node)
currentCandidates = append(currentCandidates, nodeInfo)
} else {
currentNonCandidates = append(currentNonCandidates, node)
currentNonCandidates = append(currentNonCandidates, nodeInfo)
}
}
return currentCandidates, currentNonCandidates
@@ -786,12 +795,8 @@ func (sd *ScaleDown) TryToScaleDown(
}
nodesWithoutMaster := filterOutMasters(allNodeInfos, pods)
nodes := make([]*apiv1.Node, len(nodesWithoutMaster))
for i, nodeInfo := range nodesWithoutMaster {
nodes[i] = nodeInfo.Node()
}
candidates := make([]*apiv1.Node, 0)
candidates := make([]*schedulernodeinfo.NodeInfo, 0)
readinessMap := make(map[string]bool)
candidateNodeGroups := make(map[string]cloudprovider.NodeGroup)
gpuLabel := sd.context.CloudProvider.GPULabel()
@@ -880,9 +885,8 @@ func (sd *ScaleDown) TryToScaleDown(
continue
}
candidates = append(candidates, node)
candidates = append(candidates, nodeInfo)
candidateNodeGroups[node.Name] = nodeGroup
}
if len(candidates) == 0 {
@@ -918,7 +922,7 @@ func (sd *ScaleDown) TryToScaleDown(
// We look for only 1 node so new hints may be incomplete.
nodesToRemove, unremovable, _, err := simulator.FindNodesToRemove(
candidates,
nodes,
nodesWithoutMaster,
pods,
sd.context.ListerRegistry,
sd.context.ClusterSnapshot,
@@ -1002,16 +1006,12 @@ func updateScaleDownMetrics(scaleDownStart time.Time, findNodesToRemoveDuration
}
func (sd *ScaleDown) getEmptyNodesNoResourceLimits(candidates []*schedulernodeinfo.NodeInfo, pods []*apiv1.Pod, maxEmptyBulkDelete int) []*apiv1.Node {
nodes := make([]*apiv1.Node, 0, len(candidates))
for _, nodeInfo := range candidates {
nodes = append(nodes, nodeInfo.Node())
}
return sd.getEmptyNodes(nodes, pods, maxEmptyBulkDelete, noScaleDownLimitsOnResources())
return sd.getEmptyNodes(candidates, pods, maxEmptyBulkDelete, noScaleDownLimitsOnResources())
}
// This function finds empty nodes among passed candidates and returns a list of empty nodes
// that can be deleted at the same time.
func (sd *ScaleDown) getEmptyNodes(candidates []*apiv1.Node, pods []*apiv1.Pod, maxEmptyBulkDelete int,
func (sd *ScaleDown) getEmptyNodes(candidates []*schedulernodeinfo.NodeInfo, pods []*apiv1.Pod, maxEmptyBulkDelete int,
resourcesLimits scaleDownResourcesLimits) []*apiv1.Node {
emptyNodes := simulator.FindEmptyNodesToRemove(candidates, pods)

View File

@@ -28,7 +28,6 @@ import (
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
pod_util "k8s.io/autoscaler/cluster-autoscaler/utils/pod"
scheduler_util "k8s.io/autoscaler/cluster-autoscaler/utils/scheduler"
"k8s.io/autoscaler/cluster-autoscaler/utils/tpu"
apiv1 "k8s.io/api/core/v1"
@@ -113,14 +112,26 @@ type UtilizationInfo struct {
// FindNodesToRemove finds nodes that can be removed. It also returns information about a good
// rescheduling location for each of the pods.
func FindNodesToRemove(candidates []*apiv1.Node, destinationNodes []*apiv1.Node, pods []*apiv1.Pod,
listers kube_util.ListerRegistry, clusterSnapshot ClusterSnapshot, predicateChecker PredicateChecker, maxCount int,
fastCheck bool, oldHints map[string]string, usageTracker *UsageTracker,
func FindNodesToRemove(
candidates []*schedulernodeinfo.NodeInfo,
destinationNodes []*schedulernodeinfo.NodeInfo,
pods []*apiv1.Pod,
listers kube_util.ListerRegistry,
clusterSnapshot ClusterSnapshot,
predicateChecker PredicateChecker,
maxCount int,
fastCheck bool,
oldHints map[string]string,
usageTracker *UsageTracker,
timestamp time.Time,
podDisruptionBudgets []*policyv1.PodDisruptionBudget,
) (nodesToRemove []NodeToBeRemoved, unremovableNodes []*UnremovableNode, podReschedulingHints map[string]string, finalError errors.AutoscalerError) {
nodeNameToNodeInfo := scheduler_util.CreateNodeNameToInfoMap(pods, destinationNodes)
destinations := make(map[string]bool, len(destinationNodes))
for _, node := range destinationNodes {
destinations[node.Node().Name] = true
}
result := make([]NodeToBeRemoved, 0)
unremovable := make([]*UnremovableNode, 0)
@@ -131,35 +142,38 @@ func FindNodesToRemove(candidates []*apiv1.Node, destinationNodes []*apiv1.Node,
newHints := make(map[string]string, len(oldHints))
candidateloop:
for _, node := range candidates {
for _, nodeInfo := range candidates {
node := nodeInfo.Node()
klog.V(2).Infof("%s: %s for removal", evaluationType, node.Name)
var podsToRemove []*apiv1.Pod
var blockingPod *drain.BlockingPod
var err error
if nodeInfo, found := nodeNameToNodeInfo[node.Name]; found {
if fastCheck {
podsToRemove, blockingPod, err = FastGetPodsToMove(nodeInfo, *skipNodesWithSystemPods, *skipNodesWithLocalStorage,
podDisruptionBudgets)
} else {
podsToRemove, blockingPod, err = DetailedGetPodsForMove(nodeInfo, *skipNodesWithSystemPods, *skipNodesWithLocalStorage, listers, int32(*minReplicaCount),
podDisruptionBudgets)
}
if err != nil {
klog.V(2).Infof("%s: node %s cannot be removed: %v", evaluationType, node.Name, err)
if blockingPod != nil {
unremovable = append(unremovable, &UnremovableNode{Node: node, Reason: BlockedByPod, BlockingPod: blockingPod})
} else {
unremovable = append(unremovable, &UnremovableNode{Node: node, Reason: UnexpectedError})
}
continue candidateloop
}
} else {
if _, found := destinations[node.Name]; !found {
klog.V(2).Infof("%s: nodeInfo for %s not found", evaluationType, node.Name)
unremovable = append(unremovable, &UnremovableNode{Node: node, Reason: UnexpectedError})
continue candidateloop
}
if fastCheck {
podsToRemove, blockingPod, err = FastGetPodsToMove(nodeInfo, *skipNodesWithSystemPods, *skipNodesWithLocalStorage,
podDisruptionBudgets)
} else {
podsToRemove, blockingPod, err = DetailedGetPodsForMove(nodeInfo, *skipNodesWithSystemPods, *skipNodesWithLocalStorage, listers, int32(*minReplicaCount),
podDisruptionBudgets)
}
if err != nil {
klog.V(2).Infof("%s: node %s cannot be removed: %v", evaluationType, node.Name, err)
if blockingPod != nil {
unremovable = append(unremovable, &UnremovableNode{Node: node, Reason: BlockedByPod, BlockingPod: blockingPod})
} else {
unremovable = append(unremovable, &UnremovableNode{Node: node, Reason: UnexpectedError})
}
continue candidateloop
}
findProblems := findPlaceFor(node.Name, podsToRemove, destinationNodes, clusterSnapshot,
predicateChecker, oldHints, newHints, usageTracker, timestamp)
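Since candidates now arrive as NodeInfos with their pods attached, the loop above drops the scheduler_util.CreateNodeNameToInfoMap lookup; the only per-node lookup left is a name-set check that the candidate is also a simulated destination. A stripped-down sketch of that control flow, written as if it lived in the same simulator package; FastGetPodsToMove is called with literal flags for brevity, and the helper name collectRemovablePods plus the silent continue on error are illustrative simplifications (the real loop records an UnremovableNode instead):

// collectRemovablePods shows the reshaped per-candidate flow: a set
// membership test instead of a map lookup, then pod extraction straight
// from the candidate's NodeInfo.
func collectRemovablePods(
	candidates, destinationNodes []*schedulernodeinfo.NodeInfo,
	pdbs []*policyv1.PodDisruptionBudget,
) map[string][]*apiv1.Pod {
	destinations := make(map[string]bool, len(destinationNodes))
	for _, ni := range destinationNodes {
		destinations[ni.Node().Name] = true
	}
	removable := map[string][]*apiv1.Pod{}
	for _, candidate := range candidates {
		name := candidate.Node().Name
		if !destinations[name] {
			continue // not a known destination; the real code marks it unremovable
		}
		// skipNodesWithSystemPods / skipNodesWithLocalStorage hard-coded to true here.
		pods, _, err := FastGetPodsToMove(candidate, true, true, pdbs)
		if err != nil {
			continue // the real code records BlockedByPod / UnexpectedError
		}
		removable[name] = pods
	}
	return removable
}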
@@ -181,19 +195,13 @@ candidateloop:
}
// FindEmptyNodesToRemove finds empty nodes that can be removed.
func FindEmptyNodesToRemove(candidates []*apiv1.Node, pods []*apiv1.Pod) []*apiv1.Node {
nodeNameToNodeInfo := scheduler_util.CreateNodeNameToInfoMap(pods, candidates)
func FindEmptyNodesToRemove(candidates []*schedulernodeinfo.NodeInfo, pods []*apiv1.Pod) []*apiv1.Node {
result := make([]*apiv1.Node, 0)
for _, node := range candidates {
if nodeInfo, found := nodeNameToNodeInfo[node.Name]; found {
// Should block on all pods.
podsToRemove, _, err := FastGetPodsToMove(nodeInfo, true, true, nil)
if err == nil && len(podsToRemove) == 0 {
result = append(result, node)
}
} else {
// Node without pods.
result = append(result, node)
for _, nodeInfo := range candidates {
// Should block on all pods.
podsToRemove, _, err := FastGetPodsToMove(nodeInfo, true, true, nil)
if err == nil && len(podsToRemove) == 0 {
result = append(result, nodeInfo.Node())
}
}
return result
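With each candidate NodeInfo already carrying its pods, the empty-node check above reduces to asking FastGetPodsToMove whether anything would have to move. A hypothetical in-package usage sketch, mirroring the updated TestFindEmptyNodes further down; BuildTestNode and BuildTestPod come from the autoscaler's test utilities and are used unqualified, as in the test file:

// emptyNodesExample: n1 hosts a regular pod, n2 does not, so only n2 is
// reported as removable-because-empty.
func emptyNodesExample() []*apiv1.Node {
	n1 := BuildTestNode("n1", 1000, 2000000)
	n2 := BuildTestNode("n2", 1000, 2000000)

	ni1 := schedulernodeinfo.NewNodeInfo()
	ni1.SetNode(n1)
	ni2 := schedulernodeinfo.NewNodeInfo()
	ni2.SetNode(n2)

	pod := BuildTestPod("p1", 300, 500000)
	pod.Spec.NodeName = "n1"
	ni1.AddPod(pod)

	return FindEmptyNodesToRemove([]*schedulernodeinfo.NodeInfo{ni1, ni2}, []*apiv1.Pod{pod})
}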
@@ -264,7 +272,7 @@ func calculateUtilizationOfResource(node *apiv1.Node, nodeInfo *schedulernodeinf
return float64(podsRequest.MilliValue()) / float64(nodeAllocatable.MilliValue()), nil
}
func findPlaceFor(removedNode string, pods []*apiv1.Pod, nodes []*apiv1.Node,
func findPlaceFor(removedNode string, pods []*apiv1.Pod, nodes []*schedulernodeinfo.NodeInfo,
clusterSnaphost ClusterSnapshot, predicateChecker PredicateChecker, oldHints map[string]string, newHints map[string]string, usageTracker *UsageTracker,
timestamp time.Time) error {
@@ -332,7 +340,8 @@ func findPlaceFor(removedNode string, pods []*apiv1.Pod, nodes []*apiv1.Node,
}
}
if !foundPlace {
for _, node := range shuffledNodes {
for _, nodeInfo := range shuffledNodes {
node := nodeInfo.Node()
if node.Name == removedNode {
continue
}
@@ -353,8 +362,8 @@ func findPlaceFor(removedNode string, pods []*apiv1.Pod, nodes []*apiv1.Node,
return nil
}
func shuffleNodes(nodes []*apiv1.Node) []*apiv1.Node {
result := make([]*apiv1.Node, len(nodes))
func shuffleNodes(nodes []*schedulernodeinfo.NodeInfo) []*schedulernodeinfo.NodeInfo {
result := make([]*schedulernodeinfo.NodeInfo, len(nodes))
copy(result, nodes)
rand.Shuffle(len(result), func(i, j int) {
result[i], result[j] = result[j], result[i]

View File

@@ -100,14 +100,29 @@ func TestUtilization(t *testing.T) {
assert.Zero(t, utilInfo.Utilization)
}
func nodeInfos(nodes []*apiv1.Node) []*schedulernodeinfo.NodeInfo {
result := make([]*schedulernodeinfo.NodeInfo, len(nodes))
for i, node := range nodes {
ni := schedulernodeinfo.NewNodeInfo()
ni.SetNode(node)
result[i] = ni
}
return result
}
func TestFindPlaceAllOk(t *testing.T) {
node1 := BuildTestNode("n1", 1000, 2000000)
SetNodeReadyState(node1, true, time.Time{})
ni1 := schedulernodeinfo.NewNodeInfo()
ni1.SetNode(node1)
node2 := BuildTestNode("n2", 1000, 2000000)
SetNodeReadyState(node2, true, time.Time{})
ni2 := schedulernodeinfo.NewNodeInfo()
ni2.SetNode(node2)
pod1 := BuildTestPod("p1", 300, 500000)
pod1.Spec.NodeName = "n1"
ni1.AddPod(pod1)
new1 := BuildTestPod("p2", 600, 500000)
new2 := BuildTestPod("p3", 500, 500000)
@@ -124,7 +139,7 @@ func TestFindPlaceAllOk(t *testing.T) {
err = findPlaceFor(
"x",
[]*apiv1.Pod{new1, new2},
[]*apiv1.Node{node1, node2},
[]*schedulernodeinfo.NodeInfo{ni1, ni2},
clusterSnapshot,
predicateChecker,
oldHints, newHints, tracker, time.Now())
@@ -137,13 +152,20 @@ func TestFindPlaceAllOk(t *testing.T) {
func TestFindPlaceAllBas(t *testing.T) {
nodebad := BuildTestNode("nbad", 1000, 2000000)
nibad := schedulernodeinfo.NewNodeInfo()
nibad.SetNode(nodebad)
node1 := BuildTestNode("n1", 1000, 2000000)
SetNodeReadyState(node1, true, time.Time{})
ni1 := schedulernodeinfo.NewNodeInfo()
ni1.SetNode(node1)
node2 := BuildTestNode("n2", 1000, 2000000)
SetNodeReadyState(node2, true, time.Time{})
ni2 := schedulernodeinfo.NewNodeInfo()
ni2.SetNode(node2)
pod1 := BuildTestPod("p1", 300, 500000)
pod1.Spec.NodeName = "n1"
ni1.AddPod(pod1)
new1 := BuildTestPod("p2", 600, 500000)
new2 := BuildTestPod("p3", 500, 500000)
new3 := BuildTestPod("p4", 700, 500000)
@@ -162,7 +184,7 @@ func TestFindPlaceAllBas(t *testing.T) {
err = findPlaceFor(
"nbad",
[]*apiv1.Pod{new1, new2, new3},
[]*apiv1.Node{nodebad, node1, node2},
[]*schedulernodeinfo.NodeInfo{nibad, ni1, ni2},
clusterSnapshot, predicateChecker,
oldHints, newHints, tracker, time.Now())
@@ -175,11 +197,16 @@ func TestFindPlaceAllBas(t *testing.T) {
func TestFindNone(t *testing.T) {
node1 := BuildTestNode("n1", 1000, 2000000)
SetNodeReadyState(node1, true, time.Time{})
ni1 := schedulernodeinfo.NewNodeInfo()
ni1.SetNode(node1)
node2 := BuildTestNode("n2", 1000, 2000000)
SetNodeReadyState(node2, true, time.Time{})
ni2 := schedulernodeinfo.NewNodeInfo()
ni2.SetNode(node2)
pod1 := BuildTestPod("p1", 300, 500000)
pod1.Spec.NodeName = "n1"
ni1.AddPod(pod1)
clusterSnapshot := NewBasicClusterSnapshot()
predicateChecker, err := NewTestPredicateChecker()
@@ -191,7 +218,7 @@ func TestFindNone(t *testing.T) {
err = findPlaceFor(
"x",
[]*apiv1.Pod{},
[]*apiv1.Node{node1, node2},
[]*schedulernodeinfo.NodeInfo{ni1, ni2},
clusterSnapshot, predicateChecker,
make(map[string]string),
make(map[string]string),
@@ -204,11 +231,18 @@ func TestShuffleNodes(t *testing.T) {
nodes := []*apiv1.Node{
BuildTestNode("n1", 0, 0),
BuildTestNode("n2", 0, 0),
BuildTestNode("n3", 0, 0)}
BuildTestNode("n3", 0, 0),
}
nodeInfos := []*schedulernodeinfo.NodeInfo{}
for _, node := range nodes {
ni := schedulernodeinfo.NewNodeInfo()
ni.SetNode(node)
nodeInfos = append(nodeInfos, ni)
}
gotPermutation := false
for i := 0; i < 10000; i++ {
shuffled := shuffleNodes(nodes)
if shuffled[0].Name == "n2" && shuffled[1].Name == "n3" && shuffled[2].Name == "n1" {
shuffled := shuffleNodes(nodeInfos)
if shuffled[0].Node().Name == "n2" && shuffled[1].Node().Name == "n3" && shuffled[2].Node().Name == "n1" {
gotPermutation = true
break
}
@@ -217,48 +251,58 @@ func TestShuffleNodes(t *testing.T) {
}
func TestFindEmptyNodes(t *testing.T) {
nodes := []*schedulernodeinfo.NodeInfo{}
for i := 0; i < 4; i++ {
nodeName := fmt.Sprintf("n%d", i)
node := BuildTestNode(nodeName, 1000, 2000000)
SetNodeReadyState(node, true, time.Time{})
nodeInfo := schedulernodeinfo.NewNodeInfo()
nodeInfo.SetNode(node)
nodes = append(nodes, nodeInfo)
}
pod1 := BuildTestPod("p1", 300, 500000)
pod1.Spec.NodeName = "n1"
nodes[1].AddPod(pod1)
pod2 := BuildTestPod("p2", 300, 500000)
pod2.Spec.NodeName = "n2"
nodes[2].AddPod(pod2)
pod2.Annotations = map[string]string{
types.ConfigMirrorAnnotationKey: "",
}
node1 := BuildTestNode("n1", 1000, 2000000)
node2 := BuildTestNode("n2", 1000, 2000000)
node3 := BuildTestNode("n3", 1000, 2000000)
node4 := BuildTestNode("n4", 1000, 2000000)
SetNodeReadyState(node1, true, time.Time{})
SetNodeReadyState(node2, true, time.Time{})
SetNodeReadyState(node3, true, time.Time{})
SetNodeReadyState(node4, true, time.Time{})
emptyNodes := FindEmptyNodesToRemove([]*apiv1.Node{node1, node2, node3, node4}, []*apiv1.Pod{pod1, pod2})
assert.Equal(t, []*apiv1.Node{node2, node3, node4}, emptyNodes)
emptyNodes := FindEmptyNodesToRemove(nodes, []*apiv1.Pod{pod1, pod2})
assert.Equal(t, []*apiv1.Node{nodes[0].Node(), nodes[2].Node(), nodes[3].Node()}, emptyNodes)
}
type findNodesToRemoveTestConfig struct {
name string
pods []*apiv1.Pod
candidates []*apiv1.Node
allNodes []*apiv1.Node
candidates []*schedulernodeinfo.NodeInfo
allNodes []*schedulernodeinfo.NodeInfo
toRemove []NodeToBeRemoved
unremovable []*UnremovableNode
}
func TestFindNodesToRemove(t *testing.T) {
emptyNode := BuildTestNode("n1", 1000, 2000000)
emptyNodeInfo := schedulernodeinfo.NewNodeInfo()
emptyNodeInfo.SetNode(emptyNode)
// two small pods backed by ReplicaSet
drainableNode := BuildTestNode("n2", 1000, 2000000)
drainableNodeInfo := schedulernodeinfo.NewNodeInfo()
drainableNodeInfo.SetNode(drainableNode)
// one small pod, not backed by anything
nonDrainableNode := BuildTestNode("n3", 1000, 2000000)
nonDrainableNodeInfo := schedulernodeinfo.NewNodeInfo()
nonDrainableNodeInfo.SetNode(nonDrainableNode)
// one very large pod
fullNode := BuildTestNode("n4", 1000, 2000000)
fullNodeInfo := schedulernodeinfo.NewNodeInfo()
fullNodeInfo.SetNode(fullNode)
SetNodeReadyState(emptyNode, true, time.Time{})
SetNodeReadyState(drainableNode, true, time.Time{})
@@ -270,13 +314,20 @@ func TestFindNodesToRemove(t *testing.T) {
pod1 := BuildTestPod("p1", 100, 100000)
pod1.OwnerReferences = ownerRefs
pod1.Spec.NodeName = "n2"
drainableNodeInfo.AddPod(pod1)
pod2 := BuildTestPod("p2", 100, 100000)
pod2.OwnerReferences = ownerRefs
pod2.Spec.NodeName = "n2"
drainableNodeInfo.AddPod(pod2)
pod3 := BuildTestPod("p3", 100, 100000)
pod3.Spec.NodeName = "n3"
nonDrainableNodeInfo.AddPod(pod3)
pod4 := BuildTestPod("p4", 1000, 100000)
pod4.Spec.NodeName = "n4"
fullNodeInfo.AddPod(pod4)
emptyNodeToRemove := NodeToBeRemoved{
Node: emptyNode,
@@ -297,8 +348,8 @@ func TestFindNodesToRemove(t *testing.T) {
{
name: "just an empty node, should be removed",
pods: []*apiv1.Pod{},
candidates: []*apiv1.Node{emptyNode},
allNodes: []*apiv1.Node{emptyNode},
candidates: []*schedulernodeinfo.NodeInfo{emptyNodeInfo},
allNodes: []*schedulernodeinfo.NodeInfo{emptyNodeInfo},
toRemove: []NodeToBeRemoved{emptyNodeToRemove},
unremovable: []*UnremovableNode{},
},
@@ -306,8 +357,8 @@ func TestFindNodesToRemove(t *testing.T) {
{
name: "just a drainable node, but nowhere for pods to go to",
pods: []*apiv1.Pod{pod1, pod2},
candidates: []*apiv1.Node{drainableNode},
allNodes: []*apiv1.Node{drainableNode},
candidates: []*schedulernodeinfo.NodeInfo{drainableNodeInfo},
allNodes: []*schedulernodeinfo.NodeInfo{drainableNodeInfo},
toRemove: []NodeToBeRemoved{},
unremovable: []*UnremovableNode{{Node: drainableNode, Reason: NoPlaceToMovePods}},
},
@@ -315,8 +366,8 @@ func TestFindNodesToRemove(t *testing.T) {
{
name: "drainable node, and a mostly empty node that can take its pods",
pods: []*apiv1.Pod{pod1, pod2, pod3},
candidates: []*apiv1.Node{drainableNode, nonDrainableNode},
allNodes: []*apiv1.Node{drainableNode, nonDrainableNode},
candidates: []*schedulernodeinfo.NodeInfo{drainableNodeInfo, nonDrainableNodeInfo},
allNodes: []*schedulernodeinfo.NodeInfo{drainableNodeInfo, nonDrainableNodeInfo},
toRemove: []NodeToBeRemoved{drainableNodeToRemove},
unremovable: []*UnremovableNode{{Node: nonDrainableNode, Reason: BlockedByPod, BlockingPod: &drain.BlockingPod{Pod: pod3, Reason: drain.NotReplicated}}},
},
@@ -324,8 +375,8 @@ func TestFindNodesToRemove(t *testing.T) {
{
name: "drainable node, and a full node that cannot fit anymore pods",
pods: []*apiv1.Pod{pod1, pod2, pod4},
candidates: []*apiv1.Node{drainableNode},
allNodes: []*apiv1.Node{drainableNode, fullNode},
candidates: []*schedulernodeinfo.NodeInfo{drainableNodeInfo},
allNodes: []*schedulernodeinfo.NodeInfo{drainableNodeInfo, fullNodeInfo},
toRemove: []NodeToBeRemoved{},
unremovable: []*UnremovableNode{{Node: drainableNode, Reason: NoPlaceToMovePods}},
},
@@ -333,22 +384,28 @@ func TestFindNodesToRemove(t *testing.T) {
{
name: "4 nodes, 1 empty, 1 drainable",
pods: []*apiv1.Pod{pod1, pod2, pod3, pod4},
candidates: []*apiv1.Node{emptyNode, drainableNode},
allNodes: []*apiv1.Node{emptyNode, drainableNode, fullNode, nonDrainableNode},
candidates: []*schedulernodeinfo.NodeInfo{emptyNodeInfo, drainableNodeInfo},
allNodes: []*schedulernodeinfo.NodeInfo{emptyNodeInfo, drainableNodeInfo, fullNodeInfo, nonDrainableNodeInfo},
toRemove: []NodeToBeRemoved{emptyNodeToRemove, drainableNodeToRemove},
unremovable: []*UnremovableNode{},
},
}
for _, test := range tests {
InitializeClusterSnapshotOrDie(t, clusterSnapshot, test.allNodes, test.pods)
toRemove, unremovable, _, err := FindNodesToRemove(
test.candidates, test.allNodes, test.pods, nil,
clusterSnapshot, predicateChecker, len(test.allNodes), true, map[string]string{},
tracker, time.Now(), []*policyv1.PodDisruptionBudget{})
assert.NoError(t, err)
fmt.Printf("Test scenario: %s, found len(toRemove)=%v, expected len(test.toRemove)=%v\n", test.name, len(toRemove), len(test.toRemove))
assert.Equal(t, toRemove, test.toRemove)
assert.Equal(t, unremovable, test.unremovable)
t.Run(test.name, func(t *testing.T) {
allNodesForSnapshot := []*apiv1.Node{}
for _, node := range test.allNodes {
allNodesForSnapshot = append(allNodesForSnapshot, node.Node())
}
InitializeClusterSnapshotOrDie(t, clusterSnapshot, allNodesForSnapshot, test.pods)
toRemove, unremovable, _, err := FindNodesToRemove(
test.candidates, test.allNodes, test.pods, nil,
clusterSnapshot, predicateChecker, len(test.allNodes), true, map[string]string{},
tracker, time.Now(), []*policyv1.PodDisruptionBudget{})
assert.NoError(t, err)
fmt.Printf("Test scenario: %s, found len(toRemove)=%v, expected len(test.toRemove)=%v\n", test.name, len(toRemove), len(test.toRemove))
assert.Equal(t, toRemove, test.toRemove)
assert.Equal(t, unremovable, test.unremovable)
})
}
}
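A small design note on the test changes: each test builds its NodeInfos by hand with NewNodeInfo/SetNode/AddPod, even though this commit also introduces the nodeInfos() helper at the top of the test file. A hypothetical condensed setup using that helper, with BuildTestNode and BuildTestPod from the test utilities:

// exampleSetup shows how the repeated NewNodeInfo/SetNode boilerplate in the
// tests above could be collapsed onto the nodeInfos() helper.
func exampleSetup() []*schedulernodeinfo.NodeInfo {
	node1 := BuildTestNode("n1", 1000, 2000000)
	node2 := BuildTestNode("n2", 1000, 2000000)
	infos := nodeInfos([]*apiv1.Node{node1, node2})

	pod1 := BuildTestPod("p1", 300, 500000)
	pod1.Spec.NodeName = "n1"
	infos[0].AddPod(pod1) // attach p1 to n1, as the tests above do by hand

	return infos
}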