pass node infos to simulator
parent ef9d895463
commit d11b39603d
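This commit switches the simulator's entry points (FindNodesToRemove, FindEmptyNodesToRemove, findPlaceFor, shuffleNodes) from bare []*apiv1.Node slices to []*schedulernodeinfo.NodeInfo, so scale_down.go passes node infos straight through and the simulator no longer rebuilds a node-name-to-NodeInfo map internally. As a rough sketch of the caller side, the wrapping looks like the nodeInfos test helper added further down in this diff; the package name and import paths below are assumptions for illustration, not part of the change:

    // Sketch only: wrap *apiv1.Node values into scheduler NodeInfo objects before
    // handing them to the reworked simulator API. Mirrors the nodeInfos helper
    // added to the simulator tests in this commit.
    package example

    import (
        apiv1 "k8s.io/api/core/v1"
        schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
    )

    func nodeInfos(nodes []*apiv1.Node) []*schedulernodeinfo.NodeInfo {
        result := make([]*schedulernodeinfo.NodeInfo, len(nodes))
        for i, node := range nodes {
            ni := schedulernodeinfo.NewNodeInfo()
            ni.SetNode(node) // SetNode returns an error; the test helper ignores it as well
            result[i] = ni   // pods can be attached with ni.AddPod(pod), as the tests do
        }
        return result
    }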
@@ -436,7 +436,17 @@ func (sd *ScaleDown) UpdateUnneededNodes(
         }
         candidateNames[candidate.Name] = true
     }
+    destinationNames := make(map[string]bool, len(destinationNodes))
+    for _, destination := range destinationNodes {
+        if destination == nil {
+            // Do we need to check this?
+            klog.Errorf("Unexpected nil node in node info")
+            continue
+        }
+        destinationNames[destination.Name] = true
+    }
     candidateNodeInfos := make([]*schedulernodeinfo.NodeInfo, 0, len(candidateNames))
+    destinationNodeInfos := make([]*schedulernodeinfo.NodeInfo, 0, len(destinationNames))
     for _, nodeInfo := range allNodeInfos {
         if nodeInfo.Node() == nil {
             // Do we need to check this?
@@ -446,6 +456,9 @@ func (sd *ScaleDown) UpdateUnneededNodes(
         if candidateNames[nodeInfo.Node().Name] {
             candidateNodeInfos = append(candidateNodeInfos, nodeInfo)
         }
+        if destinationNames[nodeInfo.Node().Name] {
+            destinationNodeInfos = append(destinationNodeInfos, nodeInfo)
+        }
     }

     sd.updateUnremovableNodes(allNodeInfos)
@@ -523,7 +536,7 @@ func (sd *ScaleDown) UpdateUnneededNodes(
     // Look for nodes to remove in the current candidates
     nodesToRemove, unremovable, newHints, simulatorErr := simulator.FindNodesToRemove(
         currentCandidates,
-        destinationNodes,
+        destinationNodeInfos,
         pods,
         nil,
         sd.context.ClusterSnapshot,
@@ -556,7 +569,7 @@ func (sd *ScaleDown) UpdateUnneededNodes(
     additionalNodesToRemove, additionalUnremovable, additionalNewHints, simulatorErr :=
         simulator.FindNodesToRemove(
             currentNonCandidates[:additionalCandidatesPoolSize],
-            destinationNodes,
+            destinationNodeInfos,
             pods,
             nil,
             sd.context.ClusterSnapshot,
@@ -687,24 +700,20 @@ func (sd *ScaleDown) markSimulationError(simulatorErr errors.AutoscalerError,
 // chooseCandidates splits nodes into current candidates for scale-down and the
 // rest. Current candidates are unneeded nodes from the previous run that are
 // still in the nodes list.
-func (sd *ScaleDown) chooseCandidates(nodes []*schedulernodeinfo.NodeInfo) ([]*apiv1.Node, []*apiv1.Node) {
+func (sd *ScaleDown) chooseCandidates(nodes []*schedulernodeinfo.NodeInfo) ([]*schedulernodeinfo.NodeInfo, []*schedulernodeinfo.NodeInfo) {
     // Number of candidates should not be capped. We will look for nodes to remove
     // from the whole set of nodes.
     if sd.context.ScaleDownNonEmptyCandidatesCount <= 0 {
-        candidates := make([]*apiv1.Node, len(nodes))
-        for i, node := range nodes {
-            candidates[i] = node.Node()
-        }
-        return candidates, []*apiv1.Node{}
+        return nodes, nil
     }
-    currentCandidates := make([]*apiv1.Node, 0, len(sd.unneededNodesList))
-    currentNonCandidates := make([]*apiv1.Node, 0, len(nodes))
+    currentCandidates := make([]*schedulernodeinfo.NodeInfo, 0, len(sd.unneededNodesList))
+    currentNonCandidates := make([]*schedulernodeinfo.NodeInfo, 0, len(nodes))
     for _, nodeInfo := range nodes {
         node := nodeInfo.Node()
         if _, found := sd.unneededNodes[node.Name]; found {
-            currentCandidates = append(currentCandidates, node)
+            currentCandidates = append(currentCandidates, nodeInfo)
         } else {
-            currentNonCandidates = append(currentNonCandidates, node)
+            currentNonCandidates = append(currentNonCandidates, nodeInfo)
         }
     }
     return currentCandidates, currentNonCandidates
@@ -786,12 +795,8 @@ func (sd *ScaleDown) TryToScaleDown(
     }

     nodesWithoutMaster := filterOutMasters(allNodeInfos, pods)
-    nodes := make([]*apiv1.Node, len(nodesWithoutMaster))
-    for i, nodeInfo := range nodesWithoutMaster {
-        nodes[i] = nodeInfo.Node()
-    }

-    candidates := make([]*apiv1.Node, 0)
+    candidates := make([]*schedulernodeinfo.NodeInfo, 0)
     readinessMap := make(map[string]bool)
     candidateNodeGroups := make(map[string]cloudprovider.NodeGroup)
     gpuLabel := sd.context.CloudProvider.GPULabel()
@@ -880,9 +885,8 @@ func (sd *ScaleDown) TryToScaleDown(
             continue
         }

-        candidates = append(candidates, node)
+        candidates = append(candidates, nodeInfo)
         candidateNodeGroups[node.Name] = nodeGroup
-
     }

     if len(candidates) == 0 {
@@ -918,7 +922,7 @@ func (sd *ScaleDown) TryToScaleDown(
     // We look for only 1 node so new hints may be incomplete.
     nodesToRemove, unremovable, _, err := simulator.FindNodesToRemove(
         candidates,
-        nodes,
+        nodesWithoutMaster,
         pods,
         sd.context.ListerRegistry,
         sd.context.ClusterSnapshot,
@@ -1002,16 +1006,12 @@ func updateScaleDownMetrics(scaleDownStart time.Time, findNodesToRemoveDuration
 }

 func (sd *ScaleDown) getEmptyNodesNoResourceLimits(candidates []*schedulernodeinfo.NodeInfo, pods []*apiv1.Pod, maxEmptyBulkDelete int) []*apiv1.Node {
-    nodes := make([]*apiv1.Node, 0, len(candidates))
-    for _, nodeInfo := range candidates {
-        nodes = append(nodes, nodeInfo.Node())
-    }
-    return sd.getEmptyNodes(nodes, pods, maxEmptyBulkDelete, noScaleDownLimitsOnResources())
+    return sd.getEmptyNodes(candidates, pods, maxEmptyBulkDelete, noScaleDownLimitsOnResources())
 }

 // This functions finds empty nodes among passed candidates and returns a list of empty nodes
 // that can be deleted at the same time.
-func (sd *ScaleDown) getEmptyNodes(candidates []*apiv1.Node, pods []*apiv1.Pod, maxEmptyBulkDelete int,
+func (sd *ScaleDown) getEmptyNodes(candidates []*schedulernodeinfo.NodeInfo, pods []*apiv1.Pod, maxEmptyBulkDelete int,
     resourcesLimits scaleDownResourcesLimits) []*apiv1.Node {

     emptyNodes := simulator.FindEmptyNodesToRemove(candidates, pods)
@@ -28,7 +28,6 @@ import (
     "k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
     kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
     pod_util "k8s.io/autoscaler/cluster-autoscaler/utils/pod"
-    scheduler_util "k8s.io/autoscaler/cluster-autoscaler/utils/scheduler"
     "k8s.io/autoscaler/cluster-autoscaler/utils/tpu"

     apiv1 "k8s.io/api/core/v1"
@@ -113,14 +112,26 @@ type UtilizationInfo struct {

 // FindNodesToRemove finds nodes that can be removed. Returns also an information about good
 // rescheduling location for each of the pods.
-func FindNodesToRemove(candidates []*apiv1.Node, destinationNodes []*apiv1.Node, pods []*apiv1.Pod,
-    listers kube_util.ListerRegistry, clusterSnapshot ClusterSnapshot, predicateChecker PredicateChecker, maxCount int,
-    fastCheck bool, oldHints map[string]string, usageTracker *UsageTracker,
+func FindNodesToRemove(
+    candidates []*schedulernodeinfo.NodeInfo,
+    destinationNodes []*schedulernodeinfo.NodeInfo,
+    pods []*apiv1.Pod,
+    listers kube_util.ListerRegistry,
+    clusterSnapshot ClusterSnapshot,
+    predicateChecker PredicateChecker,
+    maxCount int,
+    fastCheck bool,
+    oldHints map[string]string,
+    usageTracker *UsageTracker,
     timestamp time.Time,
     podDisruptionBudgets []*policyv1.PodDisruptionBudget,
 ) (nodesToRemove []NodeToBeRemoved, unremovableNodes []*UnremovableNode, podReschedulingHints map[string]string, finalError errors.AutoscalerError) {

-    nodeNameToNodeInfo := scheduler_util.CreateNodeNameToInfoMap(pods, destinationNodes)
+    destinations := make(map[string]bool, len(destinationNodes))
+    for _, node := range destinationNodes {
+        destinations[node.Node().Name] = true
+    }
+
     result := make([]NodeToBeRemoved, 0)
     unremovable := make([]*UnremovableNode, 0)

@@ -131,14 +142,20 @@ func FindNodesToRemove(candidates []*apiv1.Node, destinationNodes []*apiv1.Node,
     newHints := make(map[string]string, len(oldHints))

 candidateloop:
-    for _, node := range candidates {
+    for _, nodeInfo := range candidates {
+        node := nodeInfo.Node()
         klog.V(2).Infof("%s: %s for removal", evaluationType, node.Name)

         var podsToRemove []*apiv1.Pod
         var blockingPod *drain.BlockingPod
         var err error

-        if nodeInfo, found := nodeNameToNodeInfo[node.Name]; found {
+        if _, found := destinations[node.Name]; !found {
+            klog.V(2).Infof("%s: nodeInfo for %s not found", evaluationType, node.Name)
+            unremovable = append(unremovable, &UnremovableNode{Node: node, Reason: UnexpectedError})
+            continue candidateloop
+        }
+
         if fastCheck {
             podsToRemove, blockingPod, err = FastGetPodsToMove(nodeInfo, *skipNodesWithSystemPods, *skipNodesWithLocalStorage,
                 podDisruptionBudgets)
@@ -146,6 +163,7 @@ candidateloop:
             podsToRemove, blockingPod, err = DetailedGetPodsForMove(nodeInfo, *skipNodesWithSystemPods, *skipNodesWithLocalStorage, listers, int32(*minReplicaCount),
                 podDisruptionBudgets)
         }
+
         if err != nil {
             klog.V(2).Infof("%s: node %s cannot be removed: %v", evaluationType, node.Name, err)
             if blockingPod != nil {
@@ -155,11 +173,7 @@
             }
             continue candidateloop
         }
-        } else {
-            klog.V(2).Infof("%s: nodeInfo for %s not found", evaluationType, node.Name)
-            unremovable = append(unremovable, &UnremovableNode{Node: node, Reason: UnexpectedError})
-            continue candidateloop
-        }
+
         findProblems := findPlaceFor(node.Name, podsToRemove, destinationNodes, clusterSnapshot,
             predicateChecker, oldHints, newHints, usageTracker, timestamp)

@@ -181,19 +195,13 @@
 }

 // FindEmptyNodesToRemove finds empty nodes that can be removed.
-func FindEmptyNodesToRemove(candidates []*apiv1.Node, pods []*apiv1.Pod) []*apiv1.Node {
-    nodeNameToNodeInfo := scheduler_util.CreateNodeNameToInfoMap(pods, candidates)
+func FindEmptyNodesToRemove(candidates []*schedulernodeinfo.NodeInfo, pods []*apiv1.Pod) []*apiv1.Node {
     result := make([]*apiv1.Node, 0)
-    for _, node := range candidates {
-        if nodeInfo, found := nodeNameToNodeInfo[node.Name]; found {
+    for _, nodeInfo := range candidates {
         // Should block on all pods.
         podsToRemove, _, err := FastGetPodsToMove(nodeInfo, true, true, nil)
         if err == nil && len(podsToRemove) == 0 {
-            result = append(result, node)
-        }
-        } else {
-            // Node without pods.
-            result = append(result, node)
+            result = append(result, nodeInfo.Node())
         }
     }
     return result
@@ -264,7 +272,7 @@ func calculateUtilizationOfResource(node *apiv1.Node, nodeInfo *schedulernodeinf
     return float64(podsRequest.MilliValue()) / float64(nodeAllocatable.MilliValue()), nil
 }

-func findPlaceFor(removedNode string, pods []*apiv1.Pod, nodes []*apiv1.Node,
+func findPlaceFor(removedNode string, pods []*apiv1.Pod, nodes []*schedulernodeinfo.NodeInfo,
     clusterSnaphost ClusterSnapshot, predicateChecker PredicateChecker, oldHints map[string]string, newHints map[string]string, usageTracker *UsageTracker,
     timestamp time.Time) error {

@@ -332,7 +340,8 @@ func findPlaceFor(removedNode string, pods []*apiv1.Pod, nodes []*apiv1.Node,
             }
         }
         if !foundPlace {
-            for _, node := range shuffledNodes {
+            for _, nodeInfo := range shuffledNodes {
+                node := nodeInfo.Node()
                 if node.Name == removedNode {
                     continue
                 }
@@ -353,8 +362,8 @@ func findPlaceFor(removedNode string, pods []*apiv1.Pod, nodes []*apiv1.Node,
     return nil
 }

-func shuffleNodes(nodes []*apiv1.Node) []*apiv1.Node {
-    result := make([]*apiv1.Node, len(nodes))
+func shuffleNodes(nodes []*schedulernodeinfo.NodeInfo) []*schedulernodeinfo.NodeInfo {
+    result := make([]*schedulernodeinfo.NodeInfo, len(nodes))
     copy(result, nodes)
     rand.Shuffle(len(result), func(i, j int) {
         result[i], result[j] = result[j], result[i]
@@ -100,14 +100,29 @@ func TestUtilization(t *testing.T) {
     assert.Zero(t, utilInfo.Utilization)
 }

+func nodeInfos(nodes []*apiv1.Node) []*schedulernodeinfo.NodeInfo {
+    result := make([]*schedulernodeinfo.NodeInfo, len(nodes))
+    for i, node := range nodes {
+        ni := schedulernodeinfo.NewNodeInfo()
+        ni.SetNode(node)
+        result[i] = ni
+    }
+    return result
+}
+
 func TestFindPlaceAllOk(t *testing.T) {
     node1 := BuildTestNode("n1", 1000, 2000000)
     SetNodeReadyState(node1, true, time.Time{})
+    ni1 := schedulernodeinfo.NewNodeInfo()
+    ni1.SetNode(node1)
     node2 := BuildTestNode("n2", 1000, 2000000)
     SetNodeReadyState(node2, true, time.Time{})
+    ni2 := schedulernodeinfo.NewNodeInfo()
+    ni2.SetNode(node2)

     pod1 := BuildTestPod("p1", 300, 500000)
     pod1.Spec.NodeName = "n1"
+    ni1.AddPod(pod1)
     new1 := BuildTestPod("p2", 600, 500000)
     new2 := BuildTestPod("p3", 500, 500000)

@@ -124,7 +139,7 @@ func TestFindPlaceAllOk(t *testing.T) {
     err = findPlaceFor(
         "x",
         []*apiv1.Pod{new1, new2},
-        []*apiv1.Node{node1, node2},
+        []*schedulernodeinfo.NodeInfo{ni1, ni2},
         clusterSnapshot,
         predicateChecker,
         oldHints, newHints, tracker, time.Now())
@@ -137,13 +152,20 @@

 func TestFindPlaceAllBas(t *testing.T) {
     nodebad := BuildTestNode("nbad", 1000, 2000000)
+    nibad := schedulernodeinfo.NewNodeInfo()
+    nibad.SetNode(nodebad)
     node1 := BuildTestNode("n1", 1000, 2000000)
     SetNodeReadyState(node1, true, time.Time{})
+    ni1 := schedulernodeinfo.NewNodeInfo()
+    ni1.SetNode(node1)
     node2 := BuildTestNode("n2", 1000, 2000000)
     SetNodeReadyState(node2, true, time.Time{})
+    ni2 := schedulernodeinfo.NewNodeInfo()
+    ni2.SetNode(node2)

     pod1 := BuildTestPod("p1", 300, 500000)
     pod1.Spec.NodeName = "n1"
+    ni1.AddPod(pod1)
     new1 := BuildTestPod("p2", 600, 500000)
     new2 := BuildTestPod("p3", 500, 500000)
     new3 := BuildTestPod("p4", 700, 500000)
@@ -162,7 +184,7 @@ func TestFindPlaceAllBas(t *testing.T) {
     err = findPlaceFor(
         "nbad",
         []*apiv1.Pod{new1, new2, new3},
-        []*apiv1.Node{nodebad, node1, node2},
+        []*schedulernodeinfo.NodeInfo{nibad, ni1, ni2},
         clusterSnapshot, predicateChecker,
         oldHints, newHints, tracker, time.Now())

@@ -175,11 +197,16 @@
 func TestFindNone(t *testing.T) {
     node1 := BuildTestNode("n1", 1000, 2000000)
     SetNodeReadyState(node1, true, time.Time{})
+    ni1 := schedulernodeinfo.NewNodeInfo()
+    ni1.SetNode(node1)
     node2 := BuildTestNode("n2", 1000, 2000000)
     SetNodeReadyState(node2, true, time.Time{})
+    ni2 := schedulernodeinfo.NewNodeInfo()
+    ni2.SetNode(node2)

     pod1 := BuildTestPod("p1", 300, 500000)
     pod1.Spec.NodeName = "n1"
+    ni1.AddPod(pod1)

     clusterSnapshot := NewBasicClusterSnapshot()
     predicateChecker, err := NewTestPredicateChecker()
@@ -191,7 +218,7 @@ func TestFindNone(t *testing.T) {
     err = findPlaceFor(
         "x",
         []*apiv1.Pod{},
-        []*apiv1.Node{node1, node2},
+        []*schedulernodeinfo.NodeInfo{ni1, ni2},
         clusterSnapshot, predicateChecker,
         make(map[string]string),
         make(map[string]string),
@@ -204,11 +231,18 @@ func TestShuffleNodes(t *testing.T) {
     nodes := []*apiv1.Node{
         BuildTestNode("n1", 0, 0),
         BuildTestNode("n2", 0, 0),
-        BuildTestNode("n3", 0, 0)}
+        BuildTestNode("n3", 0, 0),
+    }
+    nodeInfos := []*schedulernodeinfo.NodeInfo{}
+    for _, node := range nodes {
+        ni := schedulernodeinfo.NewNodeInfo()
+        ni.SetNode(node)
+        nodeInfos = append(nodeInfos, ni)
+    }
     gotPermutation := false
     for i := 0; i < 10000; i++ {
-        shuffled := shuffleNodes(nodes)
-        if shuffled[0].Name == "n2" && shuffled[1].Name == "n3" && shuffled[2].Name == "n1" {
+        shuffled := shuffleNodes(nodeInfos)
+        if shuffled[0].Node().Name == "n2" && shuffled[1].Node().Name == "n3" && shuffled[2].Node().Name == "n1" {
             gotPermutation = true
             break
         }
@@ -217,48 +251,58 @@ func TestShuffleNodes(t *testing.T) {
 }

 func TestFindEmptyNodes(t *testing.T) {
+    nodes := []*schedulernodeinfo.NodeInfo{}
+    for i := 0; i < 4; i++ {
+        nodeName := fmt.Sprintf("n%d", i)
+        node := BuildTestNode(nodeName, 1000, 2000000)
+        SetNodeReadyState(node, true, time.Time{})
+        nodeInfo := schedulernodeinfo.NewNodeInfo()
+        nodeInfo.SetNode(node)
+        nodes = append(nodes, nodeInfo)
+    }
+
     pod1 := BuildTestPod("p1", 300, 500000)
     pod1.Spec.NodeName = "n1"
+    nodes[1].AddPod(pod1)
     pod2 := BuildTestPod("p2", 300, 500000)
     pod2.Spec.NodeName = "n2"
+    nodes[2].AddPod(pod2)
     pod2.Annotations = map[string]string{
         types.ConfigMirrorAnnotationKey: "",
     }

-    node1 := BuildTestNode("n1", 1000, 2000000)
-    node2 := BuildTestNode("n2", 1000, 2000000)
-    node3 := BuildTestNode("n3", 1000, 2000000)
-    node4 := BuildTestNode("n4", 1000, 2000000)
-
-    SetNodeReadyState(node1, true, time.Time{})
-    SetNodeReadyState(node2, true, time.Time{})
-    SetNodeReadyState(node3, true, time.Time{})
-    SetNodeReadyState(node4, true, time.Time{})
-
-    emptyNodes := FindEmptyNodesToRemove([]*apiv1.Node{node1, node2, node3, node4}, []*apiv1.Pod{pod1, pod2})
-    assert.Equal(t, []*apiv1.Node{node2, node3, node4}, emptyNodes)
+    emptyNodes := FindEmptyNodesToRemove(nodes, []*apiv1.Pod{pod1, pod2})
+    assert.Equal(t, []*apiv1.Node{nodes[0].Node(), nodes[2].Node(), nodes[3].Node()}, emptyNodes)
 }

 type findNodesToRemoveTestConfig struct {
     name string
     pods []*apiv1.Pod
-    candidates []*apiv1.Node
-    allNodes []*apiv1.Node
+    candidates []*schedulernodeinfo.NodeInfo
+    allNodes []*schedulernodeinfo.NodeInfo
     toRemove []NodeToBeRemoved
     unremovable []*UnremovableNode
 }

 func TestFindNodesToRemove(t *testing.T) {
     emptyNode := BuildTestNode("n1", 1000, 2000000)
+    emptyNodeInfo := schedulernodeinfo.NewNodeInfo()
+    emptyNodeInfo.SetNode(emptyNode)

     // two small pods backed by ReplicaSet
     drainableNode := BuildTestNode("n2", 1000, 2000000)
+    drainableNodeInfo := schedulernodeinfo.NewNodeInfo()
+    drainableNodeInfo.SetNode(drainableNode)

     // one small pod, not backed by anything
     nonDrainableNode := BuildTestNode("n3", 1000, 2000000)
+    nonDrainableNodeInfo := schedulernodeinfo.NewNodeInfo()
+    nonDrainableNodeInfo.SetNode(nonDrainableNode)

     // one very large pod
     fullNode := BuildTestNode("n4", 1000, 2000000)
+    fullNodeInfo := schedulernodeinfo.NewNodeInfo()
+    fullNodeInfo.SetNode(fullNode)

     SetNodeReadyState(emptyNode, true, time.Time{})
     SetNodeReadyState(drainableNode, true, time.Time{})
@@ -270,13 +314,20 @@ func TestFindNodesToRemove(t *testing.T) {
     pod1 := BuildTestPod("p1", 100, 100000)
     pod1.OwnerReferences = ownerRefs
     pod1.Spec.NodeName = "n2"
+    drainableNodeInfo.AddPod(pod1)
+
     pod2 := BuildTestPod("p2", 100, 100000)
     pod2.OwnerReferences = ownerRefs
     pod2.Spec.NodeName = "n2"
+    drainableNodeInfo.AddPod(pod2)
+
     pod3 := BuildTestPod("p3", 100, 100000)
     pod3.Spec.NodeName = "n3"
+    nonDrainableNodeInfo.AddPod(pod3)
+
     pod4 := BuildTestPod("p4", 1000, 100000)
     pod4.Spec.NodeName = "n4"
+    fullNodeInfo.AddPod(pod4)

     emptyNodeToRemove := NodeToBeRemoved{
         Node: emptyNode,
@@ -297,8 +348,8 @@ func TestFindNodesToRemove(t *testing.T) {
         {
             name: "just an empty node, should be removed",
             pods: []*apiv1.Pod{},
-            candidates: []*apiv1.Node{emptyNode},
-            allNodes: []*apiv1.Node{emptyNode},
+            candidates: []*schedulernodeinfo.NodeInfo{emptyNodeInfo},
+            allNodes: []*schedulernodeinfo.NodeInfo{emptyNodeInfo},
            toRemove: []NodeToBeRemoved{emptyNodeToRemove},
            unremovable: []*UnremovableNode{},
         },
@@ -306,8 +357,8 @@ func TestFindNodesToRemove(t *testing.T) {
         {
             name: "just a drainable node, but nowhere for pods to go to",
             pods: []*apiv1.Pod{pod1, pod2},
-            candidates: []*apiv1.Node{drainableNode},
-            allNodes: []*apiv1.Node{drainableNode},
+            candidates: []*schedulernodeinfo.NodeInfo{drainableNodeInfo},
+            allNodes: []*schedulernodeinfo.NodeInfo{drainableNodeInfo},
            toRemove: []NodeToBeRemoved{},
            unremovable: []*UnremovableNode{{Node: drainableNode, Reason: NoPlaceToMovePods}},
         },
@@ -315,8 +366,8 @@ func TestFindNodesToRemove(t *testing.T) {
         {
             name: "drainable node, and a mostly empty node that can take its pods",
             pods: []*apiv1.Pod{pod1, pod2, pod3},
-            candidates: []*apiv1.Node{drainableNode, nonDrainableNode},
-            allNodes: []*apiv1.Node{drainableNode, nonDrainableNode},
+            candidates: []*schedulernodeinfo.NodeInfo{drainableNodeInfo, nonDrainableNodeInfo},
+            allNodes: []*schedulernodeinfo.NodeInfo{drainableNodeInfo, nonDrainableNodeInfo},
            toRemove: []NodeToBeRemoved{drainableNodeToRemove},
            unremovable: []*UnremovableNode{{Node: nonDrainableNode, Reason: BlockedByPod, BlockingPod: &drain.BlockingPod{Pod: pod3, Reason: drain.NotReplicated}}},
         },
@@ -324,8 +375,8 @@ func TestFindNodesToRemove(t *testing.T) {
         {
             name: "drainable node, and a full node that cannot fit anymore pods",
             pods: []*apiv1.Pod{pod1, pod2, pod4},
-            candidates: []*apiv1.Node{drainableNode},
-            allNodes: []*apiv1.Node{drainableNode, fullNode},
+            candidates: []*schedulernodeinfo.NodeInfo{drainableNodeInfo},
+            allNodes: []*schedulernodeinfo.NodeInfo{drainableNodeInfo, fullNodeInfo},
            toRemove: []NodeToBeRemoved{},
            unremovable: []*UnremovableNode{{Node: drainableNode, Reason: NoPlaceToMovePods}},
         },
@@ -333,15 +384,20 @@ func TestFindNodesToRemove(t *testing.T) {
         {
             name: "4 nodes, 1 empty, 1 drainable",
             pods: []*apiv1.Pod{pod1, pod2, pod3, pod4},
-            candidates: []*apiv1.Node{emptyNode, drainableNode},
-            allNodes: []*apiv1.Node{emptyNode, drainableNode, fullNode, nonDrainableNode},
+            candidates: []*schedulernodeinfo.NodeInfo{emptyNodeInfo, drainableNodeInfo},
+            allNodes: []*schedulernodeinfo.NodeInfo{emptyNodeInfo, drainableNodeInfo, fullNodeInfo, nonDrainableNodeInfo},
            toRemove: []NodeToBeRemoved{emptyNodeToRemove, drainableNodeToRemove},
            unremovable: []*UnremovableNode{},
         },
     }

     for _, test := range tests {
-        InitializeClusterSnapshotOrDie(t, clusterSnapshot, test.allNodes, test.pods)
+        t.Run(test.name, func(t *testing.T) {
+            allNodesForSnapshot := []*apiv1.Node{}
+            for _, node := range test.allNodes {
+                allNodesForSnapshot = append(allNodesForSnapshot, node.Node())
+            }
+            InitializeClusterSnapshotOrDie(t, clusterSnapshot, allNodesForSnapshot, test.pods)
         toRemove, unremovable, _, err := FindNodesToRemove(
             test.candidates, test.allNodes, test.pods, nil,
             clusterSnapshot, predicateChecker, len(test.allNodes), true, map[string]string{},
@@ -350,5 +406,6 @@ func TestFindNodesToRemove(t *testing.T) {
         fmt.Printf("Test scenario: %s, found len(toRemove)=%v, expected len(test.toRemove)=%v\n", test.name, len(toRemove), len(test.toRemove))
         assert.Equal(t, toRemove, test.toRemove)
         assert.Equal(t, unremovable, test.unremovable)
+        })
     }
 }
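On the consumer side, the reworked FindNodesToRemove no longer rebuilds a name-to-NodeInfo map; it builds a set of destination node names and skips candidates that are not in it. A self-contained sketch of that guard, with a hypothetical function name and package chosen only for illustration:

    // Illustrative only: mirrors the membership check introduced in
    // FindNodesToRemove. Candidates whose node name is not among the
    // destination NodeInfos are set aside (the real code records them as
    // unremovable with reason UnexpectedError).
    package example

    import (
        schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
    )

    func splitByDestination(candidates, destinations []*schedulernodeinfo.NodeInfo) (kept, skipped []*schedulernodeinfo.NodeInfo) {
        names := make(map[string]bool, len(destinations))
        for _, d := range destinations {
            names[d.Node().Name] = true
        }
        for _, c := range candidates {
            if names[c.Node().Name] {
                kept = append(kept, c)
            } else {
                skipped = append(skipped, c)
            }
        }
        return kept, skipped
    }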