Merge pull request #3701 from yaroslava-serdiuk/DS_evict
add daemonset eviction for non-empty nodes
commit aa9367f56f
@@ -952,7 +952,7 @@ func (sd *ScaleDown) TryToScaleDown(
       errors.InternalError, "failed to find node group for %s", toRemove.Node.Name)}
     return
   }
-  result = sd.deleteNode(toRemove.Node, toRemove.PodsToReschedule, nodeGroup)
+  result = sd.deleteNode(toRemove.Node, toRemove.PodsToReschedule, toRemove.DaemonSetPods, nodeGroup)
   if result.ResultType != status.NodeDeleteOk {
     klog.Errorf("Failed to delete %s: %v", toRemove.Node.Name, result.Err)
     return

@@ -1106,7 +1106,7 @@ func (sd *ScaleDown) scheduleDeleteEmptyNodes(emptyNodes []*apiv1.Node, client k
   return deletedNodes, nil
 }
 
-func (sd *ScaleDown) deleteNode(node *apiv1.Node, pods []*apiv1.Pod,
+func (sd *ScaleDown) deleteNode(node *apiv1.Node, pods []*apiv1.Pod, daemonSetPods []*apiv1.Pod,
   nodeGroup cloudprovider.NodeGroup) status.NodeDeleteResult {
   deleteSuccessful := false
   drainSuccessful := false

@@ -1134,7 +1134,7 @@ func (sd *ScaleDown) deleteNode(node *apiv1.Node, pods []*apiv1.Pod,
   sd.context.Recorder.Eventf(node, apiv1.EventTypeNormal, "ScaleDown", "marked the node as toBeDeleted/unschedulable")
 
   // attempt drain
-  evictionResults, err := drainNode(node, pods, sd.context.ClientSet, sd.context.Recorder, sd.context.MaxGracefulTerminationSec, MaxPodEvictionTime, EvictionRetryTime, PodEvictionHeadroom)
+  evictionResults, err := drainNode(node, pods, daemonSetPods, sd.context.ClientSet, sd.context.Recorder, sd.context.MaxGracefulTerminationSec, MaxPodEvictionTime, EvictionRetryTime, PodEvictionHeadroom)
   if err != nil {
     return status.NodeDeleteResult{ResultType: status.NodeDeleteErrorFailedToEvictPods, Err: err, PodEvictionResults: evictionResults}
   }

@@ -1154,7 +1154,7 @@ func (sd *ScaleDown) deleteNode(node *apiv1.Node, pods []*apiv1.Pod,
   return status.NodeDeleteResult{ResultType: status.NodeDeleteOk}
 }
 
-func evictPod(podToEvict *apiv1.Pod, client kube_client.Interface, recorder kube_record.EventRecorder,
+func evictPod(podToEvict *apiv1.Pod, isDaemonSetPod bool, client kube_client.Interface, recorder kube_record.EventRecorder,
   maxGracefulTerminationSec int, retryUntil time.Time, waitBetweenRetries time.Duration) status.PodEvictionResult {
   recorder.Eventf(podToEvict, apiv1.EventTypeNormal, "ScaleDown", "deleting pod for node scale down")
 
@@ -1184,38 +1184,54 @@ func evictPod(podToEvict *apiv1.Pod, client kube_client.Interface, recorder kube
       return status.PodEvictionResult{Pod: podToEvict, TimedOut: false, Err: nil}
     }
   }
-  klog.Errorf("Failed to evict pod %s, error: %v", podToEvict.Name, lastError)
-  recorder.Eventf(podToEvict, apiv1.EventTypeWarning, "ScaleDownFailed", "failed to delete pod for ScaleDown")
+  if !isDaemonSetPod {
+    klog.Errorf("Failed to evict pod %s, error: %v", podToEvict.Name, lastError)
+    recorder.Eventf(podToEvict, apiv1.EventTypeWarning, "ScaleDownFailed", "failed to delete pod for ScaleDown")
+  }
   return status.PodEvictionResult{Pod: podToEvict, TimedOut: true, Err: fmt.Errorf("failed to evict pod %s/%s within allowed timeout (last error: %v)", podToEvict.Namespace, podToEvict.Name, lastError)}
 }
 
 // Performs drain logic on the node. Marks the node as unschedulable and later removes all pods, giving
 // them up to MaxGracefulTerminationTime to finish.
-func drainNode(node *apiv1.Node, pods []*apiv1.Pod, client kube_client.Interface, recorder kube_record.EventRecorder,
+func drainNode(node *apiv1.Node, pods []*apiv1.Pod, daemonSetPods []*apiv1.Pod, client kube_client.Interface, recorder kube_record.EventRecorder,
   maxGracefulTerminationSec int, maxPodEvictionTime time.Duration, waitBetweenRetries time.Duration,
   podEvictionHeadroom time.Duration) (evictionResults map[string]status.PodEvictionResult, err error) {
 
   evictionResults = make(map[string]status.PodEvictionResult)
-  toEvict := len(pods)
   retryUntil := time.Now().Add(maxPodEvictionTime)
-  confirmations := make(chan status.PodEvictionResult, toEvict)
+  confirmations := make(chan status.PodEvictionResult, len(pods))
+  daemonSetConfirmations := make(chan status.PodEvictionResult, len(daemonSetPods))
   for _, pod := range pods {
     evictionResults[pod.Name] = status.PodEvictionResult{Pod: pod, TimedOut: true, Err: nil}
     go func(podToEvict *apiv1.Pod) {
-      confirmations <- evictPod(podToEvict, client, recorder, maxGracefulTerminationSec, retryUntil, waitBetweenRetries)
+      confirmations <- evictPod(podToEvict, false, client, recorder, maxGracefulTerminationSec, retryUntil, waitBetweenRetries)
     }(pod)
   }
 
-  for range pods {
+  // Perform eviction of daemonset. We don't want to raise an error if daemonsetPod wasn't evict properly
+  for _, daemonSetPod := range daemonSetPods {
+    go func(podToEvict *apiv1.Pod) {
+      daemonSetConfirmations <- evictPod(podToEvict, true, client, recorder, maxGracefulTerminationSec, retryUntil, waitBetweenRetries)
+    }(daemonSetPod)
+
+  }
+
+  podsEvictionCounter := 0
+  for i := 0; i < len(pods)+len(daemonSetPods); i++ {
     select {
     case evictionResult := <-confirmations:
+      podsEvictionCounter++
       evictionResults[evictionResult.Pod.Name] = evictionResult
       if evictionResult.WasEvictionSuccessful() {
         metrics.RegisterEvictions(1)
       }
+    case <-daemonSetConfirmations:
     case <-time.After(retryUntil.Sub(time.Now()) + 5*time.Second):
-      // All pods initially had results with TimedOut set to true, so the ones that didn't receive an actual result are correctly marked as timed out.
-      return evictionResults, errors.NewAutoscalerError(errors.ApiCallError, "Failed to drain node %s/%s: timeout when waiting for creating evictions", node.Namespace, node.Name)
+      if podsEvictionCounter < len(pods) {
+        // All pods initially had results with TimedOut set to true, so the ones that didn't receive an actual result are correctly marked as timed out.
+        return evictionResults, errors.NewAutoscalerError(errors.ApiCallError, "Failed to drain node %s/%s: timeout when waiting for creating evictions", node.Namespace, node.Name)
+      }
+      klog.Infof("Timeout when waiting for creating daemonSetPods eviction")
     }
   }
 
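The hunks above carry the heart of the change: drainNode now receives the node's DaemonSet pods as a separate argument, evicts them concurrently with the regular pods on their own confirmation channel, and treats their failures as non-fatal. Below is a standalone Go sketch of that fan-out/fan-in pattern; the names and the per-iteration timeout are simplifications of mine, not the autoscaler's actual types or timing logic.

// Standalone sketch, not autoscaler code: regular pod evictions and DaemonSet pod
// evictions fan out concurrently; a timeout is fatal only while regular pods are
// still missing confirmations.
package main

import (
	"fmt"
	"time"
)

type evictionResult struct {
	name string
	err  error
}

// evict stands in for the eviction API call plus its retry loop.
func evict(name string, d time.Duration) evictionResult {
	time.Sleep(d)
	return evictionResult{name: name}
}

func drain(pods, daemonSetPods []string, timeout time.Duration) error {
	confirmations := make(chan evictionResult, len(pods))
	dsConfirmations := make(chan evictionResult, len(daemonSetPods))

	for _, p := range pods {
		go func(p string) { confirmations <- evict(p, 10*time.Millisecond) }(p)
	}
	// DaemonSet pods are evicted too, but strictly best-effort.
	for _, p := range daemonSetPods {
		go func(p string) { dsConfirmations <- evict(p, 50*time.Millisecond) }(p)
	}

	podsEvicted := 0
	for i := 0; i < len(pods)+len(daemonSetPods); i++ {
		select {
		case r := <-confirmations:
			podsEvicted++
			fmt.Println("evicted pod:", r.name)
		case r := <-dsConfirmations:
			fmt.Println("evicted DaemonSet pod (best effort):", r.name)
		case <-time.After(timeout):
			if podsEvicted < len(pods) {
				return fmt.Errorf("timed out waiting for pod evictions")
			}
			fmt.Println("timed out waiting for DaemonSet evictions; ignoring")
		}
	}
	return nil
}

func main() {
	if err := drain([]string{"p1", "p2"}, []string{"ds1"}, time.Second); err != nil {
		fmt.Println("drain failed:", err)
	}
}

In the patched code the deadline is derived from retryUntil and each result is recorded per pod name; the sketch keeps only the control flow that makes DaemonSet evictions best-effort.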
@@ -643,7 +643,7 @@ func TestDeleteNode(t *testing.T) {
   sd := NewScaleDown(&context, clusterStateRegistry)
 
   // attempt delete
-  result := sd.deleteNode(n1, pods, provider.GetNodeGroup("ng1"))
+  result := sd.deleteNode(n1, pods, []*apiv1.Pod{}, provider.GetNodeGroup("ng1"))
 
   // verify
   if scenario.expectedDeletion {

@@ -672,7 +672,9 @@ func TestDrainNode(t *testing.T) {
 
   p1 := BuildTestPod("p1", 100, 0)
   p2 := BuildTestPod("p2", 300, 0)
+  d1 := BuildTestPod("d1", 150, 0)
   n1 := BuildTestNode("n1", 1000, 1000)
+
   SetNodeReadyState(n1, true, time.Time{})
 
   fakeClient.Fake.AddReactor("get", "pods", func(action core.Action) (bool, runtime.Object, error) {

@@ -690,14 +692,17 @@ func TestDrainNode(t *testing.T) {
     deletedPods <- eviction.Name
     return true, nil, nil
   })
-  _, err := drainNode(n1, []*apiv1.Pod{p1, p2}, fakeClient, kube_util.CreateEventRecorder(fakeClient), 20, 5*time.Second, 0*time.Second, PodEvictionHeadroom)
+  _, err := drainNode(n1, []*apiv1.Pod{p1, p2}, []*apiv1.Pod{d1}, fakeClient, kube_util.CreateEventRecorder(fakeClient), 20, 5*time.Second, 0*time.Second, PodEvictionHeadroom)
   assert.NoError(t, err)
   deleted := make([]string, 0)
   deleted = append(deleted, utils.GetStringFromChan(deletedPods))
   deleted = append(deleted, utils.GetStringFromChan(deletedPods))
+  deleted = append(deleted, utils.GetStringFromChan(deletedPods))
+
   sort.Strings(deleted)
-  assert.Equal(t, p1.Name, deleted[0])
-  assert.Equal(t, p2.Name, deleted[1])
+  assert.Equal(t, d1.Name, deleted[0])
+  assert.Equal(t, p1.Name, deleted[1])
+  assert.Equal(t, p2.Name, deleted[2])
 }
 
 func TestDrainNodeWithRescheduled(t *testing.T) {

@@ -733,7 +738,7 @@ func TestDrainNodeWithRescheduled(t *testing.T) {
     deletedPods <- eviction.Name
     return true, nil, nil
   })
-  _, err := drainNode(n1, []*apiv1.Pod{p1, p2}, fakeClient, kube_util.CreateEventRecorder(fakeClient), 20, 5*time.Second, 0*time.Second, PodEvictionHeadroom)
+  _, err := drainNode(n1, []*apiv1.Pod{p1, p2}, []*apiv1.Pod{}, fakeClient, kube_util.CreateEventRecorder(fakeClient), 20, 5*time.Second, 0*time.Second, PodEvictionHeadroom)
   assert.NoError(t, err)
   deleted := make([]string, 0)
   deleted = append(deleted, utils.GetStringFromChan(deletedPods))

@@ -755,6 +760,7 @@ func TestDrainNodeWithRetries(t *testing.T) {
   p1 := BuildTestPod("p1", 100, 0)
   p2 := BuildTestPod("p2", 300, 0)
   p3 := BuildTestPod("p3", 300, 0)
+  d1 := BuildTestPod("d1", 150, 0)
   n1 := BuildTestNode("n1", 1000, 1000)
   SetNodeReadyState(n1, true, time.Time{})
 

@@ -782,16 +788,62 @@ func TestDrainNodeWithRetries(t *testing.T) {
       return true, nil, fmt.Errorf("too many concurrent evictions")
     }
   })
-  _, err := drainNode(n1, []*apiv1.Pod{p1, p2, p3}, fakeClient, kube_util.CreateEventRecorder(fakeClient), 20, 5*time.Second, 0*time.Second, PodEvictionHeadroom)
+  _, err := drainNode(n1, []*apiv1.Pod{p1, p2, p3}, []*apiv1.Pod{d1}, fakeClient, kube_util.CreateEventRecorder(fakeClient), 20, 5*time.Second, 0*time.Second, PodEvictionHeadroom)
   assert.NoError(t, err)
   deleted := make([]string, 0)
   deleted = append(deleted, utils.GetStringFromChan(deletedPods))
   deleted = append(deleted, utils.GetStringFromChan(deletedPods))
   deleted = append(deleted, utils.GetStringFromChan(deletedPods))
+  deleted = append(deleted, utils.GetStringFromChan(deletedPods))
   sort.Strings(deleted)
-  assert.Equal(t, p1.Name, deleted[0])
-  assert.Equal(t, p2.Name, deleted[1])
-  assert.Equal(t, p3.Name, deleted[2])
+  assert.Equal(t, d1.Name, deleted[0])
+  assert.Equal(t, p1.Name, deleted[1])
+  assert.Equal(t, p2.Name, deleted[2])
+  assert.Equal(t, p3.Name, deleted[3])
 }
 
+func TestDrainNodeDaemonSetEvictionFailure(t *testing.T) {
+  fakeClient := &fake.Clientset{}
+
+  p1 := BuildTestPod("p1", 100, 0)
+  p2 := BuildTestPod("p2", 300, 0)
+  d1 := BuildTestPod("d1", 150, 0)
+  d2 := BuildTestPod("d2", 250, 0)
+  n1 := BuildTestNode("n1", 1000, 1000)
+  e1 := fmt.Errorf("eviction_error: d1")
+  e2 := fmt.Errorf("eviction_error: d2")
+
+  fakeClient.Fake.AddReactor("get", "pods", func(action core.Action) (bool, runtime.Object, error) {
+    return true, nil, errors.NewNotFound(apiv1.Resource("pod"), "whatever")
+  })
+  fakeClient.Fake.AddReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
+    createAction := action.(core.CreateAction)
+    if createAction == nil {
+      return false, nil, nil
+    }
+    eviction := createAction.GetObject().(*policyv1.Eviction)
+    if eviction == nil {
+      return false, nil, nil
+    }
+    if eviction.Name == "d1" {
+      return true, nil, e1
+    }
+    if eviction.Name == "d2" {
+      return true, nil, e2
+    }
+    return true, nil, nil
+  })
+  evictionResults, err := drainNode(n1, []*apiv1.Pod{p1, p2}, []*apiv1.Pod{d1, d2}, fakeClient, kube_util.CreateEventRecorder(fakeClient), 20, 0*time.Second, 0*time.Second, PodEvictionHeadroom)
+  assert.NoError(t, err)
+  assert.Equal(t, 2, len(evictionResults))
+  assert.Equal(t, p1, evictionResults["p1"].Pod)
+  assert.Equal(t, p2, evictionResults["p2"].Pod)
+  assert.NoError(t, evictionResults["p1"].Err)
+  assert.NoError(t, evictionResults["p2"].Err)
+  assert.False(t, evictionResults["p1"].TimedOut)
+  assert.False(t, evictionResults["p2"].TimedOut)
+  assert.True(t, evictionResults["p1"].WasEvictionSuccessful())
+  assert.True(t, evictionResults["p2"].WasEvictionSuccessful())
+}
+
 func TestDrainNodeEvictionFailure(t *testing.T) {

@@ -825,7 +877,7 @@ func TestDrainNodeEvictionFailure(t *testing.T) {
     return true, nil, nil
   })
 
-  evictionResults, err := drainNode(n1, []*apiv1.Pod{p1, p2, p3, p4}, fakeClient, kube_util.CreateEventRecorder(fakeClient), 20, 0*time.Second, 0*time.Second, PodEvictionHeadroom)
+  evictionResults, err := drainNode(n1, []*apiv1.Pod{p1, p2, p3, p4}, []*apiv1.Pod{}, fakeClient, kube_util.CreateEventRecorder(fakeClient), 20, 0*time.Second, 0*time.Second, PodEvictionHeadroom)
   assert.Error(t, err)
   assert.Equal(t, 4, len(evictionResults))
   assert.Equal(t, *p1, *evictionResults["p1"].Pod)

@@ -874,7 +926,7 @@ func TestDrainNodeDisappearanceFailure(t *testing.T) {
     return true, nil, nil
   })
 
-  evictionResults, err := drainNode(n1, []*apiv1.Pod{p1, p2, p3, p4}, fakeClient, kube_util.CreateEventRecorder(fakeClient), 0, 0*time.Second, 0*time.Second, 0*time.Second)
+  evictionResults, err := drainNode(n1, []*apiv1.Pod{p1, p2, p3, p4}, []*apiv1.Pod{}, fakeClient, kube_util.CreateEventRecorder(fakeClient), 0, 0*time.Second, 0*time.Second, 0*time.Second)
   assert.Error(t, err)
   assert.Equal(t, 4, len(evictionResults))
   assert.Equal(t, *p1, *evictionResults["p1"].Pod)
@@ -53,6 +53,7 @@ type NodeToBeRemoved struct {
   Node *apiv1.Node
   // PodsToReschedule contains pods on the node that should be rescheduled elsewhere.
   PodsToReschedule []*apiv1.Pod
+  DaemonSetPods []*apiv1.Pod
 }
 
 // UnremovableNode represents a node that can't be removed by CA.

@@ -147,6 +148,7 @@ candidateloop:
   klog.V(2).Infof("%s: %s for removal", evaluationType, nodeName)
 
   var podsToRemove []*apiv1.Pod
+  var daemonSetPods []*apiv1.Pod
   var blockingPod *drain.BlockingPod
 
   if _, found := destinationMap[nodeName]; !found {

@@ -156,10 +158,10 @@ candidateloop:
   }
 
   if fastCheck {
-    podsToRemove, blockingPod, err = FastGetPodsToMove(nodeInfo, *skipNodesWithSystemPods, *skipNodesWithLocalStorage,
+    podsToRemove, daemonSetPods, blockingPod, err = FastGetPodsToMove(nodeInfo, *skipNodesWithSystemPods, *skipNodesWithLocalStorage,
       podDisruptionBudgets)
   } else {
-    podsToRemove, blockingPod, err = DetailedGetPodsForMove(nodeInfo, *skipNodesWithSystemPods, *skipNodesWithLocalStorage, listers, int32(*minReplicaCount),
+    podsToRemove, daemonSetPods, blockingPod, err = DetailedGetPodsForMove(nodeInfo, *skipNodesWithSystemPods, *skipNodesWithLocalStorage, listers, int32(*minReplicaCount),
       podDisruptionBudgets)
   }
 

@@ -180,6 +182,7 @@ candidateloop:
   result = append(result, NodeToBeRemoved{
     Node: nodeInfo.Node(),
     PodsToReschedule: podsToRemove,
+    DaemonSetPods: daemonSetPods,
   })
   klog.V(2).Infof("%s: node %s may be removed", evaluationType, nodeName)
   if len(result) >= maxCount {

@@ -203,7 +206,7 @@ func FindEmptyNodesToRemove(snapshot ClusterSnapshot, candidates []string) []str
     continue
   }
   // Should block on all pods.
-  podsToRemove, _, err := FastGetPodsToMove(nodeInfo, true, true, nil)
+  podsToRemove, _, _, err := FastGetPodsToMove(nodeInfo, true, true, nil)
   if err == nil && len(podsToRemove) == 0 {
     result = append(result, node)
   }
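As the hunks above show, a removable node now carries its DaemonSet pods next to the pods that must be rescheduled, and FindNodesToRemove fills in both lists. A minimal, self-contained sketch of a consumer reading the new field; the string placeholders standing in for *apiv1.Pod and the cloud-provider types are my simplification, not part of the diff.

// Standalone sketch with simplified types (strings instead of *apiv1.Pod):
// a removable node exposes its DaemonSet pods separately, and the scale-down
// path forwards both lists to the drain logic.
package main

import "fmt"

type NodeToBeRemoved struct {
	Node             string
	PodsToReschedule []string
	DaemonSetPods    []string
}

// drainNode is a placeholder for the real drain call shown in the diff.
func drainNode(node string, pods, daemonSetPods []string) {
	fmt.Printf("draining %s: reschedule %v, best-effort evict %v\n", node, pods, daemonSetPods)
}

func main() {
	toRemove := NodeToBeRemoved{
		Node:             "n1",
		PodsToReschedule: []string{"p1", "p2"},
		DaemonSetPods:    []string{"ds1"},
	}
	// Mirrors: sd.deleteNode(toRemove.Node, toRemove.PodsToReschedule, toRemove.DaemonSetPods, nodeGroup)
	drainNode(toRemove.Node, toRemove.PodsToReschedule, toRemove.DaemonSetPods)
}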
@@ -308,10 +308,12 @@ func TestFindNodesToRemove(t *testing.T) {
   emptyNodeToRemove := NodeToBeRemoved{
     Node: emptyNode,
     PodsToReschedule: []*apiv1.Pod{},
+    DaemonSetPods: []*apiv1.Pod{},
   }
   drainableNodeToRemove := NodeToBeRemoved{
     Node: drainableNode,
     PodsToReschedule: []*apiv1.Pod{pod1, pod2},
+    DaemonSetPods: []*apiv1.Pod{},
   }
 
   clusterSnapshot := NewBasicClusterSnapshot()
@@ -29,18 +29,18 @@ import (
   schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )
 
-// FastGetPodsToMove returns a list of pods that should be moved elsewhere if the node
+// FastGetPodsToMove returns a list of pods that should be moved elsewhere
+// and a list of DaemonSet pods that should be evicted if the node
 // is drained. Raises error if there is an unreplicated pod.
 // Based on kubectl drain code. It makes an assumption that RC, DS, Jobs and RS were deleted
 // along with their pods (no abandoned pods with dangling created-by annotation). Useful for fast
 // checks.
 func FastGetPodsToMove(nodeInfo *schedulerframework.NodeInfo, skipNodesWithSystemPods bool, skipNodesWithLocalStorage bool,
-  pdbs []*policyv1.PodDisruptionBudget) ([]*apiv1.Pod, *drain.BlockingPod, error) {
-  var pods []*apiv1.Pod
+  pdbs []*policyv1.PodDisruptionBudget) (pods []*apiv1.Pod, daemonSetPods []*apiv1.Pod, blockingPod *drain.BlockingPod, err error) {
   for _, podInfo := range nodeInfo.Pods {
     pods = append(pods, podInfo.Pod)
   }
-  pods, blockingPod, err := drain.GetPodsForDeletionOnNodeDrain(
+  pods, daemonSetPods, blockingPod, err = drain.GetPodsForDeletionOnNodeDrain(
     pods,
     pdbs,
     skipNodesWithSystemPods,

@@ -51,27 +51,27 @@ func FastGetPodsToMove(nodeInfo *schedulerframework.NodeInfo, skipNodesWithSyste
     time.Now())
 
   if err != nil {
-    return pods, blockingPod, err
+    return pods, daemonSetPods, blockingPod, err
   }
   if pdbBlockingPod, err := checkPdbs(pods, pdbs); err != nil {
-    return []*apiv1.Pod{}, pdbBlockingPod, err
+    return []*apiv1.Pod{}, []*apiv1.Pod{}, pdbBlockingPod, err
   }
 
-  return pods, nil, nil
+  return pods, daemonSetPods, nil, nil
 }
 
-// DetailedGetPodsForMove returns a list of pods that should be moved elsewhere if the node
+// DetailedGetPodsForMove returns a list of pods that should be moved elsewhere
+// and a list of DaemonSet pods that should be evicted if the node
 // is drained. Raises error if there is an unreplicated pod.
 // Based on kubectl drain code. It checks whether RC, DS, Jobs and RS that created these pods
 // still exist.
 func DetailedGetPodsForMove(nodeInfo *schedulerframework.NodeInfo, skipNodesWithSystemPods bool,
   skipNodesWithLocalStorage bool, listers kube_util.ListerRegistry, minReplicaCount int32,
-  pdbs []*policyv1.PodDisruptionBudget) ([]*apiv1.Pod, *drain.BlockingPod, error) {
-  var pods []*apiv1.Pod
+  pdbs []*policyv1.PodDisruptionBudget) (pods []*apiv1.Pod, daemonSetPods []*apiv1.Pod, blockingPod *drain.BlockingPod, err error) {
   for _, podInfo := range nodeInfo.Pods {
     pods = append(pods, podInfo.Pod)
   }
-  pods, blockingPod, err := drain.GetPodsForDeletionOnNodeDrain(
+  pods, daemonSetPods, blockingPod, err = drain.GetPodsForDeletionOnNodeDrain(
     pods,
     pdbs,
     skipNodesWithSystemPods,

@@ -81,13 +81,13 @@ func DetailedGetPodsForMove(nodeInfo *schedulerframework.NodeInfo, skipNodesWith
     minReplicaCount,
     time.Now())
   if err != nil {
-    return pods, blockingPod, err
+    return pods, daemonSetPods, blockingPod, err
   }
   if pdbBlockingPod, err := checkPdbs(pods, pdbs); err != nil {
-    return []*apiv1.Pod{}, pdbBlockingPod, err
+    return []*apiv1.Pod{}, []*apiv1.Pod{}, pdbBlockingPod, err
   }
 
-  return pods, nil, nil
+  return pods, daemonSetPods, nil, nil
 }
 
 func checkPdbs(pods []*apiv1.Pod, pdbs []*policyv1.PodDisruptionBudget) (*drain.BlockingPod, error) {
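Both simulator helpers now return four values, so every caller has to accept the DaemonSet pod list even if it ignores it (as FindEmptyNodesToRemove does with a blank identifier). The sketch below uses a hypothetical stand-in with the same return shape to show the updated calling convention; it is not the real simulator API.

// Standalone sketch: a hypothetical helper with the same return shape as the
// patched FastGetPodsToMove / DetailedGetPodsForMove, to illustrate the new
// four-value calling convention.
package main

import (
	"errors"
	"fmt"
	"strings"
)

type blockingPod struct {
	Name   string
	Reason string
}

// fastGetPodsToMove classifies pods on a node: movable pods, DaemonSet pods,
// an optional blocking pod, and an error.
func fastGetPodsToMove(podsOnNode []string) ([]string, []string, *blockingPod, error) {
	var pods, daemonSetPods []string
	for _, p := range podsOnNode {
		switch {
		case strings.HasPrefix(p, "ds-"):
			daemonSetPods = append(daemonSetPods, p)
		case p == "unreplicated":
			return nil, nil, &blockingPod{Name: p, Reason: "NotReplicated"},
				errors.New(p + " is not replicated")
		default:
			pods = append(pods, p)
		}
	}
	return pods, daemonSetPods, nil, nil
}

func main() {
	pods, daemonSetPods, blocking, err := fastGetPodsToMove([]string{"p1", "ds-proxy", "p2"})
	if err != nil {
		fmt.Println("node is blocked by", blocking.Name, "-", err)
		return
	}
	fmt.Println("pods to reschedule:", pods)
	fmt.Println("DaemonSet pods to evict best-effort:", daemonSetPods)
}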
@@ -40,7 +40,7 @@ func TestFastGetPodsToMove(t *testing.T) {
       Namespace: "ns",
     },
   }
-  _, blockingPod, err := FastGetPodsToMove(schedulerframework.NewNodeInfo(pod1), true, true, nil)
+  _, _, blockingPod, err := FastGetPodsToMove(schedulerframework.NewNodeInfo(pod1), true, true, nil)
   assert.Error(t, err)
   assert.Equal(t, &drain.BlockingPod{Pod: pod1, Reason: drain.NotReplicated}, blockingPod)
 

@@ -52,7 +52,7 @@ func TestFastGetPodsToMove(t *testing.T) {
       OwnerReferences: GenerateOwnerReferences("rs", "ReplicaSet", "extensions/v1beta1", ""),
     },
   }
-  r2, blockingPod, err := FastGetPodsToMove(schedulerframework.NewNodeInfo(pod2), true, true, nil)
+  r2, _, blockingPod, err := FastGetPodsToMove(schedulerframework.NewNodeInfo(pod2), true, true, nil)
   assert.NoError(t, err)
   assert.Nil(t, blockingPod)
   assert.Equal(t, 1, len(r2))

@@ -68,7 +68,7 @@ func TestFastGetPodsToMove(t *testing.T) {
       },
     },
   }
-  r3, blockingPod, err := FastGetPodsToMove(schedulerframework.NewNodeInfo(pod3), true, true, nil)
+  r3, _, blockingPod, err := FastGetPodsToMove(schedulerframework.NewNodeInfo(pod3), true, true, nil)
   assert.NoError(t, err)
   assert.Nil(t, blockingPod)
   assert.Equal(t, 0, len(r3))

@@ -81,7 +81,7 @@ func TestFastGetPodsToMove(t *testing.T) {
       OwnerReferences: GenerateOwnerReferences("ds", "DaemonSet", "extensions/v1beta1", ""),
     },
   }
-  r4, blockingPod, err := FastGetPodsToMove(schedulerframework.NewNodeInfo(pod2, pod3, pod4), true, true, nil)
+  r4, _, blockingPod, err := FastGetPodsToMove(schedulerframework.NewNodeInfo(pod2, pod3, pod4), true, true, nil)
   assert.NoError(t, err)
   assert.Nil(t, blockingPod)
   assert.Equal(t, 1, len(r4))

@@ -95,7 +95,7 @@ func TestFastGetPodsToMove(t *testing.T) {
       OwnerReferences: GenerateOwnerReferences("rs", "ReplicaSet", "extensions/v1beta1", ""),
     },
   }
-  _, blockingPod, err = FastGetPodsToMove(schedulerframework.NewNodeInfo(pod5), true, true, nil)
+  _, _, blockingPod, err = FastGetPodsToMove(schedulerframework.NewNodeInfo(pod5), true, true, nil)
   assert.Error(t, err)
   assert.Equal(t, &drain.BlockingPod{Pod: pod5, Reason: drain.UnmovableKubeSystemPod}, blockingPod)
 

@@ -116,7 +116,7 @@ func TestFastGetPodsToMove(t *testing.T) {
       },
     },
   }
-  _, blockingPod, err = FastGetPodsToMove(schedulerframework.NewNodeInfo(pod6), true, true, nil)
+  _, _, blockingPod, err = FastGetPodsToMove(schedulerframework.NewNodeInfo(pod6), true, true, nil)
   assert.Error(t, err)
   assert.Equal(t, &drain.BlockingPod{Pod: pod6, Reason: drain.LocalStorageRequested}, blockingPod)
 

@@ -139,7 +139,7 @@ func TestFastGetPodsToMove(t *testing.T) {
       },
     },
   }
-  r7, blockingPod, err := FastGetPodsToMove(schedulerframework.NewNodeInfo(pod7), true, true, nil)
+  r7, _, blockingPod, err := FastGetPodsToMove(schedulerframework.NewNodeInfo(pod7), true, true, nil)
   assert.NoError(t, err)
   assert.Nil(t, blockingPod)
   assert.Equal(t, 1, len(r7))

@@ -175,7 +175,7 @@ func TestFastGetPodsToMove(t *testing.T) {
     },
   }
 
-  _, blockingPod, err = FastGetPodsToMove(schedulerframework.NewNodeInfo(pod8), true, true, []*policyv1.PodDisruptionBudget{pdb8})
+  _, _, blockingPod, err = FastGetPodsToMove(schedulerframework.NewNodeInfo(pod8), true, true, []*policyv1.PodDisruptionBudget{pdb8})
   assert.Error(t, err)
   assert.Equal(t, &drain.BlockingPod{Pod: pod8, Reason: drain.NotEnoughPdb}, blockingPod)
 

@@ -209,7 +209,7 @@ func TestFastGetPodsToMove(t *testing.T) {
     },
   }
 
-  r9, blockingPod, err := FastGetPodsToMove(schedulerframework.NewNodeInfo(pod9), true, true, []*policyv1.PodDisruptionBudget{pdb9})
+  r9, _, blockingPod, err := FastGetPodsToMove(schedulerframework.NewNodeInfo(pod9), true, true, []*policyv1.PodDisruptionBudget{pdb9})
   assert.NoError(t, err)
   assert.Nil(t, blockingPod)
   assert.Equal(t, 1, len(r9))
@@ -72,7 +72,7 @@ const (
 )
 
 // GetPodsForDeletionOnNodeDrain returns pods that should be deleted on node drain as well as some extra information
-// about possibly problematic pods (unreplicated and daemonsets).
+// about possibly problematic pods (unreplicated and DaemonSets).
 func GetPodsForDeletionOnNodeDrain(
   podList []*apiv1.Pod,
   pdbs []*policyv1.PodDisruptionBudget,

@@ -81,9 +81,10 @@ func GetPodsForDeletionOnNodeDrain(
   checkReferences bool, // Setting this to true requires client to be not-null.
   listers kube_util.ListerRegistry,
   minReplica int32,
-  currentTime time.Time) ([]*apiv1.Pod, *BlockingPod, error) {
+  currentTime time.Time) (pods []*apiv1.Pod, daemonSetPods []*apiv1.Pod, blockingPod *BlockingPod, err error) {
 
-  pods := []*apiv1.Pod{}
+  pods = []*apiv1.Pod{}
+  daemonSetPods = []*apiv1.Pod{}
   // filter kube-system PDBs to avoid doing it for every kube-system pod
   kubeSystemPDBs := make([]*policyv1.PodDisruptionBudget, 0)
   for _, pdb := range pdbs {

@@ -105,7 +106,7 @@ func GetPodsForDeletionOnNodeDrain(
     continue
   }
 
-  daemonsetPod := false
+  isDaemonSetPod := false
   replicated := false
   safeToEvict := hasSafeToEvictAnnotation(pod)
   terminal := isPodTerminal(pod)

@@ -128,26 +129,26 @@ func GetPodsForDeletionOnNodeDrain(
       // TODO: replace the minReplica check with pod disruption budget.
       if err == nil && rc != nil {
         if rc.Spec.Replicas != nil && *rc.Spec.Replicas < minReplica {
-          return []*apiv1.Pod{}, &BlockingPod{Pod: pod, Reason: MinReplicasReached}, fmt.Errorf("replication controller for %s/%s has too few replicas spec: %d min: %d",
+          return []*apiv1.Pod{}, []*apiv1.Pod{}, &BlockingPod{Pod: pod, Reason: MinReplicasReached}, fmt.Errorf("replication controller for %s/%s has too few replicas spec: %d min: %d",
             pod.Namespace, pod.Name, rc.Spec.Replicas, minReplica)
         }
         replicated = true
       } else {
-        return []*apiv1.Pod{}, &BlockingPod{Pod: pod, Reason: ControllerNotFound}, fmt.Errorf("replication controller for %s/%s is not available, err: %v", pod.Namespace, pod.Name, err)
+        return []*apiv1.Pod{}, []*apiv1.Pod{}, &BlockingPod{Pod: pod, Reason: ControllerNotFound}, fmt.Errorf("replication controller for %s/%s is not available, err: %v", pod.Namespace, pod.Name, err)
       }
     } else {
       replicated = true
     }
   } else if pod_util.IsDaemonSetPod(pod) {
-    daemonsetPod = true
+    isDaemonSetPod = true
     // don't have listener for other DaemonSet kind
     // TODO: we should use a generic client for checking the reference.
     if checkReferences && refKind == "DaemonSet" {
       _, err := listers.DaemonSetLister().DaemonSets(controllerNamespace).Get(controllerRef.Name)
       if apierrors.IsNotFound(err) {
-        return []*apiv1.Pod{}, &BlockingPod{Pod: pod, Reason: ControllerNotFound}, fmt.Errorf("daemonset for %s/%s is not present, err: %v", pod.Namespace, pod.Name, err)
+        return []*apiv1.Pod{}, []*apiv1.Pod{}, &BlockingPod{Pod: pod, Reason: ControllerNotFound}, fmt.Errorf("daemonset for %s/%s is not present, err: %v", pod.Namespace, pod.Name, err)
       } else if err != nil {
-        return []*apiv1.Pod{}, &BlockingPod{Pod: pod, Reason: UnexpectedError}, fmt.Errorf("error when trying to get daemonset for %s/%s , err: %v", pod.Namespace, pod.Name, err)
+        return []*apiv1.Pod{}, []*apiv1.Pod{}, &BlockingPod{Pod: pod, Reason: UnexpectedError}, fmt.Errorf("error when trying to get daemonset for %s/%s , err: %v", pod.Namespace, pod.Name, err)
       }
     }
   } else if refKind == "Job" {

@@ -160,7 +161,7 @@ func GetPodsForDeletionOnNodeDrain(
     if err == nil && job != nil {
       replicated = true
     } else {
-      return []*apiv1.Pod{}, &BlockingPod{Pod: pod, Reason: ControllerNotFound}, fmt.Errorf("job for %s/%s is not available: err: %v", pod.Namespace, pod.Name, err)
+      return []*apiv1.Pod{}, []*apiv1.Pod{}, &BlockingPod{Pod: pod, Reason: ControllerNotFound}, fmt.Errorf("job for %s/%s is not available: err: %v", pod.Namespace, pod.Name, err)
     }
   } else {
     replicated = true

@@ -174,12 +175,12 @@ func GetPodsForDeletionOnNodeDrain(
     // sophisticated than this
     if err == nil && rs != nil {
       if rs.Spec.Replicas != nil && *rs.Spec.Replicas < minReplica {
-        return []*apiv1.Pod{}, &BlockingPod{Pod: pod, Reason: MinReplicasReached}, fmt.Errorf("replication controller for %s/%s has too few replicas spec: %d min: %d",
+        return []*apiv1.Pod{}, []*apiv1.Pod{}, &BlockingPod{Pod: pod, Reason: MinReplicasReached}, fmt.Errorf("replication controller for %s/%s has too few replicas spec: %d min: %d",
           pod.Namespace, pod.Name, rs.Spec.Replicas, minReplica)
       }
       replicated = true
     } else {
-      return []*apiv1.Pod{}, &BlockingPod{Pod: pod, Reason: ControllerNotFound}, fmt.Errorf("replication controller for %s/%s is not available, err: %v", pod.Namespace, pod.Name, err)
+      return []*apiv1.Pod{}, []*apiv1.Pod{}, &BlockingPod{Pod: pod, Reason: ControllerNotFound}, fmt.Errorf("replication controller for %s/%s is not available, err: %v", pod.Namespace, pod.Name, err)
     }
   } else {
     replicated = true

@@ -194,39 +195,40 @@ func GetPodsForDeletionOnNodeDrain(
     if err == nil && ss != nil {
       replicated = true
     } else {
-      return []*apiv1.Pod{}, &BlockingPod{Pod: pod, Reason: ControllerNotFound}, fmt.Errorf("statefulset for %s/%s is not available: err: %v", pod.Namespace, pod.Name, err)
+      return []*apiv1.Pod{}, []*apiv1.Pod{}, &BlockingPod{Pod: pod, Reason: ControllerNotFound}, fmt.Errorf("statefulset for %s/%s is not available: err: %v", pod.Namespace, pod.Name, err)
     }
   } else {
     replicated = true
   }
 }
-  if daemonsetPod {
+  if isDaemonSetPod {
+    daemonSetPods = append(daemonSetPods, pod)
     continue
   }
 
   if !safeToEvict && !terminal {
     if !replicated {
-      return []*apiv1.Pod{}, &BlockingPod{Pod: pod, Reason: NotReplicated}, fmt.Errorf("%s/%s is not replicated", pod.Namespace, pod.Name)
+      return []*apiv1.Pod{}, []*apiv1.Pod{}, &BlockingPod{Pod: pod, Reason: NotReplicated}, fmt.Errorf("%s/%s is not replicated", pod.Namespace, pod.Name)
     }
     if pod.Namespace == "kube-system" && skipNodesWithSystemPods {
       hasPDB, err := checkKubeSystemPDBs(pod, kubeSystemPDBs)
       if err != nil {
-        return []*apiv1.Pod{}, &BlockingPod{Pod: pod, Reason: UnexpectedError}, fmt.Errorf("error matching pods to pdbs: %v", err)
+        return []*apiv1.Pod{}, []*apiv1.Pod{}, &BlockingPod{Pod: pod, Reason: UnexpectedError}, fmt.Errorf("error matching pods to pdbs: %v", err)
       }
       if !hasPDB {
-        return []*apiv1.Pod{}, &BlockingPod{Pod: pod, Reason: UnmovableKubeSystemPod}, fmt.Errorf("non-daemonset, non-mirrored, non-pdb-assigned kube-system pod present: %s", pod.Name)
+        return []*apiv1.Pod{}, []*apiv1.Pod{}, &BlockingPod{Pod: pod, Reason: UnmovableKubeSystemPod}, fmt.Errorf("non-daemonset, non-mirrored, non-pdb-assigned kube-system pod present: %s", pod.Name)
       }
     }
     if HasLocalStorage(pod) && skipNodesWithLocalStorage {
-      return []*apiv1.Pod{}, &BlockingPod{Pod: pod, Reason: LocalStorageRequested}, fmt.Errorf("pod with local storage present: %s", pod.Name)
+      return []*apiv1.Pod{}, []*apiv1.Pod{}, &BlockingPod{Pod: pod, Reason: LocalStorageRequested}, fmt.Errorf("pod with local storage present: %s", pod.Name)
     }
     if hasNotSafeToEvictAnnotation(pod) {
-      return []*apiv1.Pod{}, &BlockingPod{Pod: pod, Reason: NotSafeToEvictAnnotation}, fmt.Errorf("pod annotated as not safe to evict present: %s", pod.Name)
+      return []*apiv1.Pod{}, []*apiv1.Pod{}, &BlockingPod{Pod: pod, Reason: NotSafeToEvictAnnotation}, fmt.Errorf("pod annotated as not safe to evict present: %s", pod.Name)
     }
   }
   pods = append(pods, pod)
 }
-return pods, nil, nil
+return pods, daemonSetPods, nil, nil
 }
 
 // ControllerRef returns the OwnerReference to pod's controller.
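The behavioural core of the change above: DaemonSet-owned pods used to be skipped with a bare continue and were effectively dropped, whereas now they are appended to daemonSetPods and handed back to the caller. A reduced, self-contained sketch of that classification follows; the simplified pod type is my assumption, and the real function additionally checks controllers, PDBs, and safe-to-evict annotations.

// Standalone sketch (simplified pod type): DaemonSet pods are no longer dropped
// during classification; they are returned in their own list alongside the pods
// selected for deletion.
package main

import "fmt"

type pod struct {
	Name      string
	OwnerKind string // e.g. "ReplicaSet", "DaemonSet"
}

func getPodsForDeletion(podList []pod) (pods []pod, daemonSetPods []pod) {
	for _, p := range podList {
		if p.OwnerKind == "DaemonSet" {
			daemonSetPods = append(daemonSetPods, p) // previously: continue, i.e. silently skipped
			continue
		}
		pods = append(pods, p)
	}
	return pods, daemonSetPods
}

func main() {
	pods, dsPods := getPodsForDeletion([]pod{
		{Name: "web-1", OwnerKind: "ReplicaSet"},
		{Name: "node-exporter-x", OwnerKind: "DaemonSet"},
	})
	fmt.Println("delete and reschedule:", pods)
	fmt.Println("evict best-effort (DaemonSet):", dsPods)
}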
@@ -363,187 +363,210 @@ func TestDrain(t *testing.T) {
 }
 
 tests := []struct {
   description string
   pods []*apiv1.Pod
   pdbs []*policyv1.PodDisruptionBudget
   rcs []*apiv1.ReplicationController
   replicaSets []*appsv1.ReplicaSet
   expectFatal bool
   expectPods []*apiv1.Pod
+  expectDaemonSetPods []*apiv1.Pod
   expectBlockingPod *BlockingPod
 }{
   {
     description: "RC-managed pod",
     pods: []*apiv1.Pod{rcPod},
     pdbs: []*policyv1.PodDisruptionBudget{},
     rcs: []*apiv1.ReplicationController{&rc},
     expectFatal: false,
     expectPods: []*apiv1.Pod{rcPod},
+    expectDaemonSetPods: []*apiv1.Pod{},
   },
   {
     description: "DS-managed pod",
     pods: []*apiv1.Pod{dsPod},
     pdbs: []*policyv1.PodDisruptionBudget{},
     expectFatal: false,
     expectPods: []*apiv1.Pod{},
+    expectDaemonSetPods: []*apiv1.Pod{dsPod},
   },
   {
     description: "DS-managed pod by a custom Daemonset",
     pods: []*apiv1.Pod{cdsPod},
     pdbs: []*policyv1.PodDisruptionBudget{},
     expectFatal: false,
     expectPods: []*apiv1.Pod{},
+    expectDaemonSetPods: []*apiv1.Pod{cdsPod},
   },
   {
     description: "Job-managed pod",
     pods: []*apiv1.Pod{jobPod},
     pdbs: []*policyv1.PodDisruptionBudget{},
     rcs: []*apiv1.ReplicationController{&rc},
     expectFatal: false,
     expectPods: []*apiv1.Pod{jobPod},
+    expectDaemonSetPods: []*apiv1.Pod{},
   },
   {
     description: "SS-managed pod",
     pods: []*apiv1.Pod{ssPod},
     pdbs: []*policyv1.PodDisruptionBudget{},
     rcs: []*apiv1.ReplicationController{&rc},
     expectFatal: false,
     expectPods: []*apiv1.Pod{ssPod},
+    expectDaemonSetPods: []*apiv1.Pod{},
   },
   {
     description: "RS-managed pod",
     pods: []*apiv1.Pod{rsPod},
     pdbs: []*policyv1.PodDisruptionBudget{},
     replicaSets: []*appsv1.ReplicaSet{&rs},
     expectFatal: false,
     expectPods: []*apiv1.Pod{rsPod},
+    expectDaemonSetPods: []*apiv1.Pod{},
   },
   {
     description: "RS-managed pod that is being deleted",
     pods: []*apiv1.Pod{rsPodDeleted},
     pdbs: []*policyv1.PodDisruptionBudget{},
     replicaSets: []*appsv1.ReplicaSet{&rs},
     expectFatal: false,
     expectPods: []*apiv1.Pod{},
+    expectDaemonSetPods: []*apiv1.Pod{},
   },
   {
     description: "naked pod",
     pods: []*apiv1.Pod{nakedPod},
     pdbs: []*policyv1.PodDisruptionBudget{},
     expectFatal: true,
     expectPods: []*apiv1.Pod{},
     expectBlockingPod: &BlockingPod{Pod: nakedPod, Reason: NotReplicated},
+    expectDaemonSetPods: []*apiv1.Pod{},
   },
   {
     description: "pod with EmptyDir",
     pods: []*apiv1.Pod{emptydirPod},
     pdbs: []*policyv1.PodDisruptionBudget{},
     rcs: []*apiv1.ReplicationController{&rc},
     expectFatal: true,
     expectPods: []*apiv1.Pod{},
     expectBlockingPod: &BlockingPod{Pod: emptydirPod, Reason: LocalStorageRequested},
+    expectDaemonSetPods: []*apiv1.Pod{},
   },
   {
     description: "failed pod",
     pods: []*apiv1.Pod{failedPod},
     pdbs: []*policyv1.PodDisruptionBudget{},
     expectFatal: false,
     expectPods: []*apiv1.Pod{failedPod},
+    expectDaemonSetPods: []*apiv1.Pod{},
   },
   {
     description: "evicted pod",
     pods: []*apiv1.Pod{evictedPod},
     pdbs: []*policyv1.PodDisruptionBudget{},
     expectFatal: false,
     expectPods: []*apiv1.Pod{evictedPod},
+    expectDaemonSetPods: []*apiv1.Pod{},
   },
   {
     description: "pod in terminal state",
     pods: []*apiv1.Pod{terminalPod},
     pdbs: []*policyv1.PodDisruptionBudget{},
     expectFatal: false,
     expectPods: []*apiv1.Pod{terminalPod},
+    expectDaemonSetPods: []*apiv1.Pod{},
   },
   {
     description: "pod with PodSafeToEvict annotation",
     pods: []*apiv1.Pod{safePod},
     pdbs: []*policyv1.PodDisruptionBudget{},
     expectFatal: false,
     expectPods: []*apiv1.Pod{safePod},
+    expectDaemonSetPods: []*apiv1.Pod{},
   },
   {
     description: "kube-system pod with PodSafeToEvict annotation",
     pods: []*apiv1.Pod{kubeSystemSafePod},
     pdbs: []*policyv1.PodDisruptionBudget{},
     expectFatal: false,
     expectPods: []*apiv1.Pod{kubeSystemSafePod},
+    expectDaemonSetPods: []*apiv1.Pod{},
   },
   {
     description: "pod with EmptyDir and PodSafeToEvict annotation",
     pods: []*apiv1.Pod{emptydirSafePod},
     pdbs: []*policyv1.PodDisruptionBudget{},
     expectFatal: false,
     expectPods: []*apiv1.Pod{emptydirSafePod},
+    expectDaemonSetPods: []*apiv1.Pod{},
   },
   {
     description: "RC-managed pod with PodSafeToEvict=false annotation",
     pods: []*apiv1.Pod{unsafeRcPod},
     rcs: []*apiv1.ReplicationController{&rc},
     pdbs: []*policyv1.PodDisruptionBudget{},
     expectFatal: true,
     expectPods: []*apiv1.Pod{},
     expectBlockingPod: &BlockingPod{Pod: unsafeRcPod, Reason: NotSafeToEvictAnnotation},
+    expectDaemonSetPods: []*apiv1.Pod{},
   },
   {
     description: "Job-managed pod with PodSafeToEvict=false annotation",
     pods: []*apiv1.Pod{unsafeJobPod},
     pdbs: []*policyv1.PodDisruptionBudget{},
     rcs: []*apiv1.ReplicationController{&rc},
     expectFatal: true,
     expectPods: []*apiv1.Pod{},
     expectBlockingPod: &BlockingPod{Pod: unsafeJobPod, Reason: NotSafeToEvictAnnotation},
+    expectDaemonSetPods: []*apiv1.Pod{},
   },
   {
     description: "empty PDB with RC-managed pod",
     pods: []*apiv1.Pod{rcPod},
     pdbs: []*policyv1.PodDisruptionBudget{emptyPDB},
     rcs: []*apiv1.ReplicationController{&rc},
     expectFatal: false,
     expectPods: []*apiv1.Pod{rcPod},
+    expectDaemonSetPods: []*apiv1.Pod{},
   },
   {
     description: "kube-system PDB with matching kube-system pod",
     pods: []*apiv1.Pod{kubeSystemRcPod},
     pdbs: []*policyv1.PodDisruptionBudget{kubeSystemPDB},
     rcs: []*apiv1.ReplicationController{&kubeSystemRc},
     expectFatal: false,
     expectPods: []*apiv1.Pod{kubeSystemRcPod},
+    expectDaemonSetPods: []*apiv1.Pod{},
   },
   {
     description: "kube-system PDB with non-matching kube-system pod",
     pods: []*apiv1.Pod{kubeSystemRcPod},
     pdbs: []*policyv1.PodDisruptionBudget{kubeSystemFakePDB},
     rcs: []*apiv1.ReplicationController{&kubeSystemRc},
     expectFatal: true,
     expectPods: []*apiv1.Pod{},
     expectBlockingPod: &BlockingPod{Pod: kubeSystemRcPod, Reason: UnmovableKubeSystemPod},
+    expectDaemonSetPods: []*apiv1.Pod{},
   },
   {
     description: "kube-system PDB with default namespace pod",
     pods: []*apiv1.Pod{rcPod},
     pdbs: []*policyv1.PodDisruptionBudget{kubeSystemPDB},
     rcs: []*apiv1.ReplicationController{&rc},
     expectFatal: false,
     expectPods: []*apiv1.Pod{rcPod},
+    expectDaemonSetPods: []*apiv1.Pod{},
   },
   {
     description: "default namespace PDB with matching labels kube-system pod",
     pods: []*apiv1.Pod{kubeSystemRcPod},
     pdbs: []*policyv1.PodDisruptionBudget{defaultNamespacePDB},
     rcs: []*apiv1.ReplicationController{&kubeSystemRc},
     expectFatal: true,
     expectPods: []*apiv1.Pod{},
     expectBlockingPod: &BlockingPod{Pod: kubeSystemRcPod, Reason: UnmovableKubeSystemPod},
+    expectDaemonSetPods: []*apiv1.Pod{},
   },
 }
 

@@ -569,7 +592,7 @@ func TestDrain(t *testing.T) {
 
   registry := kube_util.NewListerRegistry(nil, nil, nil, nil, nil, dsLister, rcLister, jobLister, rsLister, ssLister)
 
-  pods, blockingPod, err := GetPodsForDeletionOnNodeDrain(test.pods, test.pdbs, true, true, true, registry, 0, time.Now())
+  pods, daemonSetPods, blockingPod, err := GetPodsForDeletionOnNodeDrain(test.pods, test.pdbs, true, true, true, registry, 0, time.Now())
 
   if test.expectFatal {
     assert.Equal(t, test.expectBlockingPod, blockingPod)

@@ -588,5 +611,7 @@ func TestDrain(t *testing.T) {
   if len(pods) != len(test.expectPods) {
     t.Fatalf("Wrong pod list content: %v", test.description)
   }
+
+  assert.ElementsMatch(t, test.expectDaemonSetPods, daemonSetPods)
 }
 }