Merge pull request #845 from aleksandra-malinowska/cherry-pick-stateful-set-drain-1.1

Cherry pick stateful set drain 1.1
This commit is contained in:
MaciekPytel 2018-05-11 14:48:02 +02:00 committed by GitHub
commit b359e16c72
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
4 changed files with 49 additions and 3 deletions

View File

@ -762,14 +762,15 @@ func drainNode(node *apiv1.Node, pods []*apiv1.Pod, client kube_client.Interface
allGone = true
for _, pod := range pods {
podreturned, err := client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
if err == nil {
if err == nil && (podreturned == nil || podreturned.Spec.NodeName == node.Name) {
glog.Errorf("Not deleted yet %v", podreturned)
allGone = false
break
}
if !kube_errors.IsNotFound(err) {
if err != nil && !kube_errors.IsNotFound(err) {
glog.Errorf("Failed to check pod %s/%s: %v", pod.Namespace, pod.Name, err)
allGone = false
break
}
}
if allGone {

View File

@ -635,6 +635,49 @@ func TestDrainNode(t *testing.T) {
assert.Equal(t, p2.Name, deleted[1])
}
// TestDrainNodeWithRescheduled verifies that drainNode considers a pod gone
// once a pod with the same name has been rescheduled onto a different node:
// the "get" reactor returns "p2" already bound to node "n2", which must not
// be counted as still blocking the drain of "n1".
func TestDrainNodeWithRescheduled(t *testing.T) {
	deletedPods := make(chan string, 10)
	fakeClient := &fake.Clientset{}

	p1 := BuildTestPod("p1", 100, 0)
	p2 := BuildTestPod("p2", 300, 0)
	// A pod with p2's name that has already been rescheduled to another node.
	p2Rescheduled := BuildTestPod("p2", 300, 0)
	p2Rescheduled.Spec.NodeName = "n2"

	n1 := BuildTestNode("n1", 1000, 1000)
	SetNodeReadyState(n1, true, time.Time{})

	fakeClient.Fake.AddReactor("get", "pods", func(action core.Action) (bool, runtime.Object, error) {
		// Comma-ok assertion: an unexpected action type falls through to the
		// next reactor instead of panicking the whole test.
		getAction, ok := action.(core.GetAction)
		if !ok || getAction == nil {
			return false, nil, nil
		}
		if getAction.GetName() == "p2" {
			return true, p2Rescheduled, nil
		}
		return true, nil, errors.NewNotFound(apiv1.Resource("pod"), "whatever")
	})
	fakeClient.Fake.AddReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
		createAction, ok := action.(core.CreateAction)
		if !ok || createAction == nil {
			return false, nil, nil
		}
		// Evictions arrive as a "create" of the pods/eviction subresource.
		eviction, ok := createAction.GetObject().(*policyv1.Eviction)
		if !ok || eviction == nil {
			return false, nil, nil
		}
		deletedPods <- eviction.Name
		return true, nil, nil
	})

	err := drainNode(n1, []*apiv1.Pod{p1, p2}, fakeClient, kube_util.CreateEventRecorder(fakeClient), 20, 5*time.Second, 0*time.Second)
	assert.NoError(t, err)

	deleted := make([]string, 0)
	deleted = append(deleted, getStringFromChan(deletedPods))
	deleted = append(deleted, getStringFromChan(deletedPods))
	sort.Strings(deleted)
	assert.Equal(t, p1.Name, deleted[0])
	assert.Equal(t, p2.Name, deleted[1])
}
func TestDrainNodeWithRetries(t *testing.T) {
deletedPods := make(chan string, 10)
// Simulate a PDB of size 1 by making the eviction goroutines succeed sequentially

View File

@ -85,6 +85,8 @@ func NewStaticAutoscaler(opts AutoscalingOptions, predicateChecker *simulator.Pr
// CleanUp removes ToBeDeleted taints that a previous run of CA may have
// left behind on ready nodes.
func (a *StaticAutoscaler) CleanUp() {
	// CA can die at any time. Removing taints that might have been left from the previous run.
	readyNodes, err := a.ReadyNodeLister().List()
	if err != nil {
		glog.Errorf("Failed to list ready nodes, not cleaning up taints: %v", err)
		return
	}
	cleanToBeDeleted(readyNodes, a.AutoscalingContext.ClientSet, a.Recorder)
}

View File

@ -57,7 +57,7 @@ func MarkToBeDeleted(node *apiv1.Node, client kube_client.Interface) error {
func addToBeDeletedTaint(node *apiv1.Node) (bool, error) {
for _, taint := range node.Spec.Taints {
if taint.Key == ToBeDeletedTaint {
glog.V(2).Infof("ToBeDeletedTaint already present on on node %v", taint, node.Name)
glog.V(2).Infof("ToBeDeletedTaint already present on node %v, taint: %v", node.Name, taint)
return false, nil
}
}