CA: add NodeInfo to Rule.Drainable interface
parent 76d914b38c
commit 6f03ca0f58
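In short, this change threads the node's scheduler NodeInfo through the drainability Rule interface so that rules can consult node-level state when deciding drainability; every implementation, caller, and test in the hunks below is updated mechanically. The signature change, taken from the interface hunk:

    // Before
    Drainable(*drainability.DrainContext, *apiv1.Pod) drainability.Status
    // After
    Drainable(*drainability.DrainContext, *apiv1.Pod, *framework.NodeInfo) drainability.Status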
@@ -52,7 +52,7 @@ func GetPodsToMove(nodeInfo *schedulerframework.NodeInfo, deleteOptions options.
 	}
 	for _, podInfo := range nodeInfo.Pods {
 		pod := podInfo.Pod
-		status := drainabilityRules.Drainable(drainCtx, pod)
+		status := drainabilityRules.Drainable(drainCtx, pod, nodeInfo)
 		switch status.Outcome {
 		case drainability.UndefinedOutcome, drainability.DrainOk:
 			if pod_util.IsDaemonSetPod(pod) {

@@ -794,7 +794,7 @@ func (a alwaysDrain) Name() string {
 	return "AlwaysDrain"
 }
 
-func (a alwaysDrain) Drainable(*drainability.DrainContext, *apiv1.Pod) drainability.Status {
+func (a alwaysDrain) Drainable(*drainability.DrainContext, *apiv1.Pod, *schedulerframework.NodeInfo) drainability.Status {
 	return drainability.NewDrainableStatus()
 }

@@ -804,7 +804,7 @@ func (n neverDrain) Name() string {
 	return "NeverDrain"
 }
 
-func (n neverDrain) Drainable(*drainability.DrainContext, *apiv1.Pod) drainability.Status {
+func (n neverDrain) Drainable(*drainability.DrainContext, *apiv1.Pod, *schedulerframework.NodeInfo) drainability.Status {
 	return drainability.NewBlockedStatus(drain.UnexpectedError, fmt.Errorf("nope"))
 }

@@ -814,6 +814,6 @@ func (c cantDecide) Name() string {
 	return "CantDecide"
 }
 
-func (c cantDecide) Drainable(*drainability.DrainContext, *apiv1.Pod) drainability.Status {
+func (c cantDecide) Drainable(*drainability.DrainContext, *apiv1.Pod, *schedulerframework.NodeInfo) drainability.Status {
 	return drainability.NewUndefinedStatus()
 }
@@ -20,6 +20,7 @@ import (
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability"
 	pod_util "k8s.io/autoscaler/cluster-autoscaler/utils/pod"
+	"k8s.io/kubernetes/pkg/scheduler/framework"
 )
 
 // Rule is a drainability rule on how to handle daemon set pods.

@@ -36,7 +37,7 @@ func (r *Rule) Name() string {
 }
 
 // Drainable decides what to do with daemon set pods on node drain.
-func (r *Rule) Drainable(drainCtx *drainability.DrainContext, pod *apiv1.Pod) drainability.Status {
+func (r *Rule) Drainable(drainCtx *drainability.DrainContext, pod *apiv1.Pod, _ *framework.NodeInfo) drainability.Status {
 	if pod_util.IsDaemonSetPod(pod) {
 		return drainability.NewDrainableStatus()
 	}

@@ -52,7 +52,7 @@ func TestDrainable(t *testing.T) {
 		},
 	} {
 		t.Run(desc, func(t *testing.T) {
-			got := New().Drainable(nil, tc.pod)
+			got := New().Drainable(nil, tc.pod, nil)
 			if diff := cmp.Diff(tc.want, got); diff != "" {
 				t.Errorf("Rule.Drainable(%v): got status diff (-want +got):\n%s", tc.pod.Name, diff)
 			}
@@ -22,6 +22,7 @@ import (
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/drain"
+	"k8s.io/kubernetes/pkg/scheduler/framework"
 )
 
 // Rule is a drainability rule on how to handle local storage pods.

@@ -38,7 +39,7 @@ func (r *Rule) Name() string {
 }
 
 // Drainable decides what to do with local storage pods on node drain.
-func (r *Rule) Drainable(drainCtx *drainability.DrainContext, pod *apiv1.Pod) drainability.Status {
+func (r *Rule) Drainable(drainCtx *drainability.DrainContext, pod *apiv1.Pod, _ *framework.NodeInfo) drainability.Status {
 	if drain.HasBlockingLocalStorage(pod) {
 		return drainability.NewBlockedStatus(drain.LocalStorageRequested, fmt.Errorf("pod with local storage present: %s", pod.Name))
 	}

@@ -277,7 +277,7 @@ func TestDrainable(t *testing.T) {
 			drainCtx := &drainability.DrainContext{
 				Timestamp: testTime,
 			}
-			status := New().Drainable(drainCtx, test.pod)
+			status := New().Drainable(drainCtx, test.pod, nil)
 			assert.Equal(t, test.wantReason, status.BlockingReason)
 			assert.Equal(t, test.wantError, status.Error != nil)
 		})
@@ -20,6 +20,7 @@ import (
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/drain"
+	"k8s.io/kubernetes/pkg/scheduler/framework"
 )
 
 // Rule is a drainability rule on how to handle long terminating pods.

@@ -36,7 +37,7 @@ func (r *Rule) Name() string {
 }
 
 // Drainable decides what to do with long terminating pods on node drain.
-func (r *Rule) Drainable(drainCtx *drainability.DrainContext, pod *apiv1.Pod) drainability.Status {
+func (r *Rule) Drainable(drainCtx *drainability.DrainContext, pod *apiv1.Pod, _ *framework.NodeInfo) drainability.Status {
 	if drain.IsPodLongTerminating(pod, drainCtx.Timestamp) {
 		return drainability.NewSkipStatus()
 	}

@@ -120,7 +120,7 @@ func TestDrainable(t *testing.T) {
 			drainCtx := &drainability.DrainContext{
 				Timestamp: testTime,
 			}
-			got := New().Drainable(drainCtx, tc.pod)
+			got := New().Drainable(drainCtx, tc.pod, nil)
 			if diff := cmp.Diff(tc.want, got); diff != "" {
 				t.Errorf("Rule.Drainable(%v): got status diff (-want +got):\n%s", tc.pod.Name, diff)
 			}
@@ -20,6 +20,7 @@ import (
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability"
 	pod_util "k8s.io/autoscaler/cluster-autoscaler/utils/pod"
+	"k8s.io/kubernetes/pkg/scheduler/framework"
 )
 
 // Rule is a drainability rule on how to handle mirror pods.

@@ -36,7 +37,7 @@ func (r *Rule) Name() string {
 }
 
 // Drainable decides what to do with mirror pods on node drain.
-func (Rule) Drainable(drainCtx *drainability.DrainContext, pod *apiv1.Pod) drainability.Status {
+func (Rule) Drainable(drainCtx *drainability.DrainContext, pod *apiv1.Pod, _ *framework.NodeInfo) drainability.Status {
 	if pod_util.IsMirrorPod(pod) {
 		return drainability.NewSkipStatus()
 	}

@@ -54,7 +54,7 @@ func TestDrainable(t *testing.T) {
 		},
 	} {
 		t.Run(desc, func(t *testing.T) {
-			got := New().Drainable(nil, tc.pod)
+			got := New().Drainable(nil, tc.pod, nil)
 			if diff := cmp.Diff(tc.want, got); diff != "" {
 				t.Errorf("Rule.Drainable(%v): got status diff (-want +got):\n%s", tc.pod.Name, diff)
 			}
@@ -22,6 +22,7 @@ import (
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/drain"
+	"k8s.io/kubernetes/pkg/scheduler/framework"
 )
 
 // Rule is a drainability rule on how to handle not safe to evict pods.

@@ -38,7 +39,7 @@ func (r *Rule) Name() string {
 }
 
 // Drainable decides what to do with not safe to evict pods on node drain.
-func (Rule) Drainable(drainCtx *drainability.DrainContext, pod *apiv1.Pod) drainability.Status {
+func (Rule) Drainable(drainCtx *drainability.DrainContext, pod *apiv1.Pod, _ *framework.NodeInfo) drainability.Status {
 	if drain.HasNotSafeToEvictAnnotation(pod) {
 		return drainability.NewBlockedStatus(drain.NotSafeToEvictAnnotation, fmt.Errorf("pod annotated as not safe to evict present: %s", pod.Name))
 	}

@@ -138,7 +138,7 @@ func TestDrainable(t *testing.T) {
 			drainCtx := &drainability.DrainContext{
 				Timestamp: testTime,
 			}
-			status := New().Drainable(drainCtx, test.pod)
+			status := New().Drainable(drainCtx, test.pod, nil)
 			assert.Equal(t, test.wantReason, status.BlockingReason)
 			assert.Equal(t, test.wantError, status.Error != nil)
 		})
@@ -22,6 +22,7 @@ import (
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/drain"
+	"k8s.io/kubernetes/pkg/scheduler/framework"
 )
 
 // Rule is a drainability rule on how to handle pods with pdbs.

@@ -38,7 +39,7 @@ func (r *Rule) Name() string {
 }
 
 // Drainable decides how to handle pods with pdbs on node drain.
-func (Rule) Drainable(drainCtx *drainability.DrainContext, pod *apiv1.Pod) drainability.Status {
+func (Rule) Drainable(drainCtx *drainability.DrainContext, pod *apiv1.Pod, _ *framework.NodeInfo) drainability.Status {
 	for _, pdb := range drainCtx.RemainingPdbTracker.MatchingPdbs(pod) {
 		if pdb.Status.DisruptionsAllowed < 1 {
 			return drainability.NewBlockedStatus(drain.NotEnoughPdb, fmt.Errorf("not enough pod disruption budget to move %s/%s", pod.Namespace, pod.Name))

@@ -143,7 +143,7 @@ func TestDrainable(t *testing.T) {
 				RemainingPdbTracker: tracker,
 			}
 
-			got := New().Drainable(drainCtx, tc.pod)
+			got := New().Drainable(drainCtx, tc.pod, nil)
 			assert.Equal(t, tc.wantReason, got.BlockingReason)
 			assert.Equal(t, tc.wantOutcome, got.Outcome)
 		})
@@ -25,6 +25,7 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/drain"
 	pod_util "k8s.io/autoscaler/cluster-autoscaler/utils/pod"
+	"k8s.io/kubernetes/pkg/scheduler/framework"
 )
 
 // Rule is a drainability rule on how to handle replicated pods.

@@ -45,7 +46,7 @@ func (r *Rule) Name() string {
 }
 
 // Drainable decides what to do with replicated pods on node drain.
-func (r *Rule) Drainable(drainCtx *drainability.DrainContext, pod *apiv1.Pod) drainability.Status {
+func (r *Rule) Drainable(drainCtx *drainability.DrainContext, pod *apiv1.Pod, _ *framework.NodeInfo) drainability.Status {
 	if drainCtx.Listers == nil {
 		return drainability.NewUndefinedStatus()
 	}

@@ -311,7 +311,7 @@ func TestDrainable(t *testing.T) {
 				Listers: registry,
 				Timestamp: testTime,
 			}
-			status := New(0).Drainable(drainCtx, test.pod)
+			status := New(0).Drainable(drainCtx, test.pod, nil)
 			assert.Equal(t, test.wantReason, status.BlockingReason)
 			assert.Equal(t, test.wantError, status.Error != nil)
 		})
@@ -22,6 +22,7 @@ import (
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/drain"
+	"k8s.io/kubernetes/pkg/scheduler/framework"
 )
 
 // Rule is a drainability rule on how to handle replicated pods.

@@ -42,7 +43,7 @@ func (r *Rule) Name() string {
 }
 
 // Drainable decides what to do with replicated pods on node drain.
-func (r *Rule) Drainable(drainCtx *drainability.DrainContext, pod *apiv1.Pod) drainability.Status {
+func (r *Rule) Drainable(drainCtx *drainability.DrainContext, pod *apiv1.Pod, _ *framework.NodeInfo) drainability.Status {
 	controllerRef := drain.ControllerRef(pod)
 	replicated := controllerRef != nil

@@ -231,7 +231,7 @@ func TestDrainable(t *testing.T) {
 				Listers: registry,
 				Timestamp: testTime,
 			}
-			status := New(test.skipNodesWithCustomControllerPods).Drainable(drainCtx, test.pod)
+			status := New(test.skipNodesWithCustomControllerPods).Drainable(drainCtx, test.pod, nil)
 			assert.Equal(t, test.wantReason, status.BlockingReason)
 			assert.Equal(t, test.wantError, status.Error != nil)
 		})
@@ -33,6 +33,7 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability/rules/terminal"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/options"
 	"k8s.io/klog/v2"
+	"k8s.io/kubernetes/pkg/scheduler/framework"
 )
 
 // Rule determines whether a given pod can be drained or not.

@@ -43,7 +44,7 @@ type Rule interface {
 	// the specific Rule.
 	//
 	// DrainContext cannot be nil.
-	Drainable(*drainability.DrainContext, *apiv1.Pod) drainability.Status
+	Drainable(*drainability.DrainContext, *apiv1.Pod, *framework.NodeInfo) drainability.Status
 }
 
 // Default returns the default list of Rules.
@@ -81,7 +82,7 @@ type Rules []Rule
 
 // Drainable determines whether a given pod is drainable according to the
 // specified set of rules.
-func (rs Rules) Drainable(drainCtx *drainability.DrainContext, pod *apiv1.Pod) drainability.Status {
+func (rs Rules) Drainable(drainCtx *drainability.DrainContext, pod *apiv1.Pod, nodeInfo *framework.NodeInfo) drainability.Status {
 	if drainCtx == nil {
 		drainCtx = &drainability.DrainContext{}
 	}
@@ -92,7 +93,7 @@ func (rs Rules) Drainable(drainCtx *drainability.DrainContext, pod *apiv1.Pod) d
 	var candidates []overrideCandidate
 
 	for _, r := range rs {
-		status := r.Drainable(drainCtx, pod)
+		status := r.Drainable(drainCtx, pod, nodeInfo)
 		if len(status.Overrides) > 0 {
 			candidates = append(candidates, overrideCandidate{r.Name(), status})
 			continue
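On the caller side, a small sketch of invoking Rules.Drainable with a NodeInfo built via the scheduler framework's NewNodeInfo/SetNode helpers; the canEvict wrapper and its names are hypothetical (GetPodsToMove in the first hunk is the real call site, and the tests below simply pass nil):

package example

import (
	apiv1 "k8s.io/api/core/v1"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability/rules"
	"k8s.io/kubernetes/pkg/scheduler/framework"
)

// canEvict reports whether the given pod on the given node is considered
// drainable by the supplied rules.
func canEvict(drainabilityRules rules.Rules, drainCtx *drainability.DrainContext, node *apiv1.Node, pod *apiv1.Pod) bool {
	// Build a NodeInfo carrying the node and its pod, mirroring the state the
	// autoscaler's simulator already has at hand.
	nodeInfo := framework.NewNodeInfo(pod)
	nodeInfo.SetNode(node)

	status := drainabilityRules.Drainable(drainCtx, pod, nodeInfo)
	switch status.Outcome {
	case drainability.UndefinedOutcome, drainability.DrainOk:
		return true
	default:
		// Blocked or skipped; details live in status.BlockingReason / status.Error.
		return false
	}
}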
@@ -23,6 +23,7 @@ import (
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/drain"
+	"k8s.io/kubernetes/pkg/scheduler/framework"
 )
 
 func TestDrainable(t *testing.T) {

@@ -111,7 +112,7 @@ func TestDrainable(t *testing.T) {
 		},
 	} {
 		t.Run(desc, func(t *testing.T) {
-			got := tc.rules.Drainable(nil, &apiv1.Pod{})
+			got := tc.rules.Drainable(nil, &apiv1.Pod{}, nil)
 			if diff := cmp.Diff(tc.want, got); diff != "" {
 				t.Errorf("Drainable(): got status diff (-want +got):\n%s", diff)
 			}

@@ -127,6 +128,6 @@ func (r fakeRule) Name() string {
 	return "FakeRule"
 }
 
-func (r fakeRule) Drainable(*drainability.DrainContext, *apiv1.Pod) drainability.Status {
+func (r fakeRule) Drainable(*drainability.DrainContext, *apiv1.Pod, *framework.NodeInfo) drainability.Status {
 	return r.status
 }
@@ -20,6 +20,7 @@ import (
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/drain"
+	"k8s.io/kubernetes/pkg/scheduler/framework"
 )
 
 // Rule is a drainability rule on how to handle safe to evict pods.

@@ -36,7 +37,7 @@ func (r *Rule) Name() string {
 }
 
 // Drainable decides what to do with safe to evict pods on node drain.
-func (r *Rule) Drainable(drainCtx *drainability.DrainContext, pod *apiv1.Pod) drainability.Status {
+func (r *Rule) Drainable(drainCtx *drainability.DrainContext, pod *apiv1.Pod, _ *framework.NodeInfo) drainability.Status {
 	if drain.HasSafeToEvictAnnotation(pod) {
 		return drainability.NewDrainableStatus()
 	}

@@ -54,7 +54,7 @@ func TestDrainable(t *testing.T) {
 		},
 	} {
 		t.Run(desc, func(t *testing.T) {
-			got := New().Drainable(nil, tc.pod)
+			got := New().Drainable(nil, tc.pod, nil)
 			if diff := cmp.Diff(tc.want, got); diff != "" {
 				t.Errorf("Rule.Drainable(%v): got status diff (-want +got):\n%s", tc.pod.Name, diff)
 			}
@@ -22,6 +22,7 @@ import (
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/drain"
+	"k8s.io/kubernetes/pkg/scheduler/framework"
 )
 
 // Rule is a drainability rule on how to handle system pods.

@@ -38,7 +39,7 @@ func (r *Rule) Name() string {
 }
 
 // Drainable decides what to do with system pods on node drain.
-func (r *Rule) Drainable(drainCtx *drainability.DrainContext, pod *apiv1.Pod) drainability.Status {
+func (r *Rule) Drainable(drainCtx *drainability.DrainContext, pod *apiv1.Pod, _ *framework.NodeInfo) drainability.Status {
 	if pod.Namespace == "kube-system" && len(drainCtx.RemainingPdbTracker.MatchingPdbs(pod)) == 0 {
 		return drainability.NewBlockedStatus(drain.UnmovableKubeSystemPod, fmt.Errorf("non-daemonset, non-mirrored, non-pdb-assigned kube-system pod present: %s", pod.Name))
 	}

@@ -173,7 +173,7 @@ func TestDrainable(t *testing.T) {
 				RemainingPdbTracker: tracker,
 				Timestamp: testTime,
 			}
-			status := New().Drainable(drainCtx, test.pod)
+			status := New().Drainable(drainCtx, test.pod, nil)
 			assert.Equal(t, test.wantReason, status.BlockingReason)
 			assert.Equal(t, test.wantError, status.Error != nil)
 		})
@@ -20,6 +20,7 @@ import (
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/drain"
+	"k8s.io/kubernetes/pkg/scheduler/framework"
 )
 
 // Rule is a drainability rule on how to handle terminal pods.

@@ -36,7 +37,7 @@ func (r *Rule) Name() string {
 }
 
 // Drainable decides what to do with terminal pods on node drain.
-func (r *Rule) Drainable(drainCtx *drainability.DrainContext, pod *apiv1.Pod) drainability.Status {
+func (r *Rule) Drainable(drainCtx *drainability.DrainContext, pod *apiv1.Pod, _ *framework.NodeInfo) drainability.Status {
 	if drain.IsPodTerminal(pod) {
 		return drainability.NewDrainableStatus()
 	}

@@ -71,7 +71,7 @@ func TestDrainable(t *testing.T) {
 		},
 	} {
 		t.Run(desc, func(t *testing.T) {
-			got := New().Drainable(nil, tc.pod)
+			got := New().Drainable(nil, tc.pod, nil)
 			if diff := cmp.Diff(tc.want, got); diff != "" {
 				t.Errorf("Rule.Drainable(%v): got status diff (-want +got):\n%s", tc.pod.Name, diff)
 			}