Fix drain logic when skipNodesWithCustomControllerPods=false, set NodeDeleteOptions correctly

This commit is contained in:
Bartłomiej Wróblewski 2023-04-04 09:40:19 +00:00
parent 70fd890ebc
commit d5d0a3c7b7
9 changed files with 37 additions and 38 deletions

View File

@ -228,9 +228,10 @@ func TestCropNodesToBudgets(t *testing.T) {
}, },
} }
deleteOptions := simulator.NodeDeleteOptions{ deleteOptions := simulator.NodeDeleteOptions{
SkipNodesWithSystemPods: true, SkipNodesWithSystemPods: true,
SkipNodesWithLocalStorage: true, SkipNodesWithLocalStorage: true,
MinReplicaCount: 0, MinReplicaCount: 0,
SkipNodesWithCustomControllerPods: true,
} }
ndr := deletiontracker.NewNodeDeletionTracker(1 * time.Hour) ndr := deletiontracker.NewNodeDeletionTracker(1 * time.Hour)
for i := 0; i < tc.emptyDeletionsInProgress; i++ { for i := 0; i < tc.emptyDeletionsInProgress; i++ {

View File

@ -1287,9 +1287,10 @@ func newWrapperForTesting(ctx *context.AutoscalingContext, clusterStateRegistry
ndt = deletiontracker.NewNodeDeletionTracker(0 * time.Second) ndt = deletiontracker.NewNodeDeletionTracker(0 * time.Second)
} }
deleteOptions := simulator.NodeDeleteOptions{ deleteOptions := simulator.NodeDeleteOptions{
SkipNodesWithSystemPods: true, SkipNodesWithSystemPods: true,
SkipNodesWithLocalStorage: true, SkipNodesWithLocalStorage: true,
MinReplicaCount: 0, MinReplicaCount: 0,
SkipNodesWithCustomControllerPods: true,
} }
sd := NewScaleDown(ctx, NewTestProcessors(ctx), ndt, deleteOptions) sd := NewScaleDown(ctx, NewTestProcessors(ctx), ndt, deleteOptions)
actuator := actuation.NewActuator(ctx, clusterStateRegistry, ndt, deleteOptions) actuator := actuation.NewActuator(ctx, clusterStateRegistry, ndt, deleteOptions)

View File

@ -170,12 +170,7 @@ func NewStaticAutoscaler(
clusterStateRegistry := clusterstate.NewClusterStateRegistry(autoscalingContext.CloudProvider, clusterStateConfig, autoscalingContext.LogRecorder, backoff) clusterStateRegistry := clusterstate.NewClusterStateRegistry(autoscalingContext.CloudProvider, clusterStateConfig, autoscalingContext.LogRecorder, backoff)
processors.ScaleDownCandidatesNotifier.Register(clusterStateRegistry) processors.ScaleDownCandidatesNotifier.Register(clusterStateRegistry)
deleteOptions := simulator.NodeDeleteOptions{ deleteOptions := simulator.NewNodeDeleteOptions(opts)
SkipNodesWithSystemPods: opts.SkipNodesWithSystemPods,
SkipNodesWithLocalStorage: opts.SkipNodesWithLocalStorage,
MinReplicaCount: opts.MinReplicaCount,
SkipNodesWithCustomControllerPods: opts.SkipNodesWithCustomControllerPods,
}
// TODO: Populate the ScaleDownActuator/Planner fields in AutoscalingContext // TODO: Populate the ScaleDownActuator/Planner fields in AutoscalingContext
// during the struct creation rather than here. // during the struct creation rather than here.

View File

@ -148,11 +148,7 @@ func (m *onNodeGroupDeleteMock) Delete(id string) error {
} }
func setUpScaleDownActuator(ctx *context.AutoscalingContext, options config.AutoscalingOptions) { func setUpScaleDownActuator(ctx *context.AutoscalingContext, options config.AutoscalingOptions) {
deleteOptions := simulator.NodeDeleteOptions{ deleteOptions := simulator.NewNodeDeleteOptions(options)
SkipNodesWithSystemPods: options.SkipNodesWithSystemPods,
SkipNodesWithLocalStorage: options.SkipNodesWithLocalStorage,
MinReplicaCount: options.MinReplicaCount,
}
ctx.ScaleDownActuator = actuation.NewActuator(ctx, nil, deletiontracker.NewNodeDeletionTracker(0*time.Second), deleteOptions) ctx.ScaleDownActuator = actuation.NewActuator(ctx, nil, deletiontracker.NewNodeDeletionTracker(0*time.Second), deleteOptions)
} }
@ -1727,9 +1723,10 @@ func newScaleDownPlannerAndActuator(t *testing.T, ctx *context.AutoscalingContex
ctx.NodeDeletionBatcherInterval = 0 * time.Second ctx.NodeDeletionBatcherInterval = 0 * time.Second
ctx.NodeDeleteDelayAfterTaint = 1 * time.Second ctx.NodeDeleteDelayAfterTaint = 1 * time.Second
deleteOptions := simulator.NodeDeleteOptions{ deleteOptions := simulator.NodeDeleteOptions{
SkipNodesWithSystemPods: true, SkipNodesWithSystemPods: true,
SkipNodesWithLocalStorage: true, SkipNodesWithLocalStorage: true,
MinReplicaCount: 0, MinReplicaCount: 0,
SkipNodesWithCustomControllerPods: true,
} }
ndt := deletiontracker.NewNodeDeletionTracker(0 * time.Second) ndt := deletiontracker.NewNodeDeletionTracker(0 * time.Second)
sd := legacy.NewScaleDown(ctx, p, ndt, deleteOptions) sd := legacy.NewScaleDown(ctx, p, ndt, deleteOptions)

View File

@ -51,11 +51,7 @@ type EmptySorting struct {
// NewEmptySortingProcessor return EmptySorting struct. // NewEmptySortingProcessor return EmptySorting struct.
func NewEmptySortingProcessor(opts *config.AutoscalingOptions, n nodeInfoGetter) *EmptySorting { func NewEmptySortingProcessor(opts *config.AutoscalingOptions, n nodeInfoGetter) *EmptySorting {
deleteOptions := simulator.NodeDeleteOptions{ deleteOptions := simulator.NewNodeDeleteOptions(*opts)
SkipNodesWithSystemPods: opts.SkipNodesWithSystemPods,
SkipNodesWithLocalStorage: opts.SkipNodesWithLocalStorage,
MinReplicaCount: opts.MinReplicaCount,
}
return &EmptySorting{n, deleteOptions} return &EmptySorting{n, deleteOptions}
} }

View File

@ -63,9 +63,10 @@ func TestScaleDownEarlierThan(t *testing.T) {
niGetter := testNodeInfoGetter{map[string]*schedulerframework.NodeInfo{nodeEmptyName: niEmpty, nodeNonEmptyName: niNonEmpty, nodeEmptyName2: niEmpty2}} niGetter := testNodeInfoGetter{map[string]*schedulerframework.NodeInfo{nodeEmptyName: niEmpty, nodeNonEmptyName: niNonEmpty, nodeEmptyName2: niEmpty2}}
deleteOptions := simulator.NodeDeleteOptions{ deleteOptions := simulator.NodeDeleteOptions{
SkipNodesWithSystemPods: true, SkipNodesWithSystemPods: true,
SkipNodesWithLocalStorage: true, SkipNodesWithLocalStorage: true,
MinReplicaCount: 0, MinReplicaCount: 0,
SkipNodesWithCustomControllerPods: true,
} }
p := EmptySorting{&niGetter, deleteOptions} p := EmptySorting{&niGetter, deleteOptions}

View File

@ -217,8 +217,9 @@ func TestFindNodesToRemove(t *testing.T) {
func testDeleteOptions() NodeDeleteOptions { func testDeleteOptions() NodeDeleteOptions {
return NodeDeleteOptions{ return NodeDeleteOptions{
SkipNodesWithSystemPods: true, SkipNodesWithSystemPods: true,
SkipNodesWithLocalStorage: true, SkipNodesWithLocalStorage: true,
MinReplicaCount: 0, MinReplicaCount: 0,
SkipNodesWithCustomControllerPods: true,
} }
} }

View File

@ -24,6 +24,7 @@ import (
policyv1 "k8s.io/api/policy/v1" policyv1 "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/labels"
"k8s.io/autoscaler/cluster-autoscaler/config"
"k8s.io/autoscaler/cluster-autoscaler/utils/drain" "k8s.io/autoscaler/cluster-autoscaler/utils/drain"
kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes" kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
@ -42,6 +43,16 @@ type NodeDeleteOptions struct {
MinReplicaCount int MinReplicaCount int
} }
// NewNodeDeleteOptions builds a NodeDeleteOptions from the corresponding
// fields of the supplied autoscaling configuration.
func NewNodeDeleteOptions(opts config.AutoscalingOptions) NodeDeleteOptions {
	var ndo NodeDeleteOptions
	ndo.SkipNodesWithSystemPods = opts.SkipNodesWithSystemPods
	ndo.SkipNodesWithLocalStorage = opts.SkipNodesWithLocalStorage
	ndo.MinReplicaCount = opts.MinReplicaCount
	ndo.SkipNodesWithCustomControllerPods = opts.SkipNodesWithCustomControllerPods
	return ndo
}
// GetPodsToMove returns a list of pods that should be moved elsewhere // GetPodsToMove returns a list of pods that should be moved elsewhere
// and a list of DaemonSet pods that should be evicted if the node // and a list of DaemonSet pods that should be evicted if the node
// is drained. Raises error if there is an unreplicated pod. // is drained. Raises error if there is an unreplicated pod.

View File

@ -93,7 +93,6 @@ func GetPodsForDeletionOnNodeDrain(
} }
} }
isDaemonSetPod := false
for _, pod := range podList { for _, pod := range podList {
if pod_util.IsMirrorPod(pod) { if pod_util.IsMirrorPod(pod) {
continue continue
@ -107,6 +106,7 @@ func GetPodsForDeletionOnNodeDrain(
continue continue
} }
isDaemonSetPod := false
replicated := false replicated := false
safeToEvict := hasSafeToEvictAnnotation(pod) safeToEvict := hasSafeToEvictAnnotation(pod)
terminal := isPodTerminal(pod) terminal := isPodTerminal(pod)
@ -118,12 +118,8 @@ func GetPodsForDeletionOnNodeDrain(
return []*apiv1.Pod{}, []*apiv1.Pod{}, blockingPod, err return []*apiv1.Pod{}, []*apiv1.Pod{}, blockingPod, err
} }
} else { } else {
if ControllerRef(pod) != nil { replicated = ControllerRef(pod) != nil
replicated = true isDaemonSetPod = pod_util.IsDaemonSetPod(pod)
}
if pod_util.IsDaemonSetPod(pod) {
isDaemonSetPod = true
}
} }
if isDaemonSetPod { if isDaemonSetPod {