Fix drain logic when skipNodesWithCustomControllerPods=false, set NodeDeleteOptions correctly
parent 70fd890ebc
commit d5d0a3c7b7
@@ -228,9 +228,10 @@ func TestCropNodesToBudgets(t *testing.T) {
         },
     }
     deleteOptions := simulator.NodeDeleteOptions{
-        SkipNodesWithSystemPods:   true,
-        SkipNodesWithLocalStorage: true,
-        MinReplicaCount:           0,
+        SkipNodesWithSystemPods:           true,
+        SkipNodesWithLocalStorage:         true,
+        MinReplicaCount:                   0,
+        SkipNodesWithCustomControllerPods: true,
     }
     ndr := deletiontracker.NewNodeDeletionTracker(1 * time.Hour)
     for i := 0; i < tc.emptyDeletionsInProgress; i++ {

@@ -1287,9 +1287,10 @@ func newWrapperForTesting(ctx *context.AutoscalingContext, clusterStateRegistry
         ndt = deletiontracker.NewNodeDeletionTracker(0 * time.Second)
     }
     deleteOptions := simulator.NodeDeleteOptions{
-        SkipNodesWithSystemPods:   true,
-        SkipNodesWithLocalStorage: true,
-        MinReplicaCount:           0,
+        SkipNodesWithSystemPods:           true,
+        SkipNodesWithLocalStorage:         true,
+        MinReplicaCount:                   0,
+        SkipNodesWithCustomControllerPods: true,
     }
     sd := NewScaleDown(ctx, NewTestProcessors(ctx), ndt, deleteOptions)
     actuator := actuation.NewActuator(ctx, clusterStateRegistry, ndt, deleteOptions)

@@ -170,12 +170,7 @@ func NewStaticAutoscaler(
     clusterStateRegistry := clusterstate.NewClusterStateRegistry(autoscalingContext.CloudProvider, clusterStateConfig, autoscalingContext.LogRecorder, backoff)
     processors.ScaleDownCandidatesNotifier.Register(clusterStateRegistry)

-    deleteOptions := simulator.NodeDeleteOptions{
-        SkipNodesWithSystemPods:           opts.SkipNodesWithSystemPods,
-        SkipNodesWithLocalStorage:         opts.SkipNodesWithLocalStorage,
-        MinReplicaCount:                   opts.MinReplicaCount,
-        SkipNodesWithCustomControllerPods: opts.SkipNodesWithCustomControllerPods,
-    }
+    deleteOptions := simulator.NewNodeDeleteOptions(opts)

     // TODO: Populate the ScaleDownActuator/Planner fields in AutoscalingContext
     // during the struct creation rather than here.

@@ -148,11 +148,7 @@ func (m *onNodeGroupDeleteMock) Delete(id string) error {
 }

 func setUpScaleDownActuator(ctx *context.AutoscalingContext, options config.AutoscalingOptions) {
-    deleteOptions := simulator.NodeDeleteOptions{
-        SkipNodesWithSystemPods:   options.SkipNodesWithSystemPods,
-        SkipNodesWithLocalStorage: options.SkipNodesWithLocalStorage,
-        MinReplicaCount:           options.MinReplicaCount,
-    }
+    deleteOptions := simulator.NewNodeDeleteOptions(options)
     ctx.ScaleDownActuator = actuation.NewActuator(ctx, nil, deletiontracker.NewNodeDeletionTracker(0*time.Second), deleteOptions)
 }

@@ -1727,9 +1723,10 @@ func newScaleDownPlannerAndActuator(t *testing.T, ctx *context.AutoscalingContex
     ctx.NodeDeletionBatcherInterval = 0 * time.Second
     ctx.NodeDeleteDelayAfterTaint = 1 * time.Second
     deleteOptions := simulator.NodeDeleteOptions{
-        SkipNodesWithSystemPods:   true,
-        SkipNodesWithLocalStorage: true,
-        MinReplicaCount:           0,
+        SkipNodesWithSystemPods:           true,
+        SkipNodesWithLocalStorage:         true,
+        MinReplicaCount:                   0,
+        SkipNodesWithCustomControllerPods: true,
     }
     ndt := deletiontracker.NewNodeDeletionTracker(0 * time.Second)
     sd := legacy.NewScaleDown(ctx, p, ndt, deleteOptions)

@@ -51,11 +51,7 @@ type EmptySorting struct {

 // NewEmptySortingProcessor return EmptySorting struct.
 func NewEmptySortingProcessor(opts *config.AutoscalingOptions, n nodeInfoGetter) *EmptySorting {
-    deleteOptions := simulator.NodeDeleteOptions{
-        SkipNodesWithSystemPods:   opts.SkipNodesWithSystemPods,
-        SkipNodesWithLocalStorage: opts.SkipNodesWithLocalStorage,
-        MinReplicaCount:           opts.MinReplicaCount,
-    }
+    deleteOptions := simulator.NewNodeDeleteOptions(*opts)
     return &EmptySorting{n, deleteOptions}
 }

@@ -63,9 +63,10 @@ func TestScaleDownEarlierThan(t *testing.T) {
     niGetter := testNodeInfoGetter{map[string]*schedulerframework.NodeInfo{nodeEmptyName: niEmpty, nodeNonEmptyName: niNonEmpty, nodeEmptyName2: niEmpty2}}

     deleteOptions := simulator.NodeDeleteOptions{
-        SkipNodesWithSystemPods:   true,
-        SkipNodesWithLocalStorage: true,
-        MinReplicaCount:           0,
+        SkipNodesWithSystemPods:           true,
+        SkipNodesWithLocalStorage:         true,
+        MinReplicaCount:                   0,
+        SkipNodesWithCustomControllerPods: true,
     }
     p := EmptySorting{&niGetter, deleteOptions}

@@ -217,8 +217,9 @@ func TestFindNodesToRemove(t *testing.T) {

 func testDeleteOptions() NodeDeleteOptions {
     return NodeDeleteOptions{
-        SkipNodesWithSystemPods:   true,
-        SkipNodesWithLocalStorage: true,
-        MinReplicaCount:           0,
+        SkipNodesWithSystemPods:           true,
+        SkipNodesWithLocalStorage:         true,
+        MinReplicaCount:                   0,
+        SkipNodesWithCustomControllerPods: true,
     }
 }

@@ -24,6 +24,7 @@ import (
     policyv1 "k8s.io/api/policy/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"
+    "k8s.io/autoscaler/cluster-autoscaler/config"
     "k8s.io/autoscaler/cluster-autoscaler/utils/drain"
     kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
     schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"

@@ -42,6 +43,16 @@ type NodeDeleteOptions struct {
     MinReplicaCount int
 }

+// NewNodeDeleteOptions returns new node delete options extracted from autoscaling options
+func NewNodeDeleteOptions(opts config.AutoscalingOptions) NodeDeleteOptions {
+    return NodeDeleteOptions{
+        SkipNodesWithSystemPods:           opts.SkipNodesWithSystemPods,
+        SkipNodesWithLocalStorage:         opts.SkipNodesWithLocalStorage,
+        MinReplicaCount:                   opts.MinReplicaCount,
+        SkipNodesWithCustomControllerPods: opts.SkipNodesWithCustomControllerPods,
+    }
+}
+
 // GetPodsToMove returns a list of pods that should be moved elsewhere
 // and a list of DaemonSet pods that should be evicted if the node
 // is drained. Raises error if there is an unreplicated pod.

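Below is a rough usage sketch of the new constructor, assuming it lives in the cluster-autoscaler `simulator` package as the call sites in this commit suggest; the `main` wrapper and the concrete option values are illustrative placeholders, not part of the change.

package main

import (
    "fmt"

    "k8s.io/autoscaler/cluster-autoscaler/config"
    "k8s.io/autoscaler/cluster-autoscaler/simulator"
)

func main() {
    // Illustrative values; in the autoscaler these come from parsed flags.
    opts := config.AutoscalingOptions{
        SkipNodesWithSystemPods:           true,
        SkipNodesWithLocalStorage:         true,
        MinReplicaCount:                   0,
        SkipNodesWithCustomControllerPods: false,
    }

    // A single constructor call replaces the hand-written struct literals that
    // previously omitted SkipNodesWithCustomControllerPods at some call sites.
    deleteOptions := simulator.NewNodeDeleteOptions(opts)
    fmt.Printf("%+v\n", deleteOptions)
}
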
@@ -93,7 +93,6 @@ func GetPodsForDeletionOnNodeDrain(
         }
     }

-    isDaemonSetPod := false
     for _, pod := range podList {
         if pod_util.IsMirrorPod(pod) {
             continue

@@ -107,6 +106,7 @@ func GetPodsForDeletionOnNodeDrain(
             continue
         }

+        isDaemonSetPod := false
         replicated := false
         safeToEvict := hasSafeToEvictAnnotation(pod)
         terminal := isPodTerminal(pod)

@@ -118,12 +118,8 @@ func GetPodsForDeletionOnNodeDrain(
                 return []*apiv1.Pod{}, []*apiv1.Pod{}, blockingPod, err
             }
         } else {
-            if ControllerRef(pod) != nil {
-                replicated = true
-            }
-            if pod_util.IsDaemonSetPod(pod) {
-                isDaemonSetPod = true
-            }
+            replicated = ControllerRef(pod) != nil
+            isDaemonSetPod = pod_util.IsDaemonSetPod(pod)
         }

         if isDaemonSetPod {

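The drain-logic part of this fix is a scoping change: `isDaemonSetPod` used to be declared once before the loop over `podList`, so a value set while classifying one pod leaked into every later pod; the hunks above re-initialize it per iteration. A standalone sketch of the difference follows (not autoscaler code, just the pattern), runnable as-is.

package main

import "fmt"

func main() {
    pods := []string{"daemonset-pod", "regular-pod"}

    // Buggy pattern: the flag is declared once, outside the loop, so the value
    // set for the first pod leaks into the check for the second one.
    isDaemonSetPod := false
    for _, p := range pods {
        if p == "daemonset-pod" {
            isDaemonSetPod = true
        }
        fmt.Printf("buggy: %s -> daemonset=%v\n", p, isDaemonSetPod)
    }

    // Fixed pattern, matching the hunks above: the flag is re-initialized for
    // every pod, so each pod is classified independently.
    for _, p := range pods {
        isDaemonSetPod := p == "daemonset-pod"
        fmt.Printf("fixed: %s -> daemonset=%v\n", p, isDaemonSetPod)
    }
}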