Create a Planner object if --parallelDrain=true
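With the flag set, NewStaticAutoscaler constructs the new scaledown Planner and uses the Actuator directly; without it, the legacy ScaleDownWrapper keeps serving as both planner and actuator.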

Yaroslava Serdiuk 2022-12-06 11:16:45 +00:00
parent df627e22ac
commit ae45571af9
4 changed files with 23 additions and 6 deletions

@@ -211,4 +211,6 @@ type AutoscalingOptions struct {
     MinReplicaCount int
     // NodeDeleteDelayAfterTaint is the duration to wait before deleting a node after tainting it
     NodeDeleteDelayAfterTaint time.Duration
+    // ParallelDrain is whether CA can drain nodes in parallel.
+    ParallelDrain bool
 }
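Since the zero value of a Go bool is false, existing configurations that never set ParallelDrain keep the legacy scale-down path by default.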

@@ -124,7 +124,7 @@ func (p *Planner) CleanUpUnneededNodes() {
 // NodesToDelete returns all Nodes that could be removed right now, according
 // to the Planner.
-func (p *Planner) NodesToDelete() (empty, needDrain []*apiv1.Node) {
+func (p *Planner) NodesToDelete(_ time.Time) (empty, needDrain []*apiv1.Node) {
     nodes, err := allNodes(p.context.ClusterSnapshot)
     if err != nil {
         klog.Errorf("Nothing will scale down, failed to list nodes from ClusterSnapshot: %v", err)

@@ -23,6 +23,7 @@ import (
     apiv1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/labels"
+    "k8s.io/autoscaler/cluster-autoscaler/core/scaledown/planner"
     scaledownstatus "k8s.io/autoscaler/cluster-autoscaler/core/scaledown/status"
     "k8s.io/autoscaler/cluster-autoscaler/debuggingsnapshot"
     schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
@@ -175,9 +176,18 @@ func NewStaticAutoscaler(
     actuator := actuation.NewActuator(autoscalingContext, clusterStateRegistry, ndt, deleteOptions)
     autoscalingContext.ScaleDownActuator = actuator
-    // TODO: Remove the wrapper once the legacy implementation becomes obsolete.
-    scaleDownWrapper := legacy.NewScaleDownWrapper(scaleDown, actuator)
-    processorCallbacks.scaleDownPlanner = scaleDownWrapper
+    var scaleDownPlanner scaledown.Planner
+    var scaleDownActuator scaledown.Actuator
+    if opts.ParallelDrain {
+        scaleDownPlanner = planner.New(autoscalingContext, processors, deleteOptions)
+        scaleDownActuator = actuator
+    } else {
+        // TODO: Remove the wrapper once the legacy implementation becomes obsolete.
+        scaleDownWrapper := legacy.NewScaleDownWrapper(scaleDown, actuator)
+        scaleDownPlanner = scaleDownWrapper
+        scaleDownActuator = scaleDownWrapper
+    }
+    processorCallbacks.scaleDownPlanner = scaleDownPlanner
 
     scaleUpResourceManager := scaleup.NewResourceManager(processors.CustomResourcesProcessor)
@@ -189,8 +199,8 @@ func NewStaticAutoscaler(
         lastScaleUpTime:         initialScaleTime,
         lastScaleDownDeleteTime: initialScaleTime,
         lastScaleDownFailTime:   initialScaleTime,
-        scaleDownPlanner:        scaleDownWrapper,
-        scaleDownActuator:       scaleDownWrapper,
+        scaleDownPlanner:        scaleDownPlanner,
+        scaleDownActuator:       scaleDownActuator,
         scaleUpResourceManager:  scaleUpResourceManager,
         processors:              processors,
         processorCallbacks:      processorCallbacks,
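The two branches type-check against the same pair of fields because the legacy ScaleDownWrapper implements both the planner and the actuator roles, while the parallel-drain path splits them across the new Planner and the Actuator. A compact, runnable sketch of that wiring, with hypothetical types standing in for the upstream scaledown.Planner and scaledown.Actuator interfaces:

package main

import "fmt"

// Illustrative stand-ins for scaledown.Planner and scaledown.Actuator.
type Planner interface{ Plan() string }
type Actuator interface{ Actuate() string }

// One type can play both roles, as the legacy wrapper does in the
// else branch of the commit.
type wrapper struct{}

func (wrapper) Plan() string    { return "legacy plan" }
func (wrapper) Actuate() string { return "legacy actuate" }

// The parallel-drain path uses two distinct objects instead.
type dedicatedPlanner struct{}

func (dedicatedPlanner) Plan() string { return "parallel-drain plan" }

type dedicatedActuator struct{}

func (dedicatedActuator) Actuate() string { return "parallel-drain actuate" }

func pick(parallelDrain bool) (Planner, Actuator) {
    if parallelDrain {
        return dedicatedPlanner{}, dedicatedActuator{}
    }
    w := wrapper{}
    return w, w // one object satisfies both interfaces
}

func main() {
    p, a := pick(false)
    fmt.Println(p.Plan(), "/", a.Actuate())
    p, a = pick(true)
    fmt.Println(p.Plan(), "/", a.Actuate())
}

Returning the same wrapper value for both interfaces is what lets the legacy branch keep its single-object design while the struct fields above stay implementation-agnostic.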

@@ -212,6 +212,7 @@ var (
     minReplicaCount            = flag.Int("min-replica-count", 0, "Minimum number or replicas that a replica set or replication controller should have to allow their pods deletion in scale down")
     nodeDeleteDelayAfterTaint  = flag.Duration("node-delete-delay-after-taint", 5*time.Second, "How long to wait before deleting a node after tainting it")
     scaleDownSimulationTimeout = flag.Duration("scale-down-simulation-timeout", 5*time.Minute, "How long should we run scale down simulation.")
+    parallelDrain              = flag.Bool("parallel-drain", false, "Whether to allow parallel drain of nodes.")
 )
 
 func createAutoscalingOptions() config.AutoscalingOptions {
@@ -231,6 +232,9 @@ func createAutoscalingOptions() config.AutoscalingOptions {
     if err != nil {
         klog.Fatalf("Failed to parse flags: %v", err)
     }
+    if *maxDrainParallelismFlag > 1 && !*parallelDrain {
+        klog.Fatalf("Invalid configuration, could not use --max-drain-parallelism > 1 if --parallel-drain is false")
+    }
     return config.AutoscalingOptions{
         NodeGroupDefaults: config.NodeGroupAutoscalingOptions{
             ScaleDownUtilizationThreshold: *scaleDownUtilizationThreshold,
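This guard fails fast when draining more than one node is requested while parallel drain is still disabled. A trimmed-down, runnable sketch of the same cross-flag validation; the max-drain-parallelism definition here is an assumption (only parallel-drain is defined in this diff, and maxDrainParallelismFlag is declared elsewhere in main.go):

package main

import (
    "flag"
    "log"
)

var (
    // Matches the flag added in this commit.
    parallelDrain = flag.Bool("parallel-drain", false, "Whether to allow parallel drain of nodes.")
    // Assumed stand-in for the existing max-drain-parallelism flag
    // referenced by the validation below.
    maxDrainParallelism = flag.Int("max-drain-parallelism", 1, "Maximum number of nodes drained in parallel.")
)

func main() {
    flag.Parse()
    // Same shape as the guard in createAutoscalingOptions.
    if *maxDrainParallelism > 1 && !*parallelDrain {
        log.Fatalf("Invalid configuration, could not use --max-drain-parallelism > 1 if --parallel-drain is false")
    }
    log.Printf("parallel-drain=%v, max-drain-parallelism=%d", *parallelDrain, *maxDrainParallelism)
}

Run with --max-drain-parallelism=5 alone and the program exits with the error above; add --parallel-drain=true and the combination is accepted.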
@@ -309,6 +313,7 @@ func createAutoscalingOptions() config.AutoscalingOptions {
         MinReplicaCount:            *minReplicaCount,
         NodeDeleteDelayAfterTaint:  *nodeDeleteDelayAfterTaint,
         ScaleDownSimulationTimeout: *scaleDownSimulationTimeout,
+        ParallelDrain:              *parallelDrain,
     }
 }