Remove ScaleDown dependency on clusterStateRegistry

Daniel Kłobuszewski 2022-09-01 16:15:29 +02:00
parent f445a6a887
commit 95fd1ed645
4 changed files with 11 additions and 14 deletions
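In short: the legacy ScaleDown planner no longer writes scale-down candidates into the ClusterStateRegistry itself. Its caller, StaticAutoscaler.RunOnce, now reads the result back via UnneededNodes() and updates the registry and the unneeded-nodes metric on both the success and the error path. Below is a minimal, self-contained sketch of the resulting shape, using toy stand-ins rather than the real cluster-autoscaler types:

package main

import (
    "fmt"
    "time"
)

// Registry is a toy stand-in for clusterstate.ClusterStateRegistry.
type Registry struct{ candidates []string }

func (r *Registry) UpdateScaleDownCandidates(nodes []string, now time.Time) {
    r.candidates = nodes
}

// Planner is a toy stand-in for the legacy ScaleDown planner. Note that,
// as in this commit, it no longer holds a Registry field.
type Planner struct{ unneeded []string }

// UpdateClusterState recomputes the unneeded set; it no longer touches the registry.
func (p *Planner) UpdateClusterState(nodes []string) error {
    p.unneeded = nil
    for _, n := range nodes {
        if n != "busy" {
            p.unneeded = append(p.unneeded, n)
        }
    }
    return nil
}

// UnneededNodes exposes the result so the caller can sync external state.
func (p *Planner) UnneededNodes() []string { return p.unneeded }

func main() {
    planner := &Planner{}
    registry := &Registry{}

    err := planner.UpdateClusterState([]string{"busy", "idle-1", "idle-2"})

    // As in the new RunOnce: sync the registry and metrics regardless of err.
    unneeded := planner.UnneededNodes()
    registry.UpdateScaleDownCandidates(unneeded, time.Now())
    fmt.Println(len(unneeded), err) // 2 <nil>
}

Inverting the dependency this way leaves the planner with a single responsibility, computing unneeded nodes, and keeps all registry writes in one place in the caller.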

View File

@@ -22,7 +22,6 @@ import (
 	"time"

 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
-	"k8s.io/autoscaler/cluster-autoscaler/clusterstate"
 	"k8s.io/autoscaler/cluster-autoscaler/context"
 	"k8s.io/autoscaler/cluster-autoscaler/core/scaledown/deletiontracker"
 	"k8s.io/autoscaler/cluster-autoscaler/core/scaledown/eligibility"
@@ -46,7 +45,6 @@ import (
 type ScaleDown struct {
 	context              *context.AutoscalingContext
 	processors           *processors.AutoscalingProcessors
-	clusterStateRegistry *clusterstate.ClusterStateRegistry
 	unremovableNodes     *unremovable.Nodes
 	unneededNodes        *unneeded.Nodes
 	podLocationHints     map[string]string
@@ -59,7 +57,7 @@ type ScaleDown struct {
 }

 // NewScaleDown builds new ScaleDown object.
-func NewScaleDown(context *context.AutoscalingContext, processors *processors.AutoscalingProcessors, clusterStateRegistry *clusterstate.ClusterStateRegistry, ndt *deletiontracker.NodeDeletionTracker, deleteOptions simulator.NodeDeleteOptions) *ScaleDown {
+func NewScaleDown(context *context.AutoscalingContext, processors *processors.AutoscalingProcessors, ndt *deletiontracker.NodeDeletionTracker, deleteOptions simulator.NodeDeleteOptions) *ScaleDown {
 	usageTracker := simulator.NewUsageTracker()
 	removalSimulator := simulator.NewRemovalSimulator(context.ListerRegistry, context.ClusterSnapshot, context.PredicateChecker, usageTracker, deleteOptions, false)
 	unremovableNodes := unremovable.NewNodes()
@@ -67,7 +65,6 @@ func NewScaleDown(context *context.AutoscalingContext, processors *processors.Au
 	return &ScaleDown{
 		context:              context,
 		processors:           processors,
-		clusterStateRegistry: clusterStateRegistry,
 		unremovableNodes:     unremovableNodes,
 		unneededNodes:        unneeded.NewNodes(processors.NodeGroupConfigProcessor, resourceLimitsFinder),
 		podLocationHints:     make(map[string]string),
@@ -219,9 +216,6 @@ func (sd *ScaleDown) UpdateUnneededNodes(
 	// Update state and metrics
 	sd.podLocationHints = newHints
 	sd.nodeUtilizationMap = utilizationMap
-	unneededNodesList := sd.unneededNodes.AsList()
-	sd.clusterStateRegistry.UpdateScaleDownCandidates(unneededNodesList, timestamp)
-	metrics.UpdateUnneededNodesCount(len(unneededNodesList))
 	return nil
 }
@@ -236,14 +230,13 @@ func (sd *ScaleDown) UnremovableNodes() []*simulator.UnremovableNode {
 	return sd.unremovableNodes.AsList()
 }

 // markSimulationError indicates a simulation error by clearing relevant scale
 // down state and returning an appropriate error.
 func (sd *ScaleDown) markSimulationError(simulatorErr errors.AutoscalerError,
 	timestamp time.Time) errors.AutoscalerError {
 	klog.Errorf("Error while simulating node drains: %v", simulatorErr)
 	sd.unneededNodes.Clear()
 	sd.nodeUtilizationMap = make(map[string]utilization.Info)
-	sd.clusterStateRegistry.UpdateScaleDownCandidates(nil, timestamp)
 	return simulatorErr.AddPrefix("error while simulating node drains: ")
 }
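Note that markSimulationError keeps clearing the planner's own state but drops the explicit UpdateScaleDownCandidates(nil, timestamp) reset. The reset still happens, just indirectly: the caller syncs the registry from UnneededNodes() even when simulation fails, and a cleared planner reports an empty list. A small sketch of that equivalence, again with toy types rather than the real API:

package main

import "fmt"

// planner is a toy stand-in; markSimulationError clears state and wraps the error.
type planner struct{ unneeded []string }

func (p *planner) markSimulationError(err error) error {
    // Previously this also called sd.clusterStateRegistry.UpdateScaleDownCandidates(nil, timestamp).
    p.unneeded = nil
    return fmt.Errorf("error while simulating node drains: %w", err)
}

func (p *planner) UnneededNodes() []string { return p.unneeded }

func main() {
    p := &planner{unneeded: []string{"n1", "n2"}}
    simErr := p.markSimulationError(fmt.Errorf("boom"))

    // The caller-side sync now runs even on error, so the registry still
    // receives an empty candidate list, matching the old explicit reset.
    fmt.Println(p.UnneededNodes(), simErr) // [] error while simulating node drains: boom
}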

View File

@@ -1289,13 +1289,12 @@ func newWrapperForTesting(ctx *context.AutoscalingContext, clusterStateRegistry
 	if ndt == nil {
 		ndt = deletiontracker.NewNodeDeletionTracker(0 * time.Second)
 	}
 	deleteOptions := simulator.NodeDeleteOptions{
 		SkipNodesWithSystemPods:   true,
 		SkipNodesWithLocalStorage: true,
 		MinReplicaCount:           0,
 	}
-	sd := NewScaleDown(ctx, NewTestProcessors(), clusterStateRegistry, ndt, deleteOptions)
+	sd := NewScaleDown(ctx, NewTestProcessors(), ndt, deleteOptions)
 	actuator := actuation.NewActuator(ctx, clusterStateRegistry, ndt, deleteOptions)
 	return NewScaleDownWrapper(sd, actuator)
 }

View File

@@ -164,7 +164,7 @@ func NewStaticAutoscaler(
 	}

 	ndt := deletiontracker.NewNodeDeletionTracker(0 * time.Second)
-	scaleDown := legacy.NewScaleDown(autoscalingContext, processors, clusterStateRegistry, ndt, deleteOptions)
+	scaleDown := legacy.NewScaleDown(autoscalingContext, processors, ndt, deleteOptions)
 	actuator := actuation.NewActuator(autoscalingContext, clusterStateRegistry, ndt, deleteOptions)
 	scaleDownWrapper := legacy.NewScaleDownWrapper(scaleDown, actuator)
 	processorCallbacks.scaleDownPlanner = scaleDownWrapper
@@ -528,7 +528,12 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) errors.AutoscalerError
 	}

 	actuationStatus := a.scaleDownActuator.CheckStatus()
-	if typedErr := a.scaleDownPlanner.UpdateClusterState(podDestinations, scaleDownCandidates, actuationStatus, pdbs, currentTime); typedErr != nil {
+	typedErr := a.scaleDownPlanner.UpdateClusterState(podDestinations, scaleDownCandidates, actuationStatus, pdbs, currentTime)
+	// Update clusterStateRegistry and metrics regardless of whether ScaleDown was successful or not.
+	unneededNodes := a.scaleDownPlanner.UnneededNodes()
+	a.clusterStateRegistry.UpdateScaleDownCandidates(unneededNodes, currentTime)
+	metrics.UpdateUnneededNodesCount(len(unneededNodes))
+	if typedErr != nil {
 		scaleDownStatus.Result = status.ScaleDownError
 		klog.Errorf("Failed to scale down: %v", typedErr)
 		return typedErr
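Placing the registry and metrics update ahead of the error check preserves the previous behavior on both paths: on success the registry receives the freshly computed candidates (formerly written inside UpdateUnneededNodes), and on a simulation error the planner has already cleared its unneeded set, so the registry receives an empty list, matching the removed UpdateScaleDownCandidates(nil, timestamp) call.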

View File

@@ -1507,7 +1507,7 @@ func newScaleDownPlannerAndActuator(t *testing.T, ctx *context.AutoscalingContex
 		MinReplicaCount: 0,
 	}
 	ndt := deletiontracker.NewNodeDeletionTracker(0 * time.Second)
-	sd := legacy.NewScaleDown(ctx, p, cs, ndt, deleteOptions)
+	sd := legacy.NewScaleDown(ctx, p, ndt, deleteOptions)
 	actuator := actuation.NewActuator(ctx, cs, ndt, deleteOptions)
 	wrapper := legacy.NewScaleDownWrapper(sd, actuator)
 	return wrapper, wrapper