Merge pull request #7954 from abdelrahman882/FixScaledownCoolDown
Fix cool down status condition to trigger scale down
commit 52cd68a498
@@ -627,6 +627,7 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) caerrors.AutoscalerErr
 	if scaleDownInCooldown {
 		scaleDownStatus.Result = scaledownstatus.ScaleDownInCooldown
+		a.updateSoftDeletionTaints(allNodes)
 	} else {
 		klog.V(4).Infof("Starting scale down")
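
This first hunk is the heart of the fix: soft-deletion taint maintenance now also runs while scale down is in cooldown, not only on the active scale-down path. A condensed sketch of the resulting RunOnce branch (identifiers as in the diff; the surrounding logic is elided, so this is an illustration rather than the full method body):

if scaleDownInCooldown {
	scaleDownStatus.Result = scaledownstatus.ScaleDownInCooldown
	a.updateSoftDeletionTaints(allNodes) // new: refresh taints during cooldown too
} else {
	klog.V(4).Infof("Starting scale down")
	// ... actuate scale down, record lastScaleDownDeleteTime, Recalculate() ...
	a.updateSoftDeletionTaints(allNodes) // moved here from the Result check (next hunk)
}
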
@@ -645,21 +646,7 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) caerrors.AutoscalerErr
 		a.lastScaleDownDeleteTime = currentTime
 		a.clusterStateRegistry.Recalculate()
 	}
-
-	if scaleDownStatus.Result == scaledownstatus.ScaleDownNoNodeDeleted &&
-		a.AutoscalingContext.AutoscalingOptions.MaxBulkSoftTaintCount != 0 {
-		taintableNodes := a.scaleDownPlanner.UnneededNodes()
-
-		// Make sure we are only cleaning taints from selected node groups.
-		selectedNodes := filterNodesFromSelectedGroups(a.CloudProvider, allNodes...)
-
-		// This is a sanity check to make sure `taintableNodes` only includes
-		// nodes from selected nodes.
-		taintableNodes = intersectNodes(selectedNodes, taintableNodes)
-		untaintableNodes := subtractNodes(selectedNodes, taintableNodes)
-		actuation.UpdateSoftDeletionTaints(a.AutoscalingContext, taintableNodes, untaintableNodes)
-	}
-
+	a.updateSoftDeletionTaints(allNodes)
 	if typedErr != nil {
 		klog.Errorf("Failed to scale down: %v", typedErr)
 		a.lastScaleDownFailTime = currentTime
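
Note the behavioral change in this hunk: the old inline block only ran when the iteration ended with scaleDownStatus.Result == scaledownstatus.ScaleDownNoNodeDeleted, so a cooldown iteration (Result == ScaleDownInCooldown) never refreshed soft-deletion taints. The extracted helper keeps only the MaxBulkSoftTaintCount gate. In effect the guard changes roughly like this (bodies elided):

// Before: gated on the scale-down result.
if scaleDownStatus.Result == scaledownstatus.ScaleDownNoNodeDeleted &&
	a.AutoscalingContext.AutoscalingOptions.MaxBulkSoftTaintCount != 0 {
	// ... taint update ...
}

// After: only the feature gate remains (inside updateSoftDeletionTaints),
// and the helper is called from both branches shown above.
if a.AutoscalingContext.AutoscalingOptions.MaxBulkSoftTaintCount != 0 {
	// ... taint update ...
}
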
@@ -679,6 +666,21 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) caerrors.AutoscalerErr
 	return nil
 }
 
+func (a *StaticAutoscaler) updateSoftDeletionTaints(allNodes []*apiv1.Node) {
+	if a.AutoscalingContext.AutoscalingOptions.MaxBulkSoftTaintCount != 0 {
+		taintableNodes := a.scaleDownPlanner.UnneededNodes()
+
+		// Make sure we are only cleaning taints from selected node groups.
+		selectedNodes := filterNodesFromSelectedGroups(a.CloudProvider, allNodes...)
+
+		// This is a sanity check to make sure `taintableNodes` only includes
+		// nodes from selected nodes.
+		taintableNodes = intersectNodes(selectedNodes, taintableNodes)
+		untaintableNodes := subtractNodes(selectedNodes, taintableNodes)
+		actuation.UpdateSoftDeletionTaints(a.AutoscalingContext, taintableNodes, untaintableNodes)
+	}
+}
+
 func (a *StaticAutoscaler) addUpcomingNodesToClusterSnapshot(upcomingCounts map[string]int, nodeInfosForGroups map[string]*framework.NodeInfo) error {
 	nodeGroups := a.nodeGroupsById()
 	upcomingNodeGroups := make(map[string]int)
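
The new helper leans on three functions outside this diff: filterNodesFromSelectedGroups, intersectNodes, and subtractNodes. For readers following along, here is a minimal sketch of the set semantics the last two are assumed to have, matching nodes by name; this is an illustration, not the repository's implementation (apiv1 is k8s.io/api/core/v1, as imported elsewhere in the file):

// intersectNodes returns the nodes of b whose names also appear in a.
func intersectNodes(a, b []*apiv1.Node) []*apiv1.Node {
	names := make(map[string]bool, len(a))
	for _, n := range a {
		names[n.Name] = true
	}
	var out []*apiv1.Node
	for _, n := range b {
		if names[n.Name] {
			out = append(out, n)
		}
	}
	return out
}

// subtractNodes returns the nodes of a whose names do not appear in b.
func subtractNodes(a, b []*apiv1.Node) []*apiv1.Node {
	names := make(map[string]bool, len(b))
	for _, n := range b {
		names[n.Name] = true
	}
	var out []*apiv1.Node
	for _, n := range a {
		if !names[n.Name] {
			out = append(out, n)
		}
	}
	return out
}

Under these assumed semantics, taintableNodes ends up as the unneeded nodes restricted to selected node groups, and untaintableNodes as every other node in those groups: the two sets that actuation.UpdateSoftDeletionTaints consumes.
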