Merge pull request #6509 from x13n/master

Reduce log spam in AtomicResizeFilteringProcessor
Kubernetes Prow Robot 2024-02-12 23:02:45 -08:00 committed by GitHub
commit abc077e222
2 changed files with 21 additions and 2 deletions
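
The change replaces the unconditional per-node klog.V(2) lines with quota-limited logging, so each GetNodesToRemove call emits at most a fixed number of per-node messages plus a single summary line per category. Below is a minimal sketch of that pattern, using only the klogx helpers that appear in this diff (NodesLoggingQuota, UpTo, Over, Left) and assuming the quota semantics implied by the -quota.Left() usage; the node names, the standalone main, and the flag setup are illustrative only.

```go
package main

import (
	"flag"
	"fmt"

	"k8s.io/autoscaler/cluster-autoscaler/utils/klogx"
	klog "k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)
	_ = flag.Set("v", "2") // enable V(2) output for this demo
	defer klog.Flush()

	nodes := make([]string, 100)
	for i := range nodes {
		nodes[i] = fmt.Sprintf("node-%d", i)
	}

	// At verbosity < 5 a fresh quota allows MaxNodesLogged (20) detailed lines.
	quota := klogx.NodesLoggingQuota()
	for _, name := range nodes {
		// UpTo logs only while the quota still has budget; later calls are suppressed.
		klogx.V(2).UpTo(quota).Infof("Considering node %s for scale down", name)
	}
	// Once the quota is exceeded, Left() goes negative, so -Left() counts the
	// suppressed messages; Over emits one summary line covering them.
	klogx.V(2).Over(quota).Infof("Considering %d other nodes for scale down", -quota.Left())
}
```

In the diff below, GetNodesToRemove keeps two independent quotas, so atomic and standard scale-down candidates each get their own per-loop budget and their own summary line.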

@@ -20,6 +20,7 @@ import (
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
"k8s.io/autoscaler/cluster-autoscaler/context"
"k8s.io/autoscaler/cluster-autoscaler/simulator"
"k8s.io/autoscaler/cluster-autoscaler/utils/klogx"
klog "k8s.io/klog/v2"
)
@@ -83,6 +84,8 @@ type AtomicResizeFilteringProcessor struct {
// GetNodesToRemove selects up to maxCount nodes for deletion, by selecting the first maxCount candidates
func (p *AtomicResizeFilteringProcessor) GetNodesToRemove(ctx *context.AutoscalingContext, candidates []simulator.NodeToBeRemoved, maxCount int) []simulator.NodeToBeRemoved {
+ atomicQuota := klogx.NodesLoggingQuota()
+ standardQuota := klogx.NodesLoggingQuota()
nodesByGroup := map[cloudprovider.NodeGroup][]simulator.NodeToBeRemoved{}
result := []simulator.NodeToBeRemoved{}
for _, node := range candidates {
@@ -97,13 +100,15 @@ func (p *AtomicResizeFilteringProcessor) GetNodesToRemove(ctx *context.Autoscali
continue
}
if autoscalingOptions != nil && autoscalingOptions.ZeroOrMaxNodeScaling {
klog.V(2).Infof("Considering node %s for atomic scale down", node.Node.Name)
klogx.V(2).UpTo(atomicQuota).Infof("Considering node %s for atomic scale down", node.Node.Name)
nodesByGroup[nodeGroup] = append(nodesByGroup[nodeGroup], node)
} else {
klog.V(2).Infof("Considering node %s for standard scale down", node.Node.Name)
klogx.V(2).UpTo(standardQuota).Infof("Considering node %s for standard scale down", node.Node.Name)
result = append(result, node)
}
}
+ klogx.V(2).Over(atomicQuota).Infof("Considering %d other nodes for atomic scale down", -atomicQuota.Left())
+ klogx.V(2).Over(standardQuota).Infof("Considering %d other nodes for standard scale down", -standardQuota.Left())
for nodeGroup, nodes := range nodesByGroup {
ngSize, err := nodeGroup.TargetSize()
if err != nil {

@@ -25,6 +25,12 @@ const (
// MaxPodsLoggedV5 is the maximum number of pods for which we will
// log detailed information every loop at verbosity >= 5.
MaxPodsLoggedV5 = 1000
+ // MaxNodesLogged is the maximum number of nodes for which we will
+ // log detailed information every loop at verbosity < 5.
+ MaxNodesLogged = 20
+ // MaxNodesLoggedV5 is the maximum number of nodes for which we will
+ // log detailed information every loop at verbosity >= 5.
+ MaxNodesLoggedV5 = 1000
)
// PodsLoggingQuota returns a new quota with default limit for pods at current verbosity.
@@ -34,3 +40,11 @@ func PodsLoggingQuota() *Quota {
}
return NewLoggingQuota(MaxPodsLogged)
}
+
+ // NodesLoggingQuota returns a new quota with default limit for nodes at current verbosity.
+ func NodesLoggingQuota() *Quota {
+ if klog.V(5).Enabled() {
+ return NewLoggingQuota(MaxNodesLoggedV5)
+ }
+ return NewLoggingQuota(MaxNodesLogged)
+ }
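
As with the existing PodsLoggingQuota, the limit a fresh NodesLoggingQuota starts with depends on the current klog verbosity. A small hypothetical check, assuming a new quota's Left() equals its configured limit:

```go
package main

import (
	"flag"
	"fmt"

	"k8s.io/autoscaler/cluster-autoscaler/utils/klogx"
	klog "k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)

	// Below verbosity 5 the smaller limit applies.
	_ = flag.Set("v", "2")
	fmt.Println(klogx.NodesLoggingQuota().Left()) // expected: 20 (MaxNodesLogged)

	// At verbosity >= 5 the larger limit applies.
	_ = flag.Set("v", "5")
	fmt.Println(klogx.NodesLoggingQuota().Left()) // expected: 1000 (MaxNodesLoggedV5)
}
```

Keeping the same 20/1000 split as the pods quota means the detailed per-node lines only show up in full when the autoscaler is already running at high verbosity.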