Merge pull request #6509 from x13n/master
Reduce log spam in AtomicResizeFilteringProcessor
commit abc077e222
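The change replaces the unconditional per-node klog.V(2).Infof calls in GetNodesToRemove with quota-capped klogx loggers: each candidate node is logged individually only while its quota lasts (20 nodes by default, 1000 at verbosity >= 5), and a single Over() summary afterwards reports how many nodes were skipped. Below is a minimal, self-contained sketch of that pattern, using only the klogx helpers visible in the diff (NodesLoggingQuota, V().UpTo, V().Over, Quota.Left); the candidate names are made up for illustration.

package main

import (
	"k8s.io/autoscaler/cluster-autoscaler/utils/klogx"
)

func main() {
	// One quota per message category. NodesLoggingQuota picks the limit from
	// the current klog verbosity: MaxNodesLogged (20) normally,
	// MaxNodesLoggedV5 (1000) when -v=5 or higher is enabled.
	quota := klogx.NodesLoggingQuota()

	// Hypothetical candidate list, purely for illustration.
	candidates := []string{"node-1", "node-2", "node-3"}

	for _, name := range candidates {
		// Logged individually only while the quota is not exhausted.
		// (Whether anything prints also depends on the process's klog -v flag.)
		klogx.V(2).UpTo(quota).Infof("Considering node %s for scale down", name)
	}

	// Once the quota is spent, Left() goes negative by the number of suppressed
	// attempts, so -quota.Left() reports how many nodes were not logged,
	// exactly as the diff does for its summary lines.
	klogx.V(2).Over(quota).Infof("Considering %d other nodes for scale down", -quota.Left())
}

The processor keeps two separate quotas (one for atomic, one for standard candidates), so each category gets its own cap and its own summary line.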
@@ -20,6 +20,7 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 	"k8s.io/autoscaler/cluster-autoscaler/context"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator"
+	"k8s.io/autoscaler/cluster-autoscaler/utils/klogx"
 	klog "k8s.io/klog/v2"
 )
 
@@ -83,6 +84,8 @@ type AtomicResizeFilteringProcessor struct {
 
 // GetNodesToRemove selects up to maxCount nodes for deletion, by selecting a first maxCount candidates
 func (p *AtomicResizeFilteringProcessor) GetNodesToRemove(ctx *context.AutoscalingContext, candidates []simulator.NodeToBeRemoved, maxCount int) []simulator.NodeToBeRemoved {
+	atomicQuota := klogx.NodesLoggingQuota()
+	standardQuota := klogx.NodesLoggingQuota()
 	nodesByGroup := map[cloudprovider.NodeGroup][]simulator.NodeToBeRemoved{}
 	result := []simulator.NodeToBeRemoved{}
 	for _, node := range candidates {
@@ -97,13 +100,15 @@ func (p *AtomicResizeFilteringProcessor) GetNodesToRemove(ctx *context.Autoscali
 			continue
 		}
 		if autoscalingOptions != nil && autoscalingOptions.ZeroOrMaxNodeScaling {
-			klog.V(2).Infof("Considering node %s for atomic scale down", node.Node.Name)
+			klogx.V(2).UpTo(atomicQuota).Infof("Considering node %s for atomic scale down", node.Node.Name)
 			nodesByGroup[nodeGroup] = append(nodesByGroup[nodeGroup], node)
 		} else {
-			klog.V(2).Infof("Considering node %s for standard scale down", node.Node.Name)
+			klogx.V(2).UpTo(standardQuota).Infof("Considering node %s for standard scale down", node.Node.Name)
 			result = append(result, node)
 		}
 	}
+	klogx.V(2).Over(atomicQuota).Infof("Considering %d other nodes for atomic scale down", -atomicQuota.Left())
+	klogx.V(2).Over(standardQuota).Infof("Considering %d other nodes for standard scale down", -standardQuota.Left())
 	for nodeGroup, nodes := range nodesByGroup {
 		ngSize, err := nodeGroup.TargetSize()
 		if err != nil {

@@ -25,6 +25,12 @@ const (
 	// MaxPodsLoggedV5 is the maximum number of pods for which we will
 	// log detailed information every loop at verbosity >= 5.
 	MaxPodsLoggedV5 = 1000
+	// MaxNodesLogged is the maximum number of nodes for which we will
+	// log detailed information every loop at verbosity < 5.
+	MaxNodesLogged = 20
+	// MaxNodesLoggedV5 is the maximum number of nodes for which we will
+	// log detailed information every loop at verbosity >= 5.
+	MaxNodesLoggedV5 = 1000
 )
 
 // PodsLoggingQuota returns a new quota with default limit for pods at current verbosity.
@@ -34,3 +40,11 @@ func PodsLoggingQuota() *Quota {
 	}
 	return NewLoggingQuota(MaxPodsLogged)
 }
+
+// NodesLoggingQuota returns a new quota with default limit for nodes at current verbosity.
+func NodesLoggingQuota() *Quota {
+	if klog.V(5).Enabled() {
+		return NewLoggingQuota(MaxNodesLoggedV5)
+	}
+	return NewLoggingQuota(MaxNodesLogged)
+}
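The second set of hunks extends the klogx utility package that the processor now imports: NodesLoggingQuota mirrors the existing PodsLoggingQuota, choosing the per-loop limit from the klog verbosity. The summary lines rely on Quota.Left() turning negative once more log attempts have been made than the quota allows, so -quota.Left() equals the number of suppressed lines. The Quota type itself is not part of this diff; the sketch below is only an assumption about how that bookkeeping could look, not the actual klogx implementation.

// Sketch only: a possible shape of the quota bookkeeping that UpTo/Over rely on.
// The real klogx.Quota and its chained logger API may differ in detail.
package klogxsketch

import klog "k8s.io/klog/v2"

// Quota tracks how many detailed log lines may still be emitted for one category.
type Quota struct {
	limit int
	left  int
}

// NewLoggingQuota returns a quota allowing n lines.
func NewLoggingQuota(n int) *Quota {
	return &Quota{limit: n, left: n}
}

// Left is negative by the number of suppressed attempts once the quota is spent.
func (q *Quota) Left() int { return q.left }

// UpToInfof logs only while the quota lasts; every attempt is counted.
func UpToInfof(v klog.Level, q *Quota, format string, args ...interface{}) {
	q.left--
	if q.left >= 0 {
		klog.V(v).Infof(format, args...)
	}
}

// OverInfof logs only if the quota was exceeded, typically a single summary line.
func OverInfof(v klog.Level, q *Quota, format string, args ...interface{}) {
	if q.left < 0 {
		klog.V(v).Infof(format, args...)
	}
}

With klog verbosity below 5, each category is therefore capped at 20 detailed lines per loop plus one summary line, instead of one line per candidate node.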