From c255aaa2a9ef2af0b52b749959bffdc9975c9518 Mon Sep 17 00:00:00 2001 From: Hakan Bostan Date: Wed, 31 May 2023 15:03:11 +0000 Subject: [PATCH] Use appropriate logging levels --- cluster-autoscaler/core/scaleup/orchestrator/orchestrator.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cluster-autoscaler/core/scaleup/orchestrator/orchestrator.go b/cluster-autoscaler/core/scaleup/orchestrator/orchestrator.go index 9aab1f0334..24487f5832 100644 --- a/cluster-autoscaler/core/scaleup/orchestrator/orchestrator.go +++ b/cluster-autoscaler/core/scaleup/orchestrator/orchestrator.go @@ -448,7 +448,7 @@ func (o *ScaleUpOrchestrator) filterValidScaleUpNodeGroups( } if autoscalingOptions != nil && autoscalingOptions.AtomicScaleUp { if o.autoscalingContext.MaxNodesTotal != 0 && currentNodeCount+nodeGroup.MaxSize() > o.autoscalingContext.MaxNodesTotal { - klog.Errorf("Skipping node group %s - atomic scale-up exceetds cluster node count limit", nodeGroup.Id()) + klog.V(4).Infof("Skipping node group %s - atomic scale-up exceeds cluster node count limit", nodeGroup.Id()) skippedNodeGroups[nodeGroup.Id()] = NewSkippedReasons("atomic scale-up exceeds cluster node count limit") continue } @@ -505,7 +505,6 @@ func (o *ScaleUpOrchestrator) ComputeExpansionOption( estimator := estimator.NewBinpackingNodeEstimator(o.autoscalingContext.PredicateChecker, o.autoscalingContext.ClusterSnapshot, limiter, estimator.NewDecreasingPodOrderer()) option.NodeCount, option.Pods = estimator.Estimate(pods, nodeInfo, nodeGroup) if option.NodeCount != nodeGroup.MaxSize() { - klog.Warningf("Estimator NodeCount: %d setting to %d", option.NodeCount, nodeGroup.MaxSize()) option.NodeCount = nodeGroup.MaxSize() } return option