Fix klog formatting directives in cluster-autoscaler package.
parent db5e83bc1a
commit 4470430007
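Context for the hunks below: klog's formatted variants (Infof, Warningf, Errorf, Fatalf) pass their format string and arguments through the fmt package, so a verb that does not match its argument, a missing argument, or an extra argument corrupts the log line at runtime rather than failing at compile time. A minimal sketch of the three failure modes, using illustrative values rather than the patched code:

package main

import "fmt"

func main() {
	id := 42
	// Wrong verb: %s applied to an int decorates the value instead of printing it.
	fmt.Println(fmt.Sprintf("Deleted server %s", id)) // Deleted server %!s(int=42)
	// Too few arguments: the unmatched verb is flagged inline.
	fmt.Println(fmt.Sprintf("group %s: %v", "ng-1")) // group ng-1: %!v(MISSING)
	// Too many arguments: the surplus is appended to the message.
	fmt.Println(fmt.Sprintf("arch is %s", "amd64", "arm64")) // arch is amd64%!(EXTRA string=arm64)
}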
@@ -540,7 +540,7 @@ func (mgr *cherryManagerRest) deleteServer(ctx context.Context, nodegroup string
 		return err
 	}
 
-	klog.Infof("Deleted server %s: %v", id, result)
+	klog.Infof("Deleted server %d: %v", id, result)
 
 	return nil
 }
@@ -591,10 +591,10 @@ func (mgr *cherryManagerRest) deleteNodes(nodegroup string, nodes []NodeRef, upd
 
 		switch {
 		case s.Hostname == n.Name:
-			klog.V(1).Infof("Matching Cherry Server %s - %s", s.Hostname, s.ID)
+			klog.V(1).Infof("Matching Cherry Server %s - %d", s.Hostname, s.ID)
 			errList = append(errList, mgr.deleteServer(ctx, nodegroup, s.ID))
 		case fakeNode && int(nodeID) == s.ID:
-			klog.V(1).Infof("Fake Node %s", s.ID)
+			klog.V(1).Infof("Fake Node %d", s.ID)
 			errList = append(errList, mgr.deleteServer(ctx, nodegroup, s.ID))
 		}
 	}
@@ -107,7 +107,7 @@ func (n *NodeGroup) IncreaseSize(delta int) error {
 func (n *NodeGroup) DeleteNodes(nodes []*apiv1.Node) error {
 	for _, node := range nodes {
 		instanceID := toNodeID(node.Spec.ProviderID)
-		klog.V(4).Info("deleteing node: %q", instanceID)
+		klog.V(4).Infof("deleteing node: %q", instanceID)
 		_, err := n.client.DeleteKubernetesClusterPoolInstance(n.clusterID, n.id, instanceID)
 		if err != nil {
 			return fmt.Errorf("deleting node failed for cluster: %q node pool: %q node: %q: %s",
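The fix above is a different bug class: klog.Info (like Warning and Error) handles its arguments in the manner of fmt.Print, so format directives in the message are written out literally. A sketch of the before and after output, reusing the hunk's variable name with an illustrative value; note that fmt.Print inserts no space between adjacent string operands, and V(4) lines only appear when running with -v=4 or higher:

package main

import "k8s.io/klog/v2"

func main() {
	instanceID := "node-abc123" // illustrative value
	klog.V(4).Info("deleteing node: %q", instanceID)  // logs: deleteing node: %qnode-abc123
	klog.V(4).Infof("deleteing node: %q", instanceID) // logs: deleteing node: "node-abc123"
}

(The "deleteing" spelling is carried over from the source string, which this commit leaves unchanged.)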
@@ -18,12 +18,13 @@ package clusterapi
 
 import (
 	"fmt"
-	"k8s.io/klog/v2"
 	"os"
 	"strconv"
 	"strings"
 	"sync"
 
+	"k8s.io/klog/v2"
+
 	"github.com/pkg/errors"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -335,7 +336,7 @@ func GetDefaultScaleFromZeroArchitecture() SystemArchitecture {
 	once.Do(func() {
 		archStr := os.Getenv(scaleUpFromZeroDefaultArchEnvVar)
 		arch := SystemArchitectureFromString(archStr)
-		klog.V(5).Infof("the default scale from zero architecture value is set to %s (%s)", scaleUpFromZeroDefaultArchEnvVar, archStr, arch.Name())
+		klog.V(5).Infof("the default scale from zero architecture value is set to %s (%s)", archStr, arch.Name())
 		if arch == UnknownArch {
 			arch = DefaultArch
 			klog.Errorf("Unrecognized architecture '%s', falling back to %s",
@@ -336,7 +336,7 @@ func (m *gceManagerImpl) refreshAutoscalingOptions() {
 	for _, mig := range m.migLister.GetMigs() {
 		template, err := m.migInfoProvider.GetMigInstanceTemplate(mig.GceRef())
 		if err != nil {
-			klog.Warningf("Not evaluating autoscaling options for %q MIG: failed to find corresponding instance template", mig.GceRef(), err)
+			klog.Warningf("Not evaluating autoscaling options for %q MIG: failed to find corresponding instance template: %v", mig.GceRef(), err)
 			continue
 		}
 		if template.Properties == nil {
@@ -485,7 +485,7 @@ func extractKubeReservedFromKubeEnv(kubeEnv string) (string, error) {
 func extractExtendedResourcesFromKubeEnv(kubeEnvValue string) (apiv1.ResourceList, error) {
 	extendedResourcesAsString, found, err := extractAutoscalerVarFromKubeEnv(kubeEnvValue, "extended_resources")
 	if err != nil {
-		klog.Warning("error while obtaining extended_resources from AUTOSCALER_ENV_VARS; %v", err)
+		klog.Warningf("error while obtaining extended_resources from AUTOSCALER_ENV_VARS; %v", err)
 		return nil, err
 	}
 
@@ -503,7 +503,7 @@ func extractExtendedResourcesFromKubeEnv(kubeEnvValue string) (apiv1.ResourceLis
 		if q, err := resource.ParseQuantity(quantity); err == nil && q.Sign() >= 0 {
 			extendedResources[apiv1.ResourceName(name)] = q
 		} else if err != nil {
-			klog.Warning("ignoring invalid value in extended_resources defined in AUTOSCALER_ENV_VARS; %v", err)
+			klog.Warningf("ignoring invalid value in extended_resources defined in AUTOSCALER_ENV_VARS; %v", err)
 		}
 	}
 	return extendedResources, nil
@@ -739,7 +739,7 @@ func extractAutoscalingOptionsFromKubeEnv(kubeEnvValue string) (map[string]strin
 func extractEvictionHardFromKubeEnv(kubeEnvValue string) (map[string]string, error) {
 	evictionHardAsString, found, err := extractAutoscalerVarFromKubeEnv(kubeEnvValue, "evictionHard")
 	if err != nil {
-		klog.Warning("error while obtaining eviction-hard from AUTOSCALER_ENV_VARS; %v", err)
+		klog.Warningf("error while obtaining eviction-hard from AUTOSCALER_ENV_VARS; %v", err)
 		return nil, err
 	}
 
@@ -19,12 +19,13 @@ package hetzner
 import (
 	"context"
 	"fmt"
-	"maps"
 	"math/rand"
 	"strings"
 	"sync"
 	"time"
 
+	"maps"
+
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -155,7 +156,7 @@ func (n *hetznerNodeGroup) DeleteNodes(nodes []*apiv1.Node) error {
 
 			err := n.manager.deleteByNode(node)
 			if err != nil {
-				klog.Errorf("failed to delete server ID %d error: %v", node.Name, err)
+				klog.Errorf("failed to delete server ID %s error: %v", node.Name, err)
 			}
 
 			waitGroup.Done()
@@ -58,7 +58,7 @@ func CreateCloudConfig(cloudConfigPath string, configProvider common.Configurati
 	}
 	if cloudConfig.Global.RefreshInterval == 0 {
 		if os.Getenv(ipconsts.OciRefreshInterval) != "" {
-			klog.V(4).Info("using a custom cache refresh interval %v...", os.Getenv(ipconsts.OciRefreshInterval))
+			klog.V(4).Infof("using a custom cache refresh interval %v...", os.Getenv(ipconsts.OciRefreshInterval))
 			cloudConfig.Global.RefreshInterval, _ = time.ParseDuration(os.Getenv(ipconsts.OciRefreshInterval))
 		} else {
 			if implType == npconsts.OciNodePoolResourceIdent {
@@ -67,7 +67,7 @@ func BuildOVHcloud(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDis
 
 	configFile, err = os.Open(opts.CloudConfig)
 	if err != nil {
-		klog.Fatalf("Failed to open cloud provider configuration %s: %v", opts.CloudConfig)
+		klog.Fatalf("Failed to open cloud provider configuration %s: %v", opts.CloudConfig, err)
 	}
 
 	defer configFile.Close()
@@ -268,7 +268,7 @@ func (provider *OVHCloudProvider) GetAvailableGPUTypes() map[string]struct{} {
 
 	flavorsByName, err := provider.manager.getFlavorsByName()
 	if err != nil {
-		klog.Errorf("Failed to get flavors: %w", err)
+		klog.Errorf("Failed to get flavors: %v", err)
 		return nil
 	}
 
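This %w → %v change, like the two clusterstate ones further down, exists because %w is special-cased by fmt.Errorf for error wrapping; klog formats through fmt.Sprintf, which treats %w as an unknown verb and decorates the error instead of printing it. A short sketch of the difference:

package main

import (
	"errors"
	"fmt"
)

func main() {
	err := errors.New("boom")
	// %w is meaningful only to fmt.Errorf, where it wraps the error:
	wrapped := fmt.Errorf("failed to get flavors: %w", err)
	fmt.Println(errors.Unwrap(wrapped) == err) // true
	// Sprintf (and therefore klog.Errorf) has no %w handling; this prints
	// a %!w(...)-decorated value rather than the error text:
	fmt.Println(fmt.Sprintf("failed to get flavors: %w", err))
	// %v is the right verb when only logging:
	fmt.Println(fmt.Sprintf("failed to get flavors: %v", err)) // failed to get flavors: boom
}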
@@ -307,7 +307,7 @@ func (ts *CloudServiceImpl) GetAutoScalingInstances(asgRef TcRef) ([]*as.Instanc
 	}
 	if resp.Response.TotalCount != nil {
 		if totalCount != *resp.Response.TotalCount {
-			klog.Warningf("%s instance totalCount changed: %d->%d, reset request", totalCount, *resp.Response.TotalCount)
+			klog.Warningf("%s instance totalCount changed: %d->%d, reset request", asgRef.ID, totalCount, *resp.Response.TotalCount)
 			totalCount = *resp.Response.TotalCount
 			res = []*as.Instance{}
 		}
@@ -203,7 +203,7 @@ func (csr *ClusterStateRegistry) MaxNodeProvisionTime(nodeGroup cloudprovider.No
 func (csr *ClusterStateRegistry) registerOrUpdateScaleUpNoLock(nodeGroup cloudprovider.NodeGroup, delta int, currentTime time.Time) {
 	maxNodeProvisionTime, err := csr.MaxNodeProvisionTime(nodeGroup)
 	if err != nil {
-		klog.Warningf("Couldn't update scale up request: failed to get maxNodeProvisionTime for node group %s: %w", nodeGroup.Id(), err)
+		klog.Warningf("Couldn't update scale up request: failed to get maxNodeProvisionTime for node group %s: %v", nodeGroup.Id(), err)
 		return
 	}
 
@@ -628,7 +628,7 @@ func (csr *ClusterStateRegistry) updateReadinessStats(currentTime time.Time) {
 		perNgCopy := perNodeGroup[nodeGroup.Id()]
 		maxNodeProvisionTime, err := csr.MaxNodeProvisionTime(nodeGroup)
 		if err != nil {
-			klog.Warningf("Failed to get maxNodeProvisionTime for node %s in node group %s: %w", unregistered.Node.Name, nodeGroup.Id(), err)
+			klog.Warningf("Failed to get maxNodeProvisionTime for node %s in node group %s: %v", unregistered.Node.Name, nodeGroup.Id(), err)
 			continue
 		}
 		if unregistered.UnregisteredSince.Add(maxNodeProvisionTime).Before(currentTime) {
@@ -122,7 +122,7 @@ func (c *Checker) unremovableReasonAndNodeUtilization(context *context.Autoscali
 
 	nodeGroup, err := context.CloudProvider.NodeGroupForNode(node)
 	if err != nil {
-		klog.Warning("Node group not found for node %v: %v", node.Name, err)
+		klog.Warningf("Node group not found for node %v: %v", node.Name, err)
 		return simulator.UnexpectedError, nil
 	}
 	if nodeGroup == nil || reflect.ValueOf(nodeGroup).IsNil() {
@@ -370,20 +370,20 @@ func (o *ScaleUpOrchestrator) ScaleUpToNodeGroupMinSize(
 		}
 
 		if skipReason := o.IsNodeGroupResourceExceeded(resourcesLeft, ng, nodeInfo, 1); skipReason != nil {
-			klog.Warning("ScaleUpToNodeGroupMinSize: node group resource excceded: %v", skipReason)
+			klog.Warningf("ScaleUpToNodeGroupMinSize: node group resource excceded: %v", skipReason)
 			continue
 		}
 
 		newNodeCount := ng.MinSize() - targetSize
 		newNodeCount, err = o.resourceManager.ApplyLimits(o.autoscalingContext, newNodeCount, resourcesLeft, nodeInfo, ng)
 		if err != nil {
-			klog.Warning("ScaleUpToNodeGroupMinSize: failed to apply resource limits: %v", err)
+			klog.Warningf("ScaleUpToNodeGroupMinSize: failed to apply resource limits: %v", err)
 			continue
 		}
 
 		newNodeCount, err = o.GetCappedNewNodeCount(newNodeCount, targetSize)
 		if err != nil {
-			klog.Warning("ScaleUpToNodeGroupMinSize: failed to get capped node count: %v", err)
+			klog.Warningf("ScaleUpToNodeGroupMinSize: failed to get capped node count: %v", err)
 			continue
 		}
 
@@ -136,7 +136,7 @@ func (s *DebuggingSnapshotImpl) SetStartTimestamp(t time.Time) {
 func (s *DebuggingSnapshotImpl) GetOutputBytes() ([]byte, bool) {
 	errMsgSet := false
 	if s.Error != "" {
-		klog.Errorf("Debugging snapshot found with error message set when GetOutputBytes() is called. - ", s.Error)
+		klog.Errorf("Debugging snapshot found with error message set when GetOutputBytes() is called: %v", s.Error)
 		errMsgSet = true
 	}
 
@@ -49,7 +49,7 @@ func (t *sngCapacityThreshold) computeNodeGroupCapacity(nodeGroup cloudprovider.
 	nodeGroupTargetSize, err := nodeGroup.TargetSize()
 	// Should not ever happen as only valid node groups are passed to estimator
 	if err != nil {
-		klog.Errorf("Error while computing available capacity of a node group %v: can't get target size of the group", nodeGroup.Id(), err)
+		klog.Errorf("Error while computing available capacity of a node group %v: can't get target size of the group: %v", nodeGroup.Id(), err)
 		return 0
 	}
 	groupCapacity := nodeGroup.MaxSize() - nodeGroupTargetSize
@@ -129,13 +129,13 @@ func transformAndSanitizeOptionsFromGRPC(bestOptionsResponseOptions []*protos.Op
 	var options []expander.Option
 	for _, option := range bestOptionsResponseOptions {
 		if option == nil {
-			klog.Errorf("GRPC server returned nil Option")
+			klog.Error("GRPC server returned nil Option")
 			continue
 		}
 		if _, ok := nodeGroupIDOptionMap[option.NodeGroupId]; ok {
 			options = append(options, nodeGroupIDOptionMap[option.NodeGroupId])
 		} else {
-			klog.Errorf("GRPC server returned invalid nodeGroup ID: ", option.NodeGroupId)
+			klog.Errorf("GRPC server returned invalid nodeGroup ID: %s", option.NodeGroupId)
 			continue
 		}
 	}
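Every class of bug fixed in this commit is what the printf analyzer in go vet is meant to catch: klog's ...f functions are ordinarily detected as printf wrappers, so running go vet ./... over the touched packages should flag most of the old calls, though wrapper detection and exact diagnostic wording vary by Go version. A sketch under that assumption:

// example.go — go vet reports, roughly:
//   ./example.go:9:2: Infof format %s has arg id of wrong type int
package main

import "k8s.io/klog/v2"

func main() {
	id := 540 // illustrative server ID
	klog.Infof("Deleted server %s: %v", id, "ok") // pre-fix form of the first hunk
}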