From c0603afdebc1304879b6b2019879833b9992c6c0 Mon Sep 17 00:00:00 2001
From: CodeLingo Bot
Date: Tue, 4 Dec 2018 16:59:57 +0100
Subject: [PATCH] Fix error format strings according to best practices from
 CodeReviewComments

Fix error format strings according to best practices from
CodeReviewComments: error strings should not be capitalized. Revert one
incorrect change to an error format string, resolve conflicts, and fix
error strings in test cases to remedy failing tests.

Signed-off-by: CodeLingo Bot
---
 addon-resizer/nanny/kubernetes_client.go           |  4 ++--
 .../cloudprovider/aws/auto_scaling.go              |  2 +-
 .../cloudprovider/aws/auto_scaling_groups.go       |  2 +-
 .../cloudprovider/aws/aws_cloud_provider.go        |  2 +-
 .../cloudprovider/aws/aws_manager.go               |  2 +-
 cluster-autoscaler/cloudprovider/aws/ec2.go        |  4 ++--
 .../cloudprovider/azure/azure_cache.go             |  2 +-
 .../cloudprovider/azure/azure_client.go            |  4 ++--
 .../azure/azure_container_service_pool.go          |  8 ++++----
 .../cloudprovider/azure/azure_util.go              | 14 +++++++-------
 .../cloudprovider/gce/autoscaling_gce_client.go    |  2 +-
 cluster-autoscaler/cloudprovider/gce/cache.go      |  4 ++--
 .../cloudprovider/gce/gce_cloud_provider.go        |  2 +-
 .../cloudprovider/gce/gce_cloud_provider_test.go   |  2 +-
 .../cloudprovider/gce/gce_manager.go               |  2 +-
 .../cloudprovider/gce/gce_manager_test.go          |  2 +-
 cluster-autoscaler/cloudprovider/gce/gce_url.go    |  4 ++--
 cluster-autoscaler/cloudprovider/gce/templates.go  |  2 +-
 .../gke/autoscaling_gke_client_v1beta1.go          |  2 +-
 .../cloudprovider/gke/gke_cloud_provider.go        |  6 +++---
 .../cloudprovider/gke/gke_cloud_provider_test.go   |  2 +-
 .../cloudprovider/gke/gke_manager.go               |  2 +-
 .../cloudprovider/gke/gke_manager_test.go          |  2 +-
 .../cloudprovider/node_group_discovery_options.go  |  8 ++++----
 .../cloudprovider/test/test_cloud_provider.go      |  6 +++---
 cluster-autoscaler/core/scale_down.go              |  2 +-
 cluster-autoscaler/core/scale_down_test.go         | 12 ++++++------
 cluster-autoscaler/core/scale_up_test.go           |  1 +
 cluster-autoscaler/core/utils.go                   |  2 +-
 cluster-autoscaler/estimator/estimator.go          |  2 +-
 cluster-autoscaler/main.go                         | 12 ++++++------
 cluster-autoscaler/main_test.go                    | 14 +++++++-------
 cluster-autoscaler/simulator/cluster.go            |  2 +-
 vertical-pod-autoscaler/e2e/actuation.go           |  2 +-
 vertical-pod-autoscaler/e2e/common.go              |  4 ++--
 vertical-pod-autoscaler/e2e/updater.go             |  8 ++++----
 .../pkg/admission-controller/logic/server.go       |  6 +++---
 .../pkg/recommender/input/cluster_feeder.go        |  4 ++--
 .../recommender/input/history/prometheus_client.go |  2 +-
 .../recommender/model/aggregate_container_state.go |  2 +-
 .../pkg/recommender/model/cluster.go               |  4 ++--
 .../pkg/recommender/model/container.go             |  2 +-
 42 files changed, 87 insertions(+), 86 deletions(-)

diff --git a/addon-resizer/nanny/kubernetes_client.go b/addon-resizer/nanny/kubernetes_client.go
index 3ec185046b..b5633d7766 100644
--- a/addon-resizer/nanny/kubernetes_client.go
+++ b/addon-resizer/nanny/kubernetes_client.go
@@ -63,7 +63,7 @@ func (k *kubernetesClient) ContainerResources() (*apiv1.ResourceRequirements, er
 			return &container.Resources, nil
 		}
 	}
-	return nil, fmt.Errorf("Container %s was not found in deployment %s in namespace %s.", k.container, k.deployment, k.namespace)
+	return nil, fmt.Errorf("container %s was not found in deployment %s in namespace %s.", k.container, k.deployment, k.namespace)
fmt.Errorf("container %s was not found in deployment %s in namespace %s.", k.container, k.deployment, k.namespace) } func (k *kubernetesClient) UpdateDeployment(resources *apiv1.ResourceRequirements) error { @@ -83,7 +83,7 @@ func (k *kubernetesClient) UpdateDeployment(resources *apiv1.ResourceRequirement } } - return fmt.Errorf("Container %s was not found in the deployment %s in namespace %s.", k.container, k.deployment, k.namespace) + return fmt.Errorf("container %s was not found in the deployment %s in namespace %s.", k.container, k.deployment, k.namespace) } // NewKubernetesClient gives a KubernetesClient with the given dependencies. diff --git a/cluster-autoscaler/cloudprovider/aws/auto_scaling.go b/cluster-autoscaler/cloudprovider/aws/auto_scaling.go index 02c96f03af..16fe91ad5a 100644 --- a/cluster-autoscaler/cloudprovider/aws/auto_scaling.go +++ b/cluster-autoscaler/cloudprovider/aws/auto_scaling.go @@ -49,7 +49,7 @@ func (m autoScalingWrapper) getInstanceTypeByLCName(name string) (string, error) return "", err } if len(launchConfigurations.LaunchConfigurations) < 1 { - return "", fmt.Errorf("Unable to get first LaunchConfiguration for %s", name) + return "", fmt.Errorf("unable to get first LaunchConfiguration for %s", name) } return *launchConfigurations.LaunchConfigurations[0].InstanceType, nil diff --git a/cluster-autoscaler/cloudprovider/aws/auto_scaling_groups.go b/cluster-autoscaler/cloudprovider/aws/auto_scaling_groups.go index c626d267d2..1cc9102192 100644 --- a/cluster-autoscaler/cloudprovider/aws/auto_scaling_groups.go +++ b/cluster-autoscaler/cloudprovider/aws/auto_scaling_groups.go @@ -188,7 +188,7 @@ func (m *asgCache) InstancesByAsg(ref AwsRef) ([]AwsInstanceRef, error) { return instances, nil } - return nil, fmt.Errorf("Error while looking for instances of ASG: %s", ref) + return nil, fmt.Errorf("error while looking for instances of ASG: %s", ref) } func (m *asgCache) SetAsgSize(asg *asg, size int) error { diff --git a/cluster-autoscaler/cloudprovider/aws/aws_cloud_provider.go b/cluster-autoscaler/cloudprovider/aws/aws_cloud_provider.go index 1f6a9f43b6..5f1364ccc0 100644 --- a/cluster-autoscaler/cloudprovider/aws/aws_cloud_provider.go +++ b/cluster-autoscaler/cloudprovider/aws/aws_cloud_provider.go @@ -144,7 +144,7 @@ var validAwsRefIdRegex = regexp.MustCompile(`^aws\:\/\/\/[-0-9a-z]*\/[-0-9a-z]*$ // must be in format: aws:///zone/name func AwsRefFromProviderId(id string) (*AwsInstanceRef, error) { if validAwsRefIdRegex.FindStringSubmatch(id) == nil { - return nil, fmt.Errorf("Wrong id: expected format aws:////, got %v", id) + return nil, fmt.Errorf("wrong id: expected format aws:////, got %v", id) } splitted := strings.Split(id[7:], "/") return &AwsInstanceRef{ diff --git a/cluster-autoscaler/cloudprovider/aws/aws_manager.go b/cluster-autoscaler/cloudprovider/aws/aws_manager.go index 142b7f9188..a71fb2f4ea 100644 --- a/cluster-autoscaler/cloudprovider/aws/aws_manager.go +++ b/cluster-autoscaler/cloudprovider/aws/aws_manager.go @@ -184,7 +184,7 @@ func (m *AwsManager) GetAsgNodes(ref AwsRef) ([]AwsInstanceRef, error) { func (m *AwsManager) getAsgTemplate(asg *asg) (*asgTemplate, error) { if len(asg.AvailabilityZones) < 1 { - return nil, fmt.Errorf("Unable to get first AvailabilityZone for ASG %q", asg.Name) + return nil, fmt.Errorf("unable to get first AvailabilityZone for ASG %q", asg.Name) } az := asg.AvailabilityZones[0] diff --git a/cluster-autoscaler/cloudprovider/aws/ec2.go b/cluster-autoscaler/cloudprovider/aws/ec2.go index 950dadd80f..2620209a9e 100644 --- 
+++ b/cluster-autoscaler/cloudprovider/aws/ec2.go
@@ -43,14 +43,14 @@ func (m ec2Wrapper) getInstanceTypeByLT(name string, version string) (string, er
 	}
 
 	if len(describeData.LaunchTemplateVersions) == 0 {
-		return "", fmt.Errorf("Unable to find template versions")
+		return "", fmt.Errorf("unable to find template versions")
 	}
 
 	lt := describeData.LaunchTemplateVersions[0]
 	instanceType := lt.LaunchTemplateData.InstanceType
 
 	if instanceType == nil {
-		return "", fmt.Errorf("Unable to find instance type within launch template")
+		return "", fmt.Errorf("unable to find instance type within launch template")
 	}
 
 	return aws.StringValue(instanceType), nil
diff --git a/cluster-autoscaler/cloudprovider/azure/azure_cache.go b/cluster-autoscaler/cloudprovider/azure/azure_cache.go
index 143f4f199f..345155ef33 100644
--- a/cluster-autoscaler/cloudprovider/azure/azure_cache.go
+++ b/cluster-autoscaler/cloudprovider/azure/azure_cache.go
@@ -146,7 +146,7 @@ func (m *asgCache) FindForInstance(instance *azureRef, vmType string) (cloudprov
 	}
 
 	if err := m.regenerate(); err != nil {
-		return nil, fmt.Errorf("Error while looking for ASG for instance %+v, error: %v", *instance, err)
+		return nil, fmt.Errorf("error while looking for ASG for instance %+v, error: %v", *instance, err)
 	}
 	if config, found := m.instanceToAsg[*instance]; found {
 		return config, nil
diff --git a/cluster-autoscaler/cloudprovider/azure/azure_client.go b/cluster-autoscaler/cloudprovider/azure/azure_client.go
index d976996719..6373c2f0ee 100644
--- a/cluster-autoscaler/cloudprovider/azure/azure_client.go
+++ b/cluster-autoscaler/cloudprovider/azure/azure_client.go
@@ -429,7 +429,7 @@ func newServicePrincipalTokenFromCredentials(config *Config, env *azure.Environm
 		klog.V(2).Infoln("azure: using managed identity extension to retrieve access token")
 		msiEndpoint, err := adal.GetMSIVMEndpoint()
 		if err != nil {
-			return nil, fmt.Errorf("Getting the managed service identity endpoint: %v", err)
+			return nil, fmt.Errorf("getting the managed service identity endpoint: %v", err)
 		}
 		return adal.NewServicePrincipalTokenFromMSI(
 			msiEndpoint,
@@ -463,7 +463,7 @@ func newServicePrincipalTokenFromCredentials(config *Config, env *azure.Environm
 			env.ServiceManagementEndpoint)
 	}
 
-	return nil, fmt.Errorf("No credentials provided for AAD application %s", config.AADClientID)
+	return nil, fmt.Errorf("no credentials provided for AAD application %s", config.AADClientID)
 }
 
 func newAzClient(cfg *Config, env *azure.Environment) (*azClient, error) {
diff --git a/cluster-autoscaler/cloudprovider/azure/azure_container_service_pool.go b/cluster-autoscaler/cloudprovider/azure/azure_container_service_pool.go
index 9d0fbb4a31..762687042c 100644
--- a/cluster-autoscaler/cloudprovider/azure/azure_container_service_pool.go
+++ b/cluster-autoscaler/cloudprovider/azure/azure_container_service_pool.go
@@ -298,7 +298,7 @@ func (agentPool *ContainerServiceAgentPool) TargetSize() (int, error) {
 func (agentPool *ContainerServiceAgentPool) SetSize(targetSize int) error {
 	if targetSize > agentPool.MaxSize() || targetSize < agentPool.MinSize() {
 		klog.Errorf("Target size %d requested outside Max: %d, Min: %d", targetSize, agentPool.MaxSize(), agentPool.MaxSize())
-		return fmt.Errorf("Target size %d requested outside Max: %d, Min: %d", targetSize, agentPool.MaxSize(), agentPool.MinSize())
+		return fmt.Errorf("target size %d requested outside Max: %d, Min: %d", targetSize, agentPool.MaxSize(), agentPool.MinSize())
 	}
 	klog.V(2).Infof("Setting size for cluster (%q) with new count (%d)", agentPool.clusterName, targetSize)
@@ -314,7 +314,7 @@ func (agentPool *ContainerServiceAgentPool) SetSize(targetSize int) error {
 //parameter
 func (agentPool *ContainerServiceAgentPool) IncreaseSize(delta int) error {
 	if delta <= 0 {
-		return fmt.Errorf("Size increase must be +ve")
+		return fmt.Errorf("size increase must be +ve")
 	}
 	currentSize, err := agentPool.TargetSize()
 	if err != nil {
@@ -322,7 +322,7 @@ func (agentPool *ContainerServiceAgentPool) IncreaseSize(delta int) error {
 	}
 	targetSize := int(currentSize) + delta
 	if targetSize > agentPool.MaxSize() {
-		return fmt.Errorf("Size increase request of %d more than max size %d set", targetSize, agentPool.MaxSize())
+		return fmt.Errorf("size increase request of %d more than max size %d set", targetSize, agentPool.MaxSize())
 	}
 	return agentPool.SetSize(targetSize)
 }
@@ -413,7 +413,7 @@ func (agentPool *ContainerServiceAgentPool) GetNodes() ([]string, error) {
 func (agentPool *ContainerServiceAgentPool) DecreaseTargetSize(delta int) error {
 	if delta >= 0 {
 		klog.Errorf("Size decrease error: %d", delta)
-		return fmt.Errorf("Size decrease must be negative")
+		return fmt.Errorf("size decrease must be negative")
 	}
 	currentSize, err := agentPool.TargetSize()
 	if err != nil {
diff --git a/cluster-autoscaler/cloudprovider/azure/azure_util.go b/cluster-autoscaler/cloudprovider/azure/azure_util.go
index a7e7e37303..ed89a14815 100644
--- a/cluster-autoscaler/cloudprovider/azure/azure_util.go
+++ b/cluster-autoscaler/cloudprovider/azure/azure_util.go
@@ -249,7 +249,7 @@ func normalizeForK8sVMASScalingUp(templateMap map[string]interface{}) error {
 		resourceType, ok := resourceMap[typeFieldName].(string)
 		if ok && resourceType == nsgResourceType {
 			if nsgIndex != -1 {
-				err := fmt.Errorf("Found 2 resources with type %s in the template. There should only be 1", nsgResourceType)
+				err := fmt.Errorf("found 2 resources with type %s in the template. There should only be 1", nsgResourceType)
 				klog.Errorf(err.Error())
 				return err
 			}
@@ -257,7 +257,7 @@ func normalizeForK8sVMASScalingUp(templateMap map[string]interface{}) error {
 		}
 		if ok && resourceType == rtResourceType {
 			if rtIndex != -1 {
-				err := fmt.Errorf("Found 2 resources with type %s in the template. There should only be 1", rtResourceType)
+				err := fmt.Errorf("found 2 resources with type %s in the template. There should only be 1", rtResourceType)
 				klog.Warningf(err.Error())
 				return err
 			}
@@ -286,7 +286,7 @@ func normalizeForK8sVMASScalingUp(templateMap map[string]interface{}) error {
 
 	indexesToRemove := []int{}
 	if nsgIndex == -1 {
-		err := fmt.Errorf("Found no resources with type %s in the template. There should have been 1", nsgResourceType)
+		err := fmt.Errorf("found no resources with type %s in the template. There should have been 1", nsgResourceType)
 		klog.Errorf(err.Error())
 		return err
 	}
@@ -433,7 +433,7 @@ func k8sLinuxVMNameParts(vmName string) (poolIdentifier, nameSuffix string, agen
 
 	vmNum, err := strconv.Atoi(vmNameParts[k8sLinuxVMAgentIndexArrayIndex])
 	if err != nil {
-		return "", "", -1, fmt.Errorf("Error parsing VM Name: %v", err)
+		return "", "", -1, fmt.Errorf("error parsing VM Name: %v", err)
 	}
 
 	return vmNameParts[k8sLinuxVMAgentPoolNameIndex], vmNameParts[k8sLinuxVMAgentClusterIDIndex], vmNum, nil
@@ -452,12 +452,12 @@ func windowsVMNameParts(vmName string) (poolPrefix string, acsStr string, poolIn
 
 	poolIndex, err = strconv.Atoi(poolInfo[:3])
 	if err != nil {
-		return "", "", -1, -1, fmt.Errorf("Error parsing VM Name: %v", err)
+		return "", "", -1, -1, fmt.Errorf("error parsing VM Name: %v", err)
 	}
 
 	agentIndex, err = strconv.Atoi(poolInfo[3:])
 	if err != nil {
-		return "", "", -1, -1, fmt.Errorf("Error parsing VM Name: %v", err)
+		return "", "", -1, -1, fmt.Errorf("error parsing VM Name: %v", err)
 	}
 
 	return poolPrefix, acsStr, poolIndex, agentIndex, nil
@@ -543,7 +543,7 @@ func validateConfig(cfg *Config) error {
 	if cfg.VMType == vmTypeACS || cfg.VMType == vmTypeAKS {
 		// Cluster name is a mandatory param to proceed.
 		if cfg.ClusterName == "" {
-			return fmt.Errorf("Cluster name not set for type %+v", cfg.VMType)
+			return fmt.Errorf("cluster name not set for type %+v", cfg.VMType)
 		}
 	}
diff --git a/cluster-autoscaler/cloudprovider/gce/autoscaling_gce_client.go b/cluster-autoscaler/cloudprovider/gce/autoscaling_gce_client.go
index 26098c9244..3dace46c79 100644
--- a/cluster-autoscaler/cloudprovider/gce/autoscaling_gce_client.go
+++ b/cluster-autoscaler/cloudprovider/gce/autoscaling_gce_client.go
@@ -156,7 +156,7 @@ func (client *autoscalingGceClientV1) waitForOp(operation *gce.Operation, projec
 			klog.Warningf("Error while getting operation %s on %s: %v", operation.Name, operation.TargetLink, err)
 		}
 	}
-	return fmt.Errorf("Timeout while waiting for operation %s on %s to complete.", operation.Name, operation.TargetLink)
+	return fmt.Errorf("timeout while waiting for operation %s on %s to complete.", operation.Name, operation.TargetLink)
 }
 
 func (client *autoscalingGceClientV1) DeleteInstances(migRef GceRef, instances []*GceRef) error {
diff --git a/cluster-autoscaler/cloudprovider/gce/cache.go b/cluster-autoscaler/cloudprovider/gce/cache.go
index a2ed344181..97854128cd 100644
--- a/cluster-autoscaler/cloudprovider/gce/cache.go
+++ b/cluster-autoscaler/cloudprovider/gce/cache.go
@@ -176,12 +176,12 @@ func (gc *GceCache) GetMigForInstance(instance *GceRef) (Mig, error) {
 			mig.Config.GceRef().Zone == instance.Zone &&
 			strings.HasPrefix(instance.Name, mig.Basename) {
 			if err := gc.regenerateCache(); err != nil {
-				return nil, fmt.Errorf("Error while looking for MIG for instance %+v, error: %v", *instance, err)
+				return nil, fmt.Errorf("error while looking for MIG for instance %+v, error: %v", *instance, err)
 			}
 			if mig, found := gc.instancesCache[*instance]; found {
 				return mig, nil
 			}
-			return nil, fmt.Errorf("Instance %+v does not belong to any configured MIG", *instance)
+			return nil, fmt.Errorf("instance %+v does not belong to any configured MIG", *instance)
 		}
 	}
 	// Instance doesn't belong to any configured mig.
diff --git a/cluster-autoscaler/cloudprovider/gce/gce_cloud_provider.go b/cluster-autoscaler/cloudprovider/gce/gce_cloud_provider.go
index 197003f9d9..9f4956c023 100644
--- a/cluster-autoscaler/cloudprovider/gce/gce_cloud_provider.go
+++ b/cluster-autoscaler/cloudprovider/gce/gce_cloud_provider.go
@@ -137,7 +137,7 @@ func (ref GceRef) ToProviderId() string {
 func GceRefFromProviderId(id string) (*GceRef, error) {
 	splitted := strings.Split(id[6:], "/")
 	if len(splitted) != 3 {
-		return nil, fmt.Errorf("Wrong id: expected format gce://<project>/<zone>/<name>, got %v", id)
+		return nil, fmt.Errorf("wrong id: expected format gce://<project>/<zone>/<name>, got %v", id)
 	}
 	return &GceRef{
 		Project: splitted[0],
diff --git a/cluster-autoscaler/cloudprovider/gce/gce_cloud_provider_test.go b/cluster-autoscaler/cloudprovider/gce/gce_cloud_provider_test.go
index ac211d9f59..f32ef21c82 100644
--- a/cluster-autoscaler/cloudprovider/gce/gce_cloud_provider_test.go
+++ b/cluster-autoscaler/cloudprovider/gce/gce_cloud_provider_test.go
@@ -163,7 +163,7 @@ func TestGetResourceLimiter(t *testing.T) {
 	assert.Equal(t, returnedResourceLimiterGKE, resourceLimiterGKE)
 
 	// Error in GceManager.
-	gceManagerMock.On("GetResourceLimiter").Return((*cloudprovider.ResourceLimiter)(nil), fmt.Errorf("Some error")).Once()
+	gceManagerMock.On("GetResourceLimiter").Return((*cloudprovider.ResourceLimiter)(nil), fmt.Errorf("some error")).Once()
 	returnedResourceLimiter, err = gce.GetResourceLimiter()
 	assert.Error(t, err)
 }
diff --git a/cluster-autoscaler/cloudprovider/gce/gce_manager.go b/cluster-autoscaler/cloudprovider/gce/gce_manager.go
index 7cf662bcbe..6e0d90065c 100644
--- a/cluster-autoscaler/cloudprovider/gce/gce_manager.go
+++ b/cluster-autoscaler/cloudprovider/gce/gce_manager.go
@@ -233,7 +233,7 @@ func (m *gceManagerImpl) DeleteInstances(instances []*GceRef) error {
 			return err
 		}
 		if mig != commonMig {
-			return fmt.Errorf("Cannot delete instances which don't belong to the same MIG.")
+			return fmt.Errorf("cannot delete instances which don't belong to the same MIG.")
 		}
 	}
diff --git a/cluster-autoscaler/cloudprovider/gce/gce_manager_test.go b/cluster-autoscaler/cloudprovider/gce/gce_manager_test.go
index 54d0ab79c2..f7c005f839 100644
--- a/cluster-autoscaler/cloudprovider/gce/gce_manager_test.go
+++ b/cluster-autoscaler/cloudprovider/gce/gce_manager_test.go
@@ -387,7 +387,7 @@ func TestDeleteInstances(t *testing.T) {
 
 	err = g.DeleteInstances(instances)
 	assert.Error(t, err)
-	assert.Equal(t, "Cannot delete instances which don't belong to the same MIG.", err.Error())
+	assert.Equal(t, "cannot delete instances which don't belong to the same MIG.", err.Error())
 
 	mock.AssertExpectationsForObjects(t, server)
 }
diff --git a/cluster-autoscaler/cloudprovider/gce/gce_url.go b/cluster-autoscaler/cloudprovider/gce/gce_url.go
index c4091ceb31..f9b1ab61af 100644
--- a/cluster-autoscaler/cloudprovider/gce/gce_url.go
+++ b/cluster-autoscaler/cloudprovider/gce/gce_url.go
@@ -73,7 +73,7 @@ func GenerateMigUrl(ref GceRef) string {
 }
 
 func parseGceUrl(url, expectedResource string) (project string, zone string, name string, err error) {
-	errMsg := fmt.Errorf("Wrong url: expected format https://content.googleapis.com/compute/v1/projects/<project>/zones/<zone>/%s/<name>, got %s", expectedResource, url)
+	errMsg := fmt.Errorf("wrong url: expected format https://content.googleapis.com/compute/v1/projects/<project>/zones/<zone>/%s/<name>, got %s", expectedResource, url)
 	if !strings.Contains(url, gceDomainSuffix) {
 		return "", "", "", errMsg
 	}
@@ -85,7 +85,7 @@ func parseGceUrl(url, expectedResource string) (project string, zone string, nam
 		return "", "", "", errMsg
 	}
 	if splitted[3] != expectedResource {
-		return "", "", "", fmt.Errorf("Wrong resource in url: expected %s, got %s", expectedResource, splitted[3])
+		return "", "", "", fmt.Errorf("wrong resource in url: expected %s, got %s", expectedResource, splitted[3])
 	}
 	project = splitted[0]
 	zone = splitted[2]
diff --git a/cluster-autoscaler/cloudprovider/gce/templates.go b/cluster-autoscaler/cloudprovider/gce/templates.go
index 46dcade15d..6e2e4857fd 100644
--- a/cluster-autoscaler/cloudprovider/gce/templates.go
+++ b/cluster-autoscaler/cloudprovider/gce/templates.go
@@ -311,7 +311,7 @@ func extractFromKubeEnv(kubeEnv, resource string) (string, error) {
 	kubeEnvMap := make(map[string]string)
 	err := yaml.Unmarshal([]byte(kubeEnv), &kubeEnvMap)
 	if err != nil {
-		return "", fmt.Errorf("Error unmarshalling kubeEnv: %v", err)
+		return "", fmt.Errorf("error unmarshalling kubeEnv: %v", err)
 	}
 	return kubeEnvMap[resource], nil
 }
diff --git a/cluster-autoscaler/cloudprovider/gke/autoscaling_gke_client_v1beta1.go b/cluster-autoscaler/cloudprovider/gke/autoscaling_gke_client_v1beta1.go
index c61dcd0ebc..b09f830a37 100644
--- a/cluster-autoscaler/cloudprovider/gke/autoscaling_gke_client_v1beta1.go
+++ b/cluster-autoscaler/cloudprovider/gke/autoscaling_gke_client_v1beta1.go
@@ -225,5 +225,5 @@ func (m *autoscalingGkeClientV1beta1) waitForGkeOp(op *gke_api_beta.Operation) e
 			klog.Warningf("Error while getting operation %s on %s: %v", op.Name, op.TargetLink, err)
 		}
 	}
-	return fmt.Errorf("Timeout while waiting for operation %s on %s to complete.", op.Name, op.TargetLink)
+	return fmt.Errorf("timeout while waiting for operation %s on %s to complete.", op.Name, op.TargetLink)
 }
diff --git a/cluster-autoscaler/cloudprovider/gke/gke_cloud_provider.go b/cluster-autoscaler/cloudprovider/gke/gke_cloud_provider.go
index 014715ea8f..20d4addb8c 100644
--- a/cluster-autoscaler/cloudprovider/gke/gke_cloud_provider.go
+++ b/cluster-autoscaler/cloudprovider/gke/gke_cloud_provider.go
@@ -180,7 +180,7 @@ func (gke *GkeCloudProvider) NewNodeGroup(machineType string, labels map[string]
 	// but if it fails later, we'd end up with a node group we can't scale anyway,
 	// so there's no point creating it.
 	if _, err := gke.gkeManager.GetMigTemplateNode(mig); err != nil {
-		return nil, fmt.Errorf("Failed to build node from spec: %v", err)
+		return nil, fmt.Errorf("failed to build node from spec: %v", err)
 	}
 
 	return mig, nil
@@ -388,7 +388,7 @@ func (mig *GkeMig) Create() (cloudprovider.NodeGroup, error) {
 	if !mig.exist && mig.autoprovisioned {
 		return mig.gkeManager.CreateNodePool(mig)
 	}
-	return nil, fmt.Errorf("Cannot create non-autoprovisioned node group")
+	return nil, fmt.Errorf("cannot create non-autoprovisioned node group")
 }
 
 // Delete deletes the node group on the cloud provider side.
@@ -397,7 +397,7 @@ func (mig *GkeMig) Delete() error {
 	if mig.exist && mig.autoprovisioned {
 		return mig.gkeManager.DeleteNodePool(mig)
 	}
-	return fmt.Errorf("Cannot delete non-autoprovisioned node group")
+	return fmt.Errorf("cannot delete non-autoprovisioned node group")
 }
 
 // Autoprovisioned returns true if the node group is autoprovisioned.
diff --git a/cluster-autoscaler/cloudprovider/gke/gke_cloud_provider_test.go b/cluster-autoscaler/cloudprovider/gke/gke_cloud_provider_test.go
index ced4d3e9cf..f7d48689f0 100644
--- a/cluster-autoscaler/cloudprovider/gke/gke_cloud_provider_test.go
+++ b/cluster-autoscaler/cloudprovider/gke/gke_cloud_provider_test.go
@@ -203,7 +203,7 @@ func TestGetResourceLimiter(t *testing.T) {
 	assert.Equal(t, returnedResourceLimiterGKE, resourceLimiterGKE)
 
 	// Error in GceManager.
-	gkeManagerMock.On("GetResourceLimiter").Return((*cloudprovider.ResourceLimiter)(nil), fmt.Errorf("Some error")).Once()
+	gkeManagerMock.On("GetResourceLimiter").Return((*cloudprovider.ResourceLimiter)(nil), fmt.Errorf("some error")).Once()
 	returnedResourceLimiter, err = gke.GetResourceLimiter()
 	assert.Error(t, err)
 }
diff --git a/cluster-autoscaler/cloudprovider/gke/gke_manager.go b/cluster-autoscaler/cloudprovider/gke/gke_manager.go
index 77843f8d59..8cc8e2aad2 100644
--- a/cluster-autoscaler/cloudprovider/gke/gke_manager.go
+++ b/cluster-autoscaler/cloudprovider/gke/gke_manager.go
@@ -426,7 +426,7 @@ func (m *gkeManagerImpl) DeleteInstances(instances []*gce.GceRef) error {
 			return err
 		}
 		if mig != commonMig {
-			return fmt.Errorf("Cannot delete instances which don't belong to the same MIG.")
+			return fmt.Errorf("cannot delete instances which don't belong to the same MIG.")
 		}
 	}
diff --git a/cluster-autoscaler/cloudprovider/gke/gke_manager_test.go b/cluster-autoscaler/cloudprovider/gke/gke_manager_test.go
index 58fa1191de..13ffd899f6 100644
--- a/cluster-autoscaler/cloudprovider/gke/gke_manager_test.go
+++ b/cluster-autoscaler/cloudprovider/gke/gke_manager_test.go
@@ -875,7 +875,7 @@ func TestDeleteInstances(t *testing.T) {
 
 	err = g.DeleteInstances(instances)
 	assert.Error(t, err)
-	assert.Equal(t, "Cannot delete instances which don't belong to the same MIG.", err.Error())
+	assert.Equal(t, "cannot delete instances which don't belong to the same MIG.", err.Error())
 
 	mock.AssertExpectationsForObjects(t, server)
 }
diff --git a/cluster-autoscaler/cloudprovider/node_group_discovery_options.go b/cluster-autoscaler/cloudprovider/node_group_discovery_options.go
index 2e610f30cf..5dba4bd3ad 100644
--- a/cluster-autoscaler/cloudprovider/node_group_discovery_options.go
+++ b/cluster-autoscaler/cloudprovider/node_group_discovery_options.go
@@ -179,11 +179,11 @@ func parseASGAutoDiscoverySpec(spec string) (ASGAutoDiscoveryConfig, error) {
 
 	tokens := strings.Split(spec, ":")
 	if len(tokens) != 2 {
-		return cfg, fmt.Errorf("Invalid node group auto discovery spec specified via --node-group-auto-discovery: %s", spec)
+		return cfg, fmt.Errorf("invalid node group auto discovery spec specified via --node-group-auto-discovery: %s", spec)
 	}
 	discoverer := tokens[0]
 	if discoverer != autoDiscovererTypeASG {
-		return cfg, fmt.Errorf("Unsupported discoverer specified: %s", discoverer)
+		return cfg, fmt.Errorf("unsupported discoverer specified: %s", discoverer)
 	}
 	param := tokens[1]
 	kv := strings.SplitN(param, "=", 2)
@@ -192,14 +192,14 @@ func parseASGAutoDiscoverySpec(spec string) (ASGAutoDiscoveryConfig, error) {
 	}
 	k, v := kv[0], kv[1]
 	if k != asgAutoDiscovererKeyTag {
-		return cfg, fmt.Errorf("Unsupported parameter key \"%s\" is specified for discoverer \"%s\". The only supported key is \"%s\"", k, discoverer, asgAutoDiscovererKeyTag)
+		return cfg, fmt.Errorf("unsupported parameter key \"%s\" is specified for discoverer \"%s\". The only supported key is \"%s\"", k, discoverer, asgAutoDiscovererKeyTag)
 	}
 	if v == "" {
 		return cfg, errors.New("tag value not supplied")
 	}
 	p := strings.Split(v, ",")
 	if len(p) == 0 {
-		return cfg, fmt.Errorf("Invalid ASG tag for auto discovery specified: ASG tag must not be empty")
+		return cfg, fmt.Errorf("invalid ASG tag for auto discovery specified: ASG tag must not be empty")
 	}
 	cfg.Tags = make(map[string]string, len(p))
 	for _, label := range p {
diff --git a/cluster-autoscaler/cloudprovider/test/test_cloud_provider.go b/cluster-autoscaler/cloudprovider/test/test_cloud_provider.go
index 74e41be5a1..32a4f4e4c7 100644
--- a/cluster-autoscaler/cloudprovider/test/test_cloud_provider.go
+++ b/cluster-autoscaler/cloudprovider/test/test_cloud_provider.go
@@ -287,7 +287,7 @@ func (tng *TestNodeGroup) Exist() bool {
 // Create creates the node group on the cloud provider side.
 func (tng *TestNodeGroup) Create() (cloudprovider.NodeGroup, error) {
 	if tng.Exist() {
-		return nil, fmt.Errorf("Group already exist")
+		return nil, fmt.Errorf("group already exist")
 	}
 	newNodeGroup := tng.cloudProvider.AddAutoprovisionedNodeGroup(tng.id, tng.minSize, tng.maxSize, 0, tng.machineType)
 	return newNodeGroup, tng.cloudProvider.onNodeGroupCreate(tng.id)
@@ -370,13 +370,13 @@ func (tng *TestNodeGroup) TemplateNodeInfo() (*schedulercache.NodeInfo, error) {
 	if tng.autoprovisioned {
 		template, found := tng.cloudProvider.machineTemplates[tng.machineType]
 		if !found {
-			return nil, fmt.Errorf("No template declared for %s", tng.machineType)
+			return nil, fmt.Errorf("no template declared for %s", tng.machineType)
 		}
 		return template, nil
 	}
 	template, found := tng.cloudProvider.machineTemplates[tng.id]
 	if !found {
-		return nil, fmt.Errorf("No template declared for %s", tng.id)
+		return nil, fmt.Errorf("no template declared for %s", tng.id)
 	}
 	return template, nil
 }
diff --git a/cluster-autoscaler/core/scale_down.go b/cluster-autoscaler/core/scale_down.go
index 467cfa31f6..142d6ed686 100644
--- a/cluster-autoscaler/core/scale_down.go
+++ b/cluster-autoscaler/core/scale_down.go
@@ -947,7 +947,7 @@ func evictPod(podToEvict *apiv1.Pod, client kube_client.Interface, recorder kube
 	}
 	klog.Errorf("Failed to evict pod %s, error: %v", podToEvict.Name, lastError)
 	recorder.Eventf(podToEvict, apiv1.EventTypeWarning, "ScaleDownFailed", "failed to delete pod for ScaleDown")
-	return fmt.Errorf("Failed to evict pod %s/%s within allowed timeout (last error: %v)", podToEvict.Namespace, podToEvict.Name, lastError)
+	return fmt.Errorf("failed to evict pod %s/%s within allowed timeout (last error: %v)", podToEvict.Namespace, podToEvict.Name, lastError)
 }
 
 // Performs drain logic on the node. Marks the node as unschedulable and later removes all pods, giving
diff --git a/cluster-autoscaler/core/scale_down_test.go b/cluster-autoscaler/core/scale_down_test.go
index 8aceb99f4d..28bab74775 100644
--- a/cluster-autoscaler/core/scale_down_test.go
+++ b/cluster-autoscaler/core/scale_down_test.go
@@ -671,7 +671,7 @@ func TestDrainNodeWithRetries(t *testing.T) {
 			case ticket <- true:
 			default:
 			}
-			return true, nil, fmt.Errorf("Too many concurrent evictions")
+			return true, nil, fmt.Errorf("too many concurrent evictions")
 		}
 	})
 	err := drainNode(n1, []*apiv1.Pod{p1, p2, p3}, fakeClient, kube_util.CreateEventRecorder(fakeClient), 20, 5*time.Second, 0*time.Second)
@@ -730,7 +730,7 @@ func TestScaleDown(t *testing.T) {
 		case n2.Name:
 			return true, n2, nil
 		}
-		return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
+		return true, nil, fmt.Errorf("wrong node: %v", getAction.GetName())
 	})
 	fakeClient.Fake.AddReactor("delete", "pods", func(action core.Action) (bool, runtime.Object, error) {
 		deleteAction := action.(core.DeleteAction)
@@ -945,7 +945,7 @@ func simpleScaleDownEmpty(t *testing.T, config *scaleTestConfig) {
 		if node, found := nodesMap[getAction.GetName()]; found {
 			return true, node, nil
 		}
-		return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
+		return true, nil, fmt.Errorf("wrong node: %v", getAction.GetName())
 	})
 
 	fakeClient.Fake.AddReactor("update", "nodes",
 		func(action core.Action) (bool, runtime.Object, error) {
@@ -1030,7 +1030,7 @@ func TestNoScaleDownUnready(t *testing.T) {
 		case n2.Name:
 			return true, n2, nil
 		}
-		return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
+		return true, nil, fmt.Errorf("wrong node: %v", getAction.GetName())
 	})
 
 	provider := testprovider.NewTestCloudProvider(nil, func(nodeGroup string, node string) error {
@@ -1122,7 +1122,7 @@ func TestScaleDownNoMove(t *testing.T) {
 		case n2.Name:
 			return true, n2, nil
 		}
-		return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
+		return true, nil, fmt.Errorf("wrong node: %v", getAction.GetName())
 	})
 	fakeClient.Fake.AddReactor("delete", "pods", func(action core.Action) (bool, runtime.Object, error) {
 		t.FailNow()
@@ -1196,7 +1196,7 @@ func TestCleanToBeDeleted(t *testing.T) {
 		case n2.Name:
 			return true, n2, nil
 		}
-		return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
+		return true, nil, fmt.Errorf("wrong node: %v", getAction.GetName())
 	})
 	fakeClient.Fake.AddReactor("update", "nodes", func(action core.Action) (bool, runtime.Object, error) {
 		update := action.(core.UpdateAction)
diff --git a/cluster-autoscaler/core/scale_up_test.go b/cluster-autoscaler/core/scale_up_test.go
index 760cff10cd..f2877579cc 100644
--- a/cluster-autoscaler/core/scale_up_test.go
+++ b/cluster-autoscaler/core/scale_up_test.go
@@ -382,6 +382,7 @@ func simpleScaleUpTest(t *testing.T, config *scaleTestConfig) {
 		pod := buildTestPod(p)
 		pods = append(pods, pod)
 	}
+
 	podLister := kube_util.NewTestPodLister(pods)
 	listers := kube_util.NewListerRegistry(nil, nil, podLister, nil, nil, nil, nil, nil, nil, nil)
diff --git a/cluster-autoscaler/core/utils.go b/cluster-autoscaler/core/utils.go
index 2bdf19140d..b7f49e160a 100644
--- a/cluster-autoscaler/core/utils.go
+++ b/cluster-autoscaler/core/utils.go
@@ -446,7 +446,7 @@ func fixNodeGroupSize(context *context.AutoscalingContext, clusterStateRegistry
 			incorrectSize.CurrentSize,
 			delta)
 		if err := nodeGroup.DecreaseTargetSize(delta); err != nil {
-			return fixed, fmt.Errorf("Failed to decrease %s: %v", nodeGroup.Id(), err)
+			return fixed, fmt.Errorf("failed to decrease %s: %v", nodeGroup.Id(), err)
decrease %s: %v", nodeGroup.Id(), err) } fixed = true } diff --git a/cluster-autoscaler/estimator/estimator.go b/cluster-autoscaler/estimator/estimator.go index 54818e1776..53fade81c2 100644 --- a/cluster-autoscaler/estimator/estimator.go +++ b/cluster-autoscaler/estimator/estimator.go @@ -62,5 +62,5 @@ func NewEstimatorBuilder(name string) (EstimatorBuilder, error) { return NewBasicNodeEstimator() }, nil } - return nil, fmt.Errorf("Unknown estimator: %s", name) + return nil, fmt.Errorf("unknown estimator: %s", name) } diff --git a/cluster-autoscaler/main.go b/cluster-autoscaler/main.go index 7332761fd3..d6e837570c 100644 --- a/cluster-autoscaler/main.go +++ b/cluster-autoscaler/main.go @@ -457,25 +457,25 @@ func parseMultipleGpuLimits(flags MultiStringFlag) ([]config.GpuLimits, error) { func parseSingleGpuLimit(limits string) (config.GpuLimits, error) { parts := strings.Split(limits, ":") if len(parts) != 3 { - return config.GpuLimits{}, fmt.Errorf("Incorrect gpu limit specification: %v", limits) + return config.GpuLimits{}, fmt.Errorf("incorrect gpu limit specification: %v", limits) } gpuType := parts[0] minVal, err := strconv.ParseInt(parts[1], 10, 64) if err != nil { - return config.GpuLimits{}, fmt.Errorf("Incorrect gpu limit - min is not integer: %v", limits) + return config.GpuLimits{}, fmt.Errorf("incorrect gpu limit - min is not integer: %v", limits) } maxVal, err := strconv.ParseInt(parts[2], 10, 64) if err != nil { - return config.GpuLimits{}, fmt.Errorf("Incorrect gpu limit - max is not integer: %v", limits) + return config.GpuLimits{}, fmt.Errorf("incorrect gpu limit - max is not integer: %v", limits) } if minVal < 0 { - return config.GpuLimits{}, fmt.Errorf("Incorrect gpu limit - min is less than 0; %v", limits) + return config.GpuLimits{}, fmt.Errorf("incorrect gpu limit - min is less than 0; %v", limits) } if maxVal < 0 { - return config.GpuLimits{}, fmt.Errorf("Incorrect gpu limit - max is less than 0; %v", limits) + return config.GpuLimits{}, fmt.Errorf("incorrect gpu limit - max is less than 0; %v", limits) } if minVal > maxVal { - return config.GpuLimits{}, fmt.Errorf("Incorrect gpu limit - min is greater than max; %v", limits) + return config.GpuLimits{}, fmt.Errorf("incorrect gpu limit - min is greater than max; %v", limits) } parsedGpuLimits := config.GpuLimits{ GpuType: gpuType, diff --git a/cluster-autoscaler/main_test.go b/cluster-autoscaler/main_test.go index 578ad33981..32a02a8458 100644 --- a/cluster-autoscaler/main_test.go +++ b/cluster-autoscaler/main_test.go @@ -45,37 +45,37 @@ func TestParseSingleGpuLimit(t *testing.T) { { input: "gpu:1", expectError: true, - expectedErrorMessage: "Incorrect gpu limit specification: gpu:1", + expectedErrorMessage: "incorrect gpu limit specification: gpu:1", }, { input: "gpu:1:10:x", expectError: true, - expectedErrorMessage: "Incorrect gpu limit specification: gpu:1:10:x", + expectedErrorMessage: "incorrect gpu limit specification: gpu:1:10:x", }, { input: "gpu:x:10", expectError: true, - expectedErrorMessage: "Incorrect gpu limit - min is not integer: gpu:x:10", + expectedErrorMessage: "incorrect gpu limit - min is not integer: gpu:x:10", }, { input: "gpu:1:y", expectError: true, - expectedErrorMessage: "Incorrect gpu limit - max is not integer: gpu:1:y", + expectedErrorMessage: "incorrect gpu limit - max is not integer: gpu:1:y", }, { input: "gpu:-1:10", expectError: true, - expectedErrorMessage: "Incorrect gpu limit - min is less than 0; gpu:-1:10", + expectedErrorMessage: "incorrect gpu limit - min is less than 0; 
gpu:-1:10", }, { input: "gpu:1:-10", expectError: true, - expectedErrorMessage: "Incorrect gpu limit - max is less than 0; gpu:1:-10", + expectedErrorMessage: "incorrect gpu limit - max is less than 0; gpu:1:-10", }, { input: "gpu:10:1", expectError: true, - expectedErrorMessage: "Incorrect gpu limit - min is greater than max; gpu:10:1", + expectedErrorMessage: "incorrect gpu limit - min is greater than max; gpu:10:1", }, } diff --git a/cluster-autoscaler/simulator/cluster.go b/cluster-autoscaler/simulator/cluster.go index 854269510a..c0827f0c98 100644 --- a/cluster-autoscaler/simulator/cluster.go +++ b/cluster-autoscaler/simulator/cluster.go @@ -167,7 +167,7 @@ func CalculateUtilization(node *apiv1.Node, nodeInfo *schedulercache.NodeInfo, s func calculateUtilizationOfResource(node *apiv1.Node, nodeInfo *schedulercache.NodeInfo, resourceName apiv1.ResourceName, skipDaemonSetPods, skipMirrorPods bool) (float64, error) { nodeAllocatable, found := node.Status.Allocatable[resourceName] if !found { - return 0, fmt.Errorf("Failed to get %v from %s", resourceName, node.Name) + return 0, fmt.Errorf("failed to get %v from %s", resourceName, node.Name) } if nodeAllocatable.MilliValue() == 0 { return 0, fmt.Errorf("%v is 0 at %s", resourceName, node.Name) diff --git a/vertical-pod-autoscaler/e2e/actuation.go b/vertical-pod-autoscaler/e2e/actuation.go index d58e1c94b0..96ca79e3da 100644 --- a/vertical-pod-autoscaler/e2e/actuation.go +++ b/vertical-pod-autoscaler/e2e/actuation.go @@ -167,7 +167,7 @@ func assertPodsPendingForDuration(c clientset.Interface, deployment *appsv1.Depl }) if err != nil { - return fmt.Errorf("Assertion failed for pending pods in %v: %v", deployment.Name, err) + return fmt.Errorf("assertion failed for pending pods in %v: %v", deployment.Name, err) } return nil } diff --git a/vertical-pod-autoscaler/e2e/common.go b/vertical-pod-autoscaler/e2e/common.go index ca5eaaa715..db568b7dda 100644 --- a/vertical-pod-autoscaler/e2e/common.go +++ b/vertical-pod-autoscaler/e2e/common.go @@ -222,7 +222,7 @@ func WaitForPodsRestarted(f *framework.Framework, podList *apiv1.PodList) error }) if err != nil { - return fmt.Errorf("Waiting for set of pods changed: %v", err) + return fmt.Errorf("waiting for set of pods changed: %v", err) } return nil } @@ -241,7 +241,7 @@ func WaitForPodsEvicted(f *framework.Framework, podList *apiv1.PodList) error { }) if err != nil { - return fmt.Errorf("Waiting for set of pods changed: %v", err) + return fmt.Errorf("waiting for set of pods changed: %v", err) } return nil } diff --git a/vertical-pod-autoscaler/e2e/updater.go b/vertical-pod-autoscaler/e2e/updater.go index 7e6f384d73..33b66e1d49 100644 --- a/vertical-pod-autoscaler/e2e/updater.go +++ b/vertical-pod-autoscaler/e2e/updater.go @@ -242,7 +242,7 @@ func getCurrentPodSetForDeployment(c clientset.Interface, d *appsv1.Deployment) func createReplicaSetWithRetries(c clientset.Interface, namespace string, obj *appsv1.ReplicaSet) error { if obj == nil { - return fmt.Errorf("Object provided to create is empty") + return fmt.Errorf("object provided to create is empty") } createFunc := func() (bool, error) { _, err := c.AppsV1().ReplicaSets(namespace).Create(obj) @@ -252,14 +252,14 @@ func createReplicaSetWithRetries(c clientset.Interface, namespace string, obj *a if testutils.IsRetryableAPIError(err) { return false, nil } - return false, fmt.Errorf("Failed to create object with non-retriable error: %v", err) + return false, fmt.Errorf("failed to create object with non-retriable error: %v", err) } return 
 	return testutils.RetryWithExponentialBackOff(createFunc)
 }
 
 func createStatefulSetSetWithRetries(c clientset.Interface, namespace string, obj *appsv1.StatefulSet) error {
 	if obj == nil {
-		return fmt.Errorf("Object provided to create is empty")
+		return fmt.Errorf("object provided to create is empty")
 	}
 	createFunc := func() (bool, error) {
 		_, err := c.AppsV1().StatefulSets(namespace).Create(obj)
@@ -269,7 +269,7 @@ func createStatefulSetSetWithRetries(c clientset.Interface, namespace string, ob
 		if testutils.IsRetryableAPIError(err) {
 			return false, nil
 		}
-		return false, fmt.Errorf("Failed to create object with non-retriable error: %v", err)
+		return false, fmt.Errorf("failed to create object with non-retriable error: %v", err)
 	}
 	return testutils.RetryWithExponentialBackOff(createFunc)
 }
diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go
index a71e532ad4..4b587ee3e7 100644
--- a/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go
+++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go
@@ -154,7 +154,7 @@ func validateVPA(vpa *vpa_types.VerticalPodAutoscaler) error {
 			return fmt.Errorf("UpdateMode is required if UpdatePolicy is used")
 		}
 		if _, found := possibleUpdateModes[*mode]; !found {
-			return fmt.Errorf("Unexpected UpdateMode value %s", *mode)
+			return fmt.Errorf("unexpected UpdateMode value %s", *mode)
 		}
 	}
 
@@ -166,13 +166,13 @@ func validateVPA(vpa *vpa_types.VerticalPodAutoscaler) error {
 			mode := policy.Mode
 			if mode != nil {
 				if _, found := possibleScalingModes[*mode]; !found {
-					return fmt.Errorf("Unexpected Mode value %s", *mode)
+					return fmt.Errorf("unexpected Mode value %s", *mode)
 				}
 			}
 			for resource, min := range policy.MinAllowed {
 				max, found := policy.MaxAllowed[resource]
 				if found && max.Cmp(min) < 0 {
-					return fmt.Errorf("Max resource for %v is lower than min", resource)
+					return fmt.Errorf("max resource for %v is lower than min", resource)
 				}
 			}
 		}
diff --git a/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder.go b/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder.go
index a2fb5e5318..b20fb7fe6b 100644
--- a/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder.go
+++ b/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder.go
@@ -209,13 +209,13 @@ func (feeder *clusterStateFeeder) setVpaCheckpoint(checkpoint *vpa_types.Vertica
 	vpaID := model.VpaID{Namespace: checkpoint.Namespace, VpaName: checkpoint.Spec.VPAObjectName}
 	vpa, exists := feeder.clusterState.Vpas[vpaID]
 	if !exists {
-		return fmt.Errorf("Cannot load checkpoint to missing VPA object %+v", vpaID)
+		return fmt.Errorf("cannot load checkpoint to missing VPA object %+v", vpaID)
 	}
 
 	cs := model.NewAggregateContainerState()
 	err := cs.LoadFromCheckpoint(&checkpoint.Status)
 	if err != nil {
-		return fmt.Errorf("Cannot load checkpoint for VPA %+v. Reason: %v", vpa.ID, err)
+		return fmt.Errorf("cannot load checkpoint for VPA %+v. Reason: %v", vpa.ID, err)
Reason: %v", vpa.ID, err) } vpa.ContainersInitialAggregateState[checkpoint.Spec.ContainerName] = cs return nil diff --git a/vertical-pod-autoscaler/pkg/recommender/input/history/prometheus_client.go b/vertical-pod-autoscaler/pkg/recommender/input/history/prometheus_client.go index 5a77638b05..86f4b3c8cb 100644 --- a/vertical-pod-autoscaler/pkg/recommender/input/history/prometheus_client.go +++ b/vertical-pod-autoscaler/pkg/recommender/input/history/prometheus_client.go @@ -93,7 +93,7 @@ func (c *prometheusClient) GetTimeseries(query string) ([]Timeseries, error) { return nil }, numRetries, retryDelay) if err != nil { - return nil, fmt.Errorf("Retrying GetTimeseries unsuccessful: %v", err) + return nil, fmt.Errorf("retrying GetTimeseries unsuccessful: %v", err) } return decodeTimeseriesFromResponse(resp.Body) } diff --git a/vertical-pod-autoscaler/pkg/recommender/model/aggregate_container_state.go b/vertical-pod-autoscaler/pkg/recommender/model/aggregate_container_state.go index 996df2cba6..0b64b77b02 100644 --- a/vertical-pod-autoscaler/pkg/recommender/model/aggregate_container_state.go +++ b/vertical-pod-autoscaler/pkg/recommender/model/aggregate_container_state.go @@ -175,7 +175,7 @@ func (a *AggregateContainerState) SaveToCheckpoint() (*vpa_types.VerticalPodAuto // into the AggregateContainerState. func (a *AggregateContainerState) LoadFromCheckpoint(checkpoint *vpa_types.VerticalPodAutoscalerCheckpointStatus) error { if checkpoint.Version != SupportedCheckpointVersion { - return fmt.Errorf("Unsuported checkpoint version %s", checkpoint.Version) + return fmt.Errorf("unsuported checkpoint version %s", checkpoint.Version) } a.TotalSamplesCount = checkpoint.TotalSamplesCount a.FirstSampleStart = checkpoint.FirstSampleStart.Time diff --git a/vertical-pod-autoscaler/pkg/recommender/model/cluster.go b/vertical-pod-autoscaler/pkg/recommender/model/cluster.go index eb5d173282..24e13f75ba 100644 --- a/vertical-pod-autoscaler/pkg/recommender/model/cluster.go +++ b/vertical-pod-autoscaler/pkg/recommender/model/cluster.go @@ -168,7 +168,7 @@ func (cluster *ClusterState) AddSample(sample *ContainerUsageSampleWithKey) erro return NewKeyError(sample.Container) } if !containerState.AddSample(&sample.ContainerUsageSample) { - return fmt.Errorf("Sample discarded (invalid or out of order)") + return fmt.Errorf("sample discarded (invalid or out of order)") } return nil } @@ -185,7 +185,7 @@ func (cluster *ClusterState) RecordOOM(containerID ContainerID, timestamp time.T } err := containerState.RecordOOM(timestamp, requestedMemory) if err != nil { - return fmt.Errorf("Error while recording OOM for %v, Reason: %v", containerID, err) + return fmt.Errorf("error while recording OOM for %v, Reason: %v", containerID, err) } return nil } diff --git a/vertical-pod-autoscaler/pkg/recommender/model/container.go b/vertical-pod-autoscaler/pkg/recommender/model/container.go index 38c6f0452a..e79e844799 100644 --- a/vertical-pod-autoscaler/pkg/recommender/model/container.go +++ b/vertical-pod-autoscaler/pkg/recommender/model/container.go @@ -164,7 +164,7 @@ func (container *ContainerState) RecordOOM(timestamp time.Time, requestedMemory Resource: ResourceMemory, } if !container.addMemorySample(&oomMemorySample, true) { - return fmt.Errorf("Adding OOM sample failed") + return fmt.Errorf("adding OOM sample failed") } return nil }