Fix error format strings according to best practices from CodeReviewComments

Reverted an incorrect change to an error format string

Signed-off-by: CodeLingo Bot <bot@codelingo.io>

Resolve conflict

Signed-off-by: CodeLingo Bot <bot@codelingo.io>

Fix error strings in test cases to remedy failing tests

Signed-off-by: CodeLingo Bot <bot@codelingo.io>

Fix more error strings to remedy failing tests

Signed-off-by: CodeLingo Bot <bot@codelingo.io>
CodeLingo Bot 2018-12-04 16:59:57 +01:00 committed by daanikus
parent 4002559a4c
commit c0603afdeb
42 changed files with 87 additions and 86 deletions
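For context, the Go CodeReviewComments guidance this commit applies: error strings should start with a lowercase letter and carry no terminal punctuation, because they usually end up wrapped inside other context. Below is a minimal sketch of the before/after pattern, using a hypothetical findContainer helper rather than any function from the diff that follows:

package main

import "fmt"

// findContainer is a hypothetical example, not code from this commit;
// it only illustrates the error-string convention being enforced.
func findContainer(name string) error {
	// Before: fmt.Errorf("Container %s was not found", name)
	// wraps into "checking pod: Container foo was not found" - the stray
	// capital mid-sentence is why the guidance asks for lowercase.
	return fmt.Errorf("container %s was not found", name)
}

func main() {
	if err := findContainer("foo"); err != nil {
		// Prints: checking pod: container foo was not found
		fmt.Println(fmt.Errorf("checking pod: %v", err))
	}
}

Note that many of the replacement strings in this diff still end with a period; the commit lowercases the first letter only.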

View File

@@ -63,7 +63,7 @@ func (k *kubernetesClient) ContainerResources() (*apiv1.ResourceRequirements, er
return &container.Resources, nil
}
}
return nil, fmt.Errorf("Container %s was not found in deployment %s in namespace %s.", k.container, k.deployment, k.namespace)
return nil, fmt.Errorf("container %s was not found in deployment %s in namespace %s.", k.container, k.deployment, k.namespace)
}
func (k *kubernetesClient) UpdateDeployment(resources *apiv1.ResourceRequirements) error {
@@ -83,7 +83,7 @@ func (k *kubernetesClient) UpdateDeployment(resources *apiv1.ResourceRequirement
}
}
return fmt.Errorf("Container %s was not found in the deployment %s in namespace %s.", k.container, k.deployment, k.namespace)
return fmt.Errorf("container %s was not found in the deployment %s in namespace %s.", k.container, k.deployment, k.namespace)
}
// NewKubernetesClient gives a KubernetesClient with the given dependencies.

View File

@@ -49,7 +49,7 @@ func (m autoScalingWrapper) getInstanceTypeByLCName(name string) (string, error)
return "", err
}
if len(launchConfigurations.LaunchConfigurations) < 1 {
return "", fmt.Errorf("Unable to get first LaunchConfiguration for %s", name)
return "", fmt.Errorf("unable to get first LaunchConfiguration for %s", name)
}
return *launchConfigurations.LaunchConfigurations[0].InstanceType, nil

View File

@@ -188,7 +188,7 @@ func (m *asgCache) InstancesByAsg(ref AwsRef) ([]AwsInstanceRef, error) {
return instances, nil
}
return nil, fmt.Errorf("Error while looking for instances of ASG: %s", ref)
return nil, fmt.Errorf("error while looking for instances of ASG: %s", ref)
}
func (m *asgCache) SetAsgSize(asg *asg, size int) error {

View File

@@ -144,7 +144,7 @@ var validAwsRefIdRegex = regexp.MustCompile(`^aws\:\/\/\/[-0-9a-z]*\/[-0-9a-z]*$
// must be in format: aws:///zone/name
func AwsRefFromProviderId(id string) (*AwsInstanceRef, error) {
if validAwsRefIdRegex.FindStringSubmatch(id) == nil {
return nil, fmt.Errorf("Wrong id: expected format aws:///<zone>/<name>, got %v", id)
return nil, fmt.Errorf("wrong id: expected format aws:///<zone>/<name>, got %v", id)
}
splitted := strings.Split(id[7:], "/")
return &AwsInstanceRef{

View File

@@ -184,7 +184,7 @@ func (m *AwsManager) GetAsgNodes(ref AwsRef) ([]AwsInstanceRef, error) {
func (m *AwsManager) getAsgTemplate(asg *asg) (*asgTemplate, error) {
if len(asg.AvailabilityZones) < 1 {
return nil, fmt.Errorf("Unable to get first AvailabilityZone for ASG %q", asg.Name)
return nil, fmt.Errorf("unable to get first AvailabilityZone for ASG %q", asg.Name)
}
az := asg.AvailabilityZones[0]

View File

@@ -43,14 +43,14 @@ func (m ec2Wrapper) getInstanceTypeByLT(name string, version string) (string, er
}
if len(describeData.LaunchTemplateVersions) == 0 {
return "", fmt.Errorf("Unable to find template versions")
return "", fmt.Errorf("unable to find template versions")
}
lt := describeData.LaunchTemplateVersions[0]
instanceType := lt.LaunchTemplateData.InstanceType
if instanceType == nil {
return "", fmt.Errorf("Unable to find instance type within launch template")
return "", fmt.Errorf("unable to find instance type within launch template")
}
return aws.StringValue(instanceType), nil

View File

@@ -146,7 +146,7 @@ func (m *asgCache) FindForInstance(instance *azureRef, vmType string) (cloudprov
}
if err := m.regenerate(); err != nil {
return nil, fmt.Errorf("Error while looking for ASG for instance %+v, error: %v", *instance, err)
return nil, fmt.Errorf("error while looking for ASG for instance %+v, error: %v", *instance, err)
}
if config, found := m.instanceToAsg[*instance]; found {
return config, nil

View File

@@ -429,7 +429,7 @@ func newServicePrincipalTokenFromCredentials(config *Config, env *azure.Environm
klog.V(2).Infoln("azure: using managed identity extension to retrieve access token")
msiEndpoint, err := adal.GetMSIVMEndpoint()
if err != nil {
return nil, fmt.Errorf("Getting the managed service identity endpoint: %v", err)
return nil, fmt.Errorf("getting the managed service identity endpoint: %v", err)
}
return adal.NewServicePrincipalTokenFromMSI(
msiEndpoint,
@@ -463,7 +463,7 @@ func newServicePrincipalTokenFromCredentials(config *Config, env *azure.Environm
env.ServiceManagementEndpoint)
}
return nil, fmt.Errorf("No credentials provided for AAD application %s", config.AADClientID)
return nil, fmt.Errorf("no credentials provided for AAD application %s", config.AADClientID)
}
func newAzClient(cfg *Config, env *azure.Environment) (*azClient, error) {

View File

@@ -298,7 +298,7 @@ func (agentPool *ContainerServiceAgentPool) TargetSize() (int, error) {
func (agentPool *ContainerServiceAgentPool) SetSize(targetSize int) error {
if targetSize > agentPool.MaxSize() || targetSize < agentPool.MinSize() {
klog.Errorf("Target size %d requested outside Max: %d, Min: %d", targetSize, agentPool.MaxSize(), agentPool.MaxSize())
return fmt.Errorf("Target size %d requested outside Max: %d, Min: %d", targetSize, agentPool.MaxSize(), agentPool.MinSize())
return fmt.Errorf("target size %d requested outside Max: %d, Min: %d", targetSize, agentPool.MaxSize(), agentPool.MinSize())
}
klog.V(2).Infof("Setting size for cluster (%q) with new count (%d)", agentPool.clusterName, targetSize)
@@ -314,7 +314,7 @@ func (agentPool *ContainerServiceAgentPool) SetSize(targetSize int) error {
//parameter
func (agentPool *ContainerServiceAgentPool) IncreaseSize(delta int) error {
if delta <= 0 {
return fmt.Errorf("Size increase must be +ve")
return fmt.Errorf("size increase must be +ve")
}
currentSize, err := agentPool.TargetSize()
if err != nil {
@@ -322,7 +322,7 @@ func (agentPool *ContainerServiceAgentPool) IncreaseSize(delta int) error {
}
targetSize := int(currentSize) + delta
if targetSize > agentPool.MaxSize() {
return fmt.Errorf("Size increase request of %d more than max size %d set", targetSize, agentPool.MaxSize())
return fmt.Errorf("size increase request of %d more than max size %d set", targetSize, agentPool.MaxSize())
}
return agentPool.SetSize(targetSize)
}
@@ -413,7 +413,7 @@ func (agentPool *ContainerServiceAgentPool) GetNodes() ([]string, error) {
func (agentPool *ContainerServiceAgentPool) DecreaseTargetSize(delta int) error {
if delta >= 0 {
klog.Errorf("Size decrease error: %d", delta)
return fmt.Errorf("Size decrease must be negative")
return fmt.Errorf("size decrease must be negative")
}
currentSize, err := agentPool.TargetSize()
if err != nil {

View File

@@ -249,7 +249,7 @@ func normalizeForK8sVMASScalingUp(templateMap map[string]interface{}) error {
resourceType, ok := resourceMap[typeFieldName].(string)
if ok && resourceType == nsgResourceType {
if nsgIndex != -1 {
err := fmt.Errorf("Found 2 resources with type %s in the template. There should only be 1", nsgResourceType)
err := fmt.Errorf("found 2 resources with type %s in the template. There should only be 1", nsgResourceType)
klog.Errorf(err.Error())
return err
}
@@ -257,7 +257,7 @@ func normalizeForK8sVMASScalingUp(templateMap map[string]interface{}) error {
}
if ok && resourceType == rtResourceType {
if rtIndex != -1 {
err := fmt.Errorf("Found 2 resources with type %s in the template. There should only be 1", rtResourceType)
err := fmt.Errorf("found 2 resources with type %s in the template. There should only be 1", rtResourceType)
klog.Warningf(err.Error())
return err
}
@@ -286,7 +286,7 @@ func normalizeForK8sVMASScalingUp(templateMap map[string]interface{}) error {
indexesToRemove := []int{}
if nsgIndex == -1 {
err := fmt.Errorf("Found no resources with type %s in the template. There should have been 1", nsgResourceType)
err := fmt.Errorf("found no resources with type %s in the template. There should have been 1", nsgResourceType)
klog.Errorf(err.Error())
return err
}
@@ -433,7 +433,7 @@ func k8sLinuxVMNameParts(vmName string) (poolIdentifier, nameSuffix string, agen
vmNum, err := strconv.Atoi(vmNameParts[k8sLinuxVMAgentIndexArrayIndex])
if err != nil {
return "", "", -1, fmt.Errorf("Error parsing VM Name: %v", err)
return "", "", -1, fmt.Errorf("error parsing VM Name: %v", err)
}
return vmNameParts[k8sLinuxVMAgentPoolNameIndex], vmNameParts[k8sLinuxVMAgentClusterIDIndex], vmNum, nil
@@ -452,12 +452,12 @@ func windowsVMNameParts(vmName string) (poolPrefix string, acsStr string, poolIn
poolIndex, err = strconv.Atoi(poolInfo[:3])
if err != nil {
return "", "", -1, -1, fmt.Errorf("Error parsing VM Name: %v", err)
return "", "", -1, -1, fmt.Errorf("error parsing VM Name: %v", err)
}
agentIndex, err = strconv.Atoi(poolInfo[3:])
if err != nil {
return "", "", -1, -1, fmt.Errorf("Error parsing VM Name: %v", err)
return "", "", -1, -1, fmt.Errorf("error parsing VM Name: %v", err)
}
return poolPrefix, acsStr, poolIndex, agentIndex, nil
@@ -543,7 +543,7 @@ func validateConfig(cfg *Config) error {
if cfg.VMType == vmTypeACS || cfg.VMType == vmTypeAKS {
// Cluster name is a mandatory param to proceed.
if cfg.ClusterName == "" {
return fmt.Errorf("Cluster name not set for type %+v", cfg.VMType)
return fmt.Errorf("cluster name not set for type %+v", cfg.VMType)
}
}

View File

@@ -156,7 +156,7 @@ func (client *autoscalingGceClientV1) waitForOp(operation *gce.Operation, projec
klog.Warningf("Error while getting operation %s on %s: %v", operation.Name, operation.TargetLink, err)
}
}
return fmt.Errorf("Timeout while waiting for operation %s on %s to complete.", operation.Name, operation.TargetLink)
return fmt.Errorf("timeout while waiting for operation %s on %s to complete.", operation.Name, operation.TargetLink)
}
func (client *autoscalingGceClientV1) DeleteInstances(migRef GceRef, instances []*GceRef) error {

View File

@@ -176,12 +176,12 @@ func (gc *GceCache) GetMigForInstance(instance *GceRef) (Mig, error) {
mig.Config.GceRef().Zone == instance.Zone &&
strings.HasPrefix(instance.Name, mig.Basename) {
if err := gc.regenerateCache(); err != nil {
return nil, fmt.Errorf("Error while looking for MIG for instance %+v, error: %v", *instance, err)
return nil, fmt.Errorf("error while looking for MIG for instance %+v, error: %v", *instance, err)
}
if mig, found := gc.instancesCache[*instance]; found {
return mig, nil
}
return nil, fmt.Errorf("Instance %+v does not belong to any configured MIG", *instance)
return nil, fmt.Errorf("instance %+v does not belong to any configured MIG", *instance)
}
}
// Instance doesn't belong to any configured mig.

View File

@@ -137,7 +137,7 @@ func (ref GceRef) ToProviderId() string {
func GceRefFromProviderId(id string) (*GceRef, error) {
splitted := strings.Split(id[6:], "/")
if len(splitted) != 3 {
return nil, fmt.Errorf("Wrong id: expected format gce://<project-id>/<zone>/<name>, got %v", id)
return nil, fmt.Errorf("wrong id: expected format gce://<project-id>/<zone>/<name>, got %v", id)
}
return &GceRef{
Project: splitted[0],

View File

@@ -163,7 +163,7 @@ func TestGetResourceLimiter(t *testing.T) {
assert.Equal(t, returnedResourceLimiterGKE, resourceLimiterGKE)
// Error in GceManager.
gceManagerMock.On("GetResourceLimiter").Return((*cloudprovider.ResourceLimiter)(nil), fmt.Errorf("Some error")).Once()
gceManagerMock.On("GetResourceLimiter").Return((*cloudprovider.ResourceLimiter)(nil), fmt.Errorf("some error")).Once()
returnedResourceLimiter, err = gce.GetResourceLimiter()
assert.Error(t, err)
}

View File

@@ -233,7 +233,7 @@ func (m *gceManagerImpl) DeleteInstances(instances []*GceRef) error {
return err
}
if mig != commonMig {
return fmt.Errorf("Cannot delete instances which don't belong to the same MIG.")
return fmt.Errorf("cannot delete instances which don't belong to the same MIG.")
}
}

View File

@@ -387,7 +387,7 @@ func TestDeleteInstances(t *testing.T) {
err = g.DeleteInstances(instances)
assert.Error(t, err)
assert.Equal(t, "Cannot delete instances which don't belong to the same MIG.", err.Error())
assert.Equal(t, "cannot delete instances which don't belong to the same MIG.", err.Error())
mock.AssertExpectationsForObjects(t, server)
}

View File

@@ -73,7 +73,7 @@ func GenerateMigUrl(ref GceRef) string {
}
func parseGceUrl(url, expectedResource string) (project string, zone string, name string, err error) {
errMsg := fmt.Errorf("Wrong url: expected format https://content.googleapis.com/compute/v1/projects/<project-id>/zones/<zone>/%s/<name>, got %s", expectedResource, url)
errMsg := fmt.Errorf("wrong url: expected format https://content.googleapis.com/compute/v1/projects/<project-id>/zones/<zone>/%s/<name>, got %s", expectedResource, url)
if !strings.Contains(url, gceDomainSuffix) {
return "", "", "", errMsg
}
@@ -85,7 +85,7 @@ func parseGceUrl(url, expectedResource string) (project string, zone string, nam
return "", "", "", errMsg
}
if splitted[3] != expectedResource {
return "", "", "", fmt.Errorf("Wrong resource in url: expected %s, got %s", expectedResource, splitted[3])
return "", "", "", fmt.Errorf("wrong resource in url: expected %s, got %s", expectedResource, splitted[3])
}
project = splitted[0]
zone = splitted[2]

View File

@@ -311,7 +311,7 @@ func extractFromKubeEnv(kubeEnv, resource string) (string, error) {
kubeEnvMap := make(map[string]string)
err := yaml.Unmarshal([]byte(kubeEnv), &kubeEnvMap)
if err != nil {
return "", fmt.Errorf("Error unmarshalling kubeEnv: %v", err)
return "", fmt.Errorf("error unmarshalling kubeEnv: %v", err)
}
return kubeEnvMap[resource], nil
}

View File

@@ -225,5 +225,5 @@ func (m *autoscalingGkeClientV1beta1) waitForGkeOp(op *gke_api_beta.Operation) e
klog.Warningf("Error while getting operation %s on %s: %v", op.Name, op.TargetLink, err)
}
}
return fmt.Errorf("Timeout while waiting for operation %s on %s to complete.", op.Name, op.TargetLink)
return fmt.Errorf("timeout while waiting for operation %s on %s to complete.", op.Name, op.TargetLink)
}

View File

@@ -180,7 +180,7 @@ func (gke *GkeCloudProvider) NewNodeGroup(machineType string, labels map[string]
// but if it fails later, we'd end up with a node group we can't scale anyway,
// so there's no point creating it.
if _, err := gke.gkeManager.GetMigTemplateNode(mig); err != nil {
return nil, fmt.Errorf("Failed to build node from spec: %v", err)
return nil, fmt.Errorf("failed to build node from spec: %v", err)
}
return mig, nil
@@ -388,7 +388,7 @@ func (mig *GkeMig) Create() (cloudprovider.NodeGroup, error) {
if !mig.exist && mig.autoprovisioned {
return mig.gkeManager.CreateNodePool(mig)
}
return nil, fmt.Errorf("Cannot create non-autoprovisioned node group")
return nil, fmt.Errorf("cannot create non-autoprovisioned node group")
}
// Delete deletes the node group on the cloud provider side.
@@ -397,7 +397,7 @@ func (mig *GkeMig) Delete() error {
if mig.exist && mig.autoprovisioned {
return mig.gkeManager.DeleteNodePool(mig)
}
return fmt.Errorf("Cannot delete non-autoprovisioned node group")
return fmt.Errorf("cannot delete non-autoprovisioned node group")
}
// Autoprovisioned returns true if the node group is autoprovisioned.

View File

@@ -203,7 +203,7 @@ func TestGetResourceLimiter(t *testing.T) {
assert.Equal(t, returnedResourceLimiterGKE, resourceLimiterGKE)
// Error in GceManager.
gkeManagerMock.On("GetResourceLimiter").Return((*cloudprovider.ResourceLimiter)(nil), fmt.Errorf("Some error")).Once()
gkeManagerMock.On("GetResourceLimiter").Return((*cloudprovider.ResourceLimiter)(nil), fmt.Errorf("some error")).Once()
returnedResourceLimiter, err = gke.GetResourceLimiter()
assert.Error(t, err)
}

View File

@@ -426,7 +426,7 @@ func (m *gkeManagerImpl) DeleteInstances(instances []*gce.GceRef) error {
return err
}
if mig != commonMig {
return fmt.Errorf("Cannot delete instances which don't belong to the same MIG.")
return fmt.Errorf("cannot delete instances which don't belong to the same MIG.")
}
}

View File

@@ -875,7 +875,7 @@ func TestDeleteInstances(t *testing.T) {
err = g.DeleteInstances(instances)
assert.Error(t, err)
assert.Equal(t, "Cannot delete instances which don't belong to the same MIG.", err.Error())
assert.Equal(t, "cannot delete instances which don't belong to the same MIG.", err.Error())
mock.AssertExpectationsForObjects(t, server)
}

View File

@@ -179,11 +179,11 @@ func parseASGAutoDiscoverySpec(spec string) (ASGAutoDiscoveryConfig, error) {
tokens := strings.Split(spec, ":")
if len(tokens) != 2 {
return cfg, fmt.Errorf("Invalid node group auto discovery spec specified via --node-group-auto-discovery: %s", spec)
return cfg, fmt.Errorf("invalid node group auto discovery spec specified via --node-group-auto-discovery: %s", spec)
}
discoverer := tokens[0]
if discoverer != autoDiscovererTypeASG {
return cfg, fmt.Errorf("Unsupported discoverer specified: %s", discoverer)
return cfg, fmt.Errorf("unsupported discoverer specified: %s", discoverer)
}
param := tokens[1]
kv := strings.SplitN(param, "=", 2)
@@ -192,14 +192,14 @@ func parseASGAutoDiscoverySpec(spec string) (ASGAutoDiscoveryConfig, error) {
}
k, v := kv[0], kv[1]
if k != asgAutoDiscovererKeyTag {
return cfg, fmt.Errorf("Unsupported parameter key \"%s\" is specified for discoverer \"%s\". The only supported key is \"%s\"", k, discoverer, asgAutoDiscovererKeyTag)
return cfg, fmt.Errorf("unsupported parameter key \"%s\" is specified for discoverer \"%s\". The only supported key is \"%s\"", k, discoverer, asgAutoDiscovererKeyTag)
}
if v == "" {
return cfg, errors.New("tag value not supplied")
}
p := strings.Split(v, ",")
if len(p) == 0 {
return cfg, fmt.Errorf("Invalid ASG tag for auto discovery specified: ASG tag must not be empty")
return cfg, fmt.Errorf("invalid ASG tag for auto discovery specified: ASG tag must not be empty")
}
cfg.Tags = make(map[string]string, len(p))
for _, label := range p {

View File

@@ -287,7 +287,7 @@ func (tng *TestNodeGroup) Exist() bool {
// Create creates the node group on the cloud provider side.
func (tng *TestNodeGroup) Create() (cloudprovider.NodeGroup, error) {
if tng.Exist() {
return nil, fmt.Errorf("Group already exist")
return nil, fmt.Errorf("group already exist")
}
newNodeGroup := tng.cloudProvider.AddAutoprovisionedNodeGroup(tng.id, tng.minSize, tng.maxSize, 0, tng.machineType)
return newNodeGroup, tng.cloudProvider.onNodeGroupCreate(tng.id)
@@ -370,13 +370,13 @@ func (tng *TestNodeGroup) TemplateNodeInfo() (*schedulercache.NodeInfo, error) {
if tng.autoprovisioned {
template, found := tng.cloudProvider.machineTemplates[tng.machineType]
if !found {
return nil, fmt.Errorf("No template declared for %s", tng.machineType)
return nil, fmt.Errorf("no template declared for %s", tng.machineType)
}
return template, nil
}
template, found := tng.cloudProvider.machineTemplates[tng.id]
if !found {
return nil, fmt.Errorf("No template declared for %s", tng.id)
return nil, fmt.Errorf("no template declared for %s", tng.id)
}
return template, nil
}

View File

@@ -947,7 +947,7 @@ func evictPod(podToEvict *apiv1.Pod, client kube_client.Interface, recorder kube
}
klog.Errorf("Failed to evict pod %s, error: %v", podToEvict.Name, lastError)
recorder.Eventf(podToEvict, apiv1.EventTypeWarning, "ScaleDownFailed", "failed to delete pod for ScaleDown")
return fmt.Errorf("Failed to evict pod %s/%s within allowed timeout (last error: %v)", podToEvict.Namespace, podToEvict.Name, lastError)
return fmt.Errorf("failed to evict pod %s/%s within allowed timeout (last error: %v)", podToEvict.Namespace, podToEvict.Name, lastError)
}
// Performs drain logic on the node. Marks the node as unschedulable and later removes all pods, giving

View File

@@ -671,7 +671,7 @@ func TestDrainNodeWithRetries(t *testing.T) {
case ticket <- true:
default:
}
return true, nil, fmt.Errorf("Too many concurrent evictions")
return true, nil, fmt.Errorf("too many concurrent evictions")
}
})
err := drainNode(n1, []*apiv1.Pod{p1, p2, p3}, fakeClient, kube_util.CreateEventRecorder(fakeClient), 20, 5*time.Second, 0*time.Second)
@@ -730,7 +730,7 @@ func TestScaleDown(t *testing.T) {
case n2.Name:
return true, n2, nil
}
return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
return true, nil, fmt.Errorf("wrong node: %v", getAction.GetName())
})
fakeClient.Fake.AddReactor("delete", "pods", func(action core.Action) (bool, runtime.Object, error) {
deleteAction := action.(core.DeleteAction)
@@ -945,7 +945,7 @@ func simpleScaleDownEmpty(t *testing.T, config *scaleTestConfig) {
if node, found := nodesMap[getAction.GetName()]; found {
return true, node, nil
}
return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
return true, nil, fmt.Errorf("wrong node: %v", getAction.GetName())
})
fakeClient.Fake.AddReactor("update", "nodes", func(action core.Action) (bool, runtime.Object, error) {
@@ -1030,7 +1030,7 @@ func TestNoScaleDownUnready(t *testing.T) {
case n2.Name:
return true, n2, nil
}
return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
return true, nil, fmt.Errorf("wrong node: %v", getAction.GetName())
})
provider := testprovider.NewTestCloudProvider(nil, func(nodeGroup string, node string) error {
@@ -1122,7 +1122,7 @@ func TestScaleDownNoMove(t *testing.T) {
case n2.Name:
return true, n2, nil
}
return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
return true, nil, fmt.Errorf("wrong node: %v", getAction.GetName())
})
fakeClient.Fake.AddReactor("delete", "pods", func(action core.Action) (bool, runtime.Object, error) {
t.FailNow()
@@ -1196,7 +1196,7 @@ func TestCleanToBeDeleted(t *testing.T) {
case n2.Name:
return true, n2, nil
}
return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
return true, nil, fmt.Errorf("wrong node: %v", getAction.GetName())
})
fakeClient.Fake.AddReactor("update", "nodes", func(action core.Action) (bool, runtime.Object, error) {
update := action.(core.UpdateAction)

View File

@@ -382,6 +382,7 @@ func simpleScaleUpTest(t *testing.T, config *scaleTestConfig) {
pod := buildTestPod(p)
pods = append(pods, pod)
}
podLister := kube_util.NewTestPodLister(pods)
listers := kube_util.NewListerRegistry(nil, nil, podLister, nil, nil, nil, nil, nil, nil, nil)

View File

@@ -446,7 +446,7 @@ func fixNodeGroupSize(context *context.AutoscalingContext, clusterStateRegistry
incorrectSize.CurrentSize,
delta)
if err := nodeGroup.DecreaseTargetSize(delta); err != nil {
return fixed, fmt.Errorf("Failed to decrease %s: %v", nodeGroup.Id(), err)
return fixed, fmt.Errorf("failed to decrease %s: %v", nodeGroup.Id(), err)
}
fixed = true
}

View File

@@ -62,5 +62,5 @@ func NewEstimatorBuilder(name string) (EstimatorBuilder, error) {
return NewBasicNodeEstimator()
}, nil
}
return nil, fmt.Errorf("Unknown estimator: %s", name)
return nil, fmt.Errorf("unknown estimator: %s", name)
}

View File

@@ -457,25 +457,25 @@ func parseMultipleGpuLimits(flags MultiStringFlag) ([]config.GpuLimits, error) {
func parseSingleGpuLimit(limits string) (config.GpuLimits, error) {
parts := strings.Split(limits, ":")
if len(parts) != 3 {
return config.GpuLimits{}, fmt.Errorf("Incorrect gpu limit specification: %v", limits)
return config.GpuLimits{}, fmt.Errorf("incorrect gpu limit specification: %v", limits)
}
gpuType := parts[0]
minVal, err := strconv.ParseInt(parts[1], 10, 64)
if err != nil {
return config.GpuLimits{}, fmt.Errorf("Incorrect gpu limit - min is not integer: %v", limits)
return config.GpuLimits{}, fmt.Errorf("incorrect gpu limit - min is not integer: %v", limits)
}
maxVal, err := strconv.ParseInt(parts[2], 10, 64)
if err != nil {
return config.GpuLimits{}, fmt.Errorf("Incorrect gpu limit - max is not integer: %v", limits)
return config.GpuLimits{}, fmt.Errorf("incorrect gpu limit - max is not integer: %v", limits)
}
if minVal < 0 {
return config.GpuLimits{}, fmt.Errorf("Incorrect gpu limit - min is less than 0; %v", limits)
return config.GpuLimits{}, fmt.Errorf("incorrect gpu limit - min is less than 0; %v", limits)
}
if maxVal < 0 {
return config.GpuLimits{}, fmt.Errorf("Incorrect gpu limit - max is less than 0; %v", limits)
return config.GpuLimits{}, fmt.Errorf("incorrect gpu limit - max is less than 0; %v", limits)
}
if minVal > maxVal {
return config.GpuLimits{}, fmt.Errorf("Incorrect gpu limit - min is greater than max; %v", limits)
return config.GpuLimits{}, fmt.Errorf("incorrect gpu limit - min is greater than max; %v", limits)
}
parsedGpuLimits := config.GpuLimits{
GpuType: gpuType,

View File

@@ -45,37 +45,37 @@ func TestParseSingleGpuLimit(t *testing.T) {
{
input: "gpu:1",
expectError: true,
expectedErrorMessage: "Incorrect gpu limit specification: gpu:1",
expectedErrorMessage: "incorrect gpu limit specification: gpu:1",
},
{
input: "gpu:1:10:x",
expectError: true,
expectedErrorMessage: "Incorrect gpu limit specification: gpu:1:10:x",
expectedErrorMessage: "incorrect gpu limit specification: gpu:1:10:x",
},
{
input: "gpu:x:10",
expectError: true,
expectedErrorMessage: "Incorrect gpu limit - min is not integer: gpu:x:10",
expectedErrorMessage: "incorrect gpu limit - min is not integer: gpu:x:10",
},
{
input: "gpu:1:y",
expectError: true,
expectedErrorMessage: "Incorrect gpu limit - max is not integer: gpu:1:y",
expectedErrorMessage: "incorrect gpu limit - max is not integer: gpu:1:y",
},
{
input: "gpu:-1:10",
expectError: true,
expectedErrorMessage: "Incorrect gpu limit - min is less than 0; gpu:-1:10",
expectedErrorMessage: "incorrect gpu limit - min is less than 0; gpu:-1:10",
},
{
input: "gpu:1:-10",
expectError: true,
expectedErrorMessage: "Incorrect gpu limit - max is less than 0; gpu:1:-10",
expectedErrorMessage: "incorrect gpu limit - max is less than 0; gpu:1:-10",
},
{
input: "gpu:10:1",
expectError: true,
expectedErrorMessage: "Incorrect gpu limit - min is greater than max; gpu:10:1",
expectedErrorMessage: "incorrect gpu limit - min is greater than max; gpu:10:1",
},
}

View File

@@ -167,7 +167,7 @@ func CalculateUtilization(node *apiv1.Node, nodeInfo *schedulercache.NodeInfo, s
func calculateUtilizationOfResource(node *apiv1.Node, nodeInfo *schedulercache.NodeInfo, resourceName apiv1.ResourceName, skipDaemonSetPods, skipMirrorPods bool) (float64, error) {
nodeAllocatable, found := node.Status.Allocatable[resourceName]
if !found {
return 0, fmt.Errorf("Failed to get %v from %s", resourceName, node.Name)
return 0, fmt.Errorf("failed to get %v from %s", resourceName, node.Name)
}
if nodeAllocatable.MilliValue() == 0 {
return 0, fmt.Errorf("%v is 0 at %s", resourceName, node.Name)

View File

@@ -167,7 +167,7 @@ func assertPodsPendingForDuration(c clientset.Interface, deployment *appsv1.Depl
})
if err != nil {
return fmt.Errorf("Assertion failed for pending pods in %v: %v", deployment.Name, err)
return fmt.Errorf("assertion failed for pending pods in %v: %v", deployment.Name, err)
}
return nil
}

View File

@@ -222,7 +222,7 @@ func WaitForPodsRestarted(f *framework.Framework, podList *apiv1.PodList) error
})
if err != nil {
return fmt.Errorf("Waiting for set of pods changed: %v", err)
return fmt.Errorf("waiting for set of pods changed: %v", err)
}
return nil
}
@@ -241,7 +241,7 @@ func WaitForPodsEvicted(f *framework.Framework, podList *apiv1.PodList) error {
})
if err != nil {
return fmt.Errorf("Waiting for set of pods changed: %v", err)
return fmt.Errorf("waiting for set of pods changed: %v", err)
}
return nil
}

View File

@@ -242,7 +242,7 @@ func getCurrentPodSetForDeployment(c clientset.Interface, d *appsv1.Deployment)
func createReplicaSetWithRetries(c clientset.Interface, namespace string, obj *appsv1.ReplicaSet) error {
if obj == nil {
return fmt.Errorf("Object provided to create is empty")
return fmt.Errorf("object provided to create is empty")
}
createFunc := func() (bool, error) {
_, err := c.AppsV1().ReplicaSets(namespace).Create(obj)
@@ -252,14 +252,14 @@ func createReplicaSetWithRetries(c clientset.Interface, namespace string, obj *a
if testutils.IsRetryableAPIError(err) {
return false, nil
}
return false, fmt.Errorf("Failed to create object with non-retriable error: %v", err)
return false, fmt.Errorf("failed to create object with non-retriable error: %v", err)
}
return testutils.RetryWithExponentialBackOff(createFunc)
}
func createStatefulSetSetWithRetries(c clientset.Interface, namespace string, obj *appsv1.StatefulSet) error {
if obj == nil {
return fmt.Errorf("Object provided to create is empty")
return fmt.Errorf("object provided to create is empty")
}
createFunc := func() (bool, error) {
_, err := c.AppsV1().StatefulSets(namespace).Create(obj)
@@ -269,7 +269,7 @@ func createStatefulSetSetWithRetries(c clientset.Interface, namespace string, ob
if testutils.IsRetryableAPIError(err) {
return false, nil
}
return false, fmt.Errorf("Failed to create object with non-retriable error: %v", err)
return false, fmt.Errorf("failed to create object with non-retriable error: %v", err)
}
return testutils.RetryWithExponentialBackOff(createFunc)
}

View File

@@ -154,7 +154,7 @@ func validateVPA(vpa *vpa_types.VerticalPodAutoscaler) error {
return fmt.Errorf("UpdateMode is required if UpdatePolicy is used")
}
if _, found := possibleUpdateModes[*mode]; !found {
return fmt.Errorf("Unexpected UpdateMode value %s", *mode)
return fmt.Errorf("unexpected UpdateMode value %s", *mode)
}
}
@@ -166,13 +166,13 @@ func validateVPA(vpa *vpa_types.VerticalPodAutoscaler) error {
mode := policy.Mode
if mode != nil {
if _, found := possibleScalingModes[*mode]; !found {
return fmt.Errorf("Unexpected Mode value %s", *mode)
return fmt.Errorf("unexpected Mode value %s", *mode)
}
}
for resource, min := range policy.MinAllowed {
max, found := policy.MaxAllowed[resource]
if found && max.Cmp(min) < 0 {
return fmt.Errorf("Max resource for %v is lower than min", resource)
return fmt.Errorf("max resource for %v is lower than min", resource)
}
}
}

View File

@@ -209,13 +209,13 @@ func (feeder *clusterStateFeeder) setVpaCheckpoint(checkpoint *vpa_types.Vertica
vpaID := model.VpaID{Namespace: checkpoint.Namespace, VpaName: checkpoint.Spec.VPAObjectName}
vpa, exists := feeder.clusterState.Vpas[vpaID]
if !exists {
return fmt.Errorf("Cannot load checkpoint to missing VPA object %+v", vpaID)
return fmt.Errorf("cannot load checkpoint to missing VPA object %+v", vpaID)
}
cs := model.NewAggregateContainerState()
err := cs.LoadFromCheckpoint(&checkpoint.Status)
if err != nil {
return fmt.Errorf("Cannot load checkpoint for VPA %+v. Reason: %v", vpa.ID, err)
return fmt.Errorf("cannot load checkpoint for VPA %+v. Reason: %v", vpa.ID, err)
}
vpa.ContainersInitialAggregateState[checkpoint.Spec.ContainerName] = cs
return nil

View File

@@ -93,7 +93,7 @@ func (c *prometheusClient) GetTimeseries(query string) ([]Timeseries, error) {
return nil
}, numRetries, retryDelay)
if err != nil {
return nil, fmt.Errorf("Retrying GetTimeseries unsuccessful: %v", err)
return nil, fmt.Errorf("retrying GetTimeseries unsuccessful: %v", err)
}
return decodeTimeseriesFromResponse(resp.Body)
}

View File

@@ -175,7 +175,7 @@ func (a *AggregateContainerState) SaveToCheckpoint() (*vpa_types.VerticalPodAuto
// into the AggregateContainerState.
func (a *AggregateContainerState) LoadFromCheckpoint(checkpoint *vpa_types.VerticalPodAutoscalerCheckpointStatus) error {
if checkpoint.Version != SupportedCheckpointVersion {
return fmt.Errorf("Unsuported checkpoint version %s", checkpoint.Version)
return fmt.Errorf("unsuported checkpoint version %s", checkpoint.Version)
}
a.TotalSamplesCount = checkpoint.TotalSamplesCount
a.FirstSampleStart = checkpoint.FirstSampleStart.Time

View File

@@ -168,7 +168,7 @@ func (cluster *ClusterState) AddSample(sample *ContainerUsageSampleWithKey) erro
return NewKeyError(sample.Container)
}
if !containerState.AddSample(&sample.ContainerUsageSample) {
return fmt.Errorf("Sample discarded (invalid or out of order)")
return fmt.Errorf("sample discarded (invalid or out of order)")
}
return nil
}
@@ -185,7 +185,7 @@ func (cluster *ClusterState) RecordOOM(containerID ContainerID, timestamp time.T
}
err := containerState.RecordOOM(timestamp, requestedMemory)
if err != nil {
return fmt.Errorf("Error while recording OOM for %v, Reason: %v", containerID, err)
return fmt.Errorf("error while recording OOM for %v, Reason: %v", containerID, err)
}
return nil
}

View File

@@ -164,7 +164,7 @@ func (container *ContainerState) RecordOOM(timestamp time.Time, requestedMemory
Resource: ResourceMemory,
}
if !container.addMemorySample(&oomMemorySample, true) {
return fmt.Errorf("Adding OOM sample failed")
return fmt.Errorf("adding OOM sample failed")
}
return nil
}