Do not assign return values to variables when not needed

commit 7c9d6e3518 (parent 7118ea8228)
Author: Jarvis-Zhou
Date: 2019-10-23 21:50:35 +08:00
9 changed files with 18 additions and 18 deletions
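
Every hunk below applies the same Go idiom: when a call is made only for its error (or only for its side effect), the return values that are never read again are discarded with the blank identifier _ instead of being bound to named variables. A minimal standalone sketch of the idiom follows; strconv.Atoi is only a stand-in for the autoscaler calls touched in this commit, not code from the changed files.

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Illustrative sketch only; strconv.Atoi stands in for calls such as
	// gce.GetResourceLimiter() or parseCustomMachineType() in the hunks below.
	//
	// Before: the parsed value is bound to a variable even though only the
	// error is ever inspected:
	//   value, err := strconv.Atoi("not-a-number")
	//
	// After: the unused return value is discarded with the blank identifier.
	_, err := strconv.Atoi("not-a-number")
	if err != nil {
		fmt.Println("got the expected error:", err)
	}
}

The drainNode hunk applies a related cleanup: allGone is declared with "var allGone bool" instead of being pre-assigned true, because the loop body overwrites it at the start of every iteration anyway.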


@@ -164,7 +164,7 @@ func TestGetResourceLimiter(t *testing.T) {
 // Error in GceManager.
 gceManagerMock.On("GetResourceLimiter").Return((*cloudprovider.ResourceLimiter)(nil), fmt.Errorf("some error")).Once()
-returnedResourceLimiter, err = gce.GetResourceLimiter()
+_, err = gce.GetResourceLimiter()
 assert.Error(t, err)
 }


@@ -1386,9 +1386,9 @@ func TestParseCustomMachineType(t *testing.T) {
 assert.NoError(t, err)
 assert.Equal(t, int64(2), cpu)
 assert.Equal(t, int64(2816*units.MiB), mem)
-cpu, mem, err = parseCustomMachineType("other-a2-2816")
+_, _, err = parseCustomMachineType("other-a2-2816")
 assert.Error(t, err)
-cpu, mem, err = parseCustomMachineType("other-2-2816")
+_, _, err = parseCustomMachineType("other-2-2816")
 assert.Error(t, err)
 }


@@ -72,13 +72,13 @@ func TestGetNodePrice(t *testing.T) {
 node4 := BuildTestNode("sillyname4", 8000, 30*units.GiB)
 node4.Status.Capacity[gpu.ResourceNvidiaGPU] = *resource.NewQuantity(1, resource.DecimalSI)
 node4.Labels = labels1
-price4, err := model.NodePrice(node4, now, now.Add(time.Hour))
+price4, _ := model.NodePrice(node4, now, now.Add(time.Hour))
 // preemptible with gpu
 node5 := BuildTestNode("sillyname5", 8000, 30*units.GiB)
 node5.Labels = labels2
 node5.Status.Capacity[gpu.ResourceNvidiaGPU] = *resource.NewQuantity(1, resource.DecimalSI)
-price5, err := model.NodePrice(node5, now, now.Add(time.Hour))
+price5, _ := model.NodePrice(node5, now, now.Add(time.Hour))
 // Nodes with GPU are way more expensive than regular.
 // Being preemptible doesn't bring much of a discount (less than 50%).


@@ -35,9 +35,9 @@ func TestParseUrl(t *testing.T) {
 assert.Equal(t, "us-central1-b", zone)
 assert.Equal(t, "kubernetes-minion-group", name)
-proj, zone, name, err = parseGceUrl("www.onet.pl", "instanceGroups")
+_, _, _, err = parseGceUrl("www.onet.pl", "instanceGroups")
 assert.NotNil(t, err)
-proj, zone, name, err = parseGceUrl("https://content.googleapis.com/compute/vabc/projects/mwielgus-proj/zones/us-central1-b/instanceGroups/kubernetes-minion-group", "instanceGroups")
+_, _, _, err = parseGceUrl("https://content.googleapis.com/compute/vabc/projects/mwielgus-proj/zones/us-central1-b/instanceGroups/kubernetes-minion-group", "instanceGroups")
 assert.NotNil(t, err)
 }


@@ -178,7 +178,7 @@ func (mgr *packetManagerRest) listPacketDevices() *Devices {
 var jsonStr = []byte(``)
 packetAuthToken := os.Getenv("PACKET_AUTH_TOKEN")
 url := "https://api.packet.net/projects/" + mgr.projectID + "/devices"
-req, err := http.NewRequest("GET", url, bytes.NewBuffer(jsonStr))
+req, _ := http.NewRequest("GET", url, bytes.NewBuffer(jsonStr))
 req.Header.Set("X-Auth-Token", packetAuthToken)
 req.Header.Set("Content-Type", "application/json")
 client := &http.Client{}
@@ -345,7 +345,7 @@ func (mgr *packetManagerRest) deleteNodes(nodegroup string, nodes []NodeRef, upd
 klog.Infof("nodegroup match %s %s", d.Hostname, n.Name)
 if d.Hostname == n.Name {
 klog.V(1).Infof("Matching Packet Device %s - %s", d.Hostname, d.ID)
-req, err := http.NewRequest("DELETE", "https://api.packet.net/devices/"+d.ID, bytes.NewBuffer([]byte("")))
+req, _ := http.NewRequest("DELETE", "https://api.packet.net/devices/"+d.ID, bytes.NewBuffer([]byte("")))
 req.Header.Set("X-Auth-Token", packetAuthToken)
 req.Header.Set("Content-Type", "application/json")

@@ -1138,7 +1138,7 @@ func drainNode(node *apiv1.Node, pods []*apiv1.Pod, client kube_client.Interface
 }
 // Evictions created successfully, wait maxGracefulTerminationSec + podEvictionHeadroom to see if pods really disappeared.
-allGone := true
+var allGone bool
 for start := time.Now(); time.Now().Sub(start) < time.Duration(maxGracefulTerminationSec)*time.Second+podEvictionHeadroom; time.Sleep(5 * time.Second) {
 allGone = true
 for _, pod := range pods {


@@ -136,11 +136,11 @@ func TestFindUnneededNodes(t *testing.T) {
 []*apiv1.Pod{p1, p2, p3, p4, p5, p6}, time.Now(), nil, nil)
 assert.Equal(t, 3, len(sd.unneededNodes))
-addTime, found := sd.unneededNodes["n2"]
+_, found := sd.unneededNodes["n2"]
 assert.True(t, found)
-addTime, found = sd.unneededNodes["n7"]
+_, found = sd.unneededNodes["n7"]
 assert.True(t, found)
-addTime, found = sd.unneededNodes["n8"]
+addTime, found := sd.unneededNodes["n8"]
 assert.True(t, found)
 assert.Contains(t, sd.podLocationHints, p2.Namespace+"/"+p2.Name)
 assert.Equal(t, 6, len(sd.nodeUtilizationMap))


@@ -327,7 +327,7 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) errors.AutoscalerError
 // we tread pods with nominated node-name as scheduled for sake of scale-up considerations
 scheduledPods = append(scheduledPods, unschedulableWaitingForLowerPriorityPreemption...)
-unschedulablePodsToHelp, scheduledPods, err := a.processors.PodListProcessor.Process(
+unschedulablePodsToHelp, scheduledPods, _ := a.processors.PodListProcessor.Process(
 a.AutoscalingContext, unschedulablePods, scheduledPods, allNodes, readyNodes,
 getUpcomingNodeInfos(a.clusterStateRegistry, nodeInfosForGroups))


@@ -197,14 +197,14 @@ func TestBalanceHittingMaxSize(t *testing.T) {
 assert.Equal(t, 3, scaleUpInfo[0].NewSize)
 // First group maxes out before proceeding to next one
-scaleUpInfo, err = processor.BalanceScaleUpBetweenGroups(context, getGroups("ng2", "ng3"), 4)
+scaleUpInfo, _ = processor.BalanceScaleUpBetweenGroups(context, getGroups("ng2", "ng3"), 4)
 assert.Equal(t, 2, len(scaleUpInfo))
 scaleUpMap := toMap(scaleUpInfo)
 assert.Equal(t, 3, scaleUpMap["ng2"].NewSize)
 assert.Equal(t, 5, scaleUpMap["ng3"].NewSize)
 // Last group maxes out before previous one
-scaleUpInfo, err = processor.BalanceScaleUpBetweenGroups(context, getGroups("ng2", "ng3", "ng4"), 9)
+scaleUpInfo, _ = processor.BalanceScaleUpBetweenGroups(context, getGroups("ng2", "ng3", "ng4"), 9)
 assert.Equal(t, 3, len(scaleUpInfo))
 scaleUpMap = toMap(scaleUpInfo)
 assert.Equal(t, 3, scaleUpMap["ng2"].NewSize)
@@ -212,7 +212,7 @@ func TestBalanceHittingMaxSize(t *testing.T) {
 assert.Equal(t, 7, scaleUpMap["ng4"].NewSize)
 // Use all capacity, cap to max
-scaleUpInfo, err = processor.BalanceScaleUpBetweenGroups(context, getGroups("ng2", "ng3", "ng4"), 900)
+scaleUpInfo, _ = processor.BalanceScaleUpBetweenGroups(context, getGroups("ng2", "ng3", "ng4"), 900)
 assert.Equal(t, 3, len(scaleUpInfo))
 scaleUpMap = toMap(scaleUpInfo)
 assert.Equal(t, 3, scaleUpMap["ng2"].NewSize)
@@ -220,7 +220,7 @@ func TestBalanceHittingMaxSize(t *testing.T) {
 assert.Equal(t, 7, scaleUpMap["ng4"].NewSize)
 // One node group exceeds max.
-scaleUpInfo, err = processor.BalanceScaleUpBetweenGroups(context, getGroups("ng2", "ng5"), 1)
+scaleUpInfo, _ = processor.BalanceScaleUpBetweenGroups(context, getGroups("ng2", "ng5"), 1)
 assert.Equal(t, 1, len(scaleUpInfo))
 scaleUpMap = toMap(scaleUpInfo)
 assert.Equal(t, 2, scaleUpMap["ng2"].NewSize)