From d231a87aed73252a915a80edf48d96030036cb86 Mon Sep 17 00:00:00 2001
From: wei-chenglai
Date: Thu, 20 Mar 2025 10:19:14 -0400
Subject: [PATCH] Bump golang to 1.23.7 and golangci-lint to 1.64.8

Along with the version bumps, adjust the code to keep the updated
linters happy:
- rename the FederatedHPA controller's local min/max helpers to
  maxOfInt32/minOfInt32
- build errors from pre-formatted messages with errors.New instead of
  fmt.Errorf, and report them in tests with t.Error instead of t.Errorf
- annotate intentional integer conversions with #nosec G115
- drop redundant nil checks that precede len()

Signed-off-by: wei-chenglai
---
 .go-version                                   |  2 +-
 go.mod                                        |  2 +-
 hack/verify-staticcheck.sh                    |  2 +-
 pkg/controllers/binding/common.go             |  2 +-
 .../federatedhpa/federatedhpa_controller.go   | 20 +++++++++----------
 .../federatedhpa/metrics/utilization.go       |  2 +-
 .../status/cluster_status_controller.go       |  4 ++--
 .../workloadrebalancer_controller_test.go     |  3 +--
 pkg/estimator/client/cache.go                 |  2 +-
 pkg/estimator/client/general.go               |  6 +++---
 pkg/estimator/server/estimate.go              |  4 ++--
 .../plugins/resourcequota/resourcequota.go    |  2 +-
 pkg/estimator/server/replica/replica.go       |  2 +-
 .../customized/declarative/luavm/lua_test.go  |  6 +++---
 pkg/resourceinterpreter/interpreter.go        |  2 +-
 pkg/scheduler/core/generic_scheduler_test.go  |  2 +-
 .../select_clusters_by_cluster.go             |  2 +-
 pkg/util/helper/binding.go                    |  2 +-
 pkg/util/helper/workstatus_test.go            |  4 ++--
 pkg/util/overridemanager/overridemanager.go   |  3 ++-
 pkg/webhook/interpreter/response.go           |  2 +-
 21 files changed, 38 insertions(+), 38 deletions(-)

diff --git a/.go-version b/.go-version
index 2560439f0..2d27ccba1 100644
--- a/.go-version
+++ b/.go-version
@@ -1 +1 @@
-1.22.12
+1.23.7
diff --git a/go.mod b/go.mod
index 330d9f798..f00371bc0 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,6 @@
 module github.com/karmada-io/karmada
 
-go 1.22.12 // keep in sync with .go-version
+go 1.23.7 // keep in sync with .go-version
 
 require (
     github.com/adhocore/gronx v1.6.3
diff --git a/hack/verify-staticcheck.sh b/hack/verify-staticcheck.sh
index 43793fad1..d515a0837 100755
--- a/hack/verify-staticcheck.sh
+++ b/hack/verify-staticcheck.sh
@@ -19,7 +19,7 @@ set -o nounset
 set -o pipefail
 
 REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
-GOLANGCI_LINT_VER="v1.59.0"
+GOLANGCI_LINT_VER="v1.64.8"
 
 cd "${REPO_ROOT}"
 source "hack/util.sh"
diff --git a/pkg/controllers/binding/common.go b/pkg/controllers/binding/common.go
index 6b82dd5d4..ca8dfa59c 100644
--- a/pkg/controllers/binding/common.go
+++ b/pkg/controllers/binding/common.go
@@ -306,7 +306,7 @@ func divideReplicasByJobCompletions(workload *unstructured.Unstructured, cluster
     }
 
     if found {
-        targetClusters = helper.SpreadReplicasByTargetClusters(int32(completions), clusters, nil)
+        targetClusters = helper.SpreadReplicasByTargetClusters(int32(completions), clusters, nil) // #nosec G115: integer overflow conversion int64 -> int32
     }
 
     return targetClusters, nil
diff --git a/pkg/controllers/federatedhpa/federatedhpa_controller.go b/pkg/controllers/federatedhpa/federatedhpa_controller.go
index bfd834e54..4a2d5ff11 100644
--- a/pkg/controllers/federatedhpa/federatedhpa_controller.go
+++ b/pkg/controllers/federatedhpa/federatedhpa_controller.go
@@ -677,7 +677,7 @@ func (c *FHPAController) validateAndParseSelector(hpa *autoscalingv1alpha1.Feder
         errMsg := fmt.Sprintf("couldn't convert selector into a corresponding internal selector object: %v", err)
         c.EventRecorder.Event(hpa, corev1.EventTypeWarning, "InvalidSelector", errMsg)
         setCondition(hpa, autoscalingv2.ScalingActive, corev1.ConditionFalse, "InvalidSelector", errMsg)
-        return nil, fmt.Errorf(errMsg)
+        return nil, errors.New(errMsg)
     }
 
     hpaKey := selectors.Key{Name: hpa.Name, Namespace: hpa.Namespace}
@@ -692,7 +692,7 @@ func (c *FHPAController) validateAndParseSelector(hpa *autoscalingv1alpha1.Feder
         errMsg := fmt.Sprintf("pods by selector %v are controlled by multiple HPAs: %v", selector, selectingHpas)
         c.EventRecorder.Event(hpa, corev1.EventTypeWarning, "AmbiguousSelector", errMsg)
         setCondition(hpa, autoscalingv2.ScalingActive, corev1.ConditionFalse, "AmbiguousSelector", errMsg)
-        return nil, fmt.Errorf(errMsg)
+        return nil, errors.New(errMsg)
     }
 
     return parsedSelector, nil
@@ -812,7 +812,7 @@ func (c *FHPAController) computeStatusForObjectMetric(specReplicas, statusReplic
         return replicaCountProposal, timestampProposal, fmt.Sprintf("external metric %s(%+v)", metricSpec.Object.Metric.Name, metricSpec.Object.Metric.Selector), autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
     }
     errMsg := "invalid object metric source: neither a value target nor an average value target was set"
-    err = fmt.Errorf(errMsg)
+    err = errors.New(errMsg)
     condition = c.getUnableComputeReplicaCountCondition(hpa, "FailedGetObjectMetric", err)
     return 0, time.Time{}, "", condition, err
 }
@@ -859,7 +859,7 @@ func (c *FHPAController) computeStatusForResourceMetricGeneric(ctx context.Conte
 
     if target.AverageUtilization == nil {
         errMsg := "invalid resource metric source: neither an average utilization target nor an average value target was set"
-        return 0, nil, time.Time{}, "", condition, fmt.Errorf(errMsg)
+        return 0, nil, time.Time{}, "", condition, errors.New(errMsg)
     }
 
     targetUtilization := *target.AverageUtilization
@@ -1286,10 +1286,10 @@
         return currentReplicas // Scaling is disabled
     } else if scalingRules.SelectPolicy != nil && *scalingRules.SelectPolicy == autoscalingv2.MinChangePolicySelect {
         result = math.MaxInt32
-        selectPolicyFn = min // For scaling up, the lowest change ('min' policy) produces a minimum value
+        selectPolicyFn = minOfInt32 // For scaling up, the lowest change ('min' policy) produces a minimum value
     } else {
         result = math.MinInt32
-        selectPolicyFn = max // Use the default policy otherwise to produce a highest possible change
+        selectPolicyFn = maxOfInt32 // Use the default policy otherwise to produce a highest possible change
     }
     for _, policy := range scalingRules.Policies {
         replicasAddedInCurrentPeriod := getReplicasChangePerPeriod(policy.PeriodSeconds, scaleUpEvents)
@@ -1315,10 +1315,10 @@ func calculateScaleDownLimitWithBehaviors(currentReplicas int32, scaleUpEvents,
         return currentReplicas // Scaling is disabled
     } else if scalingRules.SelectPolicy != nil && *scalingRules.SelectPolicy == autoscalingv2.MinChangePolicySelect {
         result = math.MinInt32
-        selectPolicyFn = max // For scaling down, the lowest change ('min' policy) produces a maximum value
+        selectPolicyFn = maxOfInt32 // For scaling down, the lowest change ('min' policy) produces a maximum value
     } else {
         result = math.MaxInt32
-        selectPolicyFn = min // Use the default policy otherwise to produce a highest possible change
+        selectPolicyFn = minOfInt32 // Use the default policy otherwise to produce a highest possible change
     }
     for _, policy := range scalingRules.Policies {
         replicasAddedInCurrentPeriod := getReplicasChangePerPeriod(policy.PeriodSeconds, scaleUpEvents)
@@ -1415,14 +1415,14 @@ func setConditionInList(inputList []autoscalingv2.HorizontalPodAutoscalerConditi
     return resList
 }
 
-func max(a, b int32) int32 {
+func maxOfInt32(a, b int32) int32 {
     if a >= b {
         return a
     }
     return b
 }
 
-func min(a, b int32) int32 {
+func minOfInt32(a, b int32) int32 {
     if a <= b {
         return a
     }
diff --git a/pkg/controllers/federatedhpa/metrics/utilization.go b/pkg/controllers/federatedhpa/metrics/utilization.go
index e51215913..313a55ec4 100644
--- a/pkg/controllers/federatedhpa/metrics/utilization.go
+++ b/pkg/controllers/federatedhpa/metrics/utilization.go
@@ -46,7 +46,7 @@ func GetResourceUtilizationRatio(metrics PodMetricsInfo, requests map[string]int
         return 0, 0, 0, fmt.Errorf("no metrics returned matched known pods")
     }
 
-    currentUtilization = int32((metricsTotal * 100) / requestsTotal)
+    currentUtilization = int32((metricsTotal * 100) / requestsTotal) // #nosec G115: integer overflow conversion int64 -> int32
 
     return float64(currentUtilization) / float64(targetUtilization), currentUtilization, metricsTotal / int64(numEntries), nil
 }
diff --git a/pkg/controllers/status/cluster_status_controller.go b/pkg/controllers/status/cluster_status_controller.go
index e3e361723..44cc8a0ce 100644
--- a/pkg/controllers/status/cluster_status_controller.go
+++ b/pkg/controllers/status/cluster_status_controller.go
@@ -553,8 +553,8 @@ func getNodeSummary(nodes []*corev1.Node) *clusterv1alpha1.NodeSummary {
     }
 
     nodeSummary := &clusterv1alpha1.NodeSummary{}
-    nodeSummary.TotalNum = int32(totalNum)
-    nodeSummary.ReadyNum = int32(readyNum)
+    nodeSummary.TotalNum = int32(totalNum) // #nosec G115: integer overflow conversion int -> int32
+    nodeSummary.ReadyNum = int32(readyNum) // #nosec G115: integer overflow conversion int -> int32
 
     return nodeSummary
 }
diff --git a/pkg/controllers/workloadrebalancer/workloadrebalancer_controller_test.go b/pkg/controllers/workloadrebalancer/workloadrebalancer_controller_test.go
index 46d89b701..4c730c0be 100644
--- a/pkg/controllers/workloadrebalancer/workloadrebalancer_controller_test.go
+++ b/pkg/controllers/workloadrebalancer/workloadrebalancer_controller_test.go
@@ -462,8 +462,7 @@ func newClusterRoleObjectReference(obj *rbacv1.ClusterRole) appsv1alpha1.ObjectR
 
 // Helper function for generating random suffix
 func randomSuffix() string {
-    max := big.NewInt(10000)
-    n, err := rand.Int(rand.Reader, max)
+    n, err := rand.Int(rand.Reader, big.NewInt(10000))
     if err != nil {
         // In a test setup, it's unlikely we'll hit this error
         panic(fmt.Sprintf("failed to generate random number: %v", err))
diff --git a/pkg/estimator/client/cache.go b/pkg/estimator/client/cache.go
index 834915b6c..d21c3be08 100644
--- a/pkg/estimator/client/cache.go
+++ b/pkg/estimator/client/cache.go
@@ -110,7 +110,7 @@ func EstablishConnection(kubeClient kubernetes.Interface, serviceInfo SchedulerE
     }
 
     serverAddrs, err := resolveCluster(kubeClient, serviceInfo.Namespace,
-        names.GenerateEstimatorServiceName(serviceInfo.NamePrefix, serviceInfo.Name), int32(grpcConfig.TargetPort))
+        names.GenerateEstimatorServiceName(serviceInfo.NamePrefix, serviceInfo.Name), int32(grpcConfig.TargetPort)) // #nosec G115: integer overflow conversion int -> int32
     if err != nil {
         return err
     }
diff --git a/pkg/estimator/client/general.go b/pkg/estimator/client/general.go
index cfaebd6cb..d0e7368e5 100644
--- a/pkg/estimator/client/general.go
+++ b/pkg/estimator/client/general.go
@@ -66,7 +66,7 @@ func (ge *GeneralEstimator) maxAvailableReplicas(cluster *clusterv1alpha1.Cluste
     }
 
     if replicaRequirements == nil {
-        return int32(maximumReplicas)
+        return int32(maximumReplicas) // #nosec G115: integer overflow conversion int64 -> int32
     }
 
     // if the allocatableModelings from the cluster status are empty possibly due to
@@ -80,7 +80,7 @@ func (ge *GeneralEstimator) maxAvailableReplicas(cluster *clusterv1alpha1.Cluste
                 maximumReplicas = num
             }
 
-            return int32(maximumReplicas)
+            return int32(maximumReplicas) // #nosec G115: integer overflow conversion int64 -> int32
         }
         klog.Info(err.Error())
     }
@@ -90,7 +90,7 @@ func (ge *GeneralEstimator) maxAvailableReplicas(cluster *clusterv1alpha1.Cluste
         maximumReplicas = num
     }
 
-    return int32(maximumReplicas)
+    return int32(maximumReplicas) // #nosec G115: integer overflow conversion int64 -> int32
 }
 
 func getAllowedPodNumber(resourceSummary *clusterv1alpha1.ResourceSummary) int64 {
diff --git a/pkg/estimator/server/estimate.go b/pkg/estimator/server/estimate.go
index c6cafb475..7687079b4 100644
--- a/pkg/estimator/server/estimate.go
+++ b/pkg/estimator/server/estimate.go
@@ -83,7 +83,7 @@ func (es *AccurateSchedulerEstimatorServer) estimateReplicas(
     }
 
     if !ret.IsSuccess() && !ret.IsNoOperation() {
-        return replicas, fmt.Errorf(fmt.Sprintf("estimate replice plugins fails with %s", ret.Reasons()))
+        return replicas, fmt.Errorf("estimate replica plugins fails with %s", ret.Reasons())
     }
     processNode := func(i int) {
         node := allNodes[i]
@@ -108,5 +108,5 @@ func (es *AccurateSchedulerEstimatorServer) nodeMaxAvailableReplica(node *framew
     // do not contain pod resources. So after subtraction, we should cope with allowed pod
     // number manually which is the upper bound of this node available replicas.
     rest.AllowedPodNumber = util.MaxInt64(rest.AllowedPodNumber-int64(len(node.Pods)), 0)
-    return int32(rest.MaxDivided(rl))
+    return int32(rest.MaxDivided(rl)) // #nosec G115: integer overflow conversion int64 -> int32
 }
diff --git a/pkg/estimator/server/framework/plugins/resourcequota/resourcequota.go b/pkg/estimator/server/framework/plugins/resourcequota/resourcequota.go
index c1c28174c..93c889441 100644
--- a/pkg/estimator/server/framework/plugins/resourcequota/resourcequota.go
+++ b/pkg/estimator/server/framework/plugins/resourcequota/resourcequota.go
@@ -171,7 +171,7 @@ func (e *resourceQuotaEvaluator) evaluate(replicaRequirements *pb.ReplicaRequire
             continue
         }
 
-        replica := int32(allowed)
+        replica := int32(allowed) // #nosec G115: integer overflow conversion int64 -> int32
         if replica < result {
             result = replica
         }
diff --git a/pkg/estimator/server/replica/replica.go b/pkg/estimator/server/replica/replica.go
index 7dbd7a8f5..fcf98331c 100644
--- a/pkg/estimator/server/replica/replica.go
+++ b/pkg/estimator/server/replica/replica.go
@@ -66,7 +66,7 @@ func GetUnschedulablePodsOfWorkload(unstructObj *unstructured.Unstructured, thre
         // TODO(Garrybest): add abstract workload
         return 0, fmt.Errorf("kind(%s) of workload(%s) is not supported", unstructObj.GetKind(), klog.KObj(unstructObj).String())
     }
-    return int32(unschedulable), nil
+    return int32(unschedulable), nil // #nosec G115: integer overflow conversion int -> int32
 }
 
 func podUnschedulable(pod *corev1.Pod, threshold time.Duration) bool {
diff --git a/pkg/resourceinterpreter/customized/declarative/luavm/lua_test.go b/pkg/resourceinterpreter/customized/declarative/luavm/lua_test.go
index e17e955ce..941081ca3 100644
--- a/pkg/resourceinterpreter/customized/declarative/luavm/lua_test.go
+++ b/pkg/resourceinterpreter/customized/declarative/luavm/lua_test.go
@@ -255,7 +255,7 @@ func TestReviseDeploymentReplica(t *testing.T) {
         t.Run(tt.name, func(t *testing.T) {
             res, err := vm.ReviseReplica(tt.object, int64(tt.replica), tt.luaScript)
             if err != nil {
-                t.Errorf(err.Error())
+                t.Error(err.Error())
             }
             deploy := &appsv1.Deployment{}
             err = runtime.DefaultUnstructuredConverter.FromUnstructured(res.UnstructuredContent(), deploy)
@@ -263,7 +263,7 @@ func TestReviseDeploymentReplica(t *testing.T) {
                 t.Log("Success Test")
             }
             if err != nil {
-                t.Errorf(err.Error())
+                t.Error(err.Error())
             }
         })
     }
@@ -461,7 +461,7 @@ func TestRetainDeployment(t *testing.T) {
         t.Run(tt.name, func(t *testing.T) {
             res, err := vm.Retain(tt.desiredObj, tt.observedObj, tt.luaScript)
             if err != nil {
-                t.Errorf(err.Error())
+                t.Error(err.Error())
             }
             if !reflect.DeepEqual(res.UnstructuredContent(), tt.observedObj.Object) {
                 t.Errorf("Retain() got = %v, want %v", res.UnstructuredContent(), tt.observedObj.Object)
diff --git a/pkg/resourceinterpreter/interpreter.go b/pkg/resourceinterpreter/interpreter.go
index db498f7dc..2ed55eae1 100644
--- a/pkg/resourceinterpreter/interpreter.go
+++ b/pkg/resourceinterpreter/interpreter.go
@@ -161,7 +161,7 @@ func (i *customResourceInterpreterImpl) ReviseReplica(object *unstructured.Unstr
     obj, hookEnabled, err = i.customizedInterpreter.Patch(context.TODO(), &request.Attributes{
         Operation:   configv1alpha1.InterpreterOperationReviseReplica,
         Object:      object,
-        ReplicasSet: int32(replica),
+        ReplicasSet: int32(replica), // #nosec G115: integer overflow conversion int64 -> int32
     })
     if err != nil {
         return nil, err
diff --git a/pkg/scheduler/core/generic_scheduler_test.go b/pkg/scheduler/core/generic_scheduler_test.go
index 14bfb3010..04e0906e6 100644
--- a/pkg/scheduler/core/generic_scheduler_test.go
+++ b/pkg/scheduler/core/generic_scheduler_test.go
@@ -52,7 +52,7 @@ func stringToTargetCluster(str string) []workv1alpha2.TargetCluster {
     tcs := make([]workv1alpha2.TargetCluster, len(arr))
     for i, replicas := range arr {
         num, _ := strconv.Atoi(replicas)
-        tcs[i].Replicas = int32(num) // #nosec G109
+        tcs[i].Replicas = int32(num) // #nosec G109,G115: integer overflow conversion int -> int32
         tcs[i].Name = indexToCluster[i]
     }
     return tcs
diff --git a/pkg/scheduler/core/spreadconstraint/select_clusters_by_cluster.go b/pkg/scheduler/core/spreadconstraint/select_clusters_by_cluster.go
index 687a8ef4d..069249bb7 100644
--- a/pkg/scheduler/core/spreadconstraint/select_clusters_by_cluster.go
+++ b/pkg/scheduler/core/spreadconstraint/select_clusters_by_cluster.go
@@ -40,7 +40,7 @@ func selectBestClustersByCluster(spreadConstraint policyv1alpha1.SpreadConstrain
     if needReplicas == InvalidReplicas {
         clusterInfos = groupClustersInfo.Clusters[:needCnt]
     } else {
-        clusterInfos = selectClustersByAvailableResource(groupClustersInfo.Clusters, int32(needCnt), needReplicas)
+        clusterInfos = selectClustersByAvailableResource(groupClustersInfo.Clusters, int32(needCnt), needReplicas) // #nosec G115: integer overflow conversion int -> int32
         if len(clusterInfos) == 0 {
             return nil, fmt.Errorf("no enough resource when selecting %d clusters", needCnt)
         }
diff --git a/pkg/util/helper/binding.go b/pkg/util/helper/binding.go
index d3a605186..02f62087d 100644
--- a/pkg/util/helper/binding.go
+++ b/pkg/util/helper/binding.go
@@ -123,7 +123,7 @@ func (a *Dispenser) TakeByWeight(w ClusterWeightInfoList) {
     result := make([]workv1alpha2.TargetCluster, 0, w.Len())
     remain := a.NumReplicas
     for _, info := range w {
-        replicas := int32(info.Weight * int64(a.NumReplicas) / sum)
+        replicas := int32(info.Weight * int64(a.NumReplicas) / sum) // #nosec G115: integer overflow conversion int64 -> int32
         result = append(result, workv1alpha2.TargetCluster{
             Name:     info.ClusterName,
             Replicas: replicas,
diff --git a/pkg/util/helper/workstatus_test.go b/pkg/util/helper/workstatus_test.go
index 5b70f05b1..ba424ffc7 100644
--- a/pkg/util/helper/workstatus_test.go
+++ b/pkg/util/helper/workstatus_test.go
@@ -230,7 +230,7 @@ func TestAggregateResourceBindingWorkStatus(t *testing.T) {
             }
 
             // Verify aggregated status
-            if tt.works != nil && len(tt.works) > 0 {
+            if len(tt.works) > 0 {
                 assert.Len(t, updatedBinding.Status.AggregatedStatus, len(tt.works))
                 for _, status := range updatedBinding.Status.AggregatedStatus {
                     assert.Equal(t, tt.expectedApplied, status.Applied)
@@ -427,7 +427,7 @@ func TestAggregateClusterResourceBindingWorkStatus(t *testing.T) {
             }
 
             // Verify aggregated status
-            if tt.works != nil && len(tt.works) > 0 {
+            if len(tt.works) > 0 {
                 assert.Len(t, updatedBinding.Status.AggregatedStatus, len(tt.works))
                 // For multiple clusters case, verify specific cluster status
                 for _, status := range updatedBinding.Status.AggregatedStatus {
diff --git a/pkg/util/overridemanager/overridemanager.go b/pkg/util/overridemanager/overridemanager.go
index f6c7abbf4..209d09e44 100644
--- a/pkg/util/overridemanager/overridemanager.go
+++ b/pkg/util/overridemanager/overridemanager.go
@@ -19,6 +19,7 @@ package overridemanager
 import (
     "context"
     "encoding/json"
+    "errors"
     "fmt"
     "reflect"
     "sort"
@@ -423,7 +424,7 @@ func applyFieldOverriders(rawObj *unstructured.Unstructured, FieldOverriders []p
         if kind != reflect.String {
             errMsg := fmt.Sprintf("Get object's value by overrider's path(%s) is not string", FieldOverriders[index].FieldPath)
             klog.Errorf(errMsg)
-            return fmt.Errorf(errMsg)
+            return errors.New(errMsg)
         }
         dataBytes := []byte(res.(string))
         klog.V(4).Infof("Parsed JSON patches by FieldOverriders[%d](%+v)", index, FieldOverriders[index])
diff --git a/pkg/webhook/interpreter/response.go b/pkg/webhook/interpreter/response.go
index 8db810300..3c51e40e6 100644
--- a/pkg/webhook/interpreter/response.go
+++ b/pkg/webhook/interpreter/response.go
@@ -53,7 +53,7 @@ func ValidationResponse(successful bool, msg string) Response {
         ResourceInterpreterResponse: configv1alpha1.ResourceInterpreterResponse{
             Successful: successful,
             Status: &configv1alpha1.RequestStatus{
-                Code:    int32(code),
+                Code:    int32(code), // #nosec G115: integer overflow conversion int -> int32
                 Message: msg,
             },