Bump golang to 1.23.7 and golangci-lint to 1.64.8

Signed-off-by: wei-chenglai <qazwsx0939059006@gmail.com>
wei-chenglai 2025-03-20 10:19:14 -04:00
parent 56b72f1a80
commit d231a87aed
21 changed files with 38 additions and 38 deletions

View File

@@ -1 +1 @@
1.22.12
1.23.7

go.mod
View File

@@ -1,6 +1,6 @@
module github.com/karmada-io/karmada
go 1.22.12 // keep in sync with .go-version
go 1.23.7 // keep in sync with .go-version
require (
github.com/adhocore/gronx v1.6.3

View File

@@ -19,7 +19,7 @@ set -o nounset
set -o pipefail
REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
GOLANGCI_LINT_VER="v1.59.0"
GOLANGCI_LINT_VER="v1.64.8"
cd "${REPO_ROOT}"
source "hack/util.sh"

View File

@@ -306,7 +306,7 @@ func divideReplicasByJobCompletions(workload *unstructured.Unstructured, cluster
}
if found {
targetClusters = helper.SpreadReplicasByTargetClusters(int32(completions), clusters, nil)
targetClusters = helper.SpreadReplicasByTargetClusters(int32(completions), clusters, nil) // #nosec G115: integer overflow conversion int64 -> int32
}
return targetClusters, nil
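
The // #nosec G115 annotations added throughout this commit suppress gosec's G115 finding (possible integer overflow when narrowing an integer conversion), a check introduced in newer gosec releases and picked up here via the golangci-lint bump. A suppression asserts the value is known to fit; where that is not guaranteed, a bounds-checked conversion is the alternative. A minimal sketch, using a hypothetical safeInt32 helper that is not part of this change:

    package conv

    import (
        "fmt"
        "math"
    )

    // safeInt32 narrows an int64 to int32, returning an error instead of
    // silently wrapping when the value falls outside the int32 range.
    func safeInt32(v int64) (int32, error) {
        if v > math.MaxInt32 || v < math.MinInt32 {
            return 0, fmt.Errorf("value %d overflows int32", v)
        }
        return int32(v), nil // #nosec G115 -- range checked above
    }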

View File

@@ -677,7 +677,7 @@ func (c *FHPAController) validateAndParseSelector(hpa *autoscalingv1alpha1.Feder
errMsg := fmt.Sprintf("couldn't convert selector into a corresponding internal selector object: %v", err)
c.EventRecorder.Event(hpa, corev1.EventTypeWarning, "InvalidSelector", errMsg)
setCondition(hpa, autoscalingv2.ScalingActive, corev1.ConditionFalse, "InvalidSelector", errMsg)
return nil, fmt.Errorf(errMsg)
return nil, errors.New(errMsg)
}
hpaKey := selectors.Key{Name: hpa.Name, Namespace: hpa.Namespace}
@@ -692,7 +692,7 @@ func (c *FHPAController) validateAndParseSelector(hpa *autoscalingv1alpha1.Feder
errMsg := fmt.Sprintf("pods by selector %v are controlled by multiple HPAs: %v", selector, selectingHpas)
c.EventRecorder.Event(hpa, corev1.EventTypeWarning, "AmbiguousSelector", errMsg)
setCondition(hpa, autoscalingv2.ScalingActive, corev1.ConditionFalse, "AmbiguousSelector", errMsg)
return nil, fmt.Errorf(errMsg)
return nil, errors.New(errMsg)
}
return parsedSelector, nil
@@ -812,7 +812,7 @@ func (c *FHPAController) computeStatusForObjectMetric(specReplicas, statusReplic
return replicaCountProposal, timestampProposal, fmt.Sprintf("external metric %s(%+v)", metricSpec.Object.Metric.Name, metricSpec.Object.Metric.Selector), autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
}
errMsg := "invalid object metric source: neither a value target nor an average value target was set"
err = fmt.Errorf(errMsg)
err = errors.New(errMsg)
condition = c.getUnableComputeReplicaCountCondition(hpa, "FailedGetObjectMetric", err)
return 0, time.Time{}, "", condition, err
}
@@ -859,7 +859,7 @@ func (c *FHPAController) computeStatusForResourceMetricGeneric(ctx context.Conte
if target.AverageUtilization == nil {
errMsg := "invalid resource metric source: neither an average utilization target nor an average value target was set"
return 0, nil, time.Time{}, "", condition, fmt.Errorf(errMsg)
return 0, nil, time.Time{}, "", condition, errors.New(errMsg)
}
targetUtilization := *target.AverageUtilization
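
The fmt.Errorf(errMsg) to errors.New(errMsg) rewrites above (together with the "errors" imports this commit adds where needed) apparently address the printf-style vet warning about passing a non-constant format string to fmt.Errorf: if errMsg ever contained a % character, fmt would try to parse it as a verb and garble the message, while errors.New keeps the text verbatim. A minimal sketch of the difference, using a made-up message:

    package main

    import (
        "errors"
        "fmt"
    )

    func main() {
        msg := "cpu usage above 90%" // note the literal '%'
        fmt.Println(fmt.Errorf(msg)) // '%' is parsed as an incomplete verb; output is garbled
        fmt.Println(errors.New(msg)) // message preserved verbatim
    }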
@@ -1286,10 +1286,10 @@ func calculateScaleUpLimitWithScalingRules(currentReplicas int32, scaleUpEvents,
return currentReplicas // Scaling is disabled
} else if scalingRules.SelectPolicy != nil && *scalingRules.SelectPolicy == autoscalingv2.MinChangePolicySelect {
result = math.MaxInt32
selectPolicyFn = min // For scaling up, the lowest change ('min' policy) produces a minimum value
selectPolicyFn = minOfInt32 // For scaling up, the lowest change ('min' policy) produces a minimum value
} else {
result = math.MinInt32
selectPolicyFn = max // Use the default policy otherwise to produce a highest possible change
selectPolicyFn = maxOfInt32 // Use the default policy otherwise to produce a highest possible change
}
for _, policy := range scalingRules.Policies {
replicasAddedInCurrentPeriod := getReplicasChangePerPeriod(policy.PeriodSeconds, scaleUpEvents)
@@ -1315,10 +1315,10 @@ func calculateScaleDownLimitWithBehaviors(currentReplicas int32, scaleUpEvents,
return currentReplicas // Scaling is disabled
} else if scalingRules.SelectPolicy != nil && *scalingRules.SelectPolicy == autoscalingv2.MinChangePolicySelect {
result = math.MinInt32
selectPolicyFn = max // For scaling down, the lowest change ('min' policy) produces a maximum value
selectPolicyFn = maxOfInt32 // For scaling down, the lowest change ('min' policy) produces a maximum value
} else {
result = math.MaxInt32
selectPolicyFn = min // Use the default policy otherwise to produce a highest possible change
selectPolicyFn = minOfInt32 // Use the default policy otherwise to produce a highest possible change
}
for _, policy := range scalingRules.Policies {
replicasAddedInCurrentPeriod := getReplicasChangePerPeriod(policy.PeriodSeconds, scaleUpEvents)
@@ -1415,14 +1415,14 @@ func setConditionInList(inputList []autoscalingv2.HorizontalPodAutoscalerConditi
return resList
}
func max(a, b int32) int32 {
func maxOfInt32(a, b int32) int32 {
if a >= b {
return a
}
return b
}
func min(a, b int32) int32 {
func minOfInt32(a, b int32) int32 {
if a <= b {
return a
}
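
Renaming the package-local min/max helpers to minOfInt32/maxOfInt32 presumably avoids shadowing the min and max builtins introduced in Go 1.21, which linters such as predeclared report. With the module now on Go 1.23, the builtins could in principle replace these helpers entirely; a minimal sketch:

    package main

    import "fmt"

    func main() {
        // Since Go 1.21, min and max are predeclared builtins that work for
        // any ordered type, including int32, without a local helper.
        var a, b int32 = 3, 7
        fmt.Println(max(a, b), min(a, b)) // prints: 7 3
    }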

View File

@@ -46,7 +46,7 @@ func GetResourceUtilizationRatio(metrics PodMetricsInfo, requests map[string]int
return 0, 0, 0, fmt.Errorf("no metrics returned matched known pods")
}
currentUtilization = int32((metricsTotal * 100) / requestsTotal)
currentUtilization = int32((metricsTotal * 100) / requestsTotal) // #nosec G115: integer overflow conversion int64 -> int32
return float64(currentUtilization) / float64(targetUtilization), currentUtilization, metricsTotal / int64(numEntries), nil
}

View File

@@ -553,8 +553,8 @@ func getNodeSummary(nodes []*corev1.Node) *clusterv1alpha1.NodeSummary {
}
nodeSummary := &clusterv1alpha1.NodeSummary{}
nodeSummary.TotalNum = int32(totalNum)
nodeSummary.ReadyNum = int32(readyNum)
nodeSummary.TotalNum = int32(totalNum) // #nosec G115: integer overflow conversion int -> int32
nodeSummary.ReadyNum = int32(readyNum) // #nosec G115: integer overflow conversion int -> int32
return nodeSummary
}

View File

@@ -462,8 +462,7 @@ func newClusterRoleObjectReference(obj *rbacv1.ClusterRole) appsv1alpha1.ObjectR
// Helper function for generating random suffix
func randomSuffix() string {
max := big.NewInt(10000)
n, err := rand.Int(rand.Reader, max)
n, err := rand.Int(rand.Reader, big.NewInt(10000))
if err != nil {
// In a test setup, it's unlikely we'll hit this error
panic(fmt.Sprintf("failed to generate random number: %v", err))
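
The randomSuffix change drops the local variable named max, which likewise shadowed the Go 1.21 builtin, by inlining the bound into the rand.Int call. A hypothetical variant that keeps a named bound without shadowing (the upperBound name and the returned format are illustrative, not taken from the original):

    package main

    import (
        "crypto/rand"
        "fmt"
        "math/big"
    )

    // randomSuffix returns a random numeric suffix below 10000; the bound is
    // named upperBound so it does not shadow the max builtin.
    func randomSuffix() string {
        upperBound := big.NewInt(10000)
        n, err := rand.Int(rand.Reader, upperBound)
        if err != nil {
            panic(fmt.Sprintf("failed to generate random number: %v", err))
        }
        return n.String()
    }

    func main() { fmt.Println(randomSuffix()) }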

View File

@@ -110,7 +110,7 @@ func EstablishConnection(kubeClient kubernetes.Interface, serviceInfo SchedulerE
}
serverAddrs, err := resolveCluster(kubeClient, serviceInfo.Namespace,
names.GenerateEstimatorServiceName(serviceInfo.NamePrefix, serviceInfo.Name), int32(grpcConfig.TargetPort))
names.GenerateEstimatorServiceName(serviceInfo.NamePrefix, serviceInfo.Name), int32(grpcConfig.TargetPort)) // #nosec G115: integer overflow conversion int -> int32
if err != nil {
return err
}

View File

@@ -66,7 +66,7 @@ func (ge *GeneralEstimator) maxAvailableReplicas(cluster *clusterv1alpha1.Cluste
}
if replicaRequirements == nil {
return int32(maximumReplicas)
return int32(maximumReplicas) // #nosec G115: integer overflow conversion int64 -> int32
}
// if the allocatableModelings from the cluster status are empty possibly due to
@@ -80,7 +80,7 @@ func (ge *GeneralEstimator) maxAvailableReplicas(cluster *clusterv1alpha1.Cluste
maximumReplicas = num
}
return int32(maximumReplicas)
return int32(maximumReplicas) // #nosec G115: integer overflow conversion int64 -> int32
}
klog.Info(err.Error())
}
@@ -90,7 +90,7 @@ func (ge *GeneralEstimator) maxAvailableReplicas(cluster *clusterv1alpha1.Cluste
maximumReplicas = num
}
return int32(maximumReplicas)
return int32(maximumReplicas) // #nosec G115: integer overflow conversion int64 -> int32
}
func getAllowedPodNumber(resourceSummary *clusterv1alpha1.ResourceSummary) int64 {

View File

@@ -83,7 +83,7 @@ func (es *AccurateSchedulerEstimatorServer) estimateReplicas(
}
if !ret.IsSuccess() && !ret.IsNoOperation() {
return replicas, fmt.Errorf(fmt.Sprintf("estimate replice plugins fails with %s", ret.Reasons()))
return replicas, fmt.Errorf("estimate replica plugins fails with %s", ret.Reasons())
}
processNode := func(i int) {
node := allNodes[i]
@@ -108,5 +108,5 @@ func (es *AccurateSchedulerEstimatorServer) nodeMaxAvailableReplica(node *framew
// do not contain pod resources. So after subtraction, we should cope with allowed pod
// number manually which is the upper bound of this node available replicas.
rest.AllowedPodNumber = util.MaxInt64(rest.AllowedPodNumber-int64(len(node.Pods)), 0)
return int32(rest.MaxDivided(rl))
return int32(rest.MaxDivided(rl)) // #nosec G115: integer overflow conversion int64 -> int32
}
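
The estimateReplicas change collapses fmt.Errorf(fmt.Sprintf(...)) into a single fmt.Errorf call (and fixes the "replice" typo): the outer Errorf was receiving an already-formatted, non-constant string, which is redundant and triggers the same non-constant-format-string warning. The pattern in isolation, with a hypothetical reasons value:

    package main

    import "fmt"

    func main() {
        reasons := "insufficient cpu on all nodes"
        // Redundant double formatting; the outer Errorf gets a non-constant format string.
        err1 := fmt.Errorf(fmt.Sprintf("estimate replica plugins fails with %s", reasons))
        // Equivalent and lint-clean: format directly in Errorf.
        err2 := fmt.Errorf("estimate replica plugins fails with %s", reasons)
        fmt.Println(err1)
        fmt.Println(err2)
    }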

View File

@@ -171,7 +171,7 @@ func (e *resourceQuotaEvaluator) evaluate(replicaRequirements *pb.ReplicaRequire
continue
}
replica := int32(allowed)
replica := int32(allowed) // #nosec G115: integer overflow conversion int64 -> int32
if replica < result {
result = replica
}

View File

@@ -66,7 +66,7 @@ func GetUnschedulablePodsOfWorkload(unstructObj *unstructured.Unstructured, thre
// TODO(Garrybest): add abstract workload
return 0, fmt.Errorf("kind(%s) of workload(%s) is not supported", unstructObj.GetKind(), klog.KObj(unstructObj).String())
}
return int32(unschedulable), nil
return int32(unschedulable), nil // #nosec G115: integer overflow conversion int -> int32
}
func podUnschedulable(pod *corev1.Pod, threshold time.Duration) bool {

View File

@@ -255,7 +255,7 @@ func TestReviseDeploymentReplica(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
res, err := vm.ReviseReplica(tt.object, int64(tt.replica), tt.luaScript)
if err != nil {
t.Errorf(err.Error())
t.Error(err.Error())
}
deploy := &appsv1.Deployment{}
err = runtime.DefaultUnstructuredConverter.FromUnstructured(res.UnstructuredContent(), deploy)
@@ -263,7 +263,7 @@
t.Log("Success Test")
}
if err != nil {
t.Errorf(err.Error())
t.Error(err.Error())
}
})
}
@@ -461,7 +461,7 @@ func TestRetainDeployment(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
res, err := vm.Retain(tt.desiredObj, tt.observedObj, tt.luaScript)
if err != nil {
t.Errorf(err.Error())
t.Error(err.Error())
}
if !reflect.DeepEqual(res.UnstructuredContent(), tt.observedObj.Object) {
t.Errorf("Retain() got = %v, want %v", res.UnstructuredContent(), tt.observedObj.Object)
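
Swapping t.Errorf(err.Error()) for t.Error(err.Error()) in these tests follows the same rule: Errorf treats its first argument as a format string, so an error message containing % would be reinterpreted, while Error logs its arguments as plain values. A minimal sketch with a made-up failing call:

    package demo

    import (
        "fmt"
        "testing"
    )

    func doWork() error {
        return fmt.Errorf("quota exceeded (90%% used)") // message contains a literal '%'
    }

    func TestDoWork(t *testing.T) {
        if err := doWork(); err != nil {
            // t.Errorf(err.Error()) would re-parse the '%' as a verb;
            // t.Error logs the message verbatim.
            t.Error(err.Error())
        }
    }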

View File

@@ -161,7 +161,7 @@ func (i *customResourceInterpreterImpl) ReviseReplica(object *unstructured.Unstr
obj, hookEnabled, err = i.customizedInterpreter.Patch(context.TODO(), &request.Attributes{
Operation: configv1alpha1.InterpreterOperationReviseReplica,
Object: object,
ReplicasSet: int32(replica),
ReplicasSet: int32(replica), // #nosec G115: integer overflow conversion int64 -> int32
})
if err != nil {
return nil, err

View File

@@ -52,7 +52,7 @@ func stringToTargetCluster(str string) []workv1alpha2.TargetCluster {
tcs := make([]workv1alpha2.TargetCluster, len(arr))
for i, replicas := range arr {
num, _ := strconv.Atoi(replicas)
tcs[i].Replicas = int32(num) // #nosec G109
tcs[i].Replicas = int32(num) // #nosec G109,G115: integer overflow conversion int -> int32
tcs[i].Name = indexToCluster[i]
}
return tcs

View File

@@ -40,7 +40,7 @@ func selectBestClustersByCluster(spreadConstraint policyv1alpha1.SpreadConstrain
if needReplicas == InvalidReplicas {
clusterInfos = groupClustersInfo.Clusters[:needCnt]
} else {
clusterInfos = selectClustersByAvailableResource(groupClustersInfo.Clusters, int32(needCnt), needReplicas)
clusterInfos = selectClustersByAvailableResource(groupClustersInfo.Clusters, int32(needCnt), needReplicas) // #nosec G115: integer overflow conversion int -> int32
if len(clusterInfos) == 0 {
return nil, fmt.Errorf("no enough resource when selecting %d clusters", needCnt)
}

View File

@@ -123,7 +123,7 @@ func (a *Dispenser) TakeByWeight(w ClusterWeightInfoList) {
result := make([]workv1alpha2.TargetCluster, 0, w.Len())
remain := a.NumReplicas
for _, info := range w {
replicas := int32(info.Weight * int64(a.NumReplicas) / sum)
replicas := int32(info.Weight * int64(a.NumReplicas) / sum) // #nosec G115: integer overflow conversion int64 -> int32
result = append(result, workv1alpha2.TargetCluster{
Name: info.ClusterName,
Replicas: replicas,

View File

@@ -230,7 +230,7 @@ func TestAggregateResourceBindingWorkStatus(t *testing.T) {
}
// Verify aggregated status
if tt.works != nil && len(tt.works) > 0 {
if len(tt.works) > 0 {
assert.Len(t, updatedBinding.Status.AggregatedStatus, len(tt.works))
for _, status := range updatedBinding.Status.AggregatedStatus {
assert.Equal(t, tt.expectedApplied, status.Applied)
@@ -427,7 +427,7 @@ func TestAggregateClusterResourceBindingWorkStatus(t *testing.T) {
}
// Verify aggregated status
if tt.works != nil && len(tt.works) > 0 {
if len(tt.works) > 0 {
assert.Len(t, updatedBinding.Status.AggregatedStatus, len(tt.works))
// For multiple clusters case, verify specific cluster status
for _, status := range updatedBinding.Status.AggregatedStatus {
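
Dropping the tt.works != nil half of these conditions is safe because len is defined to return 0 for a nil slice, so the nil check was redundant; this is the pattern gosimple's S1009 rule reports. A short demonstration:

    package main

    import "fmt"

    func main() {
        var works []string                    // nil slice
        fmt.Println(works == nil, len(works)) // prints: true 0
        if len(works) > 0 {                   // equivalent to: works != nil && len(works) > 0
            fmt.Println("have works")
        }
    }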

View File

@@ -19,6 +19,7 @@ package overridemanager
import (
"context"
"encoding/json"
"errors"
"fmt"
"reflect"
"sort"
@@ -423,7 +424,7 @@ func applyFieldOverriders(rawObj *unstructured.Unstructured, FieldOverriders []p
if kind != reflect.String {
errMsg := fmt.Sprintf("Get object's value by overrider's path(%s) is not string", FieldOverriders[index].FieldPath)
klog.Errorf(errMsg)
return fmt.Errorf(errMsg)
return errors.New(errMsg)
}
dataBytes := []byte(res.(string))
klog.V(4).Infof("Parsed JSON patches by FieldOverriders[%d](%+v)", index, FieldOverriders[index])

View File

@@ -53,7 +53,7 @@ func ValidationResponse(successful bool, msg string) Response {
ResourceInterpreterResponse: configv1alpha1.ResourceInterpreterResponse{
Successful: successful,
Status: &configv1alpha1.RequestStatus{
Code: int32(code),
Code: int32(code), // #nosec G115: integer overflow conversion int -> int32
Message: msg,
},
},