Bump golang to 1.23.7 and golangci-lint to 1.64.8

Signed-off-by: wei-chenglai <qazwsx0939059006@gmail.com>
Author: wei-chenglai
Date: 2025-03-20 10:19:14 -04:00
Parent: 56b72f1a80
Commit: d231a87aed

21 changed files with 38 additions and 38 deletions


.go-version
@@ -1 +1 @@
-1.22.12
+1.23.7

go.mod

@@ -1,6 +1,6 @@
 module github.com/karmada-io/karmada
-go 1.22.12 // keep in sync with .go-version
+go 1.23.7 // keep in sync with .go-version
 require (
 	github.com/adhocore/gronx v1.6.3


@@ -19,7 +19,7 @@ set -o nounset
 set -o pipefail
 REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
-GOLANGCI_LINT_VER="v1.59.0"
+GOLANGCI_LINT_VER="v1.64.8"
 cd "${REPO_ROOT}"
 source "hack/util.sh"


@@ -306,7 +306,7 @@ func divideReplicasByJobCompletions(workload *unstructured.Unstructured, cluster
 	}
 	if found {
-		targetClusters = helper.SpreadReplicasByTargetClusters(int32(completions), clusters, nil)
+		targetClusters = helper.SpreadReplicasByTargetClusters(int32(completions), clusters, nil) // #nosec G115: integer overflow conversion int64 -> int32
 	}
 	return targetClusters, nil
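
Note: the // #nosec G115 comments added throughout this commit suppress gosec's G115 check (potential integer overflow on conversion), which the newer golangci-lint release reports for narrowing conversions such as int64 -> int32. Where a value is not already known to fit, a bounds-checked conversion is the alternative to suppression. A minimal standalone sketch; the helper name safeInt32 is hypothetical and not part of this change:

package main

import (
	"fmt"
	"math"
)

// safeInt32 clamps an int64 into the int32 range instead of letting the
// conversion wrap around, which is the hazard gosec G115 warns about.
func safeInt32(v int64) int32 {
	if v > math.MaxInt32 {
		return math.MaxInt32
	}
	if v < math.MinInt32 {
		return math.MinInt32
	}
	return int32(v)
}

func main() {
	fmt.Println(safeInt32(5_000_000_000)) // clamps to 2147483647
	fmt.Println(safeInt32(42))            // 42
}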


@@ -677,7 +677,7 @@ func (c *FHPAController) validateAndParseSelector(hpa *autoscalingv1alpha1.Feder
 		errMsg := fmt.Sprintf("couldn't convert selector into a corresponding internal selector object: %v", err)
 		c.EventRecorder.Event(hpa, corev1.EventTypeWarning, "InvalidSelector", errMsg)
 		setCondition(hpa, autoscalingv2.ScalingActive, corev1.ConditionFalse, "InvalidSelector", errMsg)
-		return nil, fmt.Errorf(errMsg)
+		return nil, errors.New(errMsg)
 	}
 	hpaKey := selectors.Key{Name: hpa.Name, Namespace: hpa.Namespace}
@@ -692,7 +692,7 @@ func (c *FHPAController) validateAndParseSelector(hpa *autoscalingv1alpha1.Feder
 		errMsg := fmt.Sprintf("pods by selector %v are controlled by multiple HPAs: %v", selector, selectingHpas)
 		c.EventRecorder.Event(hpa, corev1.EventTypeWarning, "AmbiguousSelector", errMsg)
 		setCondition(hpa, autoscalingv2.ScalingActive, corev1.ConditionFalse, "AmbiguousSelector", errMsg)
-		return nil, fmt.Errorf(errMsg)
+		return nil, errors.New(errMsg)
 	}
 	return parsedSelector, nil
@@ -812,7 +812,7 @@ func (c *FHPAController) computeStatusForObjectMetric(specReplicas, statusReplic
 		return replicaCountProposal, timestampProposal, fmt.Sprintf("external metric %s(%+v)", metricSpec.Object.Metric.Name, metricSpec.Object.Metric.Selector), autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
 	}
 	errMsg := "invalid object metric source: neither a value target nor an average value target was set"
-	err = fmt.Errorf(errMsg)
+	err = errors.New(errMsg)
 	condition = c.getUnableComputeReplicaCountCondition(hpa, "FailedGetObjectMetric", err)
 	return 0, time.Time{}, "", condition, err
 }
@@ -859,7 +859,7 @@ func (c *FHPAController) computeStatusForResourceMetricGeneric(ctx context.Conte
 	if target.AverageUtilization == nil {
 		errMsg := "invalid resource metric source: neither an average utilization target nor an average value target was set"
-		return 0, nil, time.Time{}, "", condition, fmt.Errorf(errMsg)
+		return 0, nil, time.Time{}, "", condition, errors.New(errMsg)
 	}
 	targetUtilization := *target.AverageUtilization
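
Note: the switch from fmt.Errorf(errMsg) to errors.New(errMsg) here (and in the overridemanager change later in this commit) avoids passing a non-constant string as a format string: any '%' inside errMsg would be parsed as a formatting verb, and the updated vet/golangci-lint setup flags that pattern. A standalone sketch of the difference, not repository code:

package main

import (
	"errors"
	"fmt"
)

func main() {
	errMsg := "utilization is above 80% threshold"

	// Treated as a format string: the "% t" sequence is parsed as a verb with
	// no operand, so the output is mangled with a %!t(MISSING)-style artifact.
	fmt.Println(fmt.Errorf(errMsg))

	// Kept verbatim: no format parsing happens at all.
	fmt.Println(errors.New(errMsg))

	// When interpolation is genuinely needed, use a constant format string.
	fmt.Println(fmt.Errorf("utilization is above %d%% threshold", 80))
}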
@@ -1286,10 +1286,10 @@ func calculateScaleUpLimitWithScalingRules(currentReplicas int32, scaleUpEvents,
 		return currentReplicas // Scaling is disabled
 	} else if scalingRules.SelectPolicy != nil && *scalingRules.SelectPolicy == autoscalingv2.MinChangePolicySelect {
 		result = math.MaxInt32
-		selectPolicyFn = min // For scaling up, the lowest change ('min' policy) produces a minimum value
+		selectPolicyFn = minOfInt32 // For scaling up, the lowest change ('min' policy) produces a minimum value
 	} else {
 		result = math.MinInt32
-		selectPolicyFn = max // Use the default policy otherwise to produce a highest possible change
+		selectPolicyFn = maxOfInt32 // Use the default policy otherwise to produce a highest possible change
 	}
 	for _, policy := range scalingRules.Policies {
 		replicasAddedInCurrentPeriod := getReplicasChangePerPeriod(policy.PeriodSeconds, scaleUpEvents)
@@ -1315,10 +1315,10 @@ func calculateScaleDownLimitWithBehaviors(currentReplicas int32, scaleUpEvents,
 		return currentReplicas // Scaling is disabled
 	} else if scalingRules.SelectPolicy != nil && *scalingRules.SelectPolicy == autoscalingv2.MinChangePolicySelect {
 		result = math.MinInt32
-		selectPolicyFn = max // For scaling down, the lowest change ('min' policy) produces a maximum value
+		selectPolicyFn = maxOfInt32 // For scaling down, the lowest change ('min' policy) produces a maximum value
 	} else {
 		result = math.MaxInt32
-		selectPolicyFn = min // Use the default policy otherwise to produce a highest possible change
+		selectPolicyFn = minOfInt32 // Use the default policy otherwise to produce a highest possible change
 	}
 	for _, policy := range scalingRules.Policies {
 		replicasAddedInCurrentPeriod := getReplicasChangePerPeriod(policy.PeriodSeconds, scaleUpEvents)
@@ -1415,14 +1415,14 @@ func setConditionInList(inputList []autoscalingv2.HorizontalPodAutoscalerConditi
 	return resList
 }
-func max(a, b int32) int32 {
+func maxOfInt32(a, b int32) int32 {
 	if a >= b {
 		return a
 	}
 	return b
 }
-func min(a, b int32) int32 {
+func minOfInt32(a, b int32) int32 {
 	if a <= b {
 		return a
 	}
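
Note: min and max became predeclared generic builtins in Go 1.21, so keeping file-local helpers with those names shadows the builtins and is flagged by the updated linters; hence the rename to minOfInt32/maxOfInt32. A standalone sketch, not repository code:

package main

import "fmt"

// maxOfInt32 mirrors the renamed helper: an explicit int32 comparison whose
// name no longer collides with the predeclared max builtin.
func maxOfInt32(a, b int32) int32 {
	if a >= b {
		return a
	}
	return b
}

func main() {
	var a, b int32 = 3, 7

	fmt.Println(max(a, b))        // builtin since Go 1.21, works for any ordered type
	fmt.Println(maxOfInt32(a, b)) // renamed helper, int32 only
}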


@@ -46,7 +46,7 @@ func GetResourceUtilizationRatio(metrics PodMetricsInfo, requests map[string]int
 		return 0, 0, 0, fmt.Errorf("no metrics returned matched known pods")
 	}
-	currentUtilization = int32((metricsTotal * 100) / requestsTotal)
+	currentUtilization = int32((metricsTotal * 100) / requestsTotal) // #nosec G115: integer overflow conversion int64 -> int32
 	return float64(currentUtilization) / float64(targetUtilization), currentUtilization, metricsTotal / int64(numEntries), nil
 }


@@ -553,8 +553,8 @@ func getNodeSummary(nodes []*corev1.Node) *clusterv1alpha1.NodeSummary {
 	}
 	nodeSummary := &clusterv1alpha1.NodeSummary{}
-	nodeSummary.TotalNum = int32(totalNum)
-	nodeSummary.ReadyNum = int32(readyNum)
+	nodeSummary.TotalNum = int32(totalNum) // #nosec G115: integer overflow conversion int -> int32
+	nodeSummary.ReadyNum = int32(readyNum) // #nosec G115: integer overflow conversion int -> int32
 	return nodeSummary
 }


@@ -462,8 +462,7 @@ func newClusterRoleObjectReference(obj *rbacv1.ClusterRole) appsv1alpha1.ObjectR
 // Helper function for generating random suffix
 func randomSuffix() string {
-	max := big.NewInt(10000)
-	n, err := rand.Int(rand.Reader, max)
+	n, err := rand.Int(rand.Reader, big.NewInt(10000))
 	if err != nil {
 		// In a test setup, it's unlikely we'll hit this error
 		panic(fmt.Sprintf("failed to generate random number: %v", err))
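
Note: the same shadowing concern applies to variables. The local variable named max hid the Go 1.21 builtin inside randomSuffix, so the big.NewInt(10000) argument is now inlined. A standalone sketch of the resulting helper, assuming it returns the number in decimal form (not the repository's file):

package main

import (
	"crypto/rand"
	"fmt"
	"math/big"
)

// randomSuffix returns a small random numeric suffix for test object names.
func randomSuffix() string {
	// Inlining big.NewInt(10000) avoids declaring a variable called max,
	// which would shadow the predeclared builtin of the same name.
	n, err := rand.Int(rand.Reader, big.NewInt(10000))
	if err != nil {
		// In a test setup, it's unlikely we'll hit this error.
		panic(fmt.Sprintf("failed to generate random number: %v", err))
	}
	return n.String()
}

func main() {
	fmt.Println("cluster-role-" + randomSuffix())
}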


@@ -110,7 +110,7 @@ func EstablishConnection(kubeClient kubernetes.Interface, serviceInfo SchedulerE
 	}
 	serverAddrs, err := resolveCluster(kubeClient, serviceInfo.Namespace,
-		names.GenerateEstimatorServiceName(serviceInfo.NamePrefix, serviceInfo.Name), int32(grpcConfig.TargetPort))
+		names.GenerateEstimatorServiceName(serviceInfo.NamePrefix, serviceInfo.Name), int32(grpcConfig.TargetPort)) // #nosec G115: integer overflow conversion int -> int32
 	if err != nil {
 		return err
 	}


@@ -66,7 +66,7 @@ func (ge *GeneralEstimator) maxAvailableReplicas(cluster *clusterv1alpha1.Cluste
 	}
 	if replicaRequirements == nil {
-		return int32(maximumReplicas)
+		return int32(maximumReplicas) // #nosec G115: integer overflow conversion int64 -> int32
 	}
 	// if the allocatableModelings from the cluster status are empty possibly due to
@@ -80,7 +80,7 @@ func (ge *GeneralEstimator) maxAvailableReplicas(cluster *clusterv1alpha1.Cluste
 				maximumReplicas = num
 			}
-			return int32(maximumReplicas)
+			return int32(maximumReplicas) // #nosec G115: integer overflow conversion int64 -> int32
 		}
 		klog.Info(err.Error())
 	}
@@ -90,7 +90,7 @@ func (ge *GeneralEstimator) maxAvailableReplicas(cluster *clusterv1alpha1.Cluste
 		maximumReplicas = num
 	}
-	return int32(maximumReplicas)
+	return int32(maximumReplicas) // #nosec G115: integer overflow conversion int64 -> int32
 }
 func getAllowedPodNumber(resourceSummary *clusterv1alpha1.ResourceSummary) int64 {


@@ -83,7 +83,7 @@ func (es *AccurateSchedulerEstimatorServer) estimateReplicas(
 	}
 	if !ret.IsSuccess() && !ret.IsNoOperation() {
-		return replicas, fmt.Errorf(fmt.Sprintf("estimate replice plugins fails with %s", ret.Reasons()))
+		return replicas, fmt.Errorf("estimate replica plugins fails with %s", ret.Reasons())
 	}
 	processNode := func(i int) {
 		node := allNodes[i]
@@ -108,5 +108,5 @@ func (es *AccurateSchedulerEstimatorServer) nodeMaxAvailableReplica(node *framew
 	// do not contain pod resources. So after subtraction, we should cope with allowed pod
 	// number manually which is the upper bound of this node available replicas.
 	rest.AllowedPodNumber = util.MaxInt64(rest.AllowedPodNumber-int64(len(node.Pods)), 0)
-	return int32(rest.MaxDivided(rl))
+	return int32(rest.MaxDivided(rl)) // #nosec G115: integer overflow conversion int64 -> int32
 }


@@ -171,7 +171,7 @@ func (e *resourceQuotaEvaluator) evaluate(replicaRequirements *pb.ReplicaRequire
 			continue
 		}
-		replica := int32(allowed)
+		replica := int32(allowed) // #nosec G115: integer overflow conversion int64 -> int32
 		if replica < result {
 			result = replica
 		}


@@ -66,7 +66,7 @@ func GetUnschedulablePodsOfWorkload(unstructObj *unstructured.Unstructured, thre
 		// TODO(Garrybest): add abstract workload
 		return 0, fmt.Errorf("kind(%s) of workload(%s) is not supported", unstructObj.GetKind(), klog.KObj(unstructObj).String())
 	}
-	return int32(unschedulable), nil
+	return int32(unschedulable), nil // #nosec G115: integer overflow conversion int -> int32
 }
 func podUnschedulable(pod *corev1.Pod, threshold time.Duration) bool {


@@ -255,7 +255,7 @@ func TestReviseDeploymentReplica(t *testing.T) {
 		t.Run(tt.name, func(t *testing.T) {
 			res, err := vm.ReviseReplica(tt.object, int64(tt.replica), tt.luaScript)
 			if err != nil {
-				t.Errorf(err.Error())
+				t.Error(err.Error())
 			}
 			deploy := &appsv1.Deployment{}
 			err = runtime.DefaultUnstructuredConverter.FromUnstructured(res.UnstructuredContent(), deploy)
@@ -263,7 +263,7 @@ func TestReviseDeploymentReplica(t *testing.T) {
 				t.Log("Success Test")
 			}
 			if err != nil {
-				t.Errorf(err.Error())
+				t.Error(err.Error())
 			}
 		})
 	}
@@ -461,7 +461,7 @@ func TestRetainDeployment(t *testing.T) {
 		t.Run(tt.name, func(t *testing.T) {
 			res, err := vm.Retain(tt.desiredObj, tt.observedObj, tt.luaScript)
 			if err != nil {
-				t.Errorf(err.Error())
+				t.Error(err.Error())
 			}
 			if !reflect.DeepEqual(res.UnstructuredContent(), tt.observedObj.Object) {
 				t.Errorf("Retain() got = %v, want %v", res.UnstructuredContent(), tt.observedObj.Object)


@@ -161,7 +161,7 @@ func (i *customResourceInterpreterImpl) ReviseReplica(object *unstructured.Unstr
 	obj, hookEnabled, err = i.customizedInterpreter.Patch(context.TODO(), &request.Attributes{
 		Operation:   configv1alpha1.InterpreterOperationReviseReplica,
 		Object:      object,
-		ReplicasSet: int32(replica),
+		ReplicasSet: int32(replica), // #nosec G115: integer overflow conversion int64 -> int32
 	})
 	if err != nil {
 		return nil, err


@@ -52,7 +52,7 @@ func stringToTargetCluster(str string) []workv1alpha2.TargetCluster {
 	tcs := make([]workv1alpha2.TargetCluster, len(arr))
 	for i, replicas := range arr {
 		num, _ := strconv.Atoi(replicas)
-		tcs[i].Replicas = int32(num) // #nosec G109
+		tcs[i].Replicas = int32(num) // #nosec G109,G115: integer overflow conversion int -> int32
 		tcs[i].Name = indexToCluster[i]
 	}
 	return tcs


@@ -40,7 +40,7 @@ func selectBestClustersByCluster(spreadConstraint policyv1alpha1.SpreadConstrain
 	if needReplicas == InvalidReplicas {
 		clusterInfos = groupClustersInfo.Clusters[:needCnt]
 	} else {
-		clusterInfos = selectClustersByAvailableResource(groupClustersInfo.Clusters, int32(needCnt), needReplicas)
+		clusterInfos = selectClustersByAvailableResource(groupClustersInfo.Clusters, int32(needCnt), needReplicas) // #nosec G115: integer overflow conversion int -> int32
 		if len(clusterInfos) == 0 {
 			return nil, fmt.Errorf("no enough resource when selecting %d clusters", needCnt)
 		}


@@ -123,7 +123,7 @@ func (a *Dispenser) TakeByWeight(w ClusterWeightInfoList) {
 	result := make([]workv1alpha2.TargetCluster, 0, w.Len())
 	remain := a.NumReplicas
 	for _, info := range w {
-		replicas := int32(info.Weight * int64(a.NumReplicas) / sum)
+		replicas := int32(info.Weight * int64(a.NumReplicas) / sum) // #nosec G115: integer overflow conversion int64 -> int32
 		result = append(result, workv1alpha2.TargetCluster{
 			Name:     info.ClusterName,
 			Replicas: replicas,


@@ -230,7 +230,7 @@ func TestAggregateResourceBindingWorkStatus(t *testing.T) {
 			}
 			// Verify aggregated status
-			if tt.works != nil && len(tt.works) > 0 {
+			if len(tt.works) > 0 {
 				assert.Len(t, updatedBinding.Status.AggregatedStatus, len(tt.works))
 				for _, status := range updatedBinding.Status.AggregatedStatus {
 					assert.Equal(t, tt.expectedApplied, status.Applied)
@@ -427,7 +427,7 @@ func TestAggregateClusterResourceBindingWorkStatus(t *testing.T) {
 			}
 			// Verify aggregated status
-			if tt.works != nil && len(tt.works) > 0 {
+			if len(tt.works) > 0 {
 				assert.Len(t, updatedBinding.Status.AggregatedStatus, len(tt.works))
 				// For multiple clusters case, verify specific cluster status
 				for _, status := range updatedBinding.Status.AggregatedStatus {
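
Note: dropping the tt.works != nil half of the condition is safe because len of a nil slice or map is defined to be 0 in Go, so the length check alone covers both cases; linters such as gosimple report the redundant form (S1009). A standalone sketch:

package main

import "fmt"

func main() {
	var works []string // nil slice, never assigned

	fmt.Println(works == nil)   // true
	fmt.Println(len(works))     // 0
	fmt.Println(len(works) > 0) // false, so the extra nil check adds nothing
}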


@@ -19,6 +19,7 @@ package overridemanager
 import (
 	"context"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"reflect"
 	"sort"
@@ -423,7 +424,7 @@ func applyFieldOverriders(rawObj *unstructured.Unstructured, FieldOverriders []p
 		if kind != reflect.String {
 			errMsg := fmt.Sprintf("Get object's value by overrider's path(%s) is not string", FieldOverriders[index].FieldPath)
 			klog.Errorf(errMsg)
-			return fmt.Errorf(errMsg)
+			return errors.New(errMsg)
 		}
 		dataBytes := []byte(res.(string))
 		klog.V(4).Infof("Parsed JSON patches by FieldOverriders[%d](%+v)", index, FieldOverriders[index])


@@ -53,7 +53,7 @@ func ValidationResponse(successful bool, msg string) Response {
 		ResourceInterpreterResponse: configv1alpha1.ResourceInterpreterResponse{
 			Successful: successful,
 			Status: &configv1alpha1.RequestStatus{
-				Code:    int32(code),
+				Code:    int32(code), // #nosec G115: integer overflow conversion int -> int32
 				Message: msg,
 			},
 		},