Merge pull request #6289 from seanlaii/fix-ST1019

Fix lint issue ST1019
karmada-bot authored 2025-04-12 14:16:59 +08:00, committed by GitHub
commit 8659599d97
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
7 changed files with 17 additions and 24 deletions
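ST1019 is staticcheck's "importing the same package multiple times" check. Each file touched below imported a package twice, once plainly and once under an alias (for example pkg/metrics aliased as versionmetrics, or pkg/scheduler/core as schedulercore), so the fix drops the redundant aliased import, rewrites its call sites to the surviving import name, and removes the -ST1019 exclusion from the linter config. A minimal, hypothetical sketch of the pattern using only the standard library (not code from this PR):

```go
// A self-contained illustration of what ST1019 flags. Go compiles a file that
// imports the same package twice under different names, but staticcheck
// reports ST1019: importing the same package multiple times.
package main

import (
	"fmt"
	"strings"
	// Before a cleanup like this PR's, a redundant aliased copy could sit here:
	//     upper "strings"
	// with some call sites using upper.ToUpper and others strings.ToLower.
)

func main() {
	// After the fix, every call site goes through the single remaining import.
	fmt.Println(strings.ToUpper("karmada"), strings.ToLower("ST1019"))
}
```

Once only one import of each package remains, the -ST1019 exclusion in the golangci-lint configuration can be dropped so the check is enforced again, which is what the first file below does.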

View File

@@ -70,7 +70,6 @@ linters:
         # Will fix the issues in the following PRs.
         # Issue: https://github.com/karmada-io/karmada/issues/6273.
         - "-QF1008"
-        - "-ST1019"
         - "-ST1005"
         - "-QF1004"
         - "-ST1011"

View File

@@ -52,7 +52,6 @@ import (
 	karmadaclientset "github.com/karmada-io/karmada/pkg/generated/clientset/versioned"
 	"github.com/karmada-io/karmada/pkg/karmadactl/util/apiclient"
 	"github.com/karmada-io/karmada/pkg/metrics"
-	versionmetrics "github.com/karmada-io/karmada/pkg/metrics"
 	"github.com/karmada-io/karmada/pkg/resourceinterpreter"
 	"github.com/karmada-io/karmada/pkg/sharedcli"
 	"github.com/karmada-io/karmada/pkg/sharedcli/klogflag"
@@ -232,7 +231,7 @@ func run(ctx context.Context, opts *options.Options) error {
 	ctrlmetrics.Registry.MustRegister(metrics.ClusterCollectors()...)
 	ctrlmetrics.Registry.MustRegister(metrics.ResourceCollectorsForAgent()...)
 	ctrlmetrics.Registry.MustRegister(metrics.PoolCollectors()...)
-	ctrlmetrics.Registry.MustRegister(versionmetrics.NewBuildInfoCollector())
+	ctrlmetrics.Registry.MustRegister(metrics.NewBuildInfoCollector())
 	if err = setupControllers(controllerManager, opts, ctx.Done()); err != nil {
 		return err

View File

@@ -75,7 +75,6 @@ import (
 	"github.com/karmada-io/karmada/pkg/features"
 	"github.com/karmada-io/karmada/pkg/karmadactl/util/apiclient"
 	"github.com/karmada-io/karmada/pkg/metrics"
-	versionmetrics "github.com/karmada-io/karmada/pkg/metrics"
 	"github.com/karmada-io/karmada/pkg/resourceinterpreter"
 	"github.com/karmada-io/karmada/pkg/sharedcli"
 	"github.com/karmada-io/karmada/pkg/sharedcli/klogflag"
@@ -193,7 +192,7 @@ func Run(ctx context.Context, opts *options.Options) error {
 	ctrlmetrics.Registry.MustRegister(metrics.ClusterCollectors()...)
 	ctrlmetrics.Registry.MustRegister(metrics.ResourceCollectors()...)
 	ctrlmetrics.Registry.MustRegister(metrics.PoolCollectors()...)
-	ctrlmetrics.Registry.MustRegister(versionmetrics.NewBuildInfoCollector())
+	ctrlmetrics.Registry.MustRegister(metrics.NewBuildInfoCollector())
 	if err := helper.IndexWork(ctx, controllerManager); err != nil {
 		klog.Fatalf("Failed to index Work: %v", err)

View File

@@ -27,7 +27,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	fakeclientset "k8s.io/client-go/kubernetes/fake"
 
-	"github.com/karmada-io/karmada/operator/pkg/apis/operator/v1alpha1"
 	operatorv1alpha1 "github.com/karmada-io/karmada/operator/pkg/apis/operator/v1alpha1"
 	"github.com/karmada-io/karmada/operator/pkg/certs"
 	"github.com/karmada-io/karmada/operator/pkg/constants"
@@ -387,8 +386,8 @@ func TestRunCertTask(t *testing.T) {
 			runData: &TestInitData{
 				Name:      "karmada-demo",
 				Namespace: "test",
-				ComponentsUnits: &v1alpha1.KarmadaComponents{
-					KarmadaAPIServer: &v1alpha1.KarmadaAPIServer{},
+				ComponentsUnits: &operatorv1alpha1.KarmadaComponents{
+					KarmadaAPIServer: &operatorv1alpha1.KarmadaAPIServer{},
 				},
 			},
 			prep: func(ca *certs.CertConfig, _ *certs.CertConfig, rd workflow.RunData) error {

View File

@@ -30,7 +30,6 @@ import (
 	"k8s.io/client-go/tools/record"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
-	fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 
 	clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
@@ -95,7 +94,7 @@ func Test_findRBACSubjectsWithCluster(t *testing.T) {
 		{
 			name: "find rbac subjects with cluster",
 			args: args{
-				c: fakeclient.NewClientBuilder().WithScheme(gclient.NewSchema()).WithObjects(
+				c: fake.NewClientBuilder().WithScheme(gclient.NewSchema()).WithObjects(
 					clusterRoleWithCluster, clusterRoleWithSearch, clusterRoleBindingWithCluster, clusterRoleBindingWithSearch).Build(),
 				cluster: "member1",
 			},
@@ -241,7 +240,7 @@ func TestController_generateRequestsFromClusterRole(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			c := &Controller{
-				Client:        fakeclient.NewClientBuilder().WithScheme(gclient.NewSchema()).WithObjects(cluster1, cluster2, cluster3).Build(),
+				Client:        fake.NewClientBuilder().WithScheme(gclient.NewSchema()).WithObjects(cluster1, cluster2, cluster3).Build(),
 				EventRecorder: record.NewFakeRecorder(1024),
 			}
 			if got := c.generateRequestsFromClusterRole(context.Background(), tt.args.clusterRole); !reflect.DeepEqual(got, tt.want) {

View File

@@ -30,7 +30,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apiserver/pkg/authentication/user"
 	"k8s.io/client-go/kubernetes"
-	clientset "k8s.io/client-go/kubernetes"
 	fakeclientset "k8s.io/client-go/kubernetes/fake"
 	bootstrapapi "k8s.io/cluster-bootstrap/token/api"
@@ -61,10 +60,10 @@ users:
 `
 	tests := []struct {
 		name    string
-		client  clientset.Interface
+		client  kubernetes.Interface
 		cfgFile string
 		prep    func(cfgFile string) error
-		verify  func(clientset.Interface) error
+		verify  func(kubernetes.Interface) error
 		cleanup func(cfgFile string) error
 		wantErr bool
 		errMsg  string
@@ -72,7 +71,7 @@ users:
 		{
 			name:    "CreateBootstrapConfigMapIfNotExists_NonExistentConfigFile_FailedToLoadAdminKubeConfig",
 			prep:    func(string) error { return nil },
-			verify:  func(clientset.Interface) error { return nil },
+			verify:  func(kubernetes.Interface) error { return nil },
 			cleanup: func(string) error { return nil },
 			wantErr: true,
 			errMsg:  "failed to load admin kubeconfig",
@@ -107,7 +106,7 @@ users:
 				}
 				return nil
 			},
-			verify: func(c clientset.Interface) error {
+			verify: func(c kubernetes.Interface) error {
 				return verifyKubeAdminKubeConfig(c)
 			},
 			wantErr: false,
@@ -146,12 +145,12 @@ func TestCreateClusterInfoRBACRules(t *testing.T) {
 	tests := []struct {
 		name   string
 		client kubernetes.Interface
-		verify func(clientset.Interface) error
+		verify func(kubernetes.Interface) error
 	}{
 		{
 			name:   "CreateClusterInfoRBACRules_CreateRolesAndRoleBindings_Created",
 			client: fakeclientset.NewClientset(),
-			verify: func(c clientset.Interface) error {
+			verify: func(c kubernetes.Interface) error {
 				// Verify that roles are created as expected.
 				role, err := c.RbacV1().Roles(metav1.NamespacePublic).Get(context.TODO(), BootstrapSignerClusterRoleName, metav1.GetOptions{})
 				if err != nil {
@@ -197,7 +196,7 @@ func TestCreateClusterInfoRBACRules(t *testing.T) {
 	}
 }
 
-func verifyKubeAdminKubeConfig(client clientset.Interface) error {
+func verifyKubeAdminKubeConfig(client kubernetes.Interface) error {
 	configMap, err := client.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(context.TODO(), bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{})
 	if err != nil {
 		return fmt.Errorf("failed to get configmap %s, got an error: %v", bootstrapapi.ConfigMapClusterInfo, err)

View File

@@ -42,7 +42,6 @@ import (
 	karmadafake "github.com/karmada-io/karmada/pkg/generated/clientset/versioned/fake"
 	workv1alpha2lister "github.com/karmada-io/karmada/pkg/generated/listers/work/v1alpha2"
 	"github.com/karmada-io/karmada/pkg/scheduler/core"
-	schedulercore "github.com/karmada-io/karmada/pkg/scheduler/core"
 	internalqueue "github.com/karmada-io/karmada/pkg/scheduler/internal/queue"
 	"github.com/karmada-io/karmada/pkg/sharedcli/ratelimiterflag"
 	"github.com/karmada-io/karmada/pkg/util"
@@ -119,8 +118,8 @@ func TestDoSchedule(t *testing.T) {
 	}
 	mockAlgo := &mockAlgorithm{
-		scheduleFunc: func(_ context.Context, _ *workv1alpha2.ResourceBindingSpec, _ *workv1alpha2.ResourceBindingStatus, _ *schedulercore.ScheduleAlgorithmOption) (schedulercore.ScheduleResult, error) {
-			return schedulercore.ScheduleResult{
+		scheduleFunc: func(_ context.Context, _ *workv1alpha2.ResourceBindingSpec, _ *workv1alpha2.ResourceBindingStatus, _ *core.ScheduleAlgorithmOption) (core.ScheduleResult, error) {
+			return core.ScheduleResult{
 				SuggestedClusters: []workv1alpha2.TargetCluster{
 					{Name: "cluster1", Replicas: 1},
 				},
@@ -1095,8 +1094,8 @@ func TestWorkerAndScheduleNext(t *testing.T) {
 	clusterBindingLister := &fakeClusterBindingLister{binding: clusterResourceBinding}
 	mockAlgo := &mockAlgorithm{
-		scheduleFunc: func(_ context.Context, _ *workv1alpha2.ResourceBindingSpec, _ *workv1alpha2.ResourceBindingStatus, _ *schedulercore.ScheduleAlgorithmOption) (schedulercore.ScheduleResult, error) {
-			return schedulercore.ScheduleResult{
+		scheduleFunc: func(_ context.Context, _ *workv1alpha2.ResourceBindingSpec, _ *workv1alpha2.ResourceBindingStatus, _ *core.ScheduleAlgorithmOption) (core.ScheduleResult, error) {
+			return core.ScheduleResult{
 				SuggestedClusters: []workv1alpha2.TargetCluster{
 					{Name: "cluster1", Replicas: 1},
 				},