Merge pull request #6289 from seanlaii/fix-ST1019
Fix lint issue ST1019
Commit 8659599d97
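ST1019 is the staticcheck diagnostic for importing the same package more than once in a single file. The snippet below is a minimal, self-contained sketch of the pattern this PR cleans up; the `format` alias of `fmt` is purely illustrative and does not appear in the Karmada code, where the duplicates were aliases such as `versionmetrics`, `schedulercore`, `fakeclient`, and `clientset`.

// st1019_example.go -- illustrative only, not part of the Karmada tree.
//
// Before the fix, a file would import the same package twice, e.g.:
//
//	import (
//		"fmt"
//		format "fmt" // duplicate of the import above; staticcheck reports ST1019
//	)
//
// The fix keeps exactly one import per package and rewrites the former
// format.X references to fmt.X -- the same rewrite applied in every hunk below.
package main

import "fmt"

func main() {
	fmt.Println("one import per package satisfies ST1019")
}

The hunks that follow apply this rewrite to the linter configuration, two command entry points, and several test files.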
@@ -70,7 +70,6 @@ linters:
       # Will fix the issues in the following PRs.
       # Issue: https://github.com/karmada-io/karmada/issues/6273.
       - "-QF1008"
-      - "-ST1019"
       - "-ST1005"
       - "-QF1004"
       - "-ST1011"
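Dropping `- "-ST1019"` from this list of temporarily skipped checks means the duplicate-import diagnostic is no longer suppressed; the other entries stay in place until the follow-up PRs tracked in issue #6273 land.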
@@ -52,7 +52,6 @@ import (
 	karmadaclientset "github.com/karmada-io/karmada/pkg/generated/clientset/versioned"
 	"github.com/karmada-io/karmada/pkg/karmadactl/util/apiclient"
 	"github.com/karmada-io/karmada/pkg/metrics"
-	versionmetrics "github.com/karmada-io/karmada/pkg/metrics"
 	"github.com/karmada-io/karmada/pkg/resourceinterpreter"
 	"github.com/karmada-io/karmada/pkg/sharedcli"
 	"github.com/karmada-io/karmada/pkg/sharedcli/klogflag"
@@ -232,7 +231,7 @@ func run(ctx context.Context, opts *options.Options) error {
 	ctrlmetrics.Registry.MustRegister(metrics.ClusterCollectors()...)
 	ctrlmetrics.Registry.MustRegister(metrics.ResourceCollectorsForAgent()...)
 	ctrlmetrics.Registry.MustRegister(metrics.PoolCollectors()...)
-	ctrlmetrics.Registry.MustRegister(versionmetrics.NewBuildInfoCollector())
+	ctrlmetrics.Registry.MustRegister(metrics.NewBuildInfoCollector())
 
 	if err = setupControllers(controllerManager, opts, ctx.Done()); err != nil {
 		return err
@@ -75,7 +75,6 @@ import (
 	"github.com/karmada-io/karmada/pkg/features"
 	"github.com/karmada-io/karmada/pkg/karmadactl/util/apiclient"
 	"github.com/karmada-io/karmada/pkg/metrics"
-	versionmetrics "github.com/karmada-io/karmada/pkg/metrics"
 	"github.com/karmada-io/karmada/pkg/resourceinterpreter"
 	"github.com/karmada-io/karmada/pkg/sharedcli"
 	"github.com/karmada-io/karmada/pkg/sharedcli/klogflag"
@@ -193,7 +192,7 @@ func Run(ctx context.Context, opts *options.Options) error {
 	ctrlmetrics.Registry.MustRegister(metrics.ClusterCollectors()...)
 	ctrlmetrics.Registry.MustRegister(metrics.ResourceCollectors()...)
 	ctrlmetrics.Registry.MustRegister(metrics.PoolCollectors()...)
-	ctrlmetrics.Registry.MustRegister(versionmetrics.NewBuildInfoCollector())
+	ctrlmetrics.Registry.MustRegister(metrics.NewBuildInfoCollector())
 
 	if err := helper.IndexWork(ctx, controllerManager); err != nil {
 		klog.Fatalf("Failed to index Work: %v", err)
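In both entry points above (the `run` and `Run` functions), the change is the same: the aliased `versionmetrics` duplicate of `github.com/karmada-io/karmada/pkg/metrics` is removed and the build-info collector is registered through the remaining `metrics` import.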
@@ -27,7 +27,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	fakeclientset "k8s.io/client-go/kubernetes/fake"
 
-	"github.com/karmada-io/karmada/operator/pkg/apis/operator/v1alpha1"
 	operatorv1alpha1 "github.com/karmada-io/karmada/operator/pkg/apis/operator/v1alpha1"
 	"github.com/karmada-io/karmada/operator/pkg/certs"
 	"github.com/karmada-io/karmada/operator/pkg/constants"
@@ -387,8 +386,8 @@ func TestRunCertTask(t *testing.T) {
 			runData: &TestInitData{
 				Name: "karmada-demo",
 				Namespace: "test",
-				ComponentsUnits: &v1alpha1.KarmadaComponents{
-					KarmadaAPIServer: &v1alpha1.KarmadaAPIServer{},
+				ComponentsUnits: &operatorv1alpha1.KarmadaComponents{
+					KarmadaAPIServer: &operatorv1alpha1.KarmadaAPIServer{},
 				},
 			},
 			prep: func(ca *certs.CertConfig, _ *certs.CertConfig, rd workflow.RunData) error {
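In the operator cert test, it is the unaliased import of `operator/pkg/apis/operator/v1alpha1` that goes away: the `operatorv1alpha1` alias is kept and the composite literals are updated to reference it.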
@@ -30,7 +30,6 @@ import (
 	"k8s.io/client-go/tools/record"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
-	fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 
 	clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
@@ -95,7 +94,7 @@ func Test_findRBACSubjectsWithCluster(t *testing.T) {
 		{
 			name: "find rbac subjects with cluster",
 			args: args{
-				c: fakeclient.NewClientBuilder().WithScheme(gclient.NewSchema()).WithObjects(
+				c: fake.NewClientBuilder().WithScheme(gclient.NewSchema()).WithObjects(
 					clusterRoleWithCluster, clusterRoleWithSearch, clusterRoleBindingWithCluster, clusterRoleBindingWithSearch).Build(),
 				cluster: "member1",
 			},
@@ -241,7 +240,7 @@ func TestController_generateRequestsFromClusterRole(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			c := &Controller{
-				Client: fakeclient.NewClientBuilder().WithScheme(gclient.NewSchema()).WithObjects(cluster1, cluster2, cluster3).Build(),
+				Client: fake.NewClientBuilder().WithScheme(gclient.NewSchema()).WithObjects(cluster1, cluster2, cluster3).Build(),
 				EventRecorder: record.NewFakeRecorder(1024),
 			}
 			if got := c.generateRequestsFromClusterRole(context.Background(), tt.args.clusterRole); !reflect.DeepEqual(got, tt.want) {
@@ -30,7 +30,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apiserver/pkg/authentication/user"
 	"k8s.io/client-go/kubernetes"
-	clientset "k8s.io/client-go/kubernetes"
 	fakeclientset "k8s.io/client-go/kubernetes/fake"
 	bootstrapapi "k8s.io/cluster-bootstrap/token/api"
 
@@ -61,10 +60,10 @@ users:
 `
 	tests := []struct {
 		name string
-		client clientset.Interface
+		client kubernetes.Interface
 		cfgFile string
 		prep func(cfgFile string) error
-		verify func(clientset.Interface) error
+		verify func(kubernetes.Interface) error
 		cleanup func(cfgFile string) error
 		wantErr bool
 		errMsg string
@@ -72,7 +71,7 @@ users:
 		{
 			name: "CreateBootstrapConfigMapIfNotExists_NonExistentConfigFile_FailedToLoadAdminKubeConfig",
 			prep: func(string) error { return nil },
-			verify: func(clientset.Interface) error { return nil },
+			verify: func(kubernetes.Interface) error { return nil },
 			cleanup: func(string) error { return nil },
 			wantErr: true,
 			errMsg: "failed to load admin kubeconfig",
@@ -107,7 +106,7 @@ users:
 				}
 				return nil
 			},
-			verify: func(c clientset.Interface) error {
+			verify: func(c kubernetes.Interface) error {
 				return verifyKubeAdminKubeConfig(c)
 			},
 			wantErr: false,
@@ -146,12 +145,12 @@ func TestCreateClusterInfoRBACRules(t *testing.T) {
 	tests := []struct {
 		name string
 		client kubernetes.Interface
-		verify func(clientset.Interface) error
+		verify func(kubernetes.Interface) error
 	}{
 		{
 			name: "CreateClusterInfoRBACRules_CreateRolesAndRoleBindings_Created",
 			client: fakeclientset.NewClientset(),
-			verify: func(c clientset.Interface) error {
+			verify: func(c kubernetes.Interface) error {
 				// Verify that roles are created as expected.
 				role, err := c.RbacV1().Roles(metav1.NamespacePublic).Get(context.TODO(), BootstrapSignerClusterRoleName, metav1.GetOptions{})
 				if err != nil {
@@ -197,7 +196,7 @@ func TestCreateClusterInfoRBACRules(t *testing.T) {
 	}
 }
 
-func verifyKubeAdminKubeConfig(client clientset.Interface) error {
+func verifyKubeAdminKubeConfig(client kubernetes.Interface) error {
 	configMap, err := client.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(context.TODO(), bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{})
 	if err != nil {
 		return fmt.Errorf("failed to get configmap %s, got an error: %v", bootstrapapi.ConfigMapClusterInfo, err)
@@ -42,7 +42,6 @@ import (
 	karmadafake "github.com/karmada-io/karmada/pkg/generated/clientset/versioned/fake"
 	workv1alpha2lister "github.com/karmada-io/karmada/pkg/generated/listers/work/v1alpha2"
 	"github.com/karmada-io/karmada/pkg/scheduler/core"
-	schedulercore "github.com/karmada-io/karmada/pkg/scheduler/core"
 	internalqueue "github.com/karmada-io/karmada/pkg/scheduler/internal/queue"
 	"github.com/karmada-io/karmada/pkg/sharedcli/ratelimiterflag"
 	"github.com/karmada-io/karmada/pkg/util"
@@ -119,8 +118,8 @@ func TestDoSchedule(t *testing.T) {
 	}
 
 	mockAlgo := &mockAlgorithm{
-		scheduleFunc: func(_ context.Context, _ *workv1alpha2.ResourceBindingSpec, _ *workv1alpha2.ResourceBindingStatus, _ *schedulercore.ScheduleAlgorithmOption) (schedulercore.ScheduleResult, error) {
-			return schedulercore.ScheduleResult{
+		scheduleFunc: func(_ context.Context, _ *workv1alpha2.ResourceBindingSpec, _ *workv1alpha2.ResourceBindingStatus, _ *core.ScheduleAlgorithmOption) (core.ScheduleResult, error) {
+			return core.ScheduleResult{
 				SuggestedClusters: []workv1alpha2.TargetCluster{
 					{Name: "cluster1", Replicas: 1},
 				},
@@ -1095,8 +1094,8 @@ func TestWorkerAndScheduleNext(t *testing.T) {
 	clusterBindingLister := &fakeClusterBindingLister{binding: clusterResourceBinding}
 
 	mockAlgo := &mockAlgorithm{
-		scheduleFunc: func(_ context.Context, _ *workv1alpha2.ResourceBindingSpec, _ *workv1alpha2.ResourceBindingStatus, _ *schedulercore.ScheduleAlgorithmOption) (schedulercore.ScheduleResult, error) {
-			return schedulercore.ScheduleResult{
+		scheduleFunc: func(_ context.Context, _ *workv1alpha2.ResourceBindingSpec, _ *workv1alpha2.ResourceBindingStatus, _ *core.ScheduleAlgorithmOption) (core.ScheduleResult, error) {
+			return core.ScheduleResult{
 				SuggestedClusters: []workv1alpha2.TargetCluster{
 					{Name: "cluster1", Replicas: 1},
 				},
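The remaining test files follow the same pattern, keeping a single import per package: the controller-runtime fake client is referenced as `fake` rather than `fakeclient`, the client-go interface as `kubernetes.Interface` rather than the `clientset` alias, and the scheduler core package as `core` rather than `schedulercore`.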