Merge pull request #5681 from anujagrawal699/part2-addedTests-pkg/scheduler/scheduler.go
Added second part of unit tests for pkg/scheduler
Commit 4db886790f
@@ -26,13 +26,16 @@ import (
	"time"

	"github.com/stretchr/testify/assert"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	dynamicfake "k8s.io/client-go/dynamic/fake"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/kubernetes/scheme"
	clienttesting "k8s.io/client-go/testing"
	"k8s.io/client-go/tools/record"
	"k8s.io/client-go/util/workqueue"

	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
	workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
@@ -40,6 +43,7 @@ import (
	workv1alpha2lister "github.com/karmada-io/karmada/pkg/generated/listers/work/v1alpha2"
	"github.com/karmada-io/karmada/pkg/scheduler/core"
	schedulercore "github.com/karmada-io/karmada/pkg/scheduler/core"
	"github.com/karmada-io/karmada/pkg/sharedcli/ratelimiterflag"
	"github.com/karmada-io/karmada/pkg/util"
	"github.com/karmada-io/karmada/pkg/util/grpcconnection"
)
@@ -792,6 +796,407 @@ func TestPatchScheduleResultForResourceBinding(t *testing.T) {
	}
}
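
// mockAlgorithm is referenced below but not shown in this diff; it is assumed
// to be defined earlier in the test file (added with part 1 of these tests).
// A minimal sketch of the shape the tests rely on (hypothetical, for
// illustration only):
//
//	type mockAlgorithm struct {
//		scheduleFunc func(context.Context, *workv1alpha2.ResourceBindingSpec, *workv1alpha2.ResourceBindingStatus, *core.ScheduleAlgorithmOption) (core.ScheduleResult, error)
//	}
//
//	func (m *mockAlgorithm) Schedule(ctx context.Context, spec *workv1alpha2.ResourceBindingSpec, status *workv1alpha2.ResourceBindingStatus, opt *core.ScheduleAlgorithmOption) (core.ScheduleResult, error) {
//		return m.scheduleFunc(ctx, spec, status, opt)
//	}

// TestScheduleClusterResourceBindingWithClusterAffinity drives scheduling of a
// ClusterResourceBinding through the stubbed Algorithm and asserts that a
// successful schedule issues a patch and records a ScheduleBindingSucceed
// event, while a scheduling error records ScheduleBindingFailed and issues no
// patch.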
func TestScheduleClusterResourceBindingWithClusterAffinity(t *testing.T) {
	tests := []struct {
		name           string
		binding        *workv1alpha2.ClusterResourceBinding
		scheduleResult core.ScheduleResult
		scheduleError  error
		expectError    bool
	}{
		{
			name: "successful scheduling",
			binding: &workv1alpha2.ClusterResourceBinding{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test-cluster-binding",
				},
				Spec: workv1alpha2.ResourceBindingSpec{
					Placement: &policyv1alpha1.Placement{
						ClusterAffinity: &policyv1alpha1.ClusterAffinity{
							ClusterNames: []string{"cluster1", "cluster2"},
						},
					},
				},
			},
			scheduleResult: core.ScheduleResult{
				SuggestedClusters: []workv1alpha2.TargetCluster{
					{Name: "cluster1", Replicas: 1},
					{Name: "cluster2", Replicas: 1},
				},
			},
			scheduleError: nil,
			expectError:   false,
		},
		{
			name: "scheduling error",
			binding: &workv1alpha2.ClusterResourceBinding{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test-cluster-binding-error",
				},
				Spec: workv1alpha2.ResourceBindingSpec{
					Placement: &policyv1alpha1.Placement{
						ClusterAffinity: &policyv1alpha1.ClusterAffinity{
							ClusterNames: []string{"cluster1"},
						},
					},
				},
			},
			scheduleResult: core.ScheduleResult{},
			scheduleError:  errors.New("scheduling error"),
			expectError:    true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			fakeClient := karmadafake.NewSimpleClientset(tt.binding)
			fakeRecorder := record.NewFakeRecorder(10)
			mockAlgorithm := &mockAlgorithm{
				scheduleFunc: func(_ context.Context, _ *workv1alpha2.ResourceBindingSpec, _ *workv1alpha2.ResourceBindingStatus, _ *core.ScheduleAlgorithmOption) (core.ScheduleResult, error) {
					return tt.scheduleResult, tt.scheduleError
				},
			}
			s := &Scheduler{
				KarmadaClient: fakeClient,
				eventRecorder: fakeRecorder,
				Algorithm:     mockAlgorithm,
			}

			err := s.scheduleClusterResourceBindingWithClusterAffinity(tt.binding)

			if (err != nil) != tt.expectError {
				t.Errorf("scheduleClusterResourceBindingWithClusterAffinity() error = %v, expectError %v", err, tt.expectError)
			}

			// Check if a patch was applied.
			actions := fakeClient.Actions()
			patchActions := filterPatchActions(actions)
			if tt.expectError {
				assert.Empty(t, patchActions, "Expected no patch actions for error case")
			} else {
				assert.NotEmpty(t, patchActions, "Expected patch actions for success case")
			}

			// Check if an event was recorded.
			select {
			case event := <-fakeRecorder.Events:
				if tt.expectError {
					assert.Contains(t, event, "ScheduleBindingFailed", "Expected ScheduleBindingFailed event")
				} else {
					assert.Contains(t, event, "ScheduleBindingSucceed", "Expected ScheduleBindingSucceed event")
				}
			default:
				t.Errorf("Expected an event to be recorded")
			}
		})
	}
}
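
// TestScheduleClusterResourceBindingWithClusterAffinities exercises the
// ordered fallback across multiple ClusterAffinityTerm entries: the stubbed
// Schedule fails selected terms and the scheduler is expected to advance to
// the next one. getAffinityIndex is the scheduler package's own helper for
// resuming from the last observed affinity name.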
func TestScheduleClusterResourceBindingWithClusterAffinities(t *testing.T) {
	tests := []struct {
		name            string
		binding         *workv1alpha2.ClusterResourceBinding
		scheduleResults []core.ScheduleResult
		scheduleErrors  []error
		expectError     bool
	}{
		{
			name: "successful scheduling with first affinity",
			binding: &workv1alpha2.ClusterResourceBinding{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test-cluster-binding",
				},
				Spec: workv1alpha2.ResourceBindingSpec{
					Placement: &policyv1alpha1.Placement{
						ClusterAffinities: []policyv1alpha1.ClusterAffinityTerm{
							{
								AffinityName: "affinity1",
								ClusterAffinity: policyv1alpha1.ClusterAffinity{
									ClusterNames: []string{"cluster1"},
								},
							},
							{
								AffinityName: "affinity2",
								ClusterAffinity: policyv1alpha1.ClusterAffinity{
									ClusterNames: []string{"cluster2"},
								},
							},
						},
					},
				},
			},
			scheduleResults: []core.ScheduleResult{
				{
					SuggestedClusters: []workv1alpha2.TargetCluster{
						{Name: "cluster1", Replicas: 1},
					},
				},
			},
			scheduleErrors: []error{nil},
			expectError:    false,
		},
		{
			name: "successful scheduling with second affinity",
			binding: &workv1alpha2.ClusterResourceBinding{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test-cluster-binding-2",
				},
				Spec: workv1alpha2.ResourceBindingSpec{
					Placement: &policyv1alpha1.Placement{
						ClusterAffinities: []policyv1alpha1.ClusterAffinityTerm{
							{
								AffinityName: "affinity1",
								ClusterAffinity: policyv1alpha1.ClusterAffinity{
									ClusterNames: []string{"cluster1"},
								},
							},
							{
								AffinityName: "affinity2",
								ClusterAffinity: policyv1alpha1.ClusterAffinity{
									ClusterNames: []string{"cluster2"},
								},
							},
						},
					},
				},
			},
			scheduleResults: []core.ScheduleResult{
				{},
				{
					SuggestedClusters: []workv1alpha2.TargetCluster{
						{Name: "cluster2", Replicas: 1},
					},
				},
			},
			scheduleErrors: []error{errors.New("first affinity failed"), nil},
			expectError:    false,
		},
		{
			name: "all affinities fail",
			binding: &workv1alpha2.ClusterResourceBinding{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test-cluster-binding-fail",
				},
				Spec: workv1alpha2.ResourceBindingSpec{
					Placement: &policyv1alpha1.Placement{
						ClusterAffinities: []policyv1alpha1.ClusterAffinityTerm{
							{
								AffinityName: "affinity1",
								ClusterAffinity: policyv1alpha1.ClusterAffinity{
									ClusterNames: []string{"cluster1"},
								},
							},
							{
								AffinityName: "affinity2",
								ClusterAffinity: policyv1alpha1.ClusterAffinity{
									ClusterNames: []string{"cluster2"},
								},
							},
						},
					},
				},
			},
			scheduleResults: []core.ScheduleResult{{}, {}},
			scheduleErrors:  []error{errors.New("first affinity failed"), errors.New("second affinity failed")},
			expectError:     true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			fakeClient := karmadafake.NewSimpleClientset(tt.binding)
			fakeRecorder := record.NewFakeRecorder(10)
			mockAlgorithm := &mockAlgorithm{
				scheduleFunc: func(_ context.Context, spec *workv1alpha2.ResourceBindingSpec, status *workv1alpha2.ResourceBindingStatus, _ *core.ScheduleAlgorithmOption) (core.ScheduleResult, error) {
					index := getAffinityIndex(spec.Placement.ClusterAffinities, status.SchedulerObservedAffinityName)
					if index < len(tt.scheduleResults) {
						return tt.scheduleResults[index], tt.scheduleErrors[index]
					}
					return core.ScheduleResult{}, errors.New("unexpected call to Schedule")
				},
			}
			s := &Scheduler{
				KarmadaClient: fakeClient,
				eventRecorder: fakeRecorder,
				Algorithm:     mockAlgorithm,
			}

			err := s.scheduleClusterResourceBindingWithClusterAffinities(tt.binding)

			if (err != nil) != tt.expectError {
				t.Errorf("scheduleClusterResourceBindingWithClusterAffinities() error = %v, expectError %v", err, tt.expectError)
			}
		})
	}
}
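
// fakeBindingLister and fakeClusterBindingLister are referenced below but not
// shown in this diff; they are assumed to be stub listers defined elsewhere in
// the test file that satisfy workv1alpha2lister.ResourceBindingLister and
// workv1alpha2lister.ClusterResourceBindingLister by returning the single
// seeded binding (hypothetical shape, for illustration only):
//
//	type fakeBindingLister struct{ binding *workv1alpha2.ResourceBinding }
//	type fakeClusterBindingLister struct{ binding *workv1alpha2.ClusterResourceBinding }

// TestWorkerAndScheduleNext pushes ResourceBinding and ClusterResourceBinding
// keys through the scheduler's work queue and asserts that scheduleNext
// processes each key and drains the queue.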
func TestWorkerAndScheduleNext(t *testing.T) {
	testScheme := setupScheme()

	resourceBinding := &workv1alpha2.ResourceBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "test-binding",
			Namespace: "default",
		},
		Spec: workv1alpha2.ResourceBindingSpec{
			Placement: &policyv1alpha1.Placement{
				ClusterAffinity: &policyv1alpha1.ClusterAffinity{
					ClusterNames: []string{"cluster1"},
				},
			},
		},
	}

	clusterResourceBinding := &workv1alpha2.ClusterResourceBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name: "test-cluster-binding",
		},
		Spec: workv1alpha2.ResourceBindingSpec{
			Placement: &policyv1alpha1.Placement{
				ClusterAffinity: &policyv1alpha1.ClusterAffinity{
					ClusterNames: []string{"cluster1"},
				},
			},
		},
	}

	fakeClient := karmadafake.NewSimpleClientset(resourceBinding, clusterResourceBinding)

	testCases := []struct {
		name         string
		key          string
		shutdown     bool
		expectResult bool
	}{
		{
			name:         "Schedule ResourceBinding",
			key:          "default/test-binding",
			shutdown:     false,
			expectResult: true,
		},
		{
			name:         "Schedule ClusterResourceBinding",
			key:          "test-cluster-binding",
			shutdown:     false,
			expectResult: true,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
			bindingLister := &fakeBindingLister{binding: resourceBinding}
			clusterBindingLister := &fakeClusterBindingLister{binding: clusterResourceBinding}

			mockAlgo := &mockAlgorithm{
				scheduleFunc: func(_ context.Context, _ *workv1alpha2.ResourceBindingSpec, _ *workv1alpha2.ResourceBindingStatus, _ *schedulercore.ScheduleAlgorithmOption) (schedulercore.ScheduleResult, error) {
					return schedulercore.ScheduleResult{
						SuggestedClusters: []workv1alpha2.TargetCluster{
							{Name: "cluster1", Replicas: 1},
						},
					}, nil
				},
			}

			eventBroadcaster := record.NewBroadcaster()
			eventRecorder := eventBroadcaster.NewRecorder(testScheme, corev1.EventSource{Component: "test-scheduler"})

			s := &Scheduler{
				KarmadaClient:        fakeClient,
				queue:                queue,
				bindingLister:        bindingLister,
				clusterBindingLister: clusterBindingLister,
				Algorithm:            mockAlgo,
				eventRecorder:        eventRecorder,
			}

			s.queue.Add(tc.key)

			if tc.shutdown {
				s.queue.ShutDown()
			}

			result := s.scheduleNext()

			assert.Equal(t, tc.expectResult, result, "scheduleNext return value mismatch")

			if !tc.shutdown {
				assert.Equal(t, 0, s.queue.Len(), "Queue should be empty after processing")
			}
		})
	}
}
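
// TestPlacementChanged verifies detection of drift between the binding's
// current placement and the placement recorded in the applied annotation,
// including the case where the annotation value fails to parse.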
func TestPlacementChanged(t *testing.T) {
	tests := []struct {
		name                 string
		placement            *policyv1alpha1.Placement
		appliedPlacementStr  string
		observedAffinityName string
		want                 bool
	}{
		{
			name: "placement changed",
			placement: &policyv1alpha1.Placement{
				ClusterAffinity: &policyv1alpha1.ClusterAffinity{
					ClusterNames: []string{"cluster1", "cluster2"},
				},
			},
			appliedPlacementStr:  `{"clusterAffinity":{"clusterNames":["cluster1"]}}`,
			observedAffinityName: "",
			want:                 true,
		},
		{
			name: "placement not changed",
			placement: &policyv1alpha1.Placement{
				ClusterAffinity: &policyv1alpha1.ClusterAffinity{
					ClusterNames: []string{"cluster1", "cluster2"},
				},
			},
			appliedPlacementStr:  `{"clusterAffinity":{"clusterNames":["cluster1","cluster2"]}}`,
			observedAffinityName: "",
			want:                 false,
		},
		{
			name: "invalid applied placement string",
			placement: &policyv1alpha1.Placement{
				ClusterAffinity: &policyv1alpha1.ClusterAffinity{
					ClusterNames: []string{"cluster1", "cluster2"},
				},
			},
			appliedPlacementStr:  `invalid json`,
			observedAffinityName: "",
			want:                 false,
		},
		{
			name:                 "empty placement",
			placement:            &policyv1alpha1.Placement{},
			appliedPlacementStr:  `{}`,
			observedAffinityName: "",
			want:                 false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			rb := &workv1alpha2.ResourceBinding{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "test-namespace",
					Name:      "test-name",
				},
				Spec: workv1alpha2.ResourceBindingSpec{
					Placement: tt.placement,
				},
				Status: workv1alpha2.ResourceBindingStatus{
					SchedulerObservedAffinityName: tt.observedAffinityName,
				},
			}
			got := placementChanged(*rb.Spec.Placement, tt.appliedPlacementStr, rb.Status.SchedulerObservedAffinityName)
			assert.Equal(t, tt.want, got, "placementChanged() result mismatch")
		})
	}
}
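
// TestCreateScheduler walks the scheduler's functional options (for example
// WithEnableSchedulerPlugin and WithRateLimiterOptions) and verifies that
// each one lands on the corresponding Scheduler field.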
func TestCreateScheduler(t *testing.T) {
	dynamicClient := dynamicfake.NewSimpleDynamicClient(runtime.NewScheme())
	karmadaClient := karmadafake.NewSimpleClientset()
@@ -802,6 +1207,9 @@ func TestCreateScheduler(t *testing.T) {
	schedulerName := "test-scheduler"
	timeout := metav1.Duration{Duration: 5 * time.Second}

	mockPlugins := []string{"plugin1", "plugin2"}
	mockRateLimiterOptions := ratelimiterflag.Options{}

	testcases := []struct {
		name string
		opts []Option
@@ -815,6 +1223,7 @@ func TestCreateScheduler(t *testing.T) {
		schedulerEstimatorClientConfig *grpcconnection.ClientConfig
		enableEmptyWorkloadPropagation bool
		plugins                        []string
		rateLimiterOptions             ratelimiterflag.Options
	}{
		{
			name: "scheduler with default configuration",
@@ -896,6 +1305,20 @@ func TestCreateScheduler(t *testing.T) {
			schedulerEstimatorPort:    port,
			schedulerEstimatorTimeout: timeout,
		},
		{
			name: "scheduler with EnableSchedulerPlugin",
			opts: []Option{
				WithEnableSchedulerPlugin(mockPlugins),
			},
			plugins: mockPlugins,
		},
		{
			name: "scheduler with RateLimiterOptions",
			opts: []Option{
				WithRateLimiterOptions(mockRateLimiterOptions),
			},
			rateLimiterOptions: mockRateLimiterOptions,
		},
	}

	for _, tc := range testcases {
@@ -932,10 +1355,17 @@ func TestCreateScheduler(t *testing.T) {
			if tc.enableEmptyWorkloadPropagation != sche.enableEmptyWorkloadPropagation {
				t.Errorf("unexpected enableEmptyWorkloadPropagation want %v, got %v", tc.enableEmptyWorkloadPropagation, sche.enableEmptyWorkloadPropagation)
			}
			if len(tc.plugins) > 0 && sche.Algorithm == nil {
				t.Errorf("expected Algorithm to be set when plugins are provided")
			}
			if tc.rateLimiterOptions != (ratelimiterflag.Options{}) && sche.queue == nil {
				t.Errorf("expected queue to be set when rate limiter options are provided")
			}
		})
	}
}

-func Test_patchBindingStatusCondition(t *testing.T) {
+func TestPatchBindingStatusCondition(t *testing.T) {
	oneHourBefore := time.Now().Add(-1 * time.Hour).Round(time.Second)
	oneHourAfter := time.Now().Add(1 * time.Hour).Round(time.Second)
@@ -1093,7 +1523,7 @@ func Test_patchBindingStatusCondition(t *testing.T) {
	}
}

-func Test_patchBindingStatusWithAffinityName(t *testing.T) {
+func TestPatchBindingStatusWithAffinityName(t *testing.T) {
	karmadaClient := karmadafake.NewSimpleClientset()

	tests := []struct {
@@ -1139,7 +1569,7 @@ func Test_patchBindingStatusWithAffinityName(t *testing.T) {
	}
}

-func Test_patchClusterBindingStatusCondition(t *testing.T) {
+func TestPatchClusterBindingStatusCondition(t *testing.T) {
	oneHourBefore := time.Now().Add(-1 * time.Hour).Round(time.Second)
	oneHourAfter := time.Now().Add(1 * time.Hour).Round(time.Second)
@@ -1297,7 +1727,7 @@ func Test_patchClusterBindingStatusCondition(t *testing.T) {
	}
}

-func Test_patchClusterBindingStatusWithAffinityName(t *testing.T) {
+func TestPatchClusterBindingStatusWithAffinityName(t *testing.T) {
	karmadaClient := karmadafake.NewSimpleClientset()

	tests := []struct {
@@ -1350,7 +1780,7 @@ func Test_patchClusterBindingStatusWithAffinityName(t *testing.T) {
	}
}

-func Test_recordScheduleResultEventForResourceBinding(t *testing.T) {
+func TestRecordScheduleResultEventForResourceBinding(t *testing.T) {
	fakeRecorder := record.NewFakeRecorder(10)
	scheduler := &Scheduler{eventRecorder: fakeRecorder}
@@ -1441,7 +1871,7 @@ func contains(event, msg string) bool {
	return len(event) >= len(msg) && event[len(event)-len(msg):] == msg
}

-func Test_recordScheduleResultEventForClusterResourceBinding(t *testing.T) {
+func TestRecordScheduleResultEventForClusterResourceBinding(t *testing.T) {
	fakeRecorder := record.NewFakeRecorder(10)
	scheduler := &Scheduler{eventRecorder: fakeRecorder}
@@ -1529,7 +1959,7 @@ func Test_recordScheduleResultEventForClusterResourceBinding(t *testing.T) {
	}
}

-func Test_targetClustersToString(t *testing.T) {
+func TestTargetClustersToString(t *testing.T) {
	tests := []struct {
		name string
		tcs  []workv1alpha2.TargetCluster
@@ -1577,6 +2007,17 @@ func Test_targetClustersToString(t *testing.T) {

// Helper Functions

// setupScheme registers the Kubernetes, work, and policy API types used by
// these tests into a fresh runtime.Scheme.
func setupScheme() *runtime.Scheme {
	s := runtime.NewScheme()

	_ = scheme.AddToScheme(s)
	_ = workv1alpha2.Install(s)
	_ = policyv1alpha1.Install(s)

	return s
}

// filterPatchActions returns only the patch actions recorded by a fake client.
func filterPatchActions(actions []clienttesting.Action) []clienttesting.PatchAction {
	var patchActions []clienttesting.PatchAction
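	// The diff view truncates here. A completion consistent with how
	// filterPatchActions is used above (hypothetical reconstruction, not
	// necessarily the merged code):
	//
	//	for _, action := range actions {
	//		if patchAction, ok := action.(clienttesting.PatchAction); ok {
	//			patchActions = append(patchActions, patchAction)
	//		}
	//	}
	//	return patchActions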