Added tests for multiclusterservice controller
Signed-off-by: Anuj Agrawal <anujagrawal380@gmail.com>
parent 2d95f7cbc7
commit b7b508b599
@@ -0,0 +1,387 @@
/*
Copyright 2024 The Karmada Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package multiclusterservice

import (
	"context"
	"reflect"
	"sort"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	discoveryv1 "k8s.io/api/discovery/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/tools/cache"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"

	workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
	"github.com/karmada-io/karmada/pkg/util"
)

func TestGetEventHandler(t *testing.T) {
	testCases := []struct {
		name            string
		clusterName     string
		existingHandler bool
	}{
		{
			name:            "New handler",
			clusterName:     "cluster1",
			existingHandler: false,
		},
		{
			name:            "Existing handler",
			clusterName:     "cluster2",
			existingHandler: true,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			controller := &EndpointSliceCollectController{
				eventHandlers: sync.Map{},
				worker:        &mockAsyncWorker{},
			}
			if tc.existingHandler {
				controller.eventHandlers.Store(tc.clusterName, &mockResourceEventHandler{})
			}
			handler := controller.getEventHandler(tc.clusterName)
			assert.NotNil(t, handler, "Handler should not be nil")
			storedHandler, exists := controller.eventHandlers.Load(tc.clusterName)
			assert.True(t, exists, "Handler should be stored in eventHandlers")
			assert.Equal(t, handler, storedHandler, "Stored handler should match returned handler")
			if !tc.existingHandler {
				assert.IsType(t, &cache.ResourceEventHandlerFuncs{}, handler, "New handler should be of type *cache.ResourceEventHandlerFuncs")
			} else {
				assert.IsType(t, &mockResourceEventHandler{}, handler, "Existing handler should be of type *mockResourceEventHandler")
			}
		})
	}
}

func TestGenHandlerFuncs(t *testing.T) {
	clusterName := "test-cluster"
	testObj := createTestEndpointSlice("test-object", "test-namespace")

	t.Run("AddFunc", func(t *testing.T) {
		mockWorker := &mockAsyncWorker{}
		controller := &EndpointSliceCollectController{
			worker: mockWorker,
		}
		addFunc := controller.genHandlerAddFunc(clusterName)
		addFunc(testObj)
		assert.Equal(t, 1, mockWorker.addCount, "Add function should be called once")
	})

	t.Run("UpdateFunc", func(t *testing.T) {
		mockWorker := &mockAsyncWorker{}
		controller := &EndpointSliceCollectController{
			worker: mockWorker,
		}
		updateFunc := controller.genHandlerUpdateFunc(clusterName)
		newObj := createTestEndpointSlice("test-object", "test-namespace")
		newObj.SetLabels(map[string]string{"new-label": "new-value"})

		updateFunc(testObj, newObj)
		assert.Equal(t, 1, mockWorker.addCount, "Update function should be called once when objects are different")

		updateFunc(testObj, testObj)
		assert.Equal(t, 1, mockWorker.addCount, "Update function should not be called when objects are the same")
	})

	t.Run("DeleteFunc", func(t *testing.T) {
		mockWorker := &mockAsyncWorker{}
		controller := &EndpointSliceCollectController{
			worker: mockWorker,
		}
		deleteFunc := controller.genHandlerDeleteFunc(clusterName)
		deleteFunc(testObj)
		assert.Equal(t, 1, mockWorker.addCount, "Delete function should be called once")

		deletedObj := cache.DeletedFinalStateUnknown{Obj: testObj}
		deleteFunc(deletedObj)
		assert.Equal(t, 2, mockWorker.addCount, "Delete function should be called for DeletedFinalStateUnknown")
	})
}

func TestGetEndpointSliceWorkMeta(t *testing.T) {
	testCases := []struct {
		name          string
		existingWork  *workv1alpha1.Work
		endpointSlice *unstructured.Unstructured
		expectedMeta  metav1.ObjectMeta
		expectedError bool
	}{
		{
			name:          "New work for EndpointSlice",
			endpointSlice: createEndpointSliceForTest("test-eps", "default", "test-service", false),
			expectedMeta: metav1.ObjectMeta{
				Name:      "endpointslice-test-eps-default",
				Namespace: "test-cluster",
				Labels: map[string]string{
					util.MultiClusterServiceNamespaceLabel: "default",
					util.MultiClusterServiceNameLabel:      "test-service",
					util.PropagationInstruction:            util.PropagationInstructionSuppressed,
					util.EndpointSliceWorkManagedByLabel:   util.MultiClusterServiceKind,
				},
			},
		},
		{
			name:          "Existing work for EndpointSlice",
			existingWork:  createExistingWork("endpointslice-test-eps-default", "test-cluster", "ExistingController"),
			endpointSlice: createEndpointSliceForTest("test-eps", "default", "test-service", false),
			expectedMeta: metav1.ObjectMeta{
				Name:      "endpointslice-test-eps-default",
				Namespace: "test-cluster",
				Labels: map[string]string{
					util.MultiClusterServiceNamespaceLabel: "default",
					util.MultiClusterServiceNameLabel:      "test-service",
					util.PropagationInstruction:            util.PropagationInstructionSuppressed,
					util.EndpointSliceWorkManagedByLabel:   "ExistingController.MultiClusterService",
				},
				Finalizers: []string{util.MCSEndpointSliceDispatchControllerFinalizer},
			},
		},
		{
			name:          "Nil EndpointSlice",
			endpointSlice: nil,
			expectedError: true,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			fakeClient := createFakeClient(tc.existingWork)
			testFunc := func() (metav1.ObjectMeta, error) {
				return getEndpointSliceWorkMeta(context.TODO(), fakeClient, "test-cluster", "endpointslice-test-eps-default", tc.endpointSlice)
			}
			if tc.expectedError {
				assert.Panics(t, func() {
					_, err := testFunc()
					require.Error(t, err)
				}, "Expected a panic for nil EndpointSlice")
			} else {
				meta, err := testFunc()
				require.NoError(t, err)
				assert.Equal(t, tc.expectedMeta.Name, meta.Name)
				assert.Equal(t, tc.expectedMeta.Namespace, meta.Namespace)
				assert.Equal(t, tc.expectedMeta.Finalizers, meta.Finalizers)
				assert.True(t, compareLabels(meta.Labels, tc.expectedMeta.Labels),
					"Labels do not match. Expected: %v, Got: %v", tc.expectedMeta.Labels, meta.Labels)
			}
		})
	}
}

func TestCleanProviderClustersEndpointSliceWork(t *testing.T) {
	testCases := []struct {
		name           string
		existingWork   *workv1alpha1.Work
		expectedWork   *workv1alpha1.Work
		expectedDelete bool
	}{
		{
			name: "Work managed by multiple controllers",
			existingWork: &workv1alpha1.Work{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-work",
					Namespace: "test-cluster",
					Labels: map[string]string{
						util.MultiClusterServiceNameLabel:      "test-service",
						util.MultiClusterServiceNamespaceLabel: "default",
						util.EndpointSliceWorkManagedByLabel:   "MultiClusterService.OtherController",
					},
				},
			},
			expectedWork: &workv1alpha1.Work{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-work",
					Namespace: "test-cluster",
					Labels: map[string]string{
						util.EndpointSliceWorkManagedByLabel: "OtherController",
					},
				},
			},
			expectedDelete: false,
		},
		{
			name: "Work managed only by MultiClusterService",
			existingWork: &workv1alpha1.Work{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-work",
					Namespace: "test-cluster",
					Labels: map[string]string{
						util.MultiClusterServiceNameLabel:      "test-service",
						util.MultiClusterServiceNamespaceLabel: "default",
						util.EndpointSliceWorkManagedByLabel:   "MultiClusterService",
					},
				},
			},
			expectedWork:   nil,
			expectedDelete: true,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			scheme := setupSchemeEndpointCollect()
			fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(tc.existingWork).Build()
			err := cleanProviderClustersEndpointSliceWork(context.TODO(), fakeClient, tc.existingWork)
			assert.NoError(t, err, "Unexpected error in cleanProviderClustersEndpointSliceWork")

			if tc.expectedDelete {
				err = fakeClient.Get(context.TODO(), types.NamespacedName{Name: tc.existingWork.Name, Namespace: tc.existingWork.Namespace}, &workv1alpha1.Work{})
				assert.Error(t, err, "Expected Work to be deleted, but it still exists")
				assert.True(t, apierrors.IsNotFound(err), "Expected NotFound error, got %v", err)
			} else {
				updatedWork := &workv1alpha1.Work{}
				err = fakeClient.Get(context.TODO(), types.NamespacedName{Name: tc.existingWork.Name, Namespace: tc.existingWork.Namespace}, updatedWork)
				assert.NoError(t, err, "Failed to get updated Work")
				assert.True(t, compareLabels(updatedWork.Labels, tc.expectedWork.Labels),
					"Labels mismatch. Expected %v, but got %v", tc.expectedWork.Labels, updatedWork.Labels)
			}
		})
	}
}

// Helper Functions

// Helper function to set up a scheme for EndpointSlice collection tests
func setupSchemeEndpointCollect() *runtime.Scheme {
	scheme := runtime.NewScheme()
	_ = workv1alpha1.Install(scheme)
	_ = discoveryv1.AddToScheme(scheme)
	return scheme
}

// Helper function to create a test EndpointSlice
func createTestEndpointSlice(name, namespace string) *unstructured.Unstructured {
	endpointSlice := &discoveryv1.EndpointSlice{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "discovery.k8s.io/v1",
			Kind:       "EndpointSlice",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
	}
	unstructuredObj, _ := runtime.DefaultUnstructuredConverter.ToUnstructured(endpointSlice)
	return &unstructured.Unstructured{Object: unstructuredObj}
}

// Helper function to create an EndpointSlice for testing with specific properties
func createEndpointSliceForTest(name, namespace, serviceName string, isManaged bool) *unstructured.Unstructured {
	labels := map[string]interface{}{
		discoveryv1.LabelServiceName: serviceName,
	}
	if isManaged {
		labels[discoveryv1.LabelManagedBy] = util.EndpointSliceDispatchControllerLabelValue
	}
	return &unstructured.Unstructured{
		Object: map[string]interface{}{
			"apiVersion": "discovery.k8s.io/v1",
			"kind":       "EndpointSlice",
			"metadata": map[string]interface{}{
				"name":      name,
				"namespace": namespace,
				"labels":    labels,
			},
		},
	}
}

// Helper function to create an existing Work resource for testing
func createExistingWork(name, namespace, managedBy string) *workv1alpha1.Work {
	return &workv1alpha1.Work{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
			Labels: map[string]string{
				util.EndpointSliceWorkManagedByLabel: managedBy,
			},
		},
	}
}

// Helper function to create a fake client with an optional existing Work
func createFakeClient(existingWork *workv1alpha1.Work) client.Client {
	scheme := setupSchemeEndpointCollect()
	objs := []client.Object{}
	if existingWork != nil {
		objs = append(objs, existingWork)
	}
	return fake.NewClientBuilder().WithScheme(scheme).WithObjects(objs...).Build()
}

// Helper function to compare two label maps, considering special handling for EndpointSliceWorkManagedByLabel
func compareLabels(actual, expected map[string]string) bool {
	if len(actual) != len(expected) {
		return false
	}
	for k, v := range expected {
		actualV, exists := actual[k]
		if !exists {
			return false
		}
		if k == util.EndpointSliceWorkManagedByLabel {
			actualParts := strings.Split(actualV, ".")
			expectedParts := strings.Split(v, ".")
			sort.Strings(actualParts)
			sort.Strings(expectedParts)
			if !reflect.DeepEqual(actualParts, expectedParts) {
				return false
			}
		} else if actualV != v {
			return false
		}
	}
	return true
}

// Mock implementations

type mockAsyncWorker struct {
	addCount int
}

func (m *mockAsyncWorker) Add(_ interface{}) {
	m.addCount++
}

func (m *mockAsyncWorker) AddAfter(_ interface{}, _ time.Duration) {}

func (m *mockAsyncWorker) Enqueue(_ interface{}) {}

func (m *mockAsyncWorker) Run(_ int, _ <-chan struct{}) {}

type mockResourceEventHandler struct{}

func (m *mockResourceEventHandler) OnAdd(_ interface{}, _ bool) {}

func (m *mockResourceEventHandler) OnUpdate(_, _ interface{}) {}

func (m *mockResourceEventHandler) OnDelete(_ interface{}) {}

@@ -0,0 +1,905 @@
/*
Copyright 2024 The Karmada Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package multiclusterservice

import (
	"context"
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	discoveryv1 "k8s.io/api/discovery/v1"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/tools/record"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
	networkingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/networking/v1alpha1"
	workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
	"github.com/karmada-io/karmada/pkg/util"
)

func TestUpdateEndpointSliceDispatched(t *testing.T) {
	tests := []struct {
		name              string
		mcs               *networkingv1alpha1.MultiClusterService
		status            metav1.ConditionStatus
		reason            string
		message           string
		expectedCondition metav1.Condition
	}{
		{
			name: "update status to true",
			mcs: &networkingv1alpha1.MultiClusterService{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-mcs",
					Namespace: "default",
				},
			},
			status:  metav1.ConditionTrue,
			reason:  "EndpointSliceDispatchedSucceed",
			message: "EndpointSlice are dispatched successfully",
			expectedCondition: metav1.Condition{
				Type:    networkingv1alpha1.EndpointSliceDispatched,
				Status:  metav1.ConditionTrue,
				Reason:  "EndpointSliceDispatchedSucceed",
				Message: "EndpointSlice are dispatched successfully",
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			mockClient := new(MockClient)
			mockStatusWriter := new(MockStatusWriter)

			// Expectations Setup
			mockClient.On("Status").Return(mockStatusWriter)
			mockClient.On("Get", mock.Anything, mock.AnythingOfType("types.NamespacedName"), mock.AnythingOfType("*v1alpha1.MultiClusterService"), mock.Anything).
				Run(func(args mock.Arguments) {
					arg := args.Get(2).(*networkingv1alpha1.MultiClusterService)
					*arg = *tt.mcs // Copy the input MCS to the output
				}).Return(nil)

			mockStatusWriter.On("Update", mock.Anything, mock.AnythingOfType("*v1alpha1.MultiClusterService"), mock.Anything).
				Run(func(args mock.Arguments) {
					mcs := args.Get(1).(*networkingv1alpha1.MultiClusterService)
					mcs.Status.Conditions = []metav1.Condition{tt.expectedCondition}
				}).Return(nil)

			c := &EndpointsliceDispatchController{
				Client:        mockClient,
				EventRecorder: record.NewFakeRecorder(100),
			}

			err := c.updateEndpointSliceDispatched(context.Background(), tt.mcs, tt.status, tt.reason, tt.message)
			assert.NoError(t, err, "updateEndpointSliceDispatched should not return an error")

			mockClient.AssertExpectations(t)
			mockStatusWriter.AssertExpectations(t)

			assert.Len(t, tt.mcs.Status.Conditions, 1, "MCS should have one condition")
			if len(tt.mcs.Status.Conditions) > 0 {
				condition := tt.mcs.Status.Conditions[0]
				assert.Equal(t, tt.expectedCondition.Type, condition.Type)
				assert.Equal(t, tt.expectedCondition.Status, condition.Status)
				assert.Equal(t, tt.expectedCondition.Reason, condition.Reason)
				assert.Equal(t, tt.expectedCondition.Message, condition.Message)
			}
		})
	}
}

func TestNewClusterFunc(t *testing.T) {
	tests := []struct {
		name           string
		existingObjs   []client.Object
		inputObj       client.Object
		expectedResult []reconcile.Request
	}{
		{
			name: "new cluster, matching MCS",
			existingObjs: []client.Object{
				&networkingv1alpha1.MultiClusterService{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "test-mcs",
						Namespace: "default",
					},
					Spec: networkingv1alpha1.MultiClusterServiceSpec{
						ConsumerClusters: []networkingv1alpha1.ClusterSelector{
							{Name: "cluster1"},
						},
					},
				},
				&workv1alpha1.Work{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "test-work",
						Namespace: "karmada-es-cluster1",
						Labels: map[string]string{
							util.MultiClusterServiceNameLabel:      "test-mcs",
							util.MultiClusterServiceNamespaceLabel: "default",
						},
					},
				},
			},
			inputObj: &clusterv1alpha1.Cluster{
				ObjectMeta: metav1.ObjectMeta{
					Name: "cluster1",
				},
			},
			expectedResult: []reconcile.Request{
				{NamespacedName: types.NamespacedName{Namespace: "karmada-es-cluster1", Name: "test-work"}},
			},
		},
		{
			name: "new cluster, no matching MCS",
			existingObjs: []client.Object{
				&networkingv1alpha1.MultiClusterService{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "test-mcs",
						Namespace: "default",
					},
					Spec: networkingv1alpha1.MultiClusterServiceSpec{
						ConsumerClusters: []networkingv1alpha1.ClusterSelector{
							{Name: "cluster2"},
						},
					},
				},
			},
			inputObj: &clusterv1alpha1.Cluster{
				ObjectMeta: metav1.ObjectMeta{
					Name: "cluster1",
				},
			},
			expectedResult: nil,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c := setupController(tt.existingObjs...)
			result := c.newClusterFunc()(context.Background(), tt.inputObj)
			assert.Equal(t, tt.expectedResult, result)
		})
	}
}

func TestGetClusterEndpointSliceWorks(t *testing.T) {
	tests := []struct {
		name          string
		existingObjs  []client.Object
		mcsNamespace  string
		mcsName       string
		expectedWorks int
		expectedError bool
		listError     error
	}{
		{
			name: "find matching works",
			existingObjs: []client.Object{
				&workv1alpha1.Work{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "work1",
						Namespace: "karmada-es-cluster1",
						Labels: map[string]string{
							util.MultiClusterServiceNameLabel:      "test-mcs",
							util.MultiClusterServiceNamespaceLabel: "default",
						},
					},
				},
				&workv1alpha1.Work{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "work2",
						Namespace: "karmada-es-cluster2",
						Labels: map[string]string{
							util.MultiClusterServiceNameLabel:      "test-mcs",
							util.MultiClusterServiceNamespaceLabel: "default",
						},
					},
				},
				&workv1alpha1.Work{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "work3",
						Namespace: "karmada-es-cluster3",
						Labels: map[string]string{
							util.MultiClusterServiceNameLabel:      "other-mcs",
							util.MultiClusterServiceNamespaceLabel: "default",
						},
					},
				},
			},
			mcsNamespace:  "default",
			mcsName:       "test-mcs",
			expectedWorks: 2,
			expectedError: false,
		},
		{
			name: "no matching works",
			existingObjs: []client.Object{
				&workv1alpha1.Work{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "work1",
						Namespace: "karmada-es-cluster1",
						Labels: map[string]string{
							util.MultiClusterServiceNameLabel:      "other-mcs",
							util.MultiClusterServiceNamespaceLabel: "default",
						},
					},
				},
			},
			mcsNamespace:  "default",
			mcsName:       "test-mcs",
			expectedWorks: 0,
			expectedError: false,
		},
		{
			name: "works in different namespace",
			existingObjs: []client.Object{
				&workv1alpha1.Work{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "work1",
						Namespace: "karmada-es-cluster1",
						Labels: map[string]string{
							util.MultiClusterServiceNameLabel:      "test-mcs",
							util.MultiClusterServiceNamespaceLabel: "test-namespace",
						},
					},
				},
			},
			mcsNamespace:  "test-namespace",
			mcsName:       "test-mcs",
			expectedWorks: 1,
			expectedError: false,
		},
		{
			name:          "list error",
			existingObjs:  []client.Object{},
			mcsNamespace:  "default",
			mcsName:       "test-mcs",
			expectedWorks: 0,
			expectedError: true,
			listError:     errors.New("fake list error"),
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c := setupController(tt.existingObjs...)
			if tt.listError != nil {
				c.Client = &fakeClient{Client: c.Client, listError: tt.listError}
			}
			works, err := c.getClusterEndpointSliceWorks(context.Background(), tt.mcsNamespace, tt.mcsName)
			if tt.expectedError {
				assert.Error(t, err)
				assert.Nil(t, works)
			} else {
				assert.NoError(t, err)
				assert.Len(t, works, tt.expectedWorks)
				for _, work := range works {
					assert.Equal(t, tt.mcsName, work.Labels[util.MultiClusterServiceNameLabel])
					assert.Equal(t, tt.mcsNamespace, work.Labels[util.MultiClusterServiceNamespaceLabel])
				}
			}
		})
	}
}

func TestNewMultiClusterServiceFunc(t *testing.T) {
	tests := []struct {
		name           string
		existingObjs   []client.Object
		inputObj       client.Object
		expectedResult []reconcile.Request
	}{
		{
			name: "MCS with matching works",
			existingObjs: []client.Object{
				&workv1alpha1.Work{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "test-work-1",
						Namespace: "karmada-es-cluster1",
						Labels: map[string]string{
							util.MultiClusterServiceNameLabel:      "test-mcs",
							util.MultiClusterServiceNamespaceLabel: "default",
						},
					},
				},
				&workv1alpha1.Work{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "test-work-2",
						Namespace: "karmada-es-cluster2",
						Labels: map[string]string{
							util.MultiClusterServiceNameLabel:      "test-mcs",
							util.MultiClusterServiceNamespaceLabel: "default",
						},
						Annotations: map[string]string{
							util.EndpointSliceProvisionClusterAnnotation: "cluster2",
						},
					},
				},
			},
			inputObj: &networkingv1alpha1.MultiClusterService{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-mcs",
					Namespace: "default",
				},
			},
			expectedResult: []reconcile.Request{
				{NamespacedName: types.NamespacedName{Namespace: "karmada-es-cluster1", Name: "test-work-1"}},
			},
		},
		{
			name: "MCS with no matching works",
			existingObjs: []client.Object{
				&workv1alpha1.Work{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "test-work",
						Namespace: "karmada-es-cluster1",
						Labels: map[string]string{
							util.MultiClusterServiceNameLabel:      "other-mcs",
							util.MultiClusterServiceNamespaceLabel: "default",
						},
					},
				},
			},
			inputObj: &networkingv1alpha1.MultiClusterService{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-mcs",
					Namespace: "default",
				},
			},
			expectedResult: nil,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c := setupController(tt.existingObjs...)
			result := c.newMultiClusterServiceFunc()(context.Background(), tt.inputObj)
			assert.Equal(t, tt.expectedResult, result)
		})
	}
}

func TestCleanOrphanDispatchedEndpointSlice(t *testing.T) {
	tests := []struct {
		name            string
		existingObjs    []client.Object
		mcs             *networkingv1alpha1.MultiClusterService
		expectedDeletes int
		expectedError   bool
	}{
		{
			name: "clean orphan works",
			existingObjs: []client.Object{
				&workv1alpha1.Work{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "work1",
						Namespace: "karmada-es-cluster1",
						Labels: map[string]string{
							util.MultiClusterServiceNameLabel:      "test-mcs",
							util.MultiClusterServiceNamespaceLabel: "default",
						},
						Annotations: map[string]string{
							util.EndpointSliceProvisionClusterAnnotation: "provider",
						},
					},
				},
				&workv1alpha1.Work{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "work2",
						Namespace: "karmada-es-cluster2",
						Labels: map[string]string{
							util.MultiClusterServiceNameLabel:      "test-mcs",
							util.MultiClusterServiceNamespaceLabel: "default",
						},
						Annotations: map[string]string{
							util.EndpointSliceProvisionClusterAnnotation: "provider",
						},
					},
				},
			},
			mcs: &networkingv1alpha1.MultiClusterService{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-mcs",
					Namespace: "default",
				},
				Spec: networkingv1alpha1.MultiClusterServiceSpec{
					ConsumerClusters: []networkingv1alpha1.ClusterSelector{
						{Name: "cluster1"},
					},
				},
			},
			expectedDeletes: 1,
			expectedError:   false,
		},
		{
			name: "no orphan works",
			existingObjs: []client.Object{
				&workv1alpha1.Work{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "work1",
						Namespace: "karmada-es-cluster1",
						Labels: map[string]string{
							util.MultiClusterServiceNameLabel:      "test-mcs",
							util.MultiClusterServiceNamespaceLabel: "default",
						},
						Annotations: map[string]string{
							util.EndpointSliceProvisionClusterAnnotation: "provider",
						},
					},
				},
			},
			mcs: &networkingv1alpha1.MultiClusterService{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-mcs",
					Namespace: "default",
				},
				Spec: networkingv1alpha1.MultiClusterServiceSpec{
					ConsumerClusters: []networkingv1alpha1.ClusterSelector{
						{Name: "cluster1"},
					},
				},
			},
			expectedDeletes: 0,
			expectedError:   false,
		},
		{
			name: "work without provision cluster annotation",
			existingObjs: []client.Object{
				&workv1alpha1.Work{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "work1",
						Namespace: "karmada-es-cluster1",
						Labels: map[string]string{
							util.MultiClusterServiceNameLabel:      "test-mcs",
							util.MultiClusterServiceNamespaceLabel: "default",
						},
					},
				},
			},
			mcs: &networkingv1alpha1.MultiClusterService{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-mcs",
					Namespace: "default",
				},
				Spec: networkingv1alpha1.MultiClusterServiceSpec{
					ConsumerClusters: []networkingv1alpha1.ClusterSelector{
						{Name: "cluster2"},
					},
				},
			},
			expectedDeletes: 0,
			expectedError:   false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			scheme := setupSchemeEndpointDispatch()
			fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(tt.existingObjs...).Build()
			c := &EndpointsliceDispatchController{
				Client: fakeClient,
			}
			err := c.cleanOrphanDispatchedEndpointSlice(context.Background(), tt.mcs)
			if tt.expectedError {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)
				// Check if the expected number of works were deleted
				remainingWorks := &workv1alpha1.WorkList{}
				err = fakeClient.List(context.Background(), remainingWorks, &client.ListOptions{})
				assert.NoError(t, err)
				assert.Len(t, remainingWorks.Items, len(tt.existingObjs)-tt.expectedDeletes)
			}
		})
	}
}

func TestEnsureEndpointSliceWork(t *testing.T) {
	tests := []struct {
		name            string
		mcs             *networkingv1alpha1.MultiClusterService
		work            *workv1alpha1.Work
		providerCluster string
		consumerCluster string
		expectedError   bool
		expectedWork    *workv1alpha1.Work
	}{
		{
			name: "create new work",
			mcs: &networkingv1alpha1.MultiClusterService{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-mcs",
					Namespace: "default",
				},
			},
			work: &workv1alpha1.Work{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-work",
					Namespace: "karmada-es-provider",
				},
				Spec: workv1alpha1.WorkSpec{
					Workload: workv1alpha1.WorkloadTemplate{
						Manifests: []workv1alpha1.Manifest{
							{
								RawExtension: runtime.RawExtension{
									Raw: []byte(`{
										"apiVersion": "discovery.k8s.io/v1",
										"kind": "EndpointSlice",
										"metadata": {
											"name": "test-eps"
										},
										"endpoints": [
											{
												"addresses": ["10.0.0.1"]
											}
										],
										"ports": [
											{
												"port": 80
											}
										]
									}`),
								},
							},
						},
					},
				},
			},
			providerCluster: "provider",
			consumerCluster: "consumer",
			expectedError:   false,
			expectedWork: &workv1alpha1.Work{
				ObjectMeta: metav1.ObjectMeta{
					Name:       "test-work",
					Namespace:  "karmada-es-consumer",
					Finalizers: []string{util.ExecutionControllerFinalizer},
					Annotations: map[string]string{
						util.EndpointSliceProvisionClusterAnnotation: "provider",
					},
					Labels: map[string]string{
						util.MultiClusterServiceNameLabel:      "test-mcs",
						util.MultiClusterServiceNamespaceLabel: "default",
					},
				},
				Spec: workv1alpha1.WorkSpec{
					Workload: workv1alpha1.WorkloadTemplate{
						Manifests: []workv1alpha1.Manifest{
							{
								RawExtension: runtime.RawExtension{
									Raw: []byte(`{
										"apiVersion": "discovery.k8s.io/v1",
										"kind": "EndpointSlice",
										"metadata": {
											"name": "provider-test-eps",
											"labels": {
												"kubernetes.io/service-name": "test-mcs",
												"endpointslice.kubernetes.io/managed-by": "endpointslice-dispatch-controller.karmada.io",
												"karmada.io/managed": "true"
											},
											"annotations": {
												"endpointslice.karmada.io/provision-cluster": "provider",
												"work.karmada.io/name": "test-work",
												"work.karmada.io/namespace": "karmada-es-consumer",
												"resourcetemplate.karmada.io/uid": ""
											}
										},
										"endpoints": [
											{
												"addresses": ["10.0.0.1"]
											}
										],
										"ports": [
											{
												"port": 80
											}
										]
									}`),
								},
							},
						},
					},
				},
			},
		},
		{
			name: "empty manifest",
			mcs: &networkingv1alpha1.MultiClusterService{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-mcs",
					Namespace: "default",
				},
			},
			work: &workv1alpha1.Work{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-work",
					Namespace: "karmada-es-provider",
				},
				Spec: workv1alpha1.WorkSpec{
					Workload: workv1alpha1.WorkloadTemplate{
						Manifests: []workv1alpha1.Manifest{},
					},
				},
			},
			providerCluster: "provider",
			consumerCluster: "consumer",
			expectedError:   false,
			expectedWork:    nil,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			scheme := setupSchemeEndpointDispatch()
			fakeClient := fake.NewClientBuilder().WithScheme(scheme).Build()
			c := &EndpointsliceDispatchController{
				Client: fakeClient,
			}

			err := c.ensureEndpointSliceWork(context.Background(), tt.mcs, tt.work, tt.providerCluster, tt.consumerCluster)

			if tt.expectedError {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)

				if tt.expectedWork != nil {
					createdWork := &workv1alpha1.Work{}
					err = fakeClient.Get(context.Background(), types.NamespacedName{
						Name:      tt.expectedWork.Name,
						Namespace: tt.expectedWork.Namespace,
					}, createdWork)
					assert.NoError(t, err)

					assert.Equal(t, tt.expectedWork.ObjectMeta.Name, createdWork.ObjectMeta.Name)
					assert.Equal(t, tt.expectedWork.ObjectMeta.Namespace, createdWork.ObjectMeta.Namespace)
					assert.Equal(t, tt.expectedWork.ObjectMeta.Finalizers, createdWork.ObjectMeta.Finalizers)
					assert.Equal(t, tt.expectedWork.ObjectMeta.Annotations, createdWork.ObjectMeta.Annotations)
					assert.Equal(t, tt.expectedWork.ObjectMeta.Labels, createdWork.ObjectMeta.Labels)

					// Comparing manifests
					assert.Equal(t, len(tt.expectedWork.Spec.Workload.Manifests), len(createdWork.Spec.Workload.Manifests))
					if len(tt.expectedWork.Spec.Workload.Manifests) > 0 {
						expectedManifest := &unstructured.Unstructured{}
						createdManifest := &unstructured.Unstructured{}

						err = expectedManifest.UnmarshalJSON(tt.expectedWork.Spec.Workload.Manifests[0].Raw)
						assert.NoError(t, err)
						err = createdManifest.UnmarshalJSON(createdWork.Spec.Workload.Manifests[0].Raw)
						assert.NoError(t, err)

						assert.Equal(t, expectedManifest.GetName(), createdManifest.GetName())
						assert.Equal(t, expectedManifest.GetLabels(), createdManifest.GetLabels())
						assert.Equal(t, expectedManifest.GetAnnotations(), createdManifest.GetAnnotations())
					}
				} else {
					workList := &workv1alpha1.WorkList{}
					err = fakeClient.List(context.Background(), workList)
					assert.NoError(t, err)
					assert.Empty(t, workList.Items)
				}
			}
		})
	}
}

func TestCleanupEndpointSliceFromConsumerClusters(t *testing.T) {
	tests := []struct {
		name         string
		existingObjs []client.Object
		inputWork    *workv1alpha1.Work
		expectedErr  bool
	}{
		{
			name: "cleanup works in consumer clusters",
			existingObjs: []client.Object{
				&workv1alpha1.Work{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "test-work-1",
						Namespace: "karmada-es-cluster1",
						Annotations: map[string]string{
							util.EndpointSliceProvisionClusterAnnotation: "cluster1",
						},
					},
				},
				&workv1alpha1.Work{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "test-work-2",
						Namespace: "karmada-es-cluster2",
						Annotations: map[string]string{
							util.EndpointSliceProvisionClusterAnnotation: "cluster1",
						},
					},
				},
			},
			inputWork: &workv1alpha1.Work{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-work",
					Namespace: "karmada-es-cluster1",
					Finalizers: []string{
						util.MCSEndpointSliceDispatchControllerFinalizer,
					},
				},
			},
			expectedErr: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			scheme := setupSchemeEndpointDispatch()
			c := &EndpointsliceDispatchController{
				Client: fake.NewClientBuilder().WithScheme(scheme).WithObjects(append(tt.existingObjs, tt.inputWork)...).Build(),
			}

			err := c.cleanupEndpointSliceFromConsumerClusters(context.Background(), tt.inputWork)
			if tt.expectedErr {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)

				// Check if works are deleted
				for _, obj := range tt.existingObjs {
					work := obj.(*workv1alpha1.Work)
					err := c.Client.Get(context.Background(), types.NamespacedName{Namespace: work.Namespace, Name: work.Name}, &workv1alpha1.Work{})
					assert.True(t, client.IgnoreNotFound(err) == nil)
				}

				// Check if the finalizer is removed
				updatedWork := &workv1alpha1.Work{}
				err := c.Client.Get(context.Background(), types.NamespacedName{Namespace: tt.inputWork.Namespace, Name: tt.inputWork.Name}, updatedWork)
				assert.NoError(t, err)
				assert.NotContains(t, updatedWork.Finalizers, util.MCSEndpointSliceDispatchControllerFinalizer)
			}
		})
	}
}

// Helper Functions

// Helper function to create and configure a runtime scheme for the controller
func setupSchemeEndpointDispatch() *runtime.Scheme {
	scheme := runtime.NewScheme()
	_ = networkingv1alpha1.Install(scheme)
	_ = workv1alpha1.Install(scheme)
	_ = clusterv1alpha1.Install(scheme)
	_ = discoveryv1.AddToScheme(scheme)
	return scheme
}

// Helper function to create a new EndpointsliceDispatchController with a fake client for testing
func setupController(objs ...client.Object) *EndpointsliceDispatchController {
	scheme := setupSchemeEndpointDispatch()
	return &EndpointsliceDispatchController{
		Client:        fake.NewClientBuilder().WithScheme(scheme).WithObjects(objs...).Build(),
		EventRecorder: record.NewFakeRecorder(100),
	}
}

// Mock implementations

// MockClient is a mock of client.Client interface
type MockClient struct {
	mock.Mock
}

func (m *MockClient) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error {
	args := m.Called(ctx, key, obj, opts)
	return args.Error(0)
}

func (m *MockClient) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
	args := m.Called(ctx, list, opts)
	return args.Error(0)
}

func (m *MockClient) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error {
	args := m.Called(ctx, obj, opts)
	return args.Error(0)
}

func (m *MockClient) Delete(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error {
	args := m.Called(ctx, obj, opts)
	return args.Error(0)
}

func (m *MockClient) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error {
	args := m.Called(ctx, obj, opts)
	return args.Error(0)
}

func (m *MockClient) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error {
	args := m.Called(ctx, obj, patch, opts)
	return args.Error(0)
}

func (m *MockClient) DeleteAllOf(ctx context.Context, obj client.Object, opts ...client.DeleteAllOfOption) error {
	args := m.Called(ctx, obj, opts)
	return args.Error(0)
}

func (m *MockClient) Status() client.StatusWriter {
	args := m.Called()
	return args.Get(0).(client.StatusWriter)
}

func (m *MockClient) SubResource(subResource string) client.SubResourceClient {
	args := m.Called(subResource)
	return args.Get(0).(client.SubResourceClient)
}

func (m *MockClient) Scheme() *runtime.Scheme {
	args := m.Called()
	return args.Get(0).(*runtime.Scheme)
}

func (m *MockClient) RESTMapper() meta.RESTMapper {
	args := m.Called()
	return args.Get(0).(meta.RESTMapper)
}

func (m *MockClient) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) {
	args := m.Called(obj)
	return args.Get(0).(schema.GroupVersionKind), args.Error(1)
}

func (m *MockClient) IsObjectNamespaced(obj runtime.Object) (bool, error) {
	args := m.Called(obj)
	return args.Bool(0), args.Error(1)
}

// MockStatusWriter is a mock of client.StatusWriter interface
type MockStatusWriter struct {
	mock.Mock
}

func (m *MockStatusWriter) Create(ctx context.Context, obj client.Object, subResource client.Object, opts ...client.SubResourceCreateOption) error {
	args := m.Called(ctx, obj, subResource, opts)
	return args.Error(0)
}

func (m *MockStatusWriter) Update(ctx context.Context, obj client.Object, opts ...client.SubResourceUpdateOption) error {
	args := m.Called(ctx, obj, opts)
	return args.Error(0)
}

func (m *MockStatusWriter) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.SubResourcePatchOption) error {
	args := m.Called(ctx, obj, patch, opts)
	return args.Error(0)
}

// Custom fake client that can simulate list errors
type fakeClient struct {
	client.Client
	listError error
}

func (f *fakeClient) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
	if f.listError != nil {
		return f.listError
	}
	return f.Client.List(ctx, list, opts...)
}
File diff suppressed because it is too large