Merge pull request #3621 from realnumber666/add-ut/crb_status_controller

Add unit tests for `pkg/controllers/applicationfailover` and `pkg/controllers/status`
karmada-bot 2023-06-06 12:31:23 +08:00 committed by GitHub
commit 527efd174f
5 changed files with 1214 additions and 0 deletions
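
The diff below touches only test files. Assuming a standard checkout with the Go toolchain available, the new tests can be run from the repository root with `go test ./pkg/controllers/applicationfailover/... ./pkg/controllers/status/...`.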


@@ -4,9 +4,78 @@ import (
"reflect"
"testing"
"github.com/stretchr/testify/assert"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
)
func TestTimeStampProcess(t *testing.T) {
key := types.NamespacedName{
Namespace: "default",
Name: "test",
}
cluster := "cluster-1"
m := newWorkloadUnhealthyMap()
m.setTimeStamp(key, cluster)
res := m.hasWorkloadBeenUnhealthy(key, cluster)
assert.Equal(t, true, res)
time := m.getTimeStamp(key, cluster)
assert.NotEmpty(t, time)
m.delete(key)
res = m.hasWorkloadBeenUnhealthy(key, cluster)
assert.Equal(t, false, res)
}
func TestWorkloadUnhealthyMap_deleteIrrelevantClusters(t *testing.T) {
cluster1 := "cluster-1"
cluster2 := "cluster-2"
cluster3 := "cluster-3"
t.Run("normal case", func(t *testing.T) {
key := types.NamespacedName{
Namespace: "default",
Name: "test",
}
m := newWorkloadUnhealthyMap()
m.setTimeStamp(key, cluster1)
m.setTimeStamp(key, cluster2)
m.setTimeStamp(key, cluster3)
allClusters := sets.New[string](cluster2, cluster3)
healthyClusters := []string{cluster3}
m.deleteIrrelevantClusters(key, allClusters, healthyClusters)
res1 := m.hasWorkloadBeenUnhealthy(key, cluster1)
assert.Equal(t, false, res1)
res2 := m.hasWorkloadBeenUnhealthy(key, cluster2)
assert.Equal(t, true, res2)
res3 := m.hasWorkloadBeenUnhealthy(key, cluster3)
assert.Equal(t, false, res3)
})
t.Run("unhealthyClusters is nil", func(t *testing.T) {
key := types.NamespacedName{
Namespace: "default",
Name: "test",
}
m := newWorkloadUnhealthyMap()
allClusters := sets.New[string](cluster2, cluster3)
healthyClusters := []string{cluster3}
m.deleteIrrelevantClusters(key, allClusters, healthyClusters)
res := m.hasWorkloadBeenUnhealthy(key, cluster2)
assert.Equal(t, false, res)
})
}
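
Taken together, the two tests above pin down the surface of the package's unexported `workloadUnhealthyMap` helper: `setTimeStamp`, `getTimeStamp`, `hasWorkloadBeenUnhealthy`, `delete`, and `deleteIrrelevantClusters`. A minimal sketch of a type that would satisfy these assertions, assuming a mutex-guarded map keyed by workload and then by cluster (the actual implementation in this package may differ; the `Sketch` suffix is ours, to avoid any confusion with the real type):

```go
package applicationfailover

import (
	"sync"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/sets"
)

// workloadUnhealthyMapSketch records, per workload key, when each cluster
// was first seen unhealthy.
type workloadUnhealthyMapSketch struct {
	sync.RWMutex
	workloads map[types.NamespacedName]map[string]metav1.Time
}

func newWorkloadUnhealthyMapSketch() *workloadUnhealthyMapSketch {
	return &workloadUnhealthyMapSketch{workloads: map[types.NamespacedName]map[string]metav1.Time{}}
}

func (m *workloadUnhealthyMapSketch) setTimeStamp(key types.NamespacedName, cluster string) {
	m.Lock()
	defer m.Unlock()
	clusters, ok := m.workloads[key]
	if !ok {
		clusters = map[string]metav1.Time{}
		m.workloads[key] = clusters
	}
	clusters[cluster] = metav1.Now()
}

func (m *workloadUnhealthyMapSketch) hasWorkloadBeenUnhealthy(key types.NamespacedName, cluster string) bool {
	m.RLock()
	defer m.RUnlock()
	_, ok := m.workloads[key][cluster] // reading a nil inner map is safe in Go
	return ok
}

func (m *workloadUnhealthyMapSketch) getTimeStamp(key types.NamespacedName, cluster string) metav1.Time {
	m.RLock()
	defer m.RUnlock()
	return m.workloads[key][cluster]
}

func (m *workloadUnhealthyMapSketch) delete(key types.NamespacedName) {
	m.Lock()
	defer m.Unlock()
	delete(m.workloads, key)
}

// deleteIrrelevantClusters drops entries for clusters that are no longer part
// of the binding (absent from allClusters) or that have recovered (listed in
// healthyClusters), which is the behaviour the "normal case" subtest asserts.
func (m *workloadUnhealthyMapSketch) deleteIrrelevantClusters(key types.NamespacedName, allClusters sets.Set[string], healthyClusters []string) {
	m.Lock()
	defer m.Unlock()
	clusters, ok := m.workloads[key]
	if !ok {
		return
	}
	for cluster := range clusters {
		if !allClusters.Has(cluster) {
			delete(clusters, cluster)
		}
	}
	for _, cluster := range healthyClusters {
		delete(clusters, cluster)
	}
}
```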
func TestDistinguishUnhealthyClustersWithOthers(t *testing.T) {
tests := []struct {
name string


@@ -0,0 +1,377 @@
package applicationfailover
import (
"context"
"testing"
"time"
"github.com/stretchr/testify/assert"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/record"
controllerruntime "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
"github.com/karmada-io/karmada/pkg/util/gclient"
"github.com/karmada-io/karmada/pkg/util/helper"
)
func generateCRBApplicationFailoverController() *CRBApplicationFailoverController {
m := newWorkloadUnhealthyMap()
c := &CRBApplicationFailoverController{
Client: fake.NewClientBuilder().WithScheme(gclient.NewSchema()).Build(),
EventRecorder: &record.FakeRecorder{},
workloadUnhealthyMap: m,
}
return c
}
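
The fake client above is built with `gclient.NewSchema()`, which (as used across Karmada) aggregates the Karmada API types, including `workv1alpha2`, into the scheme; without it the fake client could not create or fetch `ClusterResourceBinding` objects in the tests below.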
func TestCRBApplicationFailoverController_Reconcile(t *testing.T) {
t.Run("failed in clusterResourceBindingFilter", func(t *testing.T) {
binding := &workv1alpha2.ClusterResourceBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "binding",
Namespace: "default",
},
Spec: workv1alpha2.ResourceBindingSpec{
Resource: workv1alpha2.ObjectReference{
APIVersion: "v1",
Kind: "Pod",
Namespace: "default",
Name: "pod",
},
},
}
c := generateCRBApplicationFailoverController()
// Prepare req
req := controllerruntime.Request{
NamespacedName: types.NamespacedName{
Name: "binding",
Namespace: "default",
},
}
if err := c.Client.Create(context.Background(), binding); err != nil {
t.Fatalf("Failed to create binding: %v", err)
}
res, err := c.Reconcile(context.Background(), req)
assert.Equal(t, controllerruntime.Result{}, res)
assert.Equal(t, nil, err)
})
t.Run("failed in c.Client.Get", func(t *testing.T) {
c := generateCRBApplicationFailoverController()
// Prepare req
req := controllerruntime.Request{
NamespacedName: types.NamespacedName{
Name: "binding",
Namespace: "default",
},
}
res, err := c.Reconcile(context.Background(), req)
assert.Equal(t, controllerruntime.Result{}, res)
assert.Equal(t, nil, err)
})
}
func TestCRBApplicationFailoverController_detectFailure(t *testing.T) {
cluster1 := "cluster1"
cluster2 := "cluster2"
key := types.NamespacedName{
Namespace: "default",
Name: "test",
}
t.Run("hasWorkloadBeenUnhealthy return false", func(t *testing.T) {
clusters := []string{cluster1, cluster2}
tolerationSeconds := int32(1)
c := generateCRBApplicationFailoverController()
duration, needEvictClusters := c.detectFailure(clusters, &tolerationSeconds, key)
assert.Equal(t, tolerationSeconds, duration)
assert.Equal(t, []string(nil), needEvictClusters)
})
t.Run("more than the tolerance time", func(t *testing.T) {
clusters := []string{cluster1, cluster2}
tolerationSeconds := int32(1)
c := generateCRBApplicationFailoverController()
c.workloadUnhealthyMap.setTimeStamp(key, cluster1)
time.Sleep(2 * time.Second)
duration, needEvictClusters := c.detectFailure(clusters, &tolerationSeconds, key)
assert.Equal(t, tolerationSeconds, duration)
assert.Equal(t, []string{"cluster1"}, needEvictClusters)
})
t.Run("less than the tolerance time", func(t *testing.T) {
clusters := []string{cluster1, cluster2}
tolerationSeconds := int32(100)
c := generateCRBApplicationFailoverController()
c.workloadUnhealthyMap.setTimeStamp(key, cluster1)
duration, needEvictClusters := c.detectFailure(clusters, &tolerationSeconds, key)
assert.Equal(t, tolerationSeconds, duration)
assert.Equal(t, []string(nil), needEvictClusters)
})
t.Run("final duration is 0", func(t *testing.T) {
clusters := []string{}
tolerationSeconds := int32(100)
c := generateCRBApplicationFailoverController()
duration, needEvictClusters := c.detectFailure(clusters, &tolerationSeconds, key)
assert.Equal(t, int32(0), duration)
assert.Equal(t, []string(nil), needEvictClusters)
})
}
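
The four subtests above fix the contract of `detectFailure`: a cluster seen unhealthy for the first time starts a tolerance clock and schedules a recheck, a cluster past the window is queued for eviction, and with no clusters there is nothing to wait for. A hedged reconstruction of that contract (`detectFailureSketch` and its `firstUnhealthy` parameter are hypothetical stand-ins, not the controller's actual code):

```go
package applicationfailover

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// detectFailureSketch reconstructs the behaviour the subtests pin down.
// firstUnhealthy maps each already-tracked cluster to when it was first seen
// unhealthy, standing in for the workloadUnhealthyMap lookups.
func detectFailureSketch(clusters []string, tolerationSeconds int32, firstUnhealthy map[string]metav1.Time) (retryAfter int32, needEvict []string) {
	tolerance := time.Duration(tolerationSeconds) * time.Second
	for _, cluster := range clusters {
		seenAt, tracked := firstUnhealthy[cluster]
		switch {
		case !tracked:
			// First sighting: start the tolerance clock and ask the caller
			// to recheck after the full window.
			firstUnhealthy[cluster] = metav1.Now()
			retryAfter = tolerationSeconds
		case time.Since(seenAt.Time) > tolerance:
			// Unhealthy for longer than tolerated: schedule eviction.
			needEvict = append(needEvict, cluster)
		default:
			// Still within the window: keep waiting.
			retryAfter = tolerationSeconds
		}
	}
	// With no clusters to track (the "final duration is 0" subtest),
	// retryAfter stays 0 and needEvict stays nil.
	return retryAfter, needEvict
}
```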
func TestCRBApplicationFailoverController_syncBinding(t *testing.T) {
tolerationSeconds := int32(5)
c := generateCRBApplicationFailoverController()
binding := &workv1alpha2.ClusterResourceBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "binding",
Namespace: "default",
},
Spec: workv1alpha2.ResourceBindingSpec{
Resource: workv1alpha2.ObjectReference{
APIVersion: "v1",
Kind: "Pod",
Namespace: "default",
Name: "pod",
},
Failover: &policyv1alpha1.FailoverBehavior{
Application: &policyv1alpha1.ApplicationFailoverBehavior{
DecisionConditions: policyv1alpha1.DecisionConditions{
TolerationSeconds: &tolerationSeconds,
},
},
},
Clusters: []workv1alpha2.TargetCluster{
{
Name: "member1",
Replicas: 1,
},
{
Name: "member1",
Replicas: 2,
},
},
},
Status: workv1alpha2.ResourceBindingStatus{
AggregatedStatus: []workv1alpha2.AggregatedStatusItem{
{
ClusterName: "member1",
Health: workv1alpha2.ResourceHealthy,
},
{
ClusterName: "member2",
Health: workv1alpha2.ResourceUnhealthy,
},
},
},
}
dur, err := c.syncBinding(binding)
assert.Equal(t, 5*time.Second, dur)
assert.NoError(t, err)
}
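
The expected 5-second requeue is consistent with the `detectFailure` contract above: `member2` is reported unhealthy in the aggregated status and is not yet tracked in `workloadUnhealthyMap`, so `syncBinding` plausibly starts its tolerance clock and asks to be rechecked after the full `TolerationSeconds` window.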
func TestCRBApplicationFailoverController_evictBinding(t *testing.T) {
tests := []struct {
name string
purgeMode policyv1alpha1.PurgeMode
expectError bool
}{
{
name: "PurgeMode is Graciously",
purgeMode: policyv1alpha1.Graciously,
expectError: false,
},
{
name: "PurgeMode is Never",
purgeMode: policyv1alpha1.Never,
expectError: false,
},
{
name: "PurgeMode is Immediately",
purgeMode: policyv1alpha1.Immediately,
expectError: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := generateCRBApplicationFailoverController()
binding := &workv1alpha2.ClusterResourceBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "binding",
Namespace: "default",
},
Spec: workv1alpha2.ResourceBindingSpec{
Resource: workv1alpha2.ObjectReference{
APIVersion: "v1",
Kind: "Pod",
Namespace: "default",
Name: "pod",
},
Failover: &policyv1alpha1.FailoverBehavior{
Application: &policyv1alpha1.ApplicationFailoverBehavior{
PurgeMode: tt.purgeMode,
},
},
},
}
clusters := []string{"member1", "member2"}
err := c.evictBinding(binding, clusters)
if tt.expectError {
assert.Error(t, err)
} else {
assert.NoError(t, err)
}
})
}
}
func TestCRBApplicationFailoverController_updateBinding(t *testing.T) {
binding := &workv1alpha2.ClusterResourceBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "binding",
Namespace: "default",
},
Spec: workv1alpha2.ResourceBindingSpec{
Resource: workv1alpha2.ObjectReference{
APIVersion: "v1",
Kind: "Pod",
Namespace: "default",
Name: "pod",
},
},
}
allClusters := sets.New("member1", "member2", "member3")
needEvictClusters := []string{"member1", "member2"}
c := generateCRBApplicationFailoverController()
t.Run("failed when c.Update", func(t *testing.T) {
err := c.updateBinding(binding, allClusters, needEvictClusters)
assert.Error(t, err)
})
t.Run("normal case", func(t *testing.T) {
if err := c.Client.Create(context.Background(), binding); err != nil {
t.Fatalf("Failed to create binding: %v", err)
}
err := c.updateBinding(binding, allClusters, needEvictClusters)
assert.NoError(t, err)
})
}
func generateRaw() *runtime.RawExtension {
testTime := time.Now()
testV1time := metav1.NewTime(testTime)
statusMap := map[string]interface{}{
"active": 0,
"succeeded": 1,
"startTime": testV1time,
"completionTime": testV1time,
"failed": 0,
"conditions": []batchv1.JobCondition{{Type: batchv1.JobComplete, Status: corev1.ConditionTrue}},
}
raw, _ := helper.BuildStatusRawExtension(statusMap)
return raw
}
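
`generateRaw` packages a Job-style status map for the `AggregatedStatusItem` entries used below. As a rough sketch of what `helper.BuildStatusRawExtension` plausibly does (an assumption about the helper, not its source), it JSON-encodes the map and wraps the bytes in a `runtime.RawExtension`:

```go
package applicationfailover

import (
	"encoding/json"

	"k8s.io/apimachinery/pkg/runtime"
)

// buildStatusRawExtensionSketch is a hypothetical stand-in for
// helper.BuildStatusRawExtension: marshal the status to JSON and wrap it.
func buildStatusRawExtensionSketch(status interface{}) (*runtime.RawExtension, error) {
	bytes, err := json.Marshal(status)
	if err != nil {
		return nil, err
	}
	return &runtime.RawExtension{Raw: bytes}, nil
}
```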
func TestCRBApplicationFailoverController_clusterResourceBindingFilter(t *testing.T) {
tests := []struct {
name string
binding *workv1alpha2.ClusterResourceBinding
expectRes bool
}{
{
name: "crb.Spec.Failover and crb.Spec.Failover.Application is nil",
binding: &workv1alpha2.ClusterResourceBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "binding",
Namespace: "default",
},
Spec: workv1alpha2.ResourceBindingSpec{
Resource: workv1alpha2.ObjectReference{
APIVersion: "v1",
Kind: "Pod",
Namespace: "default",
Name: "pod",
},
},
},
expectRes: false,
},
{
name: "crb.Status.AggregatedStatus is 0",
binding: &workv1alpha2.ClusterResourceBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "binding",
Namespace: "default",
},
Spec: workv1alpha2.ResourceBindingSpec{
Resource: workv1alpha2.ObjectReference{
APIVersion: "v1",
Kind: "Pod",
Namespace: "default",
Name: "pod",
},
Failover: &policyv1alpha1.FailoverBehavior{
Application: &policyv1alpha1.ApplicationFailoverBehavior{},
},
},
},
expectRes: false,
},
{
name: "error occurs in ConstructClusterWideKey",
binding: &workv1alpha2.ClusterResourceBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "binding",
Namespace: "default",
},
Spec: workv1alpha2.ResourceBindingSpec{
Resource: workv1alpha2.ObjectReference{
APIVersion: "a/b/c",
Kind: "Pod",
Namespace: "default",
Name: "pod",
},
Failover: &policyv1alpha1.FailoverBehavior{
Application: &policyv1alpha1.ApplicationFailoverBehavior{},
},
},
Status: workv1alpha2.ResourceBindingStatus{
AggregatedStatus: []workv1alpha2.AggregatedStatusItem{
{ClusterName: "memberA", Status: generateRaw()},
{ClusterName: "memberB", Status: generateRaw()},
},
},
},
expectRes: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := generateCRBApplicationFailoverController()
res := c.clusterResourceBindingFilter(tt.binding)
assert.Equal(t, tt.expectRes, res)
})
}
}


@@ -0,0 +1,358 @@
package applicationfailover
import (
"context"
"testing"
"time"
"github.com/stretchr/testify/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/record"
controllerruntime "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
"github.com/karmada-io/karmada/pkg/util/gclient"
)
func generateRBApplicationFailoverController() *RBApplicationFailoverController {
m := newWorkloadUnhealthyMap()
c := &RBApplicationFailoverController{
Client: fake.NewClientBuilder().WithScheme(gclient.NewSchema()).Build(),
EventRecorder: &record.FakeRecorder{},
workloadUnhealthyMap: m,
}
return c
}
func TestRBApplicationFailoverController_Reconcile(t *testing.T) {
t.Run("failed in bindingFilter", func(t *testing.T) {
binding := &workv1alpha2.ResourceBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "binding",
Namespace: "default",
},
Spec: workv1alpha2.ResourceBindingSpec{
Resource: workv1alpha2.ObjectReference{
APIVersion: "v1",
Kind: "Pod",
Namespace: "default",
Name: "pod",
},
},
}
c := generateRBApplicationFailoverController()
// Prepare req
req := controllerruntime.Request{
NamespacedName: types.NamespacedName{
Name: "binding",
Namespace: "default",
},
}
if err := c.Client.Create(context.Background(), binding); err != nil {
t.Fatalf("Failed to create binding: %v", err)
}
res, err := c.Reconcile(context.Background(), req)
assert.Equal(t, controllerruntime.Result{}, res)
assert.Equal(t, nil, err)
})
t.Run("failed in c.Client.Get", func(t *testing.T) {
c := generateRBApplicationFailoverController()
// Prepare req
req := controllerruntime.Request{
NamespacedName: types.NamespacedName{
Name: "binding",
Namespace: "default",
},
}
res, err := c.Reconcile(context.Background(), req)
assert.Equal(t, controllerruntime.Result{}, res)
assert.Equal(t, nil, err)
})
}
func TestRBApplicationFailoverController_detectFailure(t *testing.T) {
cluster1 := "cluster1"
cluster2 := "cluster2"
key := types.NamespacedName{
Namespace: "default",
Name: "test",
}
t.Run("hasWorkloadBeenUnhealthy return false", func(t *testing.T) {
clusters := []string{cluster1, cluster2}
tolerationSeconds := int32(1)
c := generateRBApplicationFailoverController()
duration, needEvictClusters := c.detectFailure(clusters, &tolerationSeconds, key)
assert.Equal(t, tolerationSeconds, duration)
assert.Equal(t, []string(nil), needEvictClusters)
})
t.Run("more than the tolerance time", func(t *testing.T) {
clusters := []string{cluster1, cluster2}
tolerationSeconds := int32(1)
c := generateRBApplicationFailoverController()
c.workloadUnhealthyMap.setTimeStamp(key, cluster1)
time.Sleep(2 * time.Second)
duration, needEvictClusters := c.detectFailure(clusters, &tolerationSeconds, key)
assert.Equal(t, tolerationSeconds, duration)
assert.Equal(t, []string{"cluster1"}, needEvictClusters)
})
t.Run("less than the tolerance time", func(t *testing.T) {
clusters := []string{cluster1, cluster2}
tolerationSeconds := int32(100)
c := generateRBApplicationFailoverController()
c.workloadUnhealthyMap.setTimeStamp(key, cluster1)
duration, needEvictClusters := c.detectFailure(clusters, &tolerationSeconds, key)
assert.Equal(t, tolerationSeconds, duration)
assert.Equal(t, []string(nil), needEvictClusters)
})
t.Run("final duration is 0", func(t *testing.T) {
clusters := []string{}
tolerationSeconds := int32(100)
c := generateRBApplicationFailoverController()
duration, needEvictClusters := c.detectFailure(clusters, &tolerationSeconds, key)
assert.Equal(t, int32(0), duration)
assert.Equal(t, []string(nil), needEvictClusters)
})
}
func TestRBApplicationFailoverController_syncBinding(t *testing.T) {
tolerationSeconds := int32(5)
c := generateRBApplicationFailoverController()
binding := &workv1alpha2.ResourceBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "binding",
Namespace: "default",
},
Spec: workv1alpha2.ResourceBindingSpec{
Resource: workv1alpha2.ObjectReference{
APIVersion: "v1",
Kind: "Pod",
Namespace: "default",
Name: "pod",
},
Failover: &policyv1alpha1.FailoverBehavior{
Application: &policyv1alpha1.ApplicationFailoverBehavior{
DecisionConditions: policyv1alpha1.DecisionConditions{
TolerationSeconds: &tolerationSeconds,
},
},
},
Clusters: []workv1alpha2.TargetCluster{
{
Name: "member1",
Replicas: 1,
},
{
Name: "member1",
Replicas: 2,
},
},
},
Status: workv1alpha2.ResourceBindingStatus{
AggregatedStatus: []workv1alpha2.AggregatedStatusItem{
{
ClusterName: "member1",
Health: workv1alpha2.ResourceHealthy,
},
{
ClusterName: "member2",
Health: workv1alpha2.ResourceUnhealthy,
},
},
},
}
dur, err := c.syncBinding(binding)
assert.Equal(t, 5*time.Second, dur)
assert.NoError(t, err)
}
func TestRBApplicationFailoverController_evictBinding(t *testing.T) {
tests := []struct {
name string
purgeMode policyv1alpha1.PurgeMode
expectError bool
}{
{
name: "PurgeMode is Graciously",
purgeMode: policyv1alpha1.Graciously,
expectError: false,
},
{
name: "PurgeMode is Never",
purgeMode: policyv1alpha1.Never,
expectError: false,
},
{
name: "PurgeMode is Immediately",
purgeMode: policyv1alpha1.Immediately,
expectError: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := generateRBApplicationFailoverController()
binding := &workv1alpha2.ResourceBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "binding",
Namespace: "default",
},
Spec: workv1alpha2.ResourceBindingSpec{
Resource: workv1alpha2.ObjectReference{
APIVersion: "v1",
Kind: "Pod",
Namespace: "default",
Name: "pod",
},
Failover: &policyv1alpha1.FailoverBehavior{
Application: &policyv1alpha1.ApplicationFailoverBehavior{
PurgeMode: tt.purgeMode,
},
},
},
}
clusters := []string{"member1", "member2"}
err := c.evictBinding(binding, clusters)
if tt.expectError {
assert.Error(t, err)
} else {
assert.NoError(t, err)
}
})
}
}
func TestRBApplicationFailoverController_updateBinding(t *testing.T) {
binding := &workv1alpha2.ResourceBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "binding",
Namespace: "default",
},
Spec: workv1alpha2.ResourceBindingSpec{
Resource: workv1alpha2.ObjectReference{
APIVersion: "v1",
Kind: "Pod",
Namespace: "default",
Name: "pod",
},
},
}
allClusters := sets.New("member1", "member2", "member3")
needEvictClusters := []string{"member1", "member2"}
c := generateRBApplicationFailoverController()
t.Run("failed when c.Update", func(t *testing.T) {
err := c.updateBinding(binding, allClusters, needEvictClusters)
assert.Error(t, err)
})
t.Run("normal case", func(t *testing.T) {
if err := c.Client.Create(context.Background(), binding); err != nil {
t.Fatalf("Failed to create binding: %v", err)
}
err := c.updateBinding(binding, allClusters, needEvictClusters)
assert.NoError(t, err)
})
}
func TestRBApplicationFailoverController_clusterResourceBindingFilter(t *testing.T) {
tests := []struct {
name string
binding *workv1alpha2.ResourceBinding
expectRes bool
}{
{
name: "crb.Spec.Failover and crb.Spec.Failover.Application is nil",
binding: &workv1alpha2.ResourceBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "binding",
Namespace: "default",
},
Spec: workv1alpha2.ResourceBindingSpec{
Resource: workv1alpha2.ObjectReference{
APIVersion: "v1",
Kind: "Pod",
Namespace: "default",
Name: "pod",
},
},
},
expectRes: false,
},
{
name: "crb.Status.AggregatedStatus is 0",
binding: &workv1alpha2.ResourceBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "binding",
Namespace: "default",
},
Spec: workv1alpha2.ResourceBindingSpec{
Resource: workv1alpha2.ObjectReference{
APIVersion: "v1",
Kind: "Pod",
Namespace: "default",
Name: "pod",
},
Failover: &policyv1alpha1.FailoverBehavior{
Application: &policyv1alpha1.ApplicationFailoverBehavior{},
},
},
},
expectRes: false,
},
{
name: "error occurs in ConstructClusterWideKey",
binding: &workv1alpha2.ResourceBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "binding",
Namespace: "default",
},
Spec: workv1alpha2.ResourceBindingSpec{
Resource: workv1alpha2.ObjectReference{
APIVersion: "a/b/c",
Kind: "Pod",
Namespace: "default",
Name: "pod",
},
Failover: &policyv1alpha1.FailoverBehavior{
Application: &policyv1alpha1.ApplicationFailoverBehavior{},
},
},
Status: workv1alpha2.ResourceBindingStatus{
AggregatedStatus: []workv1alpha2.AggregatedStatusItem{
{ClusterName: "memberA", Status: generateRaw()},
{ClusterName: "memberB", Status: generateRaw()},
},
},
},
expectRes: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := generateRBApplicationFailoverController()
res := c.bindingFilter(tt.binding)
assert.Equal(t, tt.expectRes, res)
})
}
}


@@ -0,0 +1,205 @@
package status
import (
"context"
"testing"
"time"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
dynamicfake "k8s.io/client-go/dynamic/fake"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/record"
controllerruntime "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
"github.com/karmada-io/karmada/pkg/util/fedinformer/genericmanager"
"github.com/karmada-io/karmada/pkg/util/gclient"
)
func generateCRBStatusController() *CRBStatusController {
stopCh := make(chan struct{})
defer close(stopCh)
dynamicClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme,
&corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1", Namespace: "default"}})
m := genericmanager.NewSingleClusterInformerManager(dynamicClient, 0, stopCh)
m.Lister(corev1.SchemeGroupVersion.WithResource("pods"))
m.Start()
m.WaitForCacheSync()
c := &CRBStatusController{
Client: fake.NewClientBuilder().WithScheme(gclient.NewSchema()).Build(),
DynamicClient: dynamicClient,
InformerManager: m,
RESTMapper: func() meta.RESTMapper {
m := meta.NewDefaultRESTMapper([]schema.GroupVersion{corev1.SchemeGroupVersion})
m.Add(corev1.SchemeGroupVersion.WithKind("Pod"), meta.RESTScopeNamespace)
return m
}(),
EventRecorder: &record.FakeRecorder{},
}
return c
}
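
Note the ordering in the generator: `m.Lister(...)` is called before `m.Start()` so that an informer for pods is registered and `WaitForCacheSync()` has something to sync. The deferred `close(stopCh)` stops the informer as soon as the generator returns, which appears sufficient here because the tests read the already-synced cache rather than waiting for further events.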
func TestCRBStatusController_Reconcile(t *testing.T) {
preTime := metav1.Date(2023, 0, 0, 0, 0, 0, 0, time.UTC)
tests := []struct {
name string
binding *workv1alpha2.ClusterResourceBinding
expectRes controllerruntime.Result
expectError bool
}{
{
name: "failed in syncBindingStatus",
binding: &workv1alpha2.ClusterResourceBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "binding",
Namespace: "default",
},
Spec: workv1alpha2.ResourceBindingSpec{
Resource: workv1alpha2.ObjectReference{
APIVersion: "v1",
Kind: "Pod",
Namespace: "default",
Name: "pod",
},
},
},
expectRes: controllerruntime.Result{},
expectError: false,
},
{
name: "binding not found in client",
expectRes: controllerruntime.Result{},
expectError: false,
},
{
name: "failed in syncBindingStatus",
binding: &workv1alpha2.ClusterResourceBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "binding",
Namespace: "default",
DeletionTimestamp: &preTime,
},
Spec: workv1alpha2.ResourceBindingSpec{
Resource: workv1alpha2.ObjectReference{
APIVersion: "v1",
Kind: "Pod",
Namespace: "default",
Name: "pod",
},
},
},
expectRes: controllerruntime.Result{},
expectError: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := generateCRBStatusController()
// Prepare req
req := controllerruntime.Request{
NamespacedName: types.NamespacedName{
Name: "binding",
Namespace: "default",
},
}
// Prepare binding and create it in client
if tt.binding != nil {
if err := c.Client.Create(context.Background(), tt.binding); err != nil {
t.Fatalf("Failed to create binding: %v", err)
}
}
res, err := c.Reconcile(context.Background(), req)
assert.Equal(t, tt.expectRes, res)
if tt.expectError {
assert.Error(t, err)
} else {
assert.NoError(t, err)
}
})
}
}
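
Both the missing-binding case and the deleting-binding case above resolve without error, matching the usual controller-runtime pattern: a NotFound on `Get` is swallowed rather than retried, and a binding carrying a `DeletionTimestamp` no longer needs its status synced.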
func TestCRBStatusController_syncBindingStatus(t *testing.T) {
tests := []struct {
name string
resource workv1alpha2.ObjectReference
podNameInDynamicClient string
resourceExistInClient bool
expectedError bool
}{
{
name: "failed in FetchResourceTemplate, err is NotFound",
resource: workv1alpha2.ObjectReference{
APIVersion: "v1",
Kind: "Pod",
Namespace: "default",
Name: "pod",
},
podNameInDynamicClient: "pod1",
resourceExistInClient: true,
expectedError: false,
},
{
name: "failed in FetchResourceTemplate, err is not NotFound",
resource: workv1alpha2.ObjectReference{},
podNameInDynamicClient: "pod",
resourceExistInClient: true,
expectedError: true,
},
{
name: "failed in AggregateClusterResourceBindingWorkStatus",
resource: workv1alpha2.ObjectReference{
APIVersion: "v1",
Kind: "Pod",
Namespace: "default",
Name: "pod",
},
podNameInDynamicClient: "pod",
resourceExistInClient: false,
expectedError: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := generateCRBStatusController()
c.DynamicClient = dynamicfake.NewSimpleDynamicClient(scheme.Scheme,
&corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: tt.podNameInDynamicClient, Namespace: "default"}})
binding := &workv1alpha2.ClusterResourceBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "binding",
Namespace: "default",
},
Spec: workv1alpha2.ResourceBindingSpec{
Resource: tt.resource,
},
}
if tt.resourceExistInClient {
if err := c.Client.Create(context.Background(), binding); err != nil {
t.Fatalf("Failed to create binding: %v", err)
}
}
err := c.syncBindingStatus(binding)
if tt.expectedError {
assert.Error(t, err)
} else {
assert.NoError(t, err)
}
})
}
}


@@ -0,0 +1,205 @@
package status
import (
"context"
"testing"
"time"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
dynamicfake "k8s.io/client-go/dynamic/fake"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/record"
controllerruntime "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
"github.com/karmada-io/karmada/pkg/util/fedinformer/genericmanager"
"github.com/karmada-io/karmada/pkg/util/gclient"
)
func generateRBStatusController() *RBStatusController {
stopCh := make(chan struct{})
defer close(stopCh)
dynamicClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme,
&corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1", Namespace: "default"}})
m := genericmanager.NewSingleClusterInformerManager(dynamicClient, 0, stopCh)
m.Lister(corev1.SchemeGroupVersion.WithResource("pods"))
m.Start()
m.WaitForCacheSync()
c := &RBStatusController{
Client: fake.NewClientBuilder().WithScheme(gclient.NewSchema()).Build(),
DynamicClient: dynamicClient,
InformerManager: m,
RESTMapper: func() meta.RESTMapper {
m := meta.NewDefaultRESTMapper([]schema.GroupVersion{corev1.SchemeGroupVersion})
m.Add(corev1.SchemeGroupVersion.WithKind("Pod"), meta.RESTScopeNamespace)
return m
}(),
EventRecorder: &record.FakeRecorder{},
}
return c
}
func TestRBStatusController_Reconcile(t *testing.T) {
preTime := metav1.Date(2023, 0, 0, 0, 0, 0, 0, time.UTC)
tests := []struct {
name string
binding *workv1alpha2.ResourceBinding
expectRes controllerruntime.Result
expectError bool
}{
{
name: "failed in syncBindingStatus",
binding: &workv1alpha2.ResourceBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "binding",
Namespace: "default",
},
Spec: workv1alpha2.ResourceBindingSpec{
Resource: workv1alpha2.ObjectReference{
APIVersion: "v1",
Kind: "Pod",
Namespace: "default",
Name: "pod",
},
},
},
expectRes: controllerruntime.Result{},
expectError: false,
},
{
name: "binding not found in client",
expectRes: controllerruntime.Result{},
expectError: false,
},
{
name: "failed in syncBindingStatus",
binding: &workv1alpha2.ResourceBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "binding",
Namespace: "default",
DeletionTimestamp: &preTime,
},
Spec: workv1alpha2.ResourceBindingSpec{
Resource: workv1alpha2.ObjectReference{
APIVersion: "v1",
Kind: "Pod",
Namespace: "default",
Name: "pod",
},
},
},
expectRes: controllerruntime.Result{},
expectError: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := generateRBStatusController()
// Prepare req
req := controllerruntime.Request{
NamespacedName: types.NamespacedName{
Name: "binding",
Namespace: "default",
},
}
// Prepare binding and create it in client
if tt.binding != nil {
if err := c.Client.Create(context.Background(), tt.binding); err != nil {
t.Fatalf("Failed to create binding: %v", err)
}
}
res, err := c.Reconcile(context.Background(), req)
assert.Equal(t, tt.expectRes, res)
if tt.expectError {
assert.Error(t, err)
} else {
assert.NoError(t, err)
}
})
}
}
func TestRBStatusController_syncBindingStatus(t *testing.T) {
tests := []struct {
name string
resource workv1alpha2.ObjectReference
podNameInDynamicClient string
resourceExistInClient bool
expectedError bool
}{
{
name: "failed in FetchResourceTemplate, err is NotFound",
resource: workv1alpha2.ObjectReference{
APIVersion: "v1",
Kind: "Pod",
Namespace: "default",
Name: "pod",
},
podNameInDynamicClient: "pod1",
resourceExistInClient: true,
expectedError: false,
},
{
name: "failed in FetchResourceTemplate, err is not NotFound",
resource: workv1alpha2.ObjectReference{},
podNameInDynamicClient: "pod",
resourceExistInClient: true,
expectedError: true,
},
{
name: "failed in AggregateClusterResourceBindingWorkStatus",
resource: workv1alpha2.ObjectReference{
APIVersion: "v1",
Kind: "Pod",
Namespace: "default",
Name: "pod",
},
podNameInDynamicClient: "pod",
resourceExistInClient: false,
expectedError: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := generateRBStatusController()
c.DynamicClient = dynamicfake.NewSimpleDynamicClient(scheme.Scheme,
&corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: tt.podNameInDynamicClient, Namespace: "default"}})
binding := &workv1alpha2.ResourceBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "binding",
Namespace: "default",
},
Spec: workv1alpha2.ResourceBindingSpec{
Resource: tt.resource,
},
}
if tt.resourceExistInClient {
if err := c.Client.Create(context.Background(), binding); err != nil {
t.Fatalf("Failed to create binding: %v", err)
}
}
err := c.syncBindingStatus(binding)
if tt.expectedError {
assert.Error(t, err)
} else {
assert.NoError(t, err)
}
})
}
}