From bc883c86b1bcfc3e8f7988c5c823d91e0d00a568 Mon Sep 17 00:00:00 2001
From: Xinzhao Xu
Date: Wed, 19 Jan 2022 11:51:58 +0800
Subject: [PATCH] Add e2e test for the AggregateStatus hook

Signed-off-by: Xinzhao Xu
---
 test/e2e/framework/workload.go       |  4 +-
 test/e2e/resourceinterpreter_test.go | 58 ++++++++++++++++++++++++++++
 2 files changed, 60 insertions(+), 2 deletions(-)

diff --git a/test/e2e/framework/workload.go b/test/e2e/framework/workload.go
index 4689059b3..88c7058c2 100644
--- a/test/e2e/framework/workload.go
+++ b/test/e2e/framework/workload.go
@@ -32,13 +32,13 @@ func CreateWorkload(client dynamic.Interface, workload *workloadv1alpha1.Workloa
 }
 
 // UpdateWorkload update Workload with dynamic client
-func UpdateWorkload(client dynamic.Interface, workload *workloadv1alpha1.Workload, clusterName string) {
+func UpdateWorkload(client dynamic.Interface, workload *workloadv1alpha1.Workload, clusterName string, subresources ...string) {
 	ginkgo.By(fmt.Sprintf("Update workload(%s/%s) in cluster(%s)", workload.Namespace, workload.Name, clusterName), func() {
 		newUnstructuredObj, err := helper.ToUnstructured(workload)
 		gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
 
 		gomega.Eventually(func() error {
-			_, err = client.Resource(workloadGVR).Namespace(workload.Namespace).Update(context.TODO(), newUnstructuredObj, metav1.UpdateOptions{})
+			_, err = client.Resource(workloadGVR).Namespace(workload.Namespace).Update(context.TODO(), newUnstructuredObj, metav1.UpdateOptions{}, subresources...)
 			return err
 		}, pollTimeout, pollInterval).ShouldNot(gomega.HaveOccurred())
 	})
diff --git a/test/e2e/resourceinterpreter_test.go b/test/e2e/resourceinterpreter_test.go
index 95c92d6cc..9a3ae0dc7 100644
--- a/test/e2e/resourceinterpreter_test.go
+++ b/test/e2e/resourceinterpreter_test.go
@@ -9,6 +9,7 @@ import (
 	"github.com/onsi/gomega"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/rand"
+	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/klog/v2"
 	"k8s.io/utils/pointer"
 
@@ -178,4 +179,61 @@ var _ = ginkgo.Describe("Resource interpreter webhook testing", func() {
 			framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name)
 		})
 	})
+
+	ginkgo.Context("InterpreterOperation AggregateStatus testing", func() {
+		policyNamespace := testNamespace
+		policyName := workloadNamePrefix + rand.String(RandomStrLength)
+		workloadNamespace := testNamespace
+		workloadName := policyName
+		workload := testhelper.NewWorkload(workloadNamespace, workloadName)
+		policy := testhelper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
+			{
+				APIVersion: workload.APIVersion,
+				Kind:       workload.Kind,
+				Name:       workload.Name,
+			},
+		}, policyv1alpha1.Placement{
+			ClusterAffinity: &policyv1alpha1.ClusterAffinity{
+				ClusterNames: framework.ClusterNames(),
+			},
+		})
+
+		ginkgo.It("AggregateStatus testing", func() {
+			framework.CreatePropagationPolicy(karmadaClient, policy)
+			framework.CreateWorkload(dynamicClient, workload)
+
+			ginkgo.By("check whether the workload status can be correctly collected", func() {
+				// Simulate the workload controller's behavior: manually update the status of the workload in each member cluster.
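+				// Passing "status" as the subresource means only .status is written; the
+				// AggregateStatus hook is then expected to sum these per-cluster values
+				// onto the federated workload, which the polling below verifies.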
+				for _, cluster := range framework.ClusterNames() {
+					clusterDynamicClient := framework.GetClusterDynamicClient(cluster)
+					gomega.Expect(clusterDynamicClient).ShouldNot(gomega.BeNil())
+
+					memberWorkload := framework.GetWorkload(clusterDynamicClient, workloadNamespace, workloadName)
+					memberWorkload.Status.ReadyReplicas = *workload.Spec.Replicas
+					framework.UpdateWorkload(clusterDynamicClient, memberWorkload, cluster, "status")
+				}
+
+				wantedReplicas := *workload.Spec.Replicas * int32(len(framework.Clusters()))
+				klog.Infof("Waiting for workload(%s/%s) status to be collected correctly", workloadNamespace, workloadName)
+				err := wait.PollImmediate(pollInterval, pollTimeout, func() (done bool, err error) {
+					currentWorkload := framework.GetWorkload(dynamicClient, workloadNamespace, workloadName)
+
+					klog.Infof("workload(%s/%s) readyReplicas: %d, wanted replicas: %d", workloadNamespace, workloadName, currentWorkload.Status.ReadyReplicas, wantedReplicas)
+					if currentWorkload.Status.ReadyReplicas == wantedReplicas {
+						return true, nil
+					}
+
+					return false, nil
+				})
+				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+			})
+
+			framework.RemoveWorkload(dynamicClient, workload.Namespace, workload.Name)
+			framework.WaitWorkloadDisappearOnClusters(framework.ClusterNames(), workload.Namespace, workload.Name)
+			framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name)
+		})
+	})
 })
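--
A note on the UpdateWorkload change: the new variadic parameter is passed
straight through to the dynamic client, whose Update method accepts optional
trailing subresource names. A minimal sketch of the resulting call, assuming
a dynamic.Interface client and an *unstructured.Unstructured obj for the
workloadGVR used above:

	// With "status" as the trailing argument, the dynamic client PUTs to
	// .../namespaces/<ns>/workloads/<name>/status, so only the object's
	// status subresource is persisted; spec changes in obj are ignored.
	_, err := client.Resource(workloadGVR).
		Namespace(obj.GetNamespace()).
		Update(context.TODO(), obj, metav1.UpdateOptions{}, "status")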