Add e2e test for the AggregateStatus hook

Signed-off-by: Xinzhao Xu <z2d@jifangcheng.com>
commit bc883c86b1
parent 9a20367c97
Author: Xinzhao Xu
Date: 2022-01-19 11:51:58 +08:00
2 changed files with 57 additions and 2 deletions
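
For context: AggregateStatus is the resource interpreter webhook operation that merges the statuses collected from member clusters back into the resource template on the Karmada control plane. A minimal sketch of the aggregation this test verifies, assuming the hook simply sums the readyReplicas reported by each member cluster (the helper below is hypothetical and only illustrates the arithmetic the test asserts):

package main

import "fmt"

// aggregateReadyReplicas mimics the aggregation the test expects from the
// AggregateStatus hook: sum the readyReplicas reported by every member
// cluster. Hypothetical helper; not part of this commit.
func aggregateReadyReplicas(memberReady []int32) int32 {
	var total int32
	for _, r := range memberReady {
		total += r
	}
	return total
}

func main() {
	// For example, replicas=2 propagated to 3 member clusters should
	// aggregate to readyReplicas=6, which is what the test waits for.
	fmt.Println(aggregateReadyReplicas([]int32{2, 2, 2})) // prints 6
}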


@@ -32,13 +32,13 @@ func CreateWorkload(client dynamic.Interface, workload *workloadv1alpha1.Workload
 }

 // UpdateWorkload update Workload with dynamic client
-func UpdateWorkload(client dynamic.Interface, workload *workloadv1alpha1.Workload, clusterName string) {
+func UpdateWorkload(client dynamic.Interface, workload *workloadv1alpha1.Workload, clusterName string, subresources ...string) {
 	ginkgo.By(fmt.Sprintf("Update workload(%s/%s) in cluster(%s)", workload.Namespace, workload.Name, clusterName), func() {
 		newUnstructuredObj, err := helper.ToUnstructured(workload)
 		gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
 		gomega.Eventually(func() error {
-			_, err = client.Resource(workloadGVR).Namespace(workload.Namespace).Update(context.TODO(), newUnstructuredObj, metav1.UpdateOptions{})
+			_, err = client.Resource(workloadGVR).Namespace(workload.Namespace).Update(context.TODO(), newUnstructuredObj, metav1.UpdateOptions{}, subresources...)
 			return err
 		}, pollTimeout, pollInterval).ShouldNot(gomega.HaveOccurred())
 	})
 }
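
The new variadic subresources parameter is forwarded to the dynamic client's Update method, which accepts an optional trailing list of subresources; passing "status" routes the request to the status subresource rather than the main resource, while existing callers that pass nothing keep working unchanged. A minimal sketch of the underlying call (the helper name and GVR plumbing here are illustrative):

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
)

// updateStatusSubresource shows what UpdateWorkload now does when called with
// "status": the dynamic client targets the status subresource of the object.
// Illustrative helper; not part of this commit.
func updateStatusSubresource(client dynamic.Interface, gvr schema.GroupVersionResource, obj *unstructured.Unstructured) error {
	_, err := client.Resource(gvr).Namespace(obj.GetNamespace()).Update(context.TODO(), obj, metav1.UpdateOptions{}, "status")
	return err
}

The e2e test below passes "status" explicitly when it fakes the member clusters' status updates.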


@@ -9,6 +9,7 @@ import (
 	"github.com/onsi/ginkgo"
 	"github.com/onsi/gomega"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/rand"
+	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/klog/v2"
 	"k8s.io/utils/pointer"
@@ -178,4 +179,58 @@ var _ = ginkgo.Describe("Resource interpreter webhook testing", func() {
 			framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name)
 		})
 	})
+
+	ginkgo.Context("InterpreterOperation AggregateStatus testing", func() {
+		policyNamespace := testNamespace
+		policyName := workloadNamePrefix + rand.String(RandomStrLength)
+		workloadNamespace := testNamespace
+		workloadName := policyName
+
+		workload := testhelper.NewWorkload(workloadNamespace, workloadName)
+		policy := testhelper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
+			{
+				APIVersion: workload.APIVersion,
+				Kind:       workload.Kind,
+				Name:       workload.Name,
+			},
+		}, policyv1alpha1.Placement{
+			ClusterAffinity: &policyv1alpha1.ClusterAffinity{
+				ClusterNames: framework.ClusterNames(),
+			},
+		})
+
+		ginkgo.It("AggregateStatus testing", func() {
+			framework.CreatePropagationPolicy(karmadaClient, policy)
+			framework.CreateWorkload(dynamicClient, workload)
+
+			ginkgo.By("check whether the workload status can be correctly collected", func() {
+				// Simulate the behavior of the workload's resource controller:
+				// manually update the status of the workload in every member cluster.
+				for _, cluster := range framework.ClusterNames() {
+					clusterDynamicClient := framework.GetClusterDynamicClient(cluster)
+					gomega.Expect(clusterDynamicClient).ShouldNot(gomega.BeNil())
+
+					memberWorkload := framework.GetWorkload(clusterDynamicClient, workloadNamespace, workloadName)
+					memberWorkload.Status.ReadyReplicas = *workload.Spec.Replicas
+					framework.UpdateWorkload(clusterDynamicClient, memberWorkload, cluster, "status")
+				}
+
+				wantedReplicas := *workload.Spec.Replicas * int32(len(framework.Clusters()))
+				klog.Infof("Waiting for the status of workload(%s/%s) to be collected correctly", workloadNamespace, workloadName)
+				err := wait.PollImmediate(pollInterval, pollTimeout, func() (done bool, err error) {
+					currentWorkload := framework.GetWorkload(dynamicClient, workloadNamespace, workloadName)
+					klog.Infof("workload(%s/%s) readyReplicas: %d, wanted replicas: %d", workloadNamespace, workloadName, currentWorkload.Status.ReadyReplicas, wantedReplicas)
+					if currentWorkload.Status.ReadyReplicas == wantedReplicas {
+						return true, nil
+					}
+					return false, nil
+				})
+				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+			})
+
+			framework.RemoveWorkload(dynamicClient, workload.Namespace, workload.Name)
+			framework.WaitWorkloadDisappearOnClusters(framework.ClusterNames(), workload.Namespace, workload.Name)
+			framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name)
+		})
+	})
 })
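
A note on the waiting pattern used above: wait.PollImmediate (the reason for the new import) invokes the condition once immediately and then at every pollInterval, until the condition returns true, the condition returns an error, or pollTimeout elapses, in which case it returns wait.ErrWaitTimeout. A self-contained sketch of the same pattern with illustrative durations:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	attempts := 0
	// Try the condition immediately, then every 100ms, for at most 5s.
	err := wait.PollImmediate(100*time.Millisecond, 5*time.Second, func() (bool, error) {
		attempts++ // stand-in for re-reading the aggregated workload status
		return attempts >= 3, nil // done once the expected state is observed
	})
	fmt.Println(err) // <nil>: the condition succeeded before the timeout
}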