e2e: refactor retain test
Signed-off-by: changzhen <changzhen5@huawei.com>
parent 149e0fb867
commit f4353a1e65
@@ -2,7 +2,6 @@ package framework
 
 import (
 	"context"
-	"encoding/json"
 	"fmt"
 
 	"github.com/onsi/ginkgo/v2"
@@ -20,7 +19,7 @@ import (
 
 var workloadGVR = workloadv1alpha1.SchemeGroupVersion.WithResource("workloads")
 
-// CreateWorkload create Workload with dynamic client
+// CreateWorkload creates Workload with dynamic client
 func CreateWorkload(client dynamic.Interface, workload *workloadv1alpha1.Workload) {
 	ginkgo.By(fmt.Sprintf("Creating workload(%s/%s)", workload.Namespace, workload.Name), func() {
 		unstructuredObj, err := helper.ToUnstructured(workload)
@@ -31,7 +30,7 @@ func CreateWorkload(client dynamic.Interface, workload *workloadv1alpha1.Workloa
 	})
 }
 
-// UpdateWorkload update Workload with dynamic client
+// UpdateWorkload updates Workload with dynamic client
 func UpdateWorkload(client dynamic.Interface, workload *workloadv1alpha1.Workload, clusterName string, subresources ...string) {
 	ginkgo.By(fmt.Sprintf("Update workload(%s/%s) in cluster(%s)", workload.Namespace, workload.Name, clusterName), func() {
 		newUnstructuredObj, err := helper.ToUnstructured(workload)
@@ -44,30 +43,26 @@ func UpdateWorkload(client dynamic.Interface, workload *workloadv1alpha1.Workloa
 	})
 }
 
-// GetWorkload get Workload with dynamic client.
+// GetWorkload gets Workload with dynamic client.
 func GetWorkload(client dynamic.Interface, namespace, name string) *workloadv1alpha1.Workload {
-	workload := workloadv1alpha1.Workload{}
+	workload := &workloadv1alpha1.Workload{}
 
 	ginkgo.By(fmt.Sprintf("Get workload(%s/%s)", namespace, name), func() {
 		var err error
 		unstructuredObj := &unstructured.Unstructured{}
 
 		gomega.Eventually(func() error {
 			unstructuredObj, err = client.Resource(workloadGVR).Namespace(namespace).Get(context.TODO(), name, metav1.GetOptions{})
 			return err
 		}, pollTimeout, pollInterval).ShouldNot(gomega.HaveOccurred())
 
-		result, err := unstructuredObj.MarshalJSON()
-		gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-
-		err = json.Unmarshal(result, &workload)
+		err = runtime.DefaultUnstructuredConverter.FromUnstructured(unstructuredObj.UnstructuredContent(), workload)
 		gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
 	})
-
-	return &workload
+	return workload
 }
 
-// RemoveWorkload delete Workload with dynamic client.
+// RemoveWorkload deletes Workload with dynamic client.
 func RemoveWorkload(client dynamic.Interface, namespace, name string) {
 	ginkgo.By(fmt.Sprintf("Remove workload(%s/%s)", namespace, name), func() {
 		err := client.Resource(workloadGVR).Namespace(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
@@ -75,7 +70,7 @@ func RemoveWorkload(client dynamic.Interface, namespace, name string) {
 	})
 }
 
-// WaitWorkloadPresentOnClusterFitWith wait workload present on member clusters sync with fit func.
+// WaitWorkloadPresentOnClusterFitWith waits workload present on member cluster sync with fit func.
 func WaitWorkloadPresentOnClusterFitWith(cluster, namespace, name string, fit func(workload *workloadv1alpha1.Workload) bool) {
 	clusterClient := GetClusterDynamicClient(cluster)
 	gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())
@@ -93,7 +88,16 @@ func WaitWorkloadPresentOnClusterFitWith(cluster, namespace, name string, fit fu
 	}, pollTimeout, pollInterval).Should(gomega.Equal(true))
 }
 
-// WaitWorkloadDisappearOnCluster wait workload disappear on cluster until timeout.
+// WaitWorkloadPresentOnClustersFitWith waits workload present on member clusters sync with fit func.
+func WaitWorkloadPresentOnClustersFitWith(clusters []string, namespace, name string, fit func(workload *workloadv1alpha1.Workload) bool) {
+	ginkgo.By(fmt.Sprintf("Waiting for workload(%s/%s) synced on member clusters fit with func", namespace, name), func() {
+		for _, clusterName := range clusters {
+			WaitWorkloadPresentOnClusterFitWith(clusterName, namespace, name, fit)
+		}
+	})
+}
+
+// WaitWorkloadDisappearOnCluster waits workload disappear on cluster until timeout.
 func WaitWorkloadDisappearOnCluster(cluster, namespace, name string) {
 	clusterClient := GetClusterDynamicClient(cluster)
 	gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())
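Note on the GetWorkload change above: the old code serialized the unstructured object with MarshalJSON and then ran json.Unmarshal into the typed Workload, while the new code converts the field map directly with runtime.DefaultUnstructuredConverter.FromUnstructured, which is why the "encoding/json" import is dropped. The following is a minimal, illustrative sketch of that conversion outside the test framework; it is not code from this commit, and the Deployment object and its field values are placeholders, only the converter call mirrors the diff:

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// An unstructured object, shaped like what a dynamic client Get returns.
	u := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "apps/v1",
		"kind":       "Deployment",
		"metadata":   map[string]interface{}{"name": "demo", "namespace": "default"},
		"spec":       map[string]interface{}{"paused": true},
	}}

	// Convert the field map straight into the typed struct in one step,
	// instead of marshalling to JSON and unmarshalling it again.
	deploy := &appsv1.Deployment{}
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.UnstructuredContent(), deploy); err != nil {
		panic(err)
	}
	fmt.Println(deploy.Name, deploy.Spec.Paused) // demo true
}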
@@ -22,6 +22,7 @@ import (
 	configv1alpha1 "github.com/karmada-io/karmada/pkg/apis/config/v1alpha1"
 	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
 	workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
+	"github.com/karmada-io/karmada/pkg/util/helper"
 	"github.com/karmada-io/karmada/pkg/util/names"
 	"github.com/karmada-io/karmada/test/e2e/framework"
 	testhelper "github.com/karmada-io/karmada/test/helper"
@@ -81,41 +82,37 @@ var _ = ginkgo.Describe("Resource interpreter webhook testing", func() {
 	})
 
 	ginkgo.Context("InterpreterOperation Retain testing", func() {
-		var waitTime time.Duration
-		var updatedPaused bool
-
 		ginkgo.BeforeEach(func() {
-			waitTime = 5 * time.Second
-			updatedPaused = true
-
 			policy.Spec.Placement.ClusterAffinity.ClusterNames = framework.ClusterNames()
 		})
 
 		ginkgo.It("Retain testing", func() {
-			ginkgo.By("update workload's spec.paused to true", func() {
-				for _, cluster := range framework.ClusterNames() {
-					clusterDynamicClient := framework.GetClusterDynamicClient(cluster)
-					gomega.Expect(clusterDynamicClient).ShouldNot(gomega.BeNil())
-
-					memberWorkload := framework.GetWorkload(clusterDynamicClient, workloadNamespace, workloadName)
-					memberWorkload.Spec.Paused = updatedPaused
-					framework.UpdateWorkload(clusterDynamicClient, memberWorkload, cluster)
+			ginkgo.By("wait workload exist on the member clusters", func() {
+				for _, clusterName := range framework.ClusterNames() {
+					framework.WaitWorkloadPresentOnClusterFitWith(clusterName, workload.Namespace, workload.Name,
+						func(_ *workloadv1alpha1.Workload) bool {
+							return true
+						})
 				}
 			})
 
-			// Wait executeController to reconcile then check if it is retained
-			time.Sleep(waitTime)
+			ginkgo.By("update workload on the control plane", func() {
+				gomega.Eventually(func(g gomega.Gomega) error {
+					curWorkload := framework.GetWorkload(dynamicClient, workloadNamespace, workloadName)
+					// construct two values that need to be changed, and only one value is retained.
+					curWorkload.Spec.Replicas = pointer.Int32Ptr(2)
+					curWorkload.Spec.Paused = true
+
+					newUnstructuredObj, err := helper.ToUnstructured(curWorkload)
+					g.Expect(err).ShouldNot(gomega.HaveOccurred())
+
+					workloadGVR := workloadv1alpha1.SchemeGroupVersion.WithResource("workloads")
+					_, err = dynamicClient.Resource(workloadGVR).Namespace(curWorkload.Namespace).Update(context.TODO(), newUnstructuredObj, metav1.UpdateOptions{})
+					return err
+				}, pollTimeout, pollInterval).ShouldNot(gomega.HaveOccurred())
+			})
+
 			ginkgo.By("check if workload's spec.paused is retained", func() {
-				for _, cluster := range framework.ClusterNames() {
-					clusterDynamicClient := framework.GetClusterDynamicClient(cluster)
-					gomega.Expect(clusterDynamicClient).ShouldNot(gomega.BeNil())
-
-					gomega.Eventually(func(g gomega.Gomega) (bool, error) {
-						memberWorkload := framework.GetWorkload(clusterDynamicClient, workloadNamespace, workloadName)
-
-						return memberWorkload.Spec.Paused, nil
-					}, pollTimeout, pollInterval).Should(gomega.Equal(updatedPaused))
-				}
+				framework.WaitWorkloadPresentOnClustersFitWith(framework.ClusterNames(), workload.Namespace, workload.Name,
+					func(workload *workloadv1alpha1.Workload) bool {
+						return *workload.Spec.Replicas == 2 && !workload.Spec.Paused
+					})
 			})
 		})
 	})
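Note on the rewritten Retain test above: the control-plane update is wrapped in gomega.Eventually, so a transient failure (for example an update rejected because of a stale resourceVersion) just triggers another attempt instead of failing the spec outright. The following is a stripped-down sketch of that retry shape with a plain dynamic client; it is not code from this commit, the helper name, parameters, and timeouts are assumptions, only the Eventually-around-Get/Update structure mirrors the diff:

package e2etest // illustrative placement only, not a file from the repository

import (
	"context"
	"time"

	"github.com/onsi/gomega"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
)

// updateWithRetry is a hypothetical helper: each attempt re-reads the object and
// re-applies the mutation before updating, so a conflict on one attempt simply
// leads to another poll instead of a test failure.
func updateWithRetry(client dynamic.Interface, gvr schema.GroupVersionResource, namespace, name string, mutate func(*unstructured.Unstructured)) {
	gomega.Eventually(func() error {
		cur, err := client.Resource(gvr).Namespace(namespace).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		mutate(cur)
		_, err = client.Resource(gvr).Namespace(namespace).Update(context.TODO(), cur, metav1.UpdateOptions{})
		return err
	}, 30*time.Second, time.Second).ShouldNot(gomega.HaveOccurred())
}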
@@ -424,12 +421,7 @@ end`,
 	})
 
 	ginkgo.Context("InterpreterOperation Retain testing", func() {
-		var waitTime time.Duration
-		var updatedPaused bool
-
 		ginkgo.BeforeEach(func() {
-			waitTime = 5 * time.Second
-			updatedPaused = true
 			customization = testhelper.NewResourceInterpreterCustomization(
 				"interpreter-customization"+rand.String(RandomStrLength),
 				configv1alpha1.CustomizationTarget{
@@ -448,30 +440,24 @@ end`,
 		})
 
 		ginkgo.It("Retain testing", func() {
-			ginkgo.By("update deployment's spec.paused to true", func() {
-				clusterClient := framework.GetClusterClient(targetCluster)
-				gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())
-				var memberDeploy *appsv1.Deployment
+			ginkgo.By("wait deployment exist on the member clusters", func() {
 				framework.WaitDeploymentPresentOnClusterFitWith(targetCluster, deployment.Namespace, deployment.Name,
-					func(deployment *appsv1.Deployment) bool {
-						memberDeploy = deployment
+					func(_ *appsv1.Deployment) bool {
 						return true
 					})
-				framework.UpdateDeploymentPaused(clusterClient, memberDeploy, updatedPaused)
 			})
 
-			// Wait executeController to reconcile then check if it is retained
-			time.Sleep(waitTime)
+			ginkgo.By("update deployment on the control plane", func() {
+				// construct two values that need to be changed, and only one value is retained.
+				framework.UpdateDeploymentPaused(kubeClient, deployment, true)
+				framework.UpdateDeploymentReplicas(kubeClient, deployment, 2)
+			})
+
 			ginkgo.By("check if deployment's spec.paused is retained", func() {
-				gomega.Eventually(func(g gomega.Gomega) bool {
-					var memberDeployment *appsv1.Deployment
-					framework.WaitDeploymentPresentOnClusterFitWith(targetCluster, deployment.Namespace, deployment.Name,
-						func(deployment *appsv1.Deployment) bool {
-							memberDeployment = deployment
-							return true
-						})
-					return memberDeployment.Spec.Paused
-				}, pollTimeout, pollInterval).Should(gomega.Equal(updatedPaused))
+				framework.WaitDeploymentPresentOnClusterFitWith(targetCluster, deployment.Namespace, deployment.Name,
+					func(deployment *appsv1.Deployment) bool {
+						return *deployment.Spec.Replicas == 2 && !deployment.Spec.Paused
+					})
 			})
 		})
 	})
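Note on the deployment-based Retain test above: the fixed time.Sleep(waitTime) is replaced by predicate-based polling, where a "fit" function is evaluated against the object on the member cluster until it holds or the poll times out. The following is a minimal sketch of that pattern; it is not code from this commit, waitDeploymentFitWith and its parameters are hypothetical, only the Eventually-with-predicate shape mirrors the framework helpers used in the diff:

package e2etest // illustrative placement only, not a file from the repository

import (
	"time"

	"github.com/onsi/gomega"
	appsv1 "k8s.io/api/apps/v1"
)

// waitDeploymentFitWith is a hypothetical stand-in for a Wait*PresentOnClusterFitWith
// helper: it polls until the object can be fetched and the caller-supplied predicate
// returns true, instead of sleeping for a fixed duration and hoping reconciliation
// has finished.
func waitDeploymentFitWith(get func() (*appsv1.Deployment, error), fit func(*appsv1.Deployment) bool, timeout, interval time.Duration) {
	gomega.Eventually(func() (bool, error) {
		deploy, err := get()
		if err != nil {
			// A non-nil error fails only this attempt; Eventually keeps polling.
			return false, err
		}
		return fit(deploy), nil
	}, timeout, interval).Should(gomega.Equal(true))
}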