e2e: refactor retain test
Signed-off-by: changzhen <changzhen5@huawei.com>

parent 149e0fb867
commit f4353a1e65
@@ -2,7 +2,6 @@ package framework
 
 import (
 	"context"
-	"encoding/json"
 	"fmt"
 
 	"github.com/onsi/ginkgo/v2"
@@ -20,7 +19,7 @@ import (
 
 var workloadGVR = workloadv1alpha1.SchemeGroupVersion.WithResource("workloads")
 
-// CreateWorkload create Workload with dynamic client
+// CreateWorkload creates Workload with dynamic client
 func CreateWorkload(client dynamic.Interface, workload *workloadv1alpha1.Workload) {
 	ginkgo.By(fmt.Sprintf("Creating workload(%s/%s)", workload.Namespace, workload.Name), func() {
 		unstructuredObj, err := helper.ToUnstructured(workload)
@@ -31,7 +30,7 @@ func CreateWorkload(client dynamic.Interface, workload *workloadv1alpha1.Workloa
 	})
 }
 
-// UpdateWorkload update Workload with dynamic client
+// UpdateWorkload updates Workload with dynamic client
 func UpdateWorkload(client dynamic.Interface, workload *workloadv1alpha1.Workload, clusterName string, subresources ...string) {
 	ginkgo.By(fmt.Sprintf("Update workload(%s/%s) in cluster(%s)", workload.Namespace, workload.Name, clusterName), func() {
 		newUnstructuredObj, err := helper.ToUnstructured(workload)
@@ -44,30 +43,26 @@ func UpdateWorkload(client dynamic.Interface, workload *workloadv1alpha1.Workloa
 	})
 }
 
-// GetWorkload get Workload with dynamic client.
+// GetWorkload gets Workload with dynamic client.
 func GetWorkload(client dynamic.Interface, namespace, name string) *workloadv1alpha1.Workload {
-	workload := workloadv1alpha1.Workload{}
+	workload := &workloadv1alpha1.Workload{}
 
 	ginkgo.By(fmt.Sprintf("Get workload(%s/%s)", namespace, name), func() {
 		var err error
 		unstructuredObj := &unstructured.Unstructured{}
 
 		gomega.Eventually(func() error {
 			unstructuredObj, err = client.Resource(workloadGVR).Namespace(namespace).Get(context.TODO(), name, metav1.GetOptions{})
 			return err
 		}, pollTimeout, pollInterval).ShouldNot(gomega.HaveOccurred())
 
-		result, err := unstructuredObj.MarshalJSON()
-		gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-
-		err = json.Unmarshal(result, &workload)
+		err = runtime.DefaultUnstructuredConverter.FromUnstructured(unstructuredObj.UnstructuredContent(), workload)
 		gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
 	})
 
-	return &workload
+	return workload
 }
 
-// RemoveWorkload delete Workload with dynamic client.
+// RemoveWorkload deletes Workload with dynamic client.
 func RemoveWorkload(client dynamic.Interface, namespace, name string) {
 	ginkgo.By(fmt.Sprintf("Remove workload(%s/%s)", namespace, name), func() {
 		err := client.Resource(workloadGVR).Namespace(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
@@ -75,7 +70,7 @@ func RemoveWorkload(client dynamic.Interface, namespace, name string) {
 	})
 }
 
-// WaitWorkloadPresentOnClusterFitWith wait workload present on member clusters sync with fit func.
+// WaitWorkloadPresentOnClusterFitWith waits workload present on member cluster sync with fit func.
 func WaitWorkloadPresentOnClusterFitWith(cluster, namespace, name string, fit func(workload *workloadv1alpha1.Workload) bool) {
 	clusterClient := GetClusterDynamicClient(cluster)
 	gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())
@@ -93,7 +88,16 @@ func WaitWorkloadPresentOnClusterFitWith(cluster, namespace, name string, fit fu
 	}, pollTimeout, pollInterval).Should(gomega.Equal(true))
 }
 
-// WaitWorkloadDisappearOnCluster wait workload disappear on cluster until timeout.
+// WaitWorkloadPresentOnClustersFitWith waits workload present on member clusters sync with fit func.
+func WaitWorkloadPresentOnClustersFitWith(clusters []string, namespace, name string, fit func(workload *workloadv1alpha1.Workload) bool) {
+	ginkgo.By(fmt.Sprintf("Waiting for workload(%s/%s) synced on member clusters fit with func", namespace, name), func() {
+		for _, clusterName := range clusters {
+			WaitWorkloadPresentOnClusterFitWith(clusterName, namespace, name, fit)
+		}
+	})
+}
+
+// WaitWorkloadDisappearOnCluster waits workload disappear on cluster until timeout.
 func WaitWorkloadDisappearOnCluster(cluster, namespace, name string) {
 	clusterClient := GetClusterDynamicClient(cluster)
 	gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())
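
The new WaitWorkloadPresentOnClustersFitWith helper simply fans the existing per-cluster wait out over a list of clusters. A minimal usage sketch (the expected replicas/paused values are taken from the retain test in the next file and are illustrative, not implied by the helper itself):

	framework.WaitWorkloadPresentOnClustersFitWith(framework.ClusterNames(), workload.Namespace, workload.Name,
		func(w *workloadv1alpha1.Workload) bool {
			// The fit func is polled on every listed cluster until it returns true
			// or the poll times out.
			return *w.Spec.Replicas == 2 && !w.Spec.Paused
		})
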
@@ -22,6 +22,7 @@ import (
 	configv1alpha1 "github.com/karmada-io/karmada/pkg/apis/config/v1alpha1"
 	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
 	workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
+	"github.com/karmada-io/karmada/pkg/util/helper"
 	"github.com/karmada-io/karmada/pkg/util/names"
 	"github.com/karmada-io/karmada/test/e2e/framework"
 	testhelper "github.com/karmada-io/karmada/test/helper"
@@ -81,41 +82,37 @@ var _ = ginkgo.Describe("Resource interpreter webhook testing", func() {
 	})
 
 	ginkgo.Context("InterpreterOperation Retain testing", func() {
-		var waitTime time.Duration
-		var updatedPaused bool
-
-		ginkgo.BeforeEach(func() {
-			waitTime = 5 * time.Second
-			updatedPaused = true
-
-			policy.Spec.Placement.ClusterAffinity.ClusterNames = framework.ClusterNames()
-		})
-
 		ginkgo.It("Retain testing", func() {
-			ginkgo.By("update workload's spec.paused to true", func() {
-				for _, cluster := range framework.ClusterNames() {
-					clusterDynamicClient := framework.GetClusterDynamicClient(cluster)
-					gomega.Expect(clusterDynamicClient).ShouldNot(gomega.BeNil())
-
-					memberWorkload := framework.GetWorkload(clusterDynamicClient, workloadNamespace, workloadName)
-					memberWorkload.Spec.Paused = updatedPaused
-					framework.UpdateWorkload(clusterDynamicClient, memberWorkload, cluster)
+			ginkgo.By("wait workload exist on the member clusters", func() {
+				for _, clusterName := range framework.ClusterNames() {
+					framework.WaitWorkloadPresentOnClusterFitWith(clusterName, workload.Namespace, workload.Name,
+						func(_ *workloadv1alpha1.Workload) bool {
+							return true
+						})
 				}
 			})
 
-			// Wait executeController to reconcile then check if it is retained
-			time.Sleep(waitTime)
+			ginkgo.By("update workload on the control plane", func() {
+				gomega.Eventually(func(g gomega.Gomega) error {
+					curWorkload := framework.GetWorkload(dynamicClient, workloadNamespace, workloadName)
+					// construct two values that need to be changed, and only one value is retained.
+					curWorkload.Spec.Replicas = pointer.Int32Ptr(2)
+					curWorkload.Spec.Paused = true
+
+					newUnstructuredObj, err := helper.ToUnstructured(curWorkload)
+					g.Expect(err).ShouldNot(gomega.HaveOccurred())
+
+					workloadGVR := workloadv1alpha1.SchemeGroupVersion.WithResource("workloads")
+					_, err = dynamicClient.Resource(workloadGVR).Namespace(curWorkload.Namespace).Update(context.TODO(), newUnstructuredObj, metav1.UpdateOptions{})
+					return err
+				}, pollTimeout, pollInterval).ShouldNot(gomega.HaveOccurred())
+			})
+
 			ginkgo.By("check if workload's spec.paused is retained", func() {
-				for _, cluster := range framework.ClusterNames() {
-					clusterDynamicClient := framework.GetClusterDynamicClient(cluster)
-					gomega.Expect(clusterDynamicClient).ShouldNot(gomega.BeNil())
-
-					gomega.Eventually(func(g gomega.Gomega) (bool, error) {
-						memberWorkload := framework.GetWorkload(clusterDynamicClient, workloadNamespace, workloadName)
-
-						return memberWorkload.Spec.Paused, nil
-					}, pollTimeout, pollInterval).Should(gomega.Equal(updatedPaused))
-				}
+				framework.WaitWorkloadPresentOnClustersFitWith(framework.ClusterNames(), workload.Namespace, workload.Name,
+					func(workload *workloadv1alpha1.Workload) bool {
+						return *workload.Spec.Replicas == 2 && !workload.Spec.Paused
+					})
 			})
 		})
 	})
@@ -424,12 +421,7 @@ end`,
 	})
 
 	ginkgo.Context("InterpreterOperation Retain testing", func() {
-		var waitTime time.Duration
-		var updatedPaused bool
-
 		ginkgo.BeforeEach(func() {
-			waitTime = 5 * time.Second
-			updatedPaused = true
 			customization = testhelper.NewResourceInterpreterCustomization(
 				"interpreter-customization"+rand.String(RandomStrLength),
 				configv1alpha1.CustomizationTarget{
@@ -448,30 +440,24 @@ end`,
 		})
 
 		ginkgo.It("Retain testing", func() {
-			ginkgo.By("update deployment's spec.paused to true", func() {
-				clusterClient := framework.GetClusterClient(targetCluster)
-				gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())
-				var memberDeploy *appsv1.Deployment
+			ginkgo.By("wait deployment exist on the member clusters", func() {
 				framework.WaitDeploymentPresentOnClusterFitWith(targetCluster, deployment.Namespace, deployment.Name,
-					func(deployment *appsv1.Deployment) bool {
-						memberDeploy = deployment
+					func(_ *appsv1.Deployment) bool {
 						return true
 					})
-				framework.UpdateDeploymentPaused(clusterClient, memberDeploy, updatedPaused)
 			})
 
-			// Wait executeController to reconcile then check if it is retained
-			time.Sleep(waitTime)
+			ginkgo.By("update deployment on the control plane", func() {
+				// construct two values that need to be changed, and only one value is retained.
+				framework.UpdateDeploymentPaused(kubeClient, deployment, true)
+				framework.UpdateDeploymentReplicas(kubeClient, deployment, 2)
+			})
+
 			ginkgo.By("check if deployment's spec.paused is retained", func() {
-				gomega.Eventually(func(g gomega.Gomega) bool {
-					var memberDeployment *appsv1.Deployment
-					framework.WaitDeploymentPresentOnClusterFitWith(targetCluster, deployment.Namespace, deployment.Name,
-						func(deployment *appsv1.Deployment) bool {
-							memberDeployment = deployment
-							return true
-						})
-					return memberDeployment.Spec.Paused
-				}, pollTimeout, pollInterval).Should(gomega.Equal(updatedPaused))
+				framework.WaitDeploymentPresentOnClusterFitWith(targetCluster, deployment.Namespace, deployment.Name,
+					func(deployment *appsv1.Deployment) bool {
+						return *deployment.Spec.Replicas == 2 && !deployment.Spec.Paused
+					})
 			})
 		})
 	})
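
The assertion *deployment.Spec.Replicas == 2 && !deployment.Spec.Paused (and its Workload counterpart above) captures what the Retain operation is expected to do here: of the two fields changed on the control plane, replicas should propagate while paused should keep the value already observed on the member cluster. A minimal sketch of that retention logic, using a hypothetical helper name; the real behaviour is implemented by the resource interpreter webhook and Lua customization under test, not by this diff:

	// retainPaused keeps the member cluster's spec.paused and lets the rest of the
	// desired spec (e.g. spec.replicas) be applied as-is.
	func retainPaused(desired, observed *workloadv1alpha1.Workload) *workloadv1alpha1.Workload {
		retained := desired.DeepCopy()
		retained.Spec.Paused = observed.Spec.Paused // the value on the member cluster wins
		return retained
	}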