Merge pull request #835 from XiShanYongYe-Chang/e2e-framework
refactor: add framework for e2e
commit 1954628ca7
@@ -1,23 +1,14 @@
 package e2e

 import (
-    "context"
     "fmt"

     "github.com/onsi/ginkgo"
-    "github.com/onsi/gomega"
     apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
-    apierrors "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
-    "k8s.io/apimachinery/pkg/runtime"
-    "k8s.io/apimachinery/pkg/runtime/schema"
     "k8s.io/apimachinery/pkg/util/rand"
-    "k8s.io/apimachinery/pkg/util/wait"
-    "k8s.io/klog/v2"

     policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
-    "github.com/karmada-io/karmada/pkg/util/helper"
+    "github.com/karmada-io/karmada/test/e2e/framework"
     testhelper "github.com/karmada-io/karmada/test/helper"
 )
@@ -40,83 +31,20 @@ var _ = ginkgo.Describe("[BasicClusterPropagation] basic cluster propagation tes
         },
     }, policyv1alpha1.Placement{
         ClusterAffinity: &policyv1alpha1.ClusterAffinity{
-            ClusterNames: clusterNames,
+            ClusterNames: framework.ClusterNames(),
         },
     })
-    crdGVR := schema.GroupVersionResource{Group: "apiextensions.k8s.io", Version: "v1", Resource: "customresourcedefinitions"}
-
-    ginkgo.BeforeEach(func() {
-        ginkgo.By(fmt.Sprintf("creating crdPolicy(%s)", crdPolicy.Name), func() {
-            _, err := karmadaClient.PolicyV1alpha1().ClusterPropagationPolicies().Create(context.TODO(), crdPolicy, metav1.CreateOptions{})
-            gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-        })
-    })
-
-    ginkgo.AfterEach(func() {
-        ginkgo.By(fmt.Sprintf("removing crdPolicy(%s)", crdPolicy.Name), func() {
-            err := karmadaClient.PolicyV1alpha1().ClusterPropagationPolicies().Delete(context.TODO(), crdPolicy.Name, metav1.DeleteOptions{})
-            gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-        })
-    })
-
     ginkgo.It("crd propagation testing", func() {
-        ginkgo.By(fmt.Sprintf("creating crd(%s)", crd.Name), func() {
-            unstructObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(crd)
-            gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+        framework.CreateClusterPropagationPolicy(karmadaClient, crdPolicy)
+        framework.CreateCRD(dynamicClient, crd)
+        framework.GetCRD(dynamicClient, crd.Name)
+        framework.WaitCRDPresentOnClusters(karmadaClient, framework.ClusterNames(),
+            fmt.Sprintf("%s/%s", crd.Spec.Group, "v1alpha1"), crd.Spec.Names.Kind)

-            _, err = dynamicClient.Resource(crdGVR).Namespace(crd.Namespace).Create(context.TODO(), &unstructured.Unstructured{Object: unstructObj}, metav1.CreateOptions{})
-            gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-        })
-
-        ginkgo.By(fmt.Sprintf("get crd(%s)", crd.Name), func() {
-            _, err := dynamicClient.Resource(crdGVR).Namespace(crd.Namespace).Get(context.TODO(), crd.Name, metav1.GetOptions{})
-            gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-        })
-
-        // Check CRD enablement from cluster objects instead of member clusters.
-        // After CRD installed on member cluster, the cluster status controller takes at most cluster-status-update-frequency
-        // time to collect the API list, before that the scheduler will filter out the cluster from scheduling.
-        ginkgo.By("check if crd present on member clusters", func() {
-            crAPIVersion := fmt.Sprintf("%s/%s", crd.Spec.Group, "v1alpha1")
-            err := wait.PollImmediate(pollInterval, pollTimeout, func() (done bool, err error) {
-                clusters, err := fetchClusters(karmadaClient)
-                if err != nil {
-                    return false, err
-                }
-                for _, cluster := range clusters {
-                    if !helper.IsAPIEnabled(cluster.Status.APIEnablements, crAPIVersion, crdSpecNames.Kind) {
-                        return false, nil
-                    }
-                }
-                return true, nil
-            })
-            gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-        })
-
-        ginkgo.By(fmt.Sprintf("removing crd(%s)", crd.Name), func() {
-            err := dynamicClient.Resource(crdGVR).Namespace(crd.Namespace).Delete(context.TODO(), crd.Name, metav1.DeleteOptions{})
-            gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-        })
-
-        ginkgo.By("check if crd disappeared from member clusters", func() {
-            for _, cluster := range clusters {
-                clusterDynamicClient := getClusterDynamicClient(cluster.Name)
-                gomega.Expect(clusterDynamicClient).ShouldNot(gomega.BeNil())
-
-                klog.Infof("Waiting for crd(%s) disappeared on cluster(%s)", crd.Name, cluster.Name)
-                err := wait.PollImmediate(pollInterval, pollTimeout, func() (done bool, err error) {
-                    _, err = clusterDynamicClient.Resource(crdGVR).Namespace(crd.Namespace).Get(context.TODO(), crd.Name, metav1.GetOptions{})
-                    if err != nil {
-                        if apierrors.IsNotFound(err) {
-                            return true, nil
-                        }
-                        return false, err
-                    }
-                    return false, nil
-                })
-                gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-            }
-        })
+        framework.RemoveCRD(dynamicClient, crd.Name)
+        framework.WaitCRDDisappearedOnClusters(framework.ClusterNames(), crd.Name)
+        framework.RemoveClusterPropagationPolicy(karmadaClient, crdPolicy.Name)
     })
  })
})
@@ -22,6 +22,7 @@ import (
     "github.com/karmada-io/karmada/pkg/util"
     "github.com/karmada-io/karmada/pkg/util/helper"
     "github.com/karmada-io/karmada/pkg/util/names"
+    "github.com/karmada-io/karmada/test/e2e/framework"
     testhelper "github.com/karmada-io/karmada/test/helper"
 )
@@ -65,33 +66,15 @@ var _ = ginkgo.Describe("failover testing", func() {
         },
     })

-    ginkgo.BeforeEach(func() {
-        ginkgo.By(fmt.Sprintf("creating policy(%s/%s)", policyNamespace, policyName), func() {
-            _, err := karmadaClient.PolicyV1alpha1().PropagationPolicies(policyNamespace).Create(context.TODO(), policy, metav1.CreateOptions{})
-            gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-        })
-    })
-
-    ginkgo.AfterEach(func() {
-        ginkgo.By(fmt.Sprintf("removing policy(%s/%s)", policyNamespace, policyName), func() {
-            err := karmadaClient.PolicyV1alpha1().PropagationPolicies(policyNamespace).Delete(context.TODO(), policyName, metav1.DeleteOptions{})
-            gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-        })
-    })
-
     ginkgo.It("deployment failover testing", func() {
-        ginkgo.By(fmt.Sprintf("creating deployment(%s/%s)", deploymentNamespace, deploymentName), func() {
-            fmt.Printf("MaxGroups= %v, MinGroups= %v\n", maxGroups, minGroups)
-            _, err := kubeClient.AppsV1().Deployments(testNamespace).Create(context.TODO(), deployment, metav1.CreateOptions{})
-            gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+        framework.CreatePropagationPolicy(karmadaClient, policy)
+        framework.CreateDeployment(kubeClient, deployment)

-            fmt.Printf("View the results of the initial scheduling")
-            targetClusterNames, _ = getTargetClusterNames(deployment)
-            for _, clusterName := range targetClusterNames {
-                fmt.Printf("%s is the target cluster\n", clusterName)
-            }
-        })
+        fmt.Printf("View the results of the initial scheduling")
+        targetClusterNames, _ = getTargetClusterNames(deployment)
+        for _, clusterName := range targetClusterNames {
+            fmt.Printf("%s is the target cluster\n", clusterName)
+        }

        ginkgo.By("set one cluster condition status to false", func() {
            temp := numOfFailedClusters
@@ -131,7 +114,7 @@ var _ = ginkgo.Describe("failover testing", func() {
            // the target cluster should be overwritten to another available cluster
            gomega.Expect(isDisabled(targetClusterName, disabledClusters)).Should(gomega.BeFalse())

-            clusterClient := getClusterClient(targetClusterName)
+            clusterClient := framework.GetClusterClient(targetClusterName)
            gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())

            klog.Infof("Check whether deployment(%s/%s) is present on cluster(%s)", deploymentNamespace, deploymentName, targetClusterName)
@@ -185,10 +168,8 @@ var _ = ginkgo.Describe("failover testing", func() {
            }
        })

-        ginkgo.By(fmt.Sprintf("removing deployment(%s/%s)", deploymentNamespace, deploymentName), func() {
-            err := kubeClient.AppsV1().Deployments(testNamespace).Delete(context.TODO(), deploymentName, metav1.DeleteOptions{})
-            gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-        })
+        framework.RemoveDeployment(kubeClient, deployment.Namespace, deployment.Name)
+        framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name)
    })
  })
})
@@ -280,7 +261,7 @@ func isDisabled(clusterName string, disabledClusters []*clusterv1alpha1.Cluster)

// get the API endpoint of a specific cluster
func getClusterAPIEndpoint(clusterName string) (apiEndpoint string) {
-    for _, cluster := range clusters {
+    for _, cluster := range framework.Clusters() {
        if cluster.Name == clusterName {
            apiEndpoint = cluster.Spec.APIEndpoint
            fmt.Printf("original API endpoint of the cluster %s is %s\n", clusterName, apiEndpoint)
@@ -7,13 +7,13 @@ import (
    "github.com/onsi/ginkgo"
    "github.com/onsi/gomega"
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/rand"
    "sigs.k8s.io/controller-runtime/pkg/client"

    clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
    policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
    "github.com/karmada-io/karmada/pkg/util"
+   "github.com/karmada-io/karmada/test/e2e/framework"
    "github.com/karmada-io/karmada/test/helper"
)
@@ -55,23 +55,16 @@ var _ = ginkgo.Describe("propagation with fieldSelector testing", func() {
        },
    }, policyv1alpha1.Placement{
        ClusterAffinity: &policyv1alpha1.ClusterAffinity{
-            ClusterNames: clusterNames,
+            ClusterNames: framework.ClusterNames(),
            FieldSelector: filedSelector,
        },
    })

-    ginkgo.BeforeEach(func() {
-        ginkgo.By(fmt.Sprintf("creating policy(%s/%s)", policyNamespace, policyName), func() {
-            _, err := karmadaClient.PolicyV1alpha1().PropagationPolicies(policyNamespace).Create(context.TODO(), policy, metav1.CreateOptions{})
-            gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-        })
-    })
-
    ginkgo.BeforeEach(func() {
        ginkgo.By("setting provider and region for clusters", func() {
            providerMap := []string{"huaweicloud", "huaweicloud", "kind"}
            regionMap := []string{"cn-south-1", "cn-north-1", "cn-east-1"}
-            for index, cluster := range clusterNames {
+            for index, cluster := range framework.ClusterNames() {
                if index > 2 {
                    break
                }
@@ -91,16 +84,9 @@ var _ = ginkgo.Describe("propagation with fieldSelector testing", func() {
        })
    })

-    ginkgo.AfterEach(func() {
-        ginkgo.By(fmt.Sprintf("removing policy(%s/%s)", policyNamespace, policyName), func() {
-            err := karmadaClient.PolicyV1alpha1().PropagationPolicies(policyNamespace).Delete(context.TODO(), policyName, metav1.DeleteOptions{})
-            gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-        })
-    })
-
    ginkgo.AfterEach(func() {
        ginkgo.By("recovering provider and region for clusters", func() {
-            for index, cluster := range clusterNames {
+            for index, cluster := range framework.ClusterNames() {
                if index > 2 {
                    break
                }
@@ -119,10 +105,8 @@ var _ = ginkgo.Describe("propagation with fieldSelector testing", func() {
    })

    ginkgo.It("propagation with fieldSelector testing", func() {
-        ginkgo.By(fmt.Sprintf("creating deployment(%s/%s)", deploymentNamespace, deploymentName), func() {
-            _, err := kubeClient.AppsV1().Deployments(testNamespace).Create(context.TODO(), deployment, metav1.CreateOptions{})
-            gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-        })
+        framework.CreatePropagationPolicy(karmadaClient, policy)
+        framework.CreateDeployment(kubeClient, deployment)

        ginkgo.By("check whether deployment is scheduled to clusters which meeting the fieldSelector requirements", func() {
            targetClusterNames, err := getTargetClusterNames(deployment)
@@ -131,10 +115,8 @@ var _ = ginkgo.Describe("propagation with fieldSelector testing", func() {
            gomega.Expect(targetClusterNames[0] == desiredScheduleResult).Should(gomega.BeTrue())
        })

-        ginkgo.By(fmt.Sprintf("removing deployment(%s/%s)", deploymentNamespace, deploymentName), func() {
-            err := kubeClient.AppsV1().Deployments(testNamespace).Delete(context.TODO(), deploymentName, metav1.DeleteOptions{})
-            gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-        })
+        framework.RemoveDeployment(kubeClient, deployment.Namespace, deployment.Name)
+        framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name)
    })
  })
})
@@ -0,0 +1,228 @@
package framework

import (
    "context"
    "fmt"
    "os"
    "strings"
    "time"

    "github.com/onsi/gomega"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/client-go/dynamic"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/clientcmd"
    "k8s.io/klog/v2"
    "sigs.k8s.io/controller-runtime/pkg/client"

    clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
    karmada "github.com/karmada-io/karmada/pkg/generated/clientset/versioned"
    "github.com/karmada-io/karmada/pkg/util"
)

const (
    // MinimumCluster represents the minimum number of member clusters to run E2E test.
    MinimumCluster = 2
)

var (
    clusters              []*clusterv1alpha1.Cluster
    clusterNames          []string
    clusterClients        []*util.ClusterClient
    clusterDynamicClients []*util.DynamicClusterClient
    pullModeClusters      map[string]string
)

// Clusters will return all member clusters we have.
func Clusters() []*clusterv1alpha1.Cluster {
    return clusters
}

// ClusterNames will return all member clusters' names we have.
func ClusterNames() []string {
    return clusterNames
}

// InitClusterInformation initializes the E2E test's cluster information.
func InitClusterInformation(karmadaClient karmada.Interface, controlPlaneClient client.Client) {
    var err error

    pullModeClusters, err = fetchPullBasedClusters()
    gomega.Expect(err).ShouldNot(gomega.HaveOccurred())

    clusters, err = fetchClusters(karmadaClient)
    gomega.Expect(err).ShouldNot(gomega.HaveOccurred())

    var meetRequirement bool
    meetRequirement, err = isClusterMeetRequirements(clusters)
    gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
    gomega.Expect(meetRequirement).Should(gomega.BeTrue())

    for _, cluster := range clusters {
        clusterClient, clusterDynamicClient, err := newClusterClientSet(controlPlaneClient, cluster)
        gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
        clusterNames = append(clusterNames, cluster.Name)
        clusterClients = append(clusterClients, clusterClient)
        clusterDynamicClients = append(clusterDynamicClients, clusterDynamicClient)

        err = setClusterLabel(controlPlaneClient, cluster.Name)
        gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
    }
    gomega.Expect(clusterNames).Should(gomega.HaveLen(len(clusters)))
}

// GetClusterClient gets the cluster client.
func GetClusterClient(clusterName string) kubernetes.Interface {
    for _, clusterClient := range clusterClients {
        if clusterClient.ClusterName == clusterName {
            return clusterClient.KubeClient
        }
    }

    return nil
}

// GetClusterDynamicClient gets the cluster dynamicClient.
func GetClusterDynamicClient(clusterName string) dynamic.Interface {
    for _, clusterClient := range clusterDynamicClients {
        if clusterClient.ClusterName == clusterName {
            return clusterClient.DynamicClientSet
        }
    }

    return nil
}

func fetchPullBasedClusters() (map[string]string, error) {
    pullBasedClusters := os.Getenv("PULL_BASED_CLUSTERS")
    if pullBasedClusters == "" {
        return nil, nil
    }

    pullBasedClustersMap := make(map[string]string)
    pullBasedClusters = strings.TrimSuffix(pullBasedClusters, ";")
    clusterInfo := strings.Split(pullBasedClusters, ";")
    for _, cluster := range clusterInfo {
        clusterNameAndConfigPath := strings.Split(cluster, ":")
        if len(clusterNameAndConfigPath) != 2 {
            return nil, fmt.Errorf("failed to parse config path for cluster: %s", cluster)
        }
        pullBasedClustersMap[clusterNameAndConfigPath[0]] = clusterNameAndConfigPath[1]
    }
    return pullBasedClustersMap, nil
}

// fetchClusters will fetch all member clusters we have.
func fetchClusters(client karmada.Interface) ([]*clusterv1alpha1.Cluster, error) {
    clusterList, err := client.ClusterV1alpha1().Clusters().List(context.TODO(), metav1.ListOptions{})
    if err != nil {
        return nil, err
    }

    clusters := make([]*clusterv1alpha1.Cluster, 0, len(clusterList.Items))
    for _, cluster := range clusterList.Items {
        pinedCluster := cluster
        if pinedCluster.Spec.SyncMode == clusterv1alpha1.Pull {
            if _, exist := pullModeClusters[cluster.Name]; !exist {
                continue
            }
        }
        clusters = append(clusters, &pinedCluster)
    }

    return clusters, nil
}

// fetchCluster will fetch member cluster by name.
func fetchCluster(client karmada.Interface, clusterName string) (*clusterv1alpha1.Cluster, error) {
    cluster, err := client.ClusterV1alpha1().Clusters().Get(context.TODO(), clusterName, metav1.GetOptions{})
    if err != nil {
        return nil, err
    }

    return cluster, nil
}

// isClusterMeetRequirements checks if the current environment meets the requirements of E2E.
func isClusterMeetRequirements(clusters []*clusterv1alpha1.Cluster) (bool, error) {
    // check if member cluster number meets requirements
    if len(clusters) < MinimumCluster {
        return false, fmt.Errorf("needs at least %d member clusters to run, but got: %d", MinimumCluster, len(clusters))
    }

    // check if all member cluster status is ready
    for _, cluster := range clusters {
        if !util.IsClusterReady(&cluster.Status) {
            return false, fmt.Errorf("cluster %s not ready", cluster.GetName())
        }
    }

    klog.Infof("Got %d member clusters and all in ready state.", len(clusters))
    return true, nil
}

func newClusterClientSet(controlPlaneClient client.Client, c *clusterv1alpha1.Cluster) (*util.ClusterClient, *util.DynamicClusterClient, error) {
    if c.Spec.SyncMode == clusterv1alpha1.Push {
        clusterClient, err := util.NewClusterClientSet(c, controlPlaneClient, nil)
        if err != nil {
            return nil, nil, err
        }
        clusterDynamicClient, err := util.NewClusterDynamicClientSet(c, controlPlaneClient)
        if err != nil {
            return nil, nil, err
        }
        return clusterClient, clusterDynamicClient, nil
    }

    clusterConfigPath := pullModeClusters[c.Name]
    clusterConfig, err := clientcmd.BuildConfigFromFlags("", clusterConfigPath)
    if err != nil {
        return nil, nil, err
    }

    clusterClientSet := util.ClusterClient{ClusterName: c.Name}
    clusterDynamicClientSet := util.DynamicClusterClient{ClusterName: c.Name}
    clusterClientSet.KubeClient = kubernetes.NewForConfigOrDie(clusterConfig)
    clusterDynamicClientSet.DynamicClientSet = dynamic.NewForConfigOrDie(clusterConfig)

    return &clusterClientSet, &clusterDynamicClientSet, nil
}

// setClusterLabel sets the cluster labels for E2E.
func setClusterLabel(c client.Client, clusterName string) error {
    err := wait.PollImmediate(2*time.Second, 10*time.Second, func() (done bool, err error) {
        clusterObj := &clusterv1alpha1.Cluster{}
        if err := c.Get(context.TODO(), client.ObjectKey{Name: clusterName}, clusterObj); err != nil {
            if apierrors.IsConflict(err) {
                return false, nil
            }
            return false, err
        }
        if clusterObj.Labels == nil {
            clusterObj.Labels = make(map[string]string)
        }
        clusterObj.Labels["location"] = "CHN"
        if clusterObj.Spec.SyncMode == clusterv1alpha1.Push {
            clusterObj.Labels["sync-mode"] = "Push"
        }
        if err := c.Update(context.TODO(), clusterObj); err != nil {
            if apierrors.IsConflict(err) {
                return false, nil
            }
            return false, err
        }
        return true, nil
    })
    return err
}

// GetClusterNamesFromClusters will get clusters' names from the cluster objects.
func GetClusterNamesFromClusters(clusters []*clusterv1alpha1.Cluster) []string {
    clusterNames := make([]string, 0, len(clusters))
    for _, cluster := range clusters {
        clusterNames = append(clusterNames, cluster.Name)
    }
    return clusterNames
}
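
These cluster helpers are the backbone of the new framework: InitClusterInformation discovers member clusters once, presumably from the suite's setup hook, and the accessor functions replace the package-level globals the tests used before. For pull-mode members the kubeconfigs come from the PULL_BASED_CLUSTERS environment variable. A hedged sketch of the format fetchPullBasedClusters expects, written as a hypothetical in-package test (cluster names and paths are illustrative):

package framework

import (
    "os"
    "testing"
)

// The variable holds "<cluster-name>:<kubeconfig-path>" pairs separated by ";";
// a trailing ";" is trimmed before parsing.
func TestFetchPullBasedClusters(t *testing.T) {
    os.Setenv("PULL_BASED_CLUSTERS", "member3:/home/ci/member3.kubeconfig;member4:/home/ci/member4.kubeconfig;")
    got, err := fetchPullBasedClusters()
    if err != nil {
        t.Fatal(err)
    }
    if got["member3"] != "/home/ci/member3.kubeconfig" || got["member4"] != "/home/ci/member4.kubeconfig" {
        t.Fatalf("unexpected parse result: %v", got)
    }
}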

@@ -0,0 +1,29 @@
package framework

import (
    "context"
    "fmt"

    "github.com/onsi/ginkgo"
    "github.com/onsi/gomega"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
    karmada "github.com/karmada-io/karmada/pkg/generated/clientset/versioned"
)

// CreateClusterPropagationPolicy creates a ClusterPropagationPolicy with the karmada client.
func CreateClusterPropagationPolicy(client karmada.Interface, policy *policyv1alpha1.ClusterPropagationPolicy) {
    ginkgo.By(fmt.Sprintf("Creating ClusterPropagationPolicy(%s)", policy.Name), func() {
        _, err := client.PolicyV1alpha1().ClusterPropagationPolicies().Create(context.TODO(), policy, metav1.CreateOptions{})
        gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
    })
}

// RemoveClusterPropagationPolicy deletes a ClusterPropagationPolicy with the karmada client.
func RemoveClusterPropagationPolicy(client karmada.Interface, name string) {
    ginkgo.By(fmt.Sprintf("Removing ClusterPropagationPolicy(%s)", name), func() {
        err := client.PolicyV1alpha1().ClusterPropagationPolicies().Delete(context.TODO(), name, metav1.DeleteOptions{})
        gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
    })
}
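
A sketch of how a spec can pair these helpers so that setup and cleanup stay symmetric, assuming a suite-level karmadaClient and a policy built with the test helpers (names illustrative):

ginkgo.BeforeEach(func() {
    framework.CreateClusterPropagationPolicy(karmadaClient, crdPolicy)
})

ginkgo.AfterEach(func() {
    framework.RemoveClusterPropagationPolicy(karmadaClient, crdPolicy.Name)
})

Because each helper wraps its work in ginkgo.By and asserts with gomega internally, call sites no longer carry their own error plumbing around policy CRUD.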

@@ -0,0 +1,10 @@
package framework

import "time"

const (
    // pollInterval defines the interval time for a poll operation.
    pollInterval = 5 * time.Second
    // pollTimeout defines the time after which the poll operation times out.
    pollTimeout = 300 * time.Second
)
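
These two constants drive the asynchronous assertions in the package, feeding gomega.Eventually as the timeout and polling interval respectively. A minimal sketch of the pattern (the condition function is illustrative):

gomega.Eventually(func() bool {
    return conditionHolds() // poll some cross-cluster condition
}, pollTimeout, pollInterval).Should(gomega.Equal(true))

Centralizing the cadence here means a single edit adjusts how patiently the whole E2E suite waits.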

@@ -0,0 +1,83 @@
package framework

import (
    "context"
    "fmt"

    "github.com/onsi/ginkgo"
    "github.com/onsi/gomega"
    apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/client-go/dynamic"
    "k8s.io/klog/v2"

    karmada "github.com/karmada-io/karmada/pkg/generated/clientset/versioned"
    "github.com/karmada-io/karmada/pkg/util/helper"
)

var crdGVR = schema.GroupVersionResource{Group: "apiextensions.k8s.io", Version: "v1", Resource: "customresourcedefinitions"}

// CreateCRD creates a CustomResourceDefinition with the dynamic client.
func CreateCRD(client dynamic.Interface, crd *apiextensionsv1.CustomResourceDefinition) {
    ginkgo.By(fmt.Sprintf("Creating crd(%s)", crd.Name), func() {
        unstructObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(crd)
        gomega.Expect(err).ShouldNot(gomega.HaveOccurred())

        _, err = client.Resource(crdGVR).Create(context.TODO(), &unstructured.Unstructured{Object: unstructObj}, metav1.CreateOptions{})
        gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
    })
}

// GetCRD gets a CustomResourceDefinition with the dynamic client.
func GetCRD(client dynamic.Interface, name string) {
    ginkgo.By(fmt.Sprintf("Get crd(%s)", name), func() {
        _, err := client.Resource(crdGVR).Get(context.TODO(), name, metav1.GetOptions{})
        gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
    })
}

// RemoveCRD deletes a CustomResourceDefinition with the dynamic client.
func RemoveCRD(client dynamic.Interface, name string) {
    ginkgo.By(fmt.Sprintf("Removing crd(%s)", name), func() {
        err := client.Resource(crdGVR).Delete(context.TODO(), name, metav1.DeleteOptions{})
        gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
    })
}

// WaitCRDPresentOnClusters waits for the CustomResourceDefinition to be present on clusters until timeout.
func WaitCRDPresentOnClusters(client karmada.Interface, clusters []string, crdAPIVersion, crdKind string) {
    // Check CRD enablement from cluster objects instead of member clusters.
    // After the CRD is installed on a member cluster, the cluster status controller takes at most cluster-status-update-frequency
    // time to collect the API list; before that the scheduler will filter out the cluster from scheduling.
    ginkgo.By(fmt.Sprintf("Check if crd(%s/%s) present on member clusters", crdAPIVersion, crdKind), func() {
        for _, clusterName := range clusters {
            klog.Infof("Waiting for crd present on cluster(%s)", clusterName)
            gomega.Eventually(func() bool {
                cluster, err := fetchCluster(client, clusterName)
                gomega.Expect(err).ShouldNot(gomega.HaveOccurred())

                return helper.IsAPIEnabled(cluster.Status.APIEnablements, crdAPIVersion, crdKind)
            }, pollTimeout, pollInterval).Should(gomega.Equal(true))
        }
    })
}

// WaitCRDDisappearedOnClusters waits for the CustomResourceDefinition to disappear on clusters until timeout.
func WaitCRDDisappearedOnClusters(clusters []string, crdName string) {
    ginkgo.By("Check if crd disappeared on member clusters", func() {
        for _, clusterName := range clusters {
            clusterDynamicClient := GetClusterDynamicClient(clusterName)
            gomega.Expect(clusterDynamicClient).ShouldNot(gomega.BeNil())

            klog.Infof("Waiting for crd(%s) disappeared on cluster(%s)", crdName, clusterName)
            gomega.Eventually(func() bool {
                _, err := clusterDynamicClient.Resource(crdGVR).Get(context.TODO(), crdName, metav1.GetOptions{})
                return apierrors.IsNotFound(err)
            }, pollTimeout, pollInterval).Should(gomega.Equal(true))
        }
    })
}
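
Taken together, the CRD helpers reduce the propagation spec shown at the top of this diff to a linear script. A minimal sketch of such a spec, assuming suite-level karmadaClient and dynamicClient plus crd and crdPolicy objects built with the test helpers (all names illustrative):

ginkgo.It("crd propagation testing", func() {
    framework.CreateClusterPropagationPolicy(karmadaClient, crdPolicy)
    framework.CreateCRD(dynamicClient, crd)
    framework.WaitCRDPresentOnClusters(karmadaClient, framework.ClusterNames(),
        fmt.Sprintf("%s/%s", crd.Spec.Group, "v1alpha1"), crd.Spec.Names.Kind)

    framework.RemoveCRD(dynamicClient, crd.Name)
    framework.WaitCRDDisappearedOnClusters(framework.ClusterNames(), crd.Name)
    framework.RemoveClusterPropagationPolicy(karmadaClient, crdPolicy.Name)
})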

@@ -0,0 +1,86 @@
package framework

import (
    "context"
    "fmt"

    "github.com/onsi/ginkgo"
    "github.com/onsi/gomega"
    appsv1 "k8s.io/api/apps/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/klog/v2"
)

// CreateDeployment creates a Deployment.
func CreateDeployment(client kubernetes.Interface, deployment *appsv1.Deployment) {
    ginkgo.By(fmt.Sprintf("Creating Deployment(%s/%s)", deployment.Namespace, deployment.Name), func() {
        _, err := client.AppsV1().Deployments(deployment.Namespace).Create(context.TODO(), deployment, metav1.CreateOptions{})
        gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
    })
}

// RemoveDeployment deletes a Deployment.
func RemoveDeployment(client kubernetes.Interface, namespace, name string) {
    ginkgo.By(fmt.Sprintf("Removing Deployment(%s/%s)", namespace, name), func() {
        err := client.AppsV1().Deployments(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
        gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
    })
}

// WaitDeploymentPresentOnClusters waits for the deployment to be present on member clusters until timeout.
func WaitDeploymentPresentOnClusters(clusters []string, namespace, name string) {
    ginkgo.By(fmt.Sprintf("Check if deployment(%s/%s) present on member clusters", namespace, name), func() {
        for _, clusterName := range clusters {
            clusterClient := GetClusterClient(clusterName)
            gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())

            klog.Infof("Waiting for deployment present on cluster(%s)", clusterName)
            gomega.Eventually(func() bool {
                _, err := clusterClient.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{})
                gomega.Expect(err).ShouldNot(gomega.HaveOccurred())

                return true
            }, pollTimeout, pollInterval).Should(gomega.Equal(true))
        }
    })
}

// WaitDeploymentDisappearOnClusters waits for the deployment to disappear on member clusters until timeout.
func WaitDeploymentDisappearOnClusters(clusters []string, namespace, name string) {
    ginkgo.By(fmt.Sprintf("Check if deployment(%s/%s) disappears on member clusters", namespace, name), func() {
        for _, clusterName := range clusters {
            clusterClient := GetClusterClient(clusterName)
            gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())

            klog.Infof("Waiting for deployment disappear on cluster(%s)", clusterName)
            gomega.Eventually(func() bool {
                _, err := clusterClient.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{})
                return apierrors.IsNotFound(err)
            }, pollTimeout, pollInterval).Should(gomega.Equal(true))
        }
    })
}

// UpdateDeploymentReplicas updates the deployment's replicas.
func UpdateDeploymentReplicas(client kubernetes.Interface, deployment *appsv1.Deployment, replicas int32) {
    ginkgo.By(fmt.Sprintf("Updating Deployment(%s/%s)'s replicas to %d", deployment.Namespace, deployment.Name, replicas), func() {
        deployment.Spec.Replicas = &replicas
        gomega.Eventually(func() error {
            _, err := client.AppsV1().Deployments(deployment.Namespace).Update(context.TODO(), deployment, metav1.UpdateOptions{})
            return err
        }, pollTimeout, pollInterval).ShouldNot(gomega.HaveOccurred())
    })
}

// WaitDeploymentPresentOnClusterFitWith waits until the deployment on a member cluster fits with the fit func.
func WaitDeploymentPresentOnClusterFitWith(client kubernetes.Interface, namespace, name string, fit func(deployment *appsv1.Deployment) bool) {
    gomega.Eventually(func() bool {
        dep, err := client.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{})
        if err != nil {
            return false
        }
        return fit(dep)
    }, pollTimeout, pollInterval).Should(gomega.Equal(true))
}
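
WaitDeploymentPresentOnClusterFitWith is the general-purpose variant: the caller supplies the predicate. A hedged sketch of using it to confirm that a member cluster has observed a scale-up (the client variable and replica count are illustrative):

framework.WaitDeploymentPresentOnClusterFitWith(clusterClient, deployment.Namespace, deployment.Name,
    func(d *appsv1.Deployment) bool {
        // Propagation is done once the member cluster sees the new replica count.
        return d.Spec.Replicas != nil && *d.Spec.Replicas == 2
    })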

@@ -0,0 +1,29 @@
package framework

import (
    "context"
    "fmt"

    "github.com/onsi/ginkgo"
    "github.com/onsi/gomega"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
    karmada "github.com/karmada-io/karmada/pkg/generated/clientset/versioned"
)

// CreatePropagationPolicy creates a PropagationPolicy with the karmada client.
func CreatePropagationPolicy(client karmada.Interface, policy *policyv1alpha1.PropagationPolicy) {
    ginkgo.By(fmt.Sprintf("Creating PropagationPolicy(%s/%s)", policy.Namespace, policy.Name), func() {
        _, err := client.PolicyV1alpha1().PropagationPolicies(policy.Namespace).Create(context.TODO(), policy, metav1.CreateOptions{})
        gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
    })
}

// RemovePropagationPolicy deletes a PropagationPolicy with the karmada client.
func RemovePropagationPolicy(client karmada.Interface, namespace, name string) {
    ginkgo.By(fmt.Sprintf("Removing PropagationPolicy(%s/%s)", namespace, name), func() {
        err := client.PolicyV1alpha1().PropagationPolicies(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
        gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
    })
}
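
With the policy and deployment helpers combined, a complete propagation spec shrinks to create, wait, clean up. A minimal sketch, assuming suite-level kubeClient and karmadaClient and objects built with the test helpers (names illustrative):

ginkgo.It("deployment propagation testing", func() {
    framework.CreatePropagationPolicy(karmadaClient, policy)
    framework.CreateDeployment(kubeClient, deployment)
    framework.WaitDeploymentPresentOnClusters(framework.ClusterNames(), deployment.Namespace, deployment.Name)

    framework.RemoveDeployment(kubeClient, deployment.Namespace, deployment.Name)
    framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name)
})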

@@ -15,13 +15,12 @@ import (
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/util/rand"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/klog/v2"
    mcsv1alpha1 "sigs.k8s.io/mcs-api/pkg/apis/v1alpha1"

    policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
    "github.com/karmada-io/karmada/pkg/util"
    "github.com/karmada-io/karmada/pkg/util/helper"
    "github.com/karmada-io/karmada/pkg/util/names"
+   "github.com/karmada-io/karmada/test/e2e/framework"
    testhelper "github.com/karmada-io/karmada/test/helper"
)
@@ -187,7 +186,7 @@ var _ = ginkgo.Describe("[MCS] Multi-Cluster Service testing", func() {
        },
    }, policyv1alpha1.Placement{
        ClusterAffinity: &policyv1alpha1.ClusterAffinity{
-            ClusterNames: clusterNames,
+            ClusterNames: framework.ClusterNames(),
        },
    })
    serviceImportPolicy := testhelper.NewClusterPropagationPolicy(serviceImportPolicyName, []policyv1alpha1.ResourceSelector{
@@ -198,66 +197,21 @@ var _ = ginkgo.Describe("[MCS] Multi-Cluster Service testing", func() {
        },
    }, policyv1alpha1.Placement{
        ClusterAffinity: &policyv1alpha1.ClusterAffinity{
-            ClusterNames: clusterNames,
+            ClusterNames: framework.ClusterNames(),
        },
    })

    ginkgo.BeforeEach(func() {
-        ginkgo.By(fmt.Sprintf("Create ClusterPropagationPolicy(%s) to Propagation ServiceExport CRD", serviceExportPolicy.Name), func() {
-            _, err := karmadaClient.PolicyV1alpha1().ClusterPropagationPolicies().Create(context.TODO(), serviceExportPolicy, metav1.CreateOptions{})
-            gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-        })
-
-        ginkgo.By(fmt.Sprintf("Create ClusterPropagationPolicy(%s) to Propagation ServiceImport CRD", serviceImportPolicy.Name), func() {
-            _, err := karmadaClient.PolicyV1alpha1().ClusterPropagationPolicies().Create(context.TODO(), serviceImportPolicy, metav1.CreateOptions{})
-            gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-        })
-
-        ginkgo.By("Wait ServiceExport CRD present on member clusters", func() {
-            err := wait.PollImmediate(pollInterval, pollTimeout, func() (done bool, err error) {
-                clusters, err := fetchClusters(karmadaClient)
-                if err != nil {
-                    return false, err
-                }
-                for _, cluster := range clusters {
-                    if !helper.IsAPIEnabled(cluster.Status.APIEnablements, mcsv1alpha1.GroupVersion.String(), util.ServiceExportKind) {
-                        klog.Infof("Waiting for CRD(%s) present on member cluster(%s)", util.ServiceExportKind, cluster.Name)
-                        return false, nil
-                    }
-                }
-                return true, nil
-            })
-            gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-        })
-
-        ginkgo.By("Wait ServiceImport CRD present on member clusters", func() {
-            err := wait.PollImmediate(pollInterval, pollTimeout, func() (done bool, err error) {
-                clusters, err := fetchClusters(karmadaClient)
-                if err != nil {
-                    return false, err
-                }
-                for _, cluster := range clusters {
-                    if !helper.IsAPIEnabled(cluster.Status.APIEnablements, mcsv1alpha1.GroupVersion.String(), util.ServiceImportKind) {
-                        klog.Infof("Waiting for CRD(%s) present on member cluster(%s)", util.ServiceImportKind, cluster.Name)
-                        return false, nil
-                    }
-                }
-                return true, nil
-            })
-            gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-        })
+        framework.CreateClusterPropagationPolicy(karmadaClient, serviceExportPolicy)
+        framework.CreateClusterPropagationPolicy(karmadaClient, serviceImportPolicy)
+        framework.WaitCRDPresentOnClusters(karmadaClient, framework.ClusterNames(), mcsv1alpha1.GroupVersion.String(), util.ServiceExportKind)
+        framework.WaitCRDPresentOnClusters(karmadaClient, framework.ClusterNames(), mcsv1alpha1.GroupVersion.String(), util.ServiceImportKind)
    })

    ginkgo.AfterEach(func() {
-        ginkgo.By(fmt.Sprintf("Delete ClusterPropagationPolicy %s", serviceExportPolicy.Name), func() {
-            err := karmadaClient.PolicyV1alpha1().ClusterPropagationPolicies().Delete(context.TODO(), serviceExportPolicy.Name, metav1.DeleteOptions{})
-            gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-        })
+        framework.RemoveClusterPropagationPolicy(karmadaClient, serviceImportPolicy.Name)
+        framework.RemoveClusterPropagationPolicy(karmadaClient, serviceExportPolicy.Name)

-        ginkgo.By(fmt.Sprintf("Delete ClusterPropagationPolicy %s", serviceImportPolicy.Name), func() {
-            err := karmadaClient.PolicyV1alpha1().ClusterPropagationPolicies().Delete(context.TODO(), serviceImportPolicy.Name, metav1.DeleteOptions{})
-            gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-        })
+        // Now the deletion of ClusterPropagationPolicy will not cause the deletion of related binding and workload on member clusters,
+        // so we do not need to wait the disappearance of ServiceExport CRD and ServiceImport CRD
    })
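
The blocks deleted above are exactly what the framework internalizes: each hand-rolled wait.PollImmediate loop over fetchClusters collapses into one WaitCRDPresentOnClusters call, which polls with gomega.Eventually instead. A hedged sketch of the equivalence (the condition function is illustrative):

// Before: explicit polling, error plumbing, and logging at every call site.
err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
    return conditionHolds(), nil
})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())

// After: the helper owns the loop; a call site states only the condition.
gomega.Eventually(conditionHolds, pollTimeout, pollInterval).Should(gomega.Equal(true))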

@@ -266,7 +220,7 @@ var _ = ginkgo.Describe("[MCS] Multi-Cluster Service testing", func() {
    serviceExport, serviceImport, exportPolicy, importPolicy, demoDeployment, demoService := getPrepareInfo()

    ginkgo.BeforeEach(func() {
-        exportClusterClient := getClusterClient(serviceExportClusterName)
+        exportClusterClient := framework.GetClusterClient(serviceExportClusterName)
        gomega.Expect(exportClusterClient).ShouldNot(gomega.BeNil())

        ginkgo.By(fmt.Sprintf("Create Deployment(%s/%s) in %s cluster", demoDeployment.Namespace, demoDeployment.Name, serviceExportClusterName), func() {

@@ -296,7 +250,7 @@ var _ = ginkgo.Describe("[MCS] Multi-Cluster Service testing", func() {
    })

    ginkgo.AfterEach(func() {
-        exportClusterClient := getClusterClient(serviceExportClusterName)
+        exportClusterClient := framework.GetClusterClient(serviceExportClusterName)
        gomega.Expect(exportClusterClient).ShouldNot(gomega.BeNil())

        ginkgo.By(fmt.Sprintf("Delete Deployment(%s/%s) in %s cluster", demoDeployment.Namespace, demoDeployment.Name, serviceExportClusterName), func() {

@@ -311,7 +265,7 @@ var _ = ginkgo.Describe("[MCS] Multi-Cluster Service testing", func() {
    })

    ginkgo.It("Export Service from source-clusters, import Service to destination-clusters", func() {
-        importClusterClient := getClusterClient(serviceImportClusterName)
+        importClusterClient := framework.GetClusterClient(serviceImportClusterName)
        gomega.Expect(importClusterClient).ShouldNot(gomega.BeNil())

        ginkgo.By(fmt.Sprintf("Create ServiceExport(%s/%s)", serviceExport.Namespace, serviceExport.Name), func() {

@@ -319,10 +273,7 @@ var _ = ginkgo.Describe("[MCS] Multi-Cluster Service testing", func() {
            gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
        })

-        ginkgo.By(fmt.Sprintf("Create PropagationPolicy(%s/%s) to propagate ServiceExport", exportPolicy.Namespace, exportPolicy.Name), func() {
-            _, err := karmadaClient.PolicyV1alpha1().PropagationPolicies(exportPolicy.Namespace).Create(context.TODO(), exportPolicy, metav1.CreateOptions{})
-            gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-        })
+        framework.CreatePropagationPolicy(karmadaClient, exportPolicy)

        ginkgo.By(fmt.Sprintf("Wait EndpointSlices collected to namespace(%s) in controller-plane", demoService.Namespace), func() {
            gomega.Eventually(func() int {

@@ -344,10 +295,7 @@ var _ = ginkgo.Describe("[MCS] Multi-Cluster Service testing", func() {
            gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
        })

-        ginkgo.By(fmt.Sprintf("Create PropagationPolicy(%s/%s) to propagate ServiveImport", importPolicy.Namespace, importPolicy.Name), func() {
-            _, err := karmadaClient.PolicyV1alpha1().PropagationPolicies(importPolicy.Namespace).Create(context.TODO(), importPolicy, metav1.CreateOptions{})
-            gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-        })
+        framework.CreatePropagationPolicy(karmadaClient, importPolicy)

        ginkgo.By(fmt.Sprintf("Wait derived-service(%s/%s) exist in %s cluster", demoService.Namespace, names.GenerateDerivedServiceName(demoService.Name), serviceImportClusterName), func() {
            err := wait.PollImmediate(pollInterval, pollTimeout, func() (done bool, err error) {

@@ -410,15 +358,12 @@ var _ = ginkgo.Describe("[MCS] Multi-Cluster Service testing", func() {
            err := controlPlaneClient.Delete(context.TODO(), &serviceExport)
            gomega.Expect(err).ShouldNot(gomega.HaveOccurred())

-            err = karmadaClient.PolicyV1alpha1().PropagationPolicies(exportPolicy.Namespace).Delete(context.TODO(), exportPolicy.Name, metav1.DeleteOptions{})
-            gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-
            err = controlPlaneClient.Delete(context.TODO(), &serviceImport)
            gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-
-            err = karmadaClient.PolicyV1alpha1().PropagationPolicies(importPolicy.Namespace).Delete(context.TODO(), importPolicy.Name, metav1.DeleteOptions{})
-            gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
        })
+
+        framework.RemovePropagationPolicy(karmadaClient, exportPolicy.Namespace, exportPolicy.Name)
+        framework.RemovePropagationPolicy(karmadaClient, importPolicy.Namespace, importPolicy.Name)
    })
  })
@@ -426,7 +371,7 @@ var _ = ginkgo.Describe("[MCS] Multi-Cluster Service testing", func() {
    serviceExport, serviceImport, exportPolicy, importPolicy, demoDeployment, demoService := getPrepareInfo()

    ginkgo.BeforeEach(func() {
-        exportClusterClient := getClusterClient(serviceExportClusterName)
+        exportClusterClient := framework.GetClusterClient(serviceExportClusterName)
        gomega.Expect(exportClusterClient).ShouldNot(gomega.BeNil())

        ginkgo.By(fmt.Sprintf("Create Deployment(%s/%s) in %s cluster", demoDeployment.Namespace, demoDeployment.Name, serviceExportClusterName), func() {

@@ -456,7 +401,7 @@ var _ = ginkgo.Describe("[MCS] Multi-Cluster Service testing", func() {
    })

    ginkgo.AfterEach(func() {
-        exportClusterClient := getClusterClient(serviceExportClusterName)
+        exportClusterClient := framework.GetClusterClient(serviceExportClusterName)
        gomega.Expect(exportClusterClient).ShouldNot(gomega.BeNil())

        ginkgo.By(fmt.Sprintf("Delete Deployment(%s/%s) in %s cluster", demoDeployment.Namespace, demoDeployment.Name, serviceExportClusterName), func() {

@@ -471,9 +416,9 @@ var _ = ginkgo.Describe("[MCS] Multi-Cluster Service testing", func() {
    })

    ginkgo.It("Update Deployment's replicas", func() {
-        exportClusterClient := getClusterClient(serviceExportClusterName)
+        exportClusterClient := framework.GetClusterClient(serviceExportClusterName)
        gomega.Expect(exportClusterClient).ShouldNot(gomega.BeNil())
-        importClusterClient := getClusterClient(serviceImportClusterName)
+        importClusterClient := framework.GetClusterClient(serviceImportClusterName)
        gomega.Expect(importClusterClient).ShouldNot(gomega.BeNil())

        ginkgo.By(fmt.Sprintf("Create ServiceExport(%s/%s)", serviceExport.Namespace, serviceExport.Name), func() {

@@ -481,10 +426,7 @@ var _ = ginkgo.Describe("[MCS] Multi-Cluster Service testing", func() {
            gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
        })

-        ginkgo.By(fmt.Sprintf("Create PropagationPolicy(%s/%s)", exportPolicy.Namespace, exportPolicy.Name), func() {
-            _, err := karmadaClient.PolicyV1alpha1().PropagationPolicies(exportPolicy.Namespace).Create(context.TODO(), exportPolicy, metav1.CreateOptions{})
-            gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-        })
+        framework.CreatePropagationPolicy(karmadaClient, exportPolicy)

        ginkgo.By(fmt.Sprintf("Wait EndpointSlices collected to namespace(%s) in controller-plane", demoService.Namespace), func() {
            gomega.Eventually(func() int {

@@ -506,10 +448,7 @@ var _ = ginkgo.Describe("[MCS] Multi-Cluster Service testing", func() {
            gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
        })

-        ginkgo.By(fmt.Sprintf("Create PropagationPolicy(%s/%s) to propagate ServiveImport", importPolicy.Namespace, importPolicy.Name), func() {
-            _, err := karmadaClient.PolicyV1alpha1().PropagationPolicies(importPolicy.Namespace).Create(context.TODO(), importPolicy, metav1.CreateOptions{})
-            gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-        })
+        framework.CreatePropagationPolicy(karmadaClient, importPolicy)

        ginkgo.By(fmt.Sprintf("Wait EndpointSlice exist in %s cluster", serviceImportClusterName), func() {
            gomega.Eventually(func() int {

@@ -555,15 +494,12 @@ var _ = ginkgo.Describe("[MCS] Multi-Cluster Service testing", func() {
            err := controlPlaneClient.Delete(context.TODO(), &serviceExport)
            gomega.Expect(err).ShouldNot(gomega.HaveOccurred())

-            err = karmadaClient.PolicyV1alpha1().PropagationPolicies(exportPolicy.Namespace).Delete(context.TODO(), exportPolicy.Name, metav1.DeleteOptions{})
-            gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-
            err = controlPlaneClient.Delete(context.TODO(), &serviceImport)
            gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-
-            err = karmadaClient.PolicyV1alpha1().PropagationPolicies(importPolicy.Namespace).Delete(context.TODO(), importPolicy.Name, metav1.DeleteOptions{})
-            gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
        })
+
+        framework.RemovePropagationPolicy(karmadaClient, exportPolicy.Namespace, exportPolicy.Name)
+        framework.RemovePropagationPolicy(karmadaClient, importPolicy.Namespace, importPolicy.Name)
    })
  })
})
@@ -16,6 +16,7 @@ import (
    "github.com/karmada-io/karmada/pkg/karmadactl"
    "github.com/karmada-io/karmada/pkg/karmadactl/options"
    "github.com/karmada-io/karmada/pkg/util"
+   "github.com/karmada-io/karmada/test/e2e/framework"
    "github.com/karmada-io/karmada/test/helper"
)
@@ -39,8 +40,8 @@ var _ = ginkgo.Describe("[namespace auto-provision] namespace auto-provision tes

    ginkgo.It("namespace should be propagated to member clusters", func() {
        ginkgo.By("check if namespace appear in member clusters", func() {
-            for _, cluster := range clusters {
-                clusterClient := getClusterClient(cluster.Name)
+            for _, cluster := range framework.Clusters() {
+                clusterClient := framework.GetClusterClient(cluster.Name)
                gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())

                err := wait.PollImmediate(pollInterval, pollTimeout, func() (done bool, err error) {

@@ -70,8 +71,8 @@ var _ = ginkgo.Describe("[namespace auto-provision] namespace auto-provision tes
        })

        ginkgo.By("check if namespace appear in member clusters", func() {
-            for _, cluster := range clusters {
-                clusterClient := getClusterClient(cluster.Name)
+            for _, cluster := range framework.Clusters() {
+                clusterClient := framework.GetClusterClient(cluster.Name)
                gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())

                err := wait.PollImmediate(pollInterval, pollTimeout, func() (done bool, err error) {

@@ -97,8 +98,8 @@ var _ = ginkgo.Describe("[namespace auto-provision] namespace auto-provision tes
        })

        ginkgo.By(fmt.Sprintf("namespace(%s) shoud be disappeared", namespaceName), func() {
-            for _, cluster := range clusters {
-                clusterClient := getClusterClient(cluster.Name)
+            for _, cluster := range framework.Clusters() {
+                clusterClient := framework.GetClusterClient(cluster.Name)
                gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())

                err := wait.PollImmediate(pollInterval, pollTimeout, func() (done bool, err error) {
@@ -15,6 +15,7 @@ import (
    "k8s.io/klog/v2"

    policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
+   "github.com/karmada-io/karmada/test/e2e/framework"
    "github.com/karmada-io/karmada/test/helper"
)
@@ -36,7 +37,7 @@ var _ = ginkgo.Describe("[OverridePolicy] apply overriders testing", func() {
        },
    }, policyv1alpha1.Placement{
        ClusterAffinity: &policyv1alpha1.ClusterAffinity{
-            ClusterNames: clusterNames,
+            ClusterNames: framework.ClusterNames(),
        },
    })
    overridePolicy := helper.NewOverridePolicy(overridePolicyNamespace, overridePolicyName, []policyv1alpha1.ResourceSelector{

@@ -46,7 +47,7 @@ var _ = ginkgo.Describe("[OverridePolicy] apply overriders testing", func() {
            Name: deployment.Name,
        },
    }, policyv1alpha1.ClusterAffinity{
-        ClusterNames: clusterNames,
+        ClusterNames: framework.ClusterNames(),
    }, policyv1alpha1.Overriders{
        ImageOverrider: []policyv1alpha1.ImageOverrider{
            {

@@ -68,10 +69,7 @@ var _ = ginkgo.Describe("[OverridePolicy] apply overriders testing", func() {
    })

    ginkgo.BeforeEach(func() {
-        ginkgo.By(fmt.Sprintf("creating propagationPolicy(%s/%s)", propagationPolicyNamespace, propagationPolicyName), func() {
-            _, err := karmadaClient.PolicyV1alpha1().PropagationPolicies(propagationPolicyNamespace).Create(context.TODO(), propagationPolicy, metav1.CreateOptions{})
-            gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-        })
+        framework.CreatePropagationPolicy(karmadaClient, propagationPolicy)
    })

    ginkgo.BeforeEach(func() {

@@ -89,21 +87,15 @@ var _ = ginkgo.Describe("[OverridePolicy] apply overriders testing", func() {
    })

    ginkgo.AfterEach(func() {
-        ginkgo.By(fmt.Sprintf("removing propagationPolicy(%s/%s)", propagationPolicyNamespace, propagationPolicyName), func() {
-            err := karmadaClient.PolicyV1alpha1().PropagationPolicies(propagationPolicyNamespace).Delete(context.TODO(), propagationPolicyName, metav1.DeleteOptions{})
-            gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-        })
+        framework.RemovePropagationPolicy(karmadaClient, propagationPolicy.Namespace, propagationPolicy.Name)
    })

    ginkgo.It("deployment imageOverride testing", func() {
-        ginkgo.By(fmt.Sprintf("creating deployment(%s/%s)", deploymentNamespace, deploymentName), func() {
-            _, err := kubeClient.AppsV1().Deployments(testNamespace).Create(context.TODO(), deployment, metav1.CreateOptions{})
-            gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-        })
+        framework.CreateDeployment(kubeClient, deployment)

        ginkgo.By("check if deployment present on member clusters have correct image value", func() {
-            for _, cluster := range clusters {
-                clusterClient := getClusterClient(cluster.Name)
+            for _, cluster := range framework.Clusters() {
+                clusterClient := framework.GetClusterClient(cluster.Name)
                gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())

                var deploymentInCluster *appsv1.Deployment

@@ -127,10 +119,7 @@ var _ = ginkgo.Describe("[OverridePolicy] apply overriders testing", func() {
            }
        })

-        ginkgo.By(fmt.Sprintf("removing deployment(%s/%s)", deploymentNamespace, deploymentName), func() {
-            err := kubeClient.AppsV1().Deployments(testNamespace).Delete(context.TODO(), deploymentName, metav1.DeleteOptions{})
-            gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-        })
+        framework.RemoveDeployment(kubeClient, deployment.Namespace, deployment.Name)
    })

  })
@ -152,7 +141,7 @@ var _ = ginkgo.Describe("[OverridePolicy] apply overriders testing", func() {
|
|||
},
|
||||
}, policyv1alpha1.Placement{
|
||||
ClusterAffinity: &policyv1alpha1.ClusterAffinity{
|
||||
ClusterNames: clusterNames,
|
||||
ClusterNames: framework.ClusterNames(),
|
||||
},
|
||||
})
|
||||
overridePolicy := helper.NewOverridePolicy(overridePolicyNamespace, overridePolicyName, []policyv1alpha1.ResourceSelector{
|
||||
|
|
@ -162,7 +151,7 @@ var _ = ginkgo.Describe("[OverridePolicy] apply overriders testing", func() {
|
|||
Name: pod.Name,
|
||||
},
|
||||
}, policyv1alpha1.ClusterAffinity{
|
||||
ClusterNames: clusterNames,
|
||||
ClusterNames: framework.ClusterNames(),
|
||||
}, policyv1alpha1.Overriders{
|
||||
ImageOverrider: []policyv1alpha1.ImageOverrider{
|
||||
{
|
||||
|
|
@ -184,10 +173,7 @@ var _ = ginkgo.Describe("[OverridePolicy] apply overriders testing", func() {
|
|||
})
|
||||
|
||||
ginkgo.BeforeEach(func() {
|
||||
ginkgo.By(fmt.Sprintf("creating propagationPolicy(%s/%s)", propagationPolicyNamespace, propagationPolicyName), func() {
|
||||
_, err := karmadaClient.PolicyV1alpha1().PropagationPolicies(propagationPolicyNamespace).Create(context.TODO(), propagationPolicy, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
|
||||
})
|
||||
framework.CreatePropagationPolicy(karmadaClient, propagationPolicy)
|
||||
})
|
||||
|
||||
ginkgo.BeforeEach(func() {
|
||||
|
|
@ -205,10 +191,7 @@ var _ = ginkgo.Describe("[OverridePolicy] apply overriders testing", func() {
|
|||
})
|
||||
|
||||
ginkgo.AfterEach(func() {
|
||||
ginkgo.By(fmt.Sprintf("removing propagationPolicy(%s/%s)", propagationPolicyNamespace, propagationPolicyName), func() {
|
||||
err := karmadaClient.PolicyV1alpha1().PropagationPolicies(propagationPolicyNamespace).Delete(context.TODO(), propagationPolicyName, metav1.DeleteOptions{})
|
||||
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
|
||||
})
|
||||
framework.RemovePropagationPolicy(karmadaClient, propagationPolicy.Namespace, propagationPolicy.Name)
|
||||
})
|
||||
|
||||
ginkgo.It("pod imageOverride testing", func() {
|
||||
|
|
@ -218,8 +201,8 @@ var _ = ginkgo.Describe("[OverridePolicy] apply overriders testing", func() {
|
|||
})
|
||||
|
||||
ginkgo.By("check if pod present on member clusters have correct image value", func() {
|
||||
for _, cluster := range clusters {
|
||||
clusterClient := getClusterClient(cluster.Name)
|
||||
for _, cluster := range framework.Clusters() {
|
||||
clusterClient := framework.GetClusterClient(cluster.Name)
|
||||
gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())
|
||||
|
||||
var podInClusters *corev1.Pod
|
||||
|
|
@@ -269,7 +252,7 @@ var _ = ginkgo.Describe("[OverridePolicy] apply overriders testing", func() {
 			},
 		}, policyv1alpha1.Placement{
 			ClusterAffinity: &policyv1alpha1.ClusterAffinity{
-				ClusterNames: clusterNames,
+				ClusterNames: framework.ClusterNames(),
 			},
 		})
 		overridePolicy := helper.NewOverridePolicy(overridePolicyNamespace, overridePolicyName, []policyv1alpha1.ResourceSelector{
@@ -279,7 +262,7 @@ var _ = ginkgo.Describe("[OverridePolicy] apply overriders testing", func() {
 				Name: deployment.Name,
 			},
 		}, policyv1alpha1.ClusterAffinity{
-			ClusterNames: clusterNames,
+			ClusterNames: framework.ClusterNames(),
 		}, policyv1alpha1.Overriders{
 			ImageOverrider: []policyv1alpha1.ImageOverrider{
 				{
@@ -294,10 +277,7 @@ var _ = ginkgo.Describe("[OverridePolicy] apply overriders testing", func() {
 		})
 
 		ginkgo.BeforeEach(func() {
-			ginkgo.By(fmt.Sprintf("creating propagationPolicy(%s/%s)", propagationPolicyNamespace, propagationPolicyName), func() {
-				_, err := karmadaClient.PolicyV1alpha1().PropagationPolicies(propagationPolicyNamespace).Create(context.TODO(), propagationPolicy, metav1.CreateOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
+			framework.CreatePropagationPolicy(karmadaClient, propagationPolicy)
 		})
 
 		ginkgo.BeforeEach(func() {
@@ -315,21 +295,15 @@ var _ = ginkgo.Describe("[OverridePolicy] apply overriders testing", func() {
 		})
 
 		ginkgo.AfterEach(func() {
-			ginkgo.By(fmt.Sprintf("removing propagationPolicy(%s/%s)", propagationPolicyNamespace, propagationPolicyName), func() {
-				err := karmadaClient.PolicyV1alpha1().PropagationPolicies(propagationPolicyNamespace).Delete(context.TODO(), propagationPolicyName, metav1.DeleteOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
+			framework.RemovePropagationPolicy(karmadaClient, propagationPolicy.Namespace, propagationPolicy.Name)
 		})
 
 		ginkgo.It("deployment imageOverride testing", func() {
-			ginkgo.By(fmt.Sprintf("creating deployment(%s/%s)", deploymentNamespace, deploymentName), func() {
-				_, err := kubeClient.AppsV1().Deployments(testNamespace).Create(context.TODO(), deployment, metav1.CreateOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
+			framework.CreateDeployment(kubeClient, deployment)
 
 			ginkgo.By("check if deployment present on member clusters have correct image value", func() {
-				for _, cluster := range clusters {
-					clusterClient := getClusterClient(cluster.Name)
+				for _, cluster := range framework.Clusters() {
+					clusterClient := framework.GetClusterClient(cluster.Name)
 					gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())
 
 					var deploymentInCluster *appsv1.Deployment
@@ -351,10 +325,7 @@ var _ = ginkgo.Describe("[OverridePolicy] apply overriders testing", func() {
 				}
 			})
 
-			ginkgo.By(fmt.Sprintf("removing deployment(%s/%s)", deploymentNamespace, deploymentName), func() {
-				err := kubeClient.AppsV1().Deployments(testNamespace).Delete(context.TODO(), deploymentName, metav1.DeleteOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
+			framework.RemoveDeployment(kubeClient, deployment.Namespace, deployment.Name)
 		})
 
 	})
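framework.Clusters, framework.ClusterNames and framework.GetClusterClient replace the e2e package's old clusters/clusterNames globals and getClusterClient function. A sketch of the accessor shape, assuming the framework keeps a registry filled during suite setup; the variable names below are invented for illustration:

package framework

import (
	"k8s.io/client-go/kubernetes"

	clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
)

// Hypothetical registry, populated once by the suite's BeforeSuite.
var (
	testClusters         []*clusterv1alpha1.Cluster
	clusterClientsByName = map[string]kubernetes.Interface{}
)

// Clusters returns the member clusters under test.
func Clusters() []*clusterv1alpha1.Cluster { return testClusters }

// ClusterNames returns just the names, handy for ClusterAffinity terms.
func ClusterNames() []string {
	names := make([]string, 0, len(testClusters))
	for _, cluster := range testClusters {
		names = append(names, cluster.Name)
	}
	return names
}

// GetClusterClient returns a typed client for one member cluster, nil if unknown.
func GetClusterClient(name string) kubernetes.Interface {
	return clusterClientsByName[name]
}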
@@ -378,11 +349,11 @@ var _ = ginkgo.Describe("OverridePolicy with nil resourceSelectors", func() {
 			},
 		}, policyv1alpha1.Placement{
 			ClusterAffinity: &policyv1alpha1.ClusterAffinity{
-				ClusterNames: clusterNames,
+				ClusterNames: framework.ClusterNames(),
 			},
 		})
 	overridePolicy := helper.NewOverridePolicy(overridePolicyNamespace, overridePolicyName, nil, policyv1alpha1.ClusterAffinity{
-		ClusterNames: clusterNames,
+		ClusterNames: framework.ClusterNames(),
 	}, policyv1alpha1.Overriders{
 		ImageOverrider: []policyv1alpha1.ImageOverrider{
 			{
@@ -397,10 +368,7 @@ var _ = ginkgo.Describe("OverridePolicy with nil resourceSelectors", func() {
 	})
 
 	ginkgo.BeforeEach(func() {
-		ginkgo.By(fmt.Sprintf("creating propagationPolicy(%s/%s)", propagationPolicyNamespace, propagationPolicyName), func() {
-			_, err := karmadaClient.PolicyV1alpha1().PropagationPolicies(propagationPolicyNamespace).Create(context.TODO(), propagationPolicy, metav1.CreateOptions{})
-			gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-		})
+		framework.CreatePropagationPolicy(karmadaClient, propagationPolicy)
 	})
 
 	ginkgo.BeforeEach(func() {
@@ -418,21 +386,15 @@ var _ = ginkgo.Describe("OverridePolicy with nil resourceSelectors", func() {
 	})
 
 	ginkgo.AfterEach(func() {
-		ginkgo.By(fmt.Sprintf("removing propagationPolicy(%s/%s)", propagationPolicyNamespace, propagationPolicyName), func() {
-			err := karmadaClient.PolicyV1alpha1().PropagationPolicies(propagationPolicyNamespace).Delete(context.TODO(), propagationPolicyName, metav1.DeleteOptions{})
-			gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-		})
+		framework.RemovePropagationPolicy(karmadaClient, propagationPolicy.Namespace, propagationPolicy.Name)
 	})
 
 	ginkgo.It("deployment imageOverride testing", func() {
-		ginkgo.By(fmt.Sprintf("creating deployment(%s/%s)", deploymentNamespace, deploymentName), func() {
-			_, err := kubeClient.AppsV1().Deployments(testNamespace).Create(context.TODO(), deployment, metav1.CreateOptions{})
-			gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-		})
+		framework.CreateDeployment(kubeClient, deployment)
 
 		ginkgo.By("check if deployment present on member clusters have correct image value", func() {
-			for _, cluster := range clusters {
-				clusterClient := getClusterClient(cluster.Name)
+			for _, cluster := range framework.Clusters() {
+				clusterClient := framework.GetClusterClient(cluster.Name)
 				gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())
 
 				var deploymentInCluster *appsv1.Deployment
@@ -454,10 +416,7 @@ var _ = ginkgo.Describe("OverridePolicy with nil resourceSelectors", func() {
 			}
 		})
 
-		ginkgo.By(fmt.Sprintf("removing deployment(%s/%s)", deploymentNamespace, deploymentName), func() {
-			err := kubeClient.AppsV1().Deployments(testNamespace).Delete(context.TODO(), deploymentName, metav1.DeleteOptions{})
-			gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-		})
+		framework.RemoveDeployment(kubeClient, deployment.Namespace, deployment.Name)
 	})
 
 })
@@ -7,11 +7,11 @@ import (
 	"github.com/onsi/ginkgo"
 	"github.com/onsi/gomega"
+	appsv1 "k8s.io/api/apps/v1"
 	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/rand"
@@ -20,7 +20,7 @@ import (
 	"k8s.io/utils/pointer"
 
 	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
-	"github.com/karmada-io/karmada/pkg/util/helper"
+	"github.com/karmada-io/karmada/test/e2e/framework"
 	testhelper "github.com/karmada-io/karmada/test/helper"
 )
@@ -41,110 +41,32 @@ var _ = ginkgo.Describe("[BasicPropagation] basic propagation testing", func() {
 			},
 		}, policyv1alpha1.Placement{
 			ClusterAffinity: &policyv1alpha1.ClusterAffinity{
-				ClusterNames: clusterNames,
+				ClusterNames: framework.ClusterNames(),
 			},
 		})
 
-		ginkgo.BeforeEach(func() {
-			ginkgo.By(fmt.Sprintf("creating policy(%s/%s)", policyNamespace, policyName), func() {
-				_, err := karmadaClient.PolicyV1alpha1().PropagationPolicies(policyNamespace).Create(context.TODO(), policy, metav1.CreateOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
-		})
-
-		ginkgo.AfterEach(func() {
-			ginkgo.By(fmt.Sprintf("removing policy(%s/%s)", policyNamespace, policyName), func() {
-				err := karmadaClient.PolicyV1alpha1().PropagationPolicies(policyNamespace).Delete(context.TODO(), policyName, metav1.DeleteOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
-		})
-
 		ginkgo.It("deployment propagation testing", func() {
-			ginkgo.By(fmt.Sprintf("creating deployment(%s/%s)", deploymentNamespace, deploymentName), func() {
-				_, err := kubeClient.AppsV1().Deployments(testNamespace).Create(context.TODO(), deployment, metav1.CreateOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
-
-			ginkgo.By("check if deployment present on member clusters", func() {
-				for _, cluster := range clusters {
-					clusterClient := getClusterClient(cluster.Name)
-					gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())
-
-					klog.Infof("Waiting for deployment(%s/%s) present on cluster(%s)", deploymentNamespace, deploymentName, cluster.Name)
-					err := wait.PollImmediate(pollInterval, pollTimeout, func() (done bool, err error) {
-						_, err = clusterClient.AppsV1().Deployments(deploymentNamespace).Get(context.TODO(), deploymentName, metav1.GetOptions{})
-						if err != nil {
-							if apierrors.IsNotFound(err) {
-								return false, nil
-							}
-							return false, err
-						}
-						return true, nil
-					})
-					gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-				}
-			})
-
-			ginkgo.By("updating deployment", func() {
-				patch := map[string]interface{}{
-					"spec": map[string]interface{}{
-						"replicas": pointer.Int32Ptr(updateDeploymentReplicas),
-					},
-				}
-				bytes, err := json.Marshal(patch)
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-
-				_, err = kubeClient.AppsV1().Deployments(deploymentNamespace).Patch(context.TODO(), deploymentName, types.StrategicMergePatchType, bytes, metav1.PatchOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
+			framework.CreatePropagationPolicy(karmadaClient, policy)
+			framework.CreateDeployment(kubeClient, deployment)
+			framework.WaitDeploymentPresentOnClusters(framework.ClusterNames(), deployment.Namespace, deployment.Name)
+
+			framework.UpdateDeploymentReplicas(kubeClient, deployment, updateDeploymentReplicas)
 			ginkgo.By("check if update has been synced to member clusters", func() {
-				for _, cluster := range clusters {
-					clusterClient := getClusterClient(cluster.Name)
+				for _, cluster := range framework.Clusters() {
+					clusterClient := framework.GetClusterClient(cluster.Name)
 					gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())
 
-					klog.Infof("Waiting for deployment(%s/%s) synced on cluster(%s)", deploymentNamespace, deploymentName, cluster.Name)
-					err := wait.PollImmediate(pollInterval, pollTimeout, func() (done bool, err error) {
-						dep, err := clusterClient.AppsV1().Deployments(deploymentNamespace).Get(context.TODO(), deploymentName, metav1.GetOptions{})
-						if err != nil {
-							return false, err
-						}
-
-						if *dep.Spec.Replicas == updateDeploymentReplicas {
-							return true, nil
-						}
-
-						return false, nil
-					})
-					gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+					framework.WaitDeploymentPresentOnClusterFitWith(clusterClient, deploymentNamespace, deploymentName,
+						func(deployment *appsv1.Deployment) bool {
+							return *deployment.Spec.Replicas == updateDeploymentReplicas
+						})
 				}
 			})
 
-			ginkgo.By(fmt.Sprintf("removing deployment(%s/%s)", deploymentNamespace, deploymentName), func() {
-				err := kubeClient.AppsV1().Deployments(testNamespace).Delete(context.TODO(), deploymentName, metav1.DeleteOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
-
-			ginkgo.By("check if deployment has been deleted from member clusters", func() {
-				for _, cluster := range clusters {
-					clusterClient := getClusterClient(cluster.Name)
-					gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())
-
-					klog.Infof("Waiting for deployment(%s/%s) disappear on cluster(%s)", deploymentNamespace, deploymentName, cluster.Name)
-					err := wait.PollImmediate(pollInterval, pollTimeout, func() (done bool, err error) {
-						_, err = clusterClient.AppsV1().Deployments(deploymentNamespace).Get(context.TODO(), deploymentName, metav1.GetOptions{})
-						if err != nil {
-							if apierrors.IsNotFound(err) {
-								return true, nil
-							}
-							return false, err
-						}
-
-						return false, nil
-					})
-					gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-				}
-			})
+			framework.RemoveDeployment(kubeClient, deployment.Namespace, deployment.Name)
+			framework.WaitDeploymentDisappearOnClusters(framework.ClusterNames(), deployment.Namespace, deployment.Name)
+			framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name)
 		})
 	})
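framework.WaitDeploymentPresentOnClusters and WaitDeploymentDisappearOnClusters absorb the per-cluster wait.PollImmediate loops deleted in the hunk above. A sketch of the "present" variant; the poll constants and the GetClusterClient accessor are stubbed as assumptions so the file stands alone:

package framework

import (
	"context"
	"time"

	"github.com/onsi/gomega"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/klog/v2"
)

// Assumed suite-wide polling knobs; the real values live elsewhere in the suite.
const (
	pollInterval = 5 * time.Second
	pollTimeout  = 5 * time.Minute
)

// GetClusterClient is sketched separately; stubbed here for self-containment.
var GetClusterClient = func(name string) kubernetes.Interface { return nil }

// WaitDeploymentPresentOnClusters blocks until the Deployment exists on every
// named member cluster, exactly as the removed inline loop did.
func WaitDeploymentPresentOnClusters(clusterNames []string, namespace, name string) {
	for _, clusterName := range clusterNames {
		clusterClient := GetClusterClient(clusterName)
		gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())

		klog.Infof("Waiting for deployment(%s/%s) present on cluster(%s)", namespace, name, clusterName)
		err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
			_, err := clusterClient.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{})
			if err != nil {
				if apierrors.IsNotFound(err) {
					return false, nil
				}
				return false, err
			}
			return true, nil
		})
		gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
	}
}

The Disappear variant presumably just flips the IsNotFound branch to return true, matching the deletion check removed above.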
@@ -163,33 +85,21 @@ var _ = ginkgo.Describe("[BasicPropagation] basic propagation testing", func() {
 			},
 		}, policyv1alpha1.Placement{
 			ClusterAffinity: &policyv1alpha1.ClusterAffinity{
-				ClusterNames: clusterNames,
+				ClusterNames: framework.ClusterNames(),
 			},
 		})
 
-		ginkgo.BeforeEach(func() {
-			ginkgo.By(fmt.Sprintf("creating policy(%s/%s)", policyNamespace, policyName), func() {
-				_, err := karmadaClient.PolicyV1alpha1().PropagationPolicies(policyNamespace).Create(context.TODO(), policy, metav1.CreateOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
-		})
-
-		ginkgo.AfterEach(func() {
-			ginkgo.By(fmt.Sprintf("removing policy(%s/%s)", policyNamespace, policyName), func() {
-				err := karmadaClient.PolicyV1alpha1().PropagationPolicies(policyNamespace).Delete(context.TODO(), policyName, metav1.DeleteOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
-		})
-
 		ginkgo.It("service propagation testing", func() {
+			framework.CreatePropagationPolicy(karmadaClient, policy)
+
 			ginkgo.By(fmt.Sprintf("creating service(%s/%s)", serviceNamespace, serviceName), func() {
 				_, err := kubeClient.CoreV1().Services(serviceNamespace).Create(context.TODO(), service, metav1.CreateOptions{})
 				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
 			})
 
 			ginkgo.By("check if service present on member clusters", func() {
-				for _, cluster := range clusters {
-					clusterClient := getClusterClient(cluster.Name)
+				for _, cluster := range framework.Clusters() {
+					clusterClient := framework.GetClusterClient(cluster.Name)
 					gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())
 
 					klog.Infof("Waiting for service(%s/%s) present on cluster(%s)", serviceNamespace, serviceName, cluster.Name)
@@ -223,8 +133,8 @@ var _ = ginkgo.Describe("[BasicPropagation] basic propagation testing", func() {
 			})
 
 			ginkgo.By("check if update has been synced to member clusters", func() {
-				for _, cluster := range clusters {
-					clusterClient := getClusterClient(cluster.Name)
+				for _, cluster := range framework.Clusters() {
+					clusterClient := framework.GetClusterClient(cluster.Name)
 					gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())
 
 					klog.Infof("Waiting for service(%s/%s) synced on cluster(%s)", serviceNamespace, serviceName, cluster.Name)
@@ -250,8 +160,8 @@ var _ = ginkgo.Describe("[BasicPropagation] basic propagation testing", func() {
 			})
 
 			ginkgo.By("check if service has been deleted from member clusters", func() {
-				for _, cluster := range clusters {
-					clusterClient := getClusterClient(cluster.Name)
+				for _, cluster := range framework.Clusters() {
+					clusterClient := framework.GetClusterClient(cluster.Name)
 					gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())
 
 					klog.Infof("Waiting for service(%s/%s) disappear on cluster(%s)", serviceNamespace, serviceName, cluster.Name)
@@ -269,6 +179,8 @@ var _ = ginkgo.Describe("[BasicPropagation] basic propagation testing", func() {
 					gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
 				}
 			})
+
+			framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name)
 		})
 	})
@@ -287,33 +199,21 @@ var _ = ginkgo.Describe("[BasicPropagation] basic propagation testing", func() {
 			},
 		}, policyv1alpha1.Placement{
 			ClusterAffinity: &policyv1alpha1.ClusterAffinity{
-				ClusterNames: clusterNames,
+				ClusterNames: framework.ClusterNames(),
 			},
 		})
 
-		ginkgo.BeforeEach(func() {
-			ginkgo.By(fmt.Sprintf("creating policy(%s/%s)", policyNamespace, policyName), func() {
-				_, err := karmadaClient.PolicyV1alpha1().PropagationPolicies(policyNamespace).Create(context.TODO(), policy, metav1.CreateOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
-		})
-
-		ginkgo.AfterEach(func() {
-			ginkgo.By(fmt.Sprintf("removing policy(%s/%s)", policyNamespace, policyName), func() {
-				err := karmadaClient.PolicyV1alpha1().PropagationPolicies(policyNamespace).Delete(context.TODO(), policyName, metav1.DeleteOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
-		})
-
 		ginkgo.It("pod propagation testing", func() {
+			framework.CreatePropagationPolicy(karmadaClient, policy)
+
 			ginkgo.By(fmt.Sprintf("creating pod(%s/%s)", podNamespace, podName), func() {
 				_, err := kubeClient.CoreV1().Pods(podNamespace).Create(context.TODO(), pod, metav1.CreateOptions{})
 				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
 			})
 
 			ginkgo.By("check if pod present on member clusters", func() {
-				for _, cluster := range clusters {
-					clusterClient := getClusterClient(cluster.Name)
+				for _, cluster := range framework.Clusters() {
+					clusterClient := framework.GetClusterClient(cluster.Name)
 					gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())
 
 					klog.Infof("Waiting for pod(%s/%s) present on cluster(%s)", podNamespace, podName, cluster.Name)
@@ -347,8 +247,8 @@ var _ = ginkgo.Describe("[BasicPropagation] basic propagation testing", func() {
 			})
 
 			ginkgo.By("check if update has been synced to member clusters", func() {
-				for _, cluster := range clusters {
-					clusterClient := getClusterClient(cluster.Name)
+				for _, cluster := range framework.Clusters() {
+					clusterClient := framework.GetClusterClient(cluster.Name)
 					gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())
 
 					klog.Infof("Waiting for pod(%s/%s) synced on cluster(%s)", podNamespace, podName, cluster.Name)
@@ -374,8 +274,8 @@ var _ = ginkgo.Describe("[BasicPropagation] basic propagation testing", func() {
 			})
 
 			ginkgo.By("check if pod has been deleted from member clusters", func() {
-				for _, cluster := range clusters {
-					clusterClient := getClusterClient(cluster.Name)
+				for _, cluster := range framework.Clusters() {
+					clusterClient := framework.GetClusterClient(cluster.Name)
 					gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())
 
 					klog.Infof("Waiting for pod(%s/%s) disappear on cluster(%s)", podNamespace, podName, cluster.Name)
@@ -393,6 +293,8 @@ var _ = ginkgo.Describe("[BasicPropagation] basic propagation testing", func() {
 					gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
 				}
 			})
+
+			framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name)
 		})
 	})
@@ -414,10 +316,9 @@ var _ = ginkgo.Describe("[BasicPropagation] basic propagation testing", func() {
 			},
 		}, policyv1alpha1.Placement{
 			ClusterAffinity: &policyv1alpha1.ClusterAffinity{
-				ClusterNames: clusterNames,
+				ClusterNames: framework.ClusterNames(),
 			},
 		})
-		crdGVR := schema.GroupVersionResource{Group: "apiextensions.k8s.io", Version: "v1", Resource: "customresourcedefinitions"}
 
 		crNamespace := testNamespace
 		crName := crdNamePrefix + rand.String(RandomStrLength)
@@ -433,84 +334,27 @@ var _ = ginkgo.Describe("[BasicPropagation] basic propagation testing", func() {
 			},
 		}, policyv1alpha1.Placement{
 			ClusterAffinity: &policyv1alpha1.ClusterAffinity{
-				ClusterNames: clusterNames,
+				ClusterNames: framework.ClusterNames(),
 			},
 		})
 
-		ginkgo.BeforeEach(func() {
-			ginkgo.By(fmt.Sprintf("creating crdPolicy(%s)", crdPolicy.Name), func() {
-				_, err := karmadaClient.PolicyV1alpha1().ClusterPropagationPolicies().Create(context.TODO(), crdPolicy, metav1.CreateOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
-
-			ginkgo.By(fmt.Sprintf("creating crd(%s)", crd.Name), func() {
-				unstructObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(crd)
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-
-				_, err = dynamicClient.Resource(crdGVR).Namespace(crd.Namespace).Create(context.TODO(), &unstructured.Unstructured{Object: unstructObj}, metav1.CreateOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
-
-			ginkgo.By(fmt.Sprintf("get crd(%s)", crd.Name), func() {
-				_, err := dynamicClient.Resource(crdGVR).Namespace(crd.Namespace).Get(context.TODO(), crd.Name, metav1.GetOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
-
-			// Check CRD enablement from cluster objects instead of member clusters.
-			// After CRD installed on member cluster, the cluster status controller takes at most cluster-status-update-frequency
-			// time to collect the API list, before that the scheduler will filter out the cluster from scheduling.
-			ginkgo.By("check if crd present on member clusters", func() {
-				err := wait.PollImmediate(pollInterval, pollTimeout, func() (done bool, err error) {
-					clusters, err := fetchClusters(karmadaClient)
-					if err != nil {
-						return false, err
-					}
-					for _, cluster := range clusters {
-						if !helper.IsAPIEnabled(cluster.Status.APIEnablements, crAPIVersion, crdSpecNames.Kind) {
-							return false, nil
-						}
-					}
-					return true, nil
-				})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
-		})
-
-		ginkgo.BeforeEach(func() {
-			ginkgo.By(fmt.Sprintf("creating crPolicy(%s/%s)", crPolicy.Namespace, crPolicy.Name), func() {
-				_, err := karmadaClient.PolicyV1alpha1().PropagationPolicies(crPolicy.Namespace).Create(context.TODO(), crPolicy, metav1.CreateOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
-		})
-
-		ginkgo.AfterEach(func() {
-			ginkgo.By(fmt.Sprintf("removing crPolicy(%s/%s)", crPolicy.Namespace, crPolicy.Name), func() {
-				err := karmadaClient.PolicyV1alpha1().PropagationPolicies(crPolicy.Namespace).Delete(context.TODO(), crPolicy.Name, metav1.DeleteOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
-		})
-
-		ginkgo.AfterEach(func() {
-			ginkgo.By(fmt.Sprintf("removing crdPolicy(%s)", crdPolicy.Name), func() {
-				err := karmadaClient.PolicyV1alpha1().ClusterPropagationPolicies().Delete(context.TODO(), crdPolicy.Name, metav1.DeleteOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
-
-			ginkgo.By(fmt.Sprintf("removing crd(%s)", crd.Name), func() {
-				err := dynamicClient.Resource(crdGVR).Namespace(crd.Namespace).Delete(context.TODO(), crd.Name, metav1.DeleteOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
-		})
-
 		ginkgo.It("namespaceScoped cr propagation testing", func() {
+			framework.CreateClusterPropagationPolicy(karmadaClient, crdPolicy)
+			framework.CreateCRD(dynamicClient, crd)
+			framework.GetCRD(dynamicClient, crd.Name)
+			framework.WaitCRDPresentOnClusters(karmadaClient, framework.ClusterNames(),
+				fmt.Sprintf("%s/%s", crd.Spec.Group, "v1alpha1"), crd.Spec.Names.Kind)
+
+			framework.CreatePropagationPolicy(karmadaClient, crPolicy)
+
 			ginkgo.By(fmt.Sprintf("creating cr(%s/%s)", crNamespace, crName), func() {
 				_, err := dynamicClient.Resource(crGVR).Namespace(crNamespace).Create(context.TODO(), cr, metav1.CreateOptions{})
 				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
 			})
 
 			ginkgo.By("check if cr present on member clusters", func() {
-				for _, cluster := range clusters {
-					clusterDynamicClient := getClusterDynamicClient(cluster.Name)
+				for _, cluster := range framework.Clusters() {
+					clusterDynamicClient := framework.GetClusterDynamicClient(cluster.Name)
 					gomega.Expect(clusterDynamicClient).ShouldNot(gomega.BeNil())
 
 					klog.Infof("Waiting for cr(%s/%s) present on cluster(%s)", crNamespace, crName, cluster.Name)
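framework.WaitCRDPresentOnClusters replaces the removed IsAPIEnabled poll: it watches the Cluster objects' status rather than the member clusters directly, for the cluster-status-update-frequency reason the deleted comment explained. A sketch under assumed names (the clientset accessor and poll constants are guesses, hedged accordingly):

package framework

import (
	"context"
	"time"

	"github.com/onsi/gomega"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"

	karmada "github.com/karmada-io/karmada/pkg/generated/clientset/versioned"
	"github.com/karmada-io/karmada/pkg/util/helper"
)

const (
	pollInterval = 5 * time.Second // assumed
	pollTimeout  = 5 * time.Minute // assumed
)

// WaitCRDPresentOnClusters waits until every listed cluster reports the CRD's
// API in status.apiEnablements, mirroring the inline loop removed above.
func WaitCRDPresentOnClusters(client karmada.Interface, clusterNames []string, apiVersion, kind string) {
	err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
		for _, clusterName := range clusterNames {
			cluster, err := client.ClusterV1alpha1().Clusters().Get(context.TODO(), clusterName, metav1.GetOptions{})
			if err != nil {
				return false, err
			}
			if !helper.IsAPIEnabled(cluster.Status.APIEnablements, apiVersion, kind) {
				return false, nil
			}
		}
		return true, nil
	})
	gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
}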
@@ -544,8 +388,8 @@ var _ = ginkgo.Describe("[BasicPropagation] basic propagation testing", func() {
 			})
 
 			ginkgo.By("check if update has been synced to member clusters", func() {
-				for _, cluster := range clusters {
-					clusterDynamicClient := getClusterDynamicClient(cluster.Name)
+				for _, cluster := range framework.Clusters() {
+					clusterDynamicClient := framework.GetClusterDynamicClient(cluster.Name)
 					gomega.Expect(clusterDynamicClient).ShouldNot(gomega.BeNil())
 
 					klog.Infof("Waiting for cr(%s/%s) synced on cluster(%s)", crNamespace, crName, cluster.Name)
@@ -575,8 +419,8 @@ var _ = ginkgo.Describe("[BasicPropagation] basic propagation testing", func() {
 			})
 
 			ginkgo.By("check if cr has been deleted from member clusters", func() {
-				for _, cluster := range clusters {
-					clusterDynamicClient := getClusterDynamicClient(cluster.Name)
+				for _, cluster := range framework.Clusters() {
+					clusterDynamicClient := framework.GetClusterDynamicClient(cluster.Name)
 					gomega.Expect(clusterDynamicClient).ShouldNot(gomega.BeNil())
 
 					klog.Infof("Waiting for cr(%s/%s) disappear on cluster(%s)", crNamespace, crName, cluster.Name)
@@ -594,6 +438,11 @@ var _ = ginkgo.Describe("[BasicPropagation] basic propagation testing", func() {
 					gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
 				}
 			})
+
+			framework.RemovePropagationPolicy(karmadaClient, crPolicy.Namespace, crPolicy.Name)
+
+			framework.RemoveClusterPropagationPolicy(karmadaClient, crdPolicy.Name)
+			framework.RemoveCRD(dynamicClient, crd.Name)
 		})
 	})
@@ -612,33 +461,21 @@ var _ = ginkgo.Describe("[BasicPropagation] basic propagation testing", func() {
 			},
 		}, policyv1alpha1.Placement{
 			ClusterAffinity: &policyv1alpha1.ClusterAffinity{
-				ClusterNames: clusterNames,
+				ClusterNames: framework.ClusterNames(),
 			},
 		})
 
-		ginkgo.BeforeEach(func() {
-			ginkgo.By(fmt.Sprintf("creating policy(%s/%s)", policyNamespace, policyName), func() {
-				_, err := karmadaClient.PolicyV1alpha1().PropagationPolicies(policyNamespace).Create(context.TODO(), policy, metav1.CreateOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
-		})
-
-		ginkgo.AfterEach(func() {
-			ginkgo.By(fmt.Sprintf("removing policy(%s/%s)", policyNamespace, policyName), func() {
-				err := karmadaClient.PolicyV1alpha1().PropagationPolicies(policyNamespace).Delete(context.TODO(), policyName, metav1.DeleteOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
-		})
-
 		ginkgo.It("job propagation testing", func() {
+			framework.CreatePropagationPolicy(karmadaClient, policy)
+
 			ginkgo.By(fmt.Sprintf("creating job(%s/%s)", jobNamespace, jobName), func() {
 				_, err := kubeClient.BatchV1().Jobs(jobNamespace).Create(context.TODO(), job, metav1.CreateOptions{})
 				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
 			})
 
 			ginkgo.By("check if job present on member clusters", func() {
-				for _, cluster := range clusters {
-					clusterClient := getClusterClient(cluster.Name)
+				for _, cluster := range framework.Clusters() {
+					clusterClient := framework.GetClusterClient(cluster.Name)
 					gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())
 
 					klog.Infof("Waiting for job(%s/%s) present on cluster(%s)", jobNamespace, jobName, cluster.Name)
@@ -672,8 +509,8 @@ var _ = ginkgo.Describe("[BasicPropagation] basic propagation testing", func() {
 			})
 
 			ginkgo.By("check if update has been synced to member clusters", func() {
-				for _, cluster := range clusters {
-					clusterClient := getClusterClient(cluster.Name)
+				for _, cluster := range framework.Clusters() {
+					clusterClient := framework.GetClusterClient(cluster.Name)
 					gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())
 
 					klog.Infof("Waiting for job(%s/%s) synced on cluster(%s)", jobNamespace, jobName, cluster.Name)
@@ -700,8 +537,8 @@ var _ = ginkgo.Describe("[BasicPropagation] basic propagation testing", func() {
 			})
 
 			ginkgo.By("check if job has been deleted from member clusters", func() {
-				for _, cluster := range clusters {
-					clusterClient := getClusterClient(cluster.Name)
+				for _, cluster := range framework.Clusters() {
+					clusterClient := framework.GetClusterClient(cluster.Name)
 					gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())
 
 					klog.Infof("Waiting for job(%s/%s) disappear on cluster(%s)", jobNamespace, jobName, cluster.Name)
@@ -719,6 +556,8 @@ var _ = ginkgo.Describe("[BasicPropagation] basic propagation testing", func() {
 					gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
 				}
 			})
+
+			framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name)
 		})
 	})
 })
@@ -3,7 +3,6 @@ package e2e
 import (
-	"context"
 	"encoding/json"
 	"fmt"
 
 	"github.com/onsi/ginkgo"
 	"github.com/onsi/gomega"
@@ -15,6 +14,7 @@ import (
 	"k8s.io/utils/pointer"
 
 	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
+	"github.com/karmada-io/karmada/test/e2e/framework"
 	"github.com/karmada-io/karmada/test/helper"
 )
@@ -35,32 +35,16 @@ var _ = ginkgo.Describe("[resource-status collection] resource status collection
 			},
 		}, policyv1alpha1.Placement{
 			ClusterAffinity: &policyv1alpha1.ClusterAffinity{
-				ClusterNames: clusterNames,
+				ClusterNames: framework.ClusterNames(),
 			},
 		})
 
-		ginkgo.BeforeEach(func() {
-			ginkgo.By(fmt.Sprintf("creating policy(%s/%s)", policyNamespace, policyName), func() {
-				_, err := karmadaClient.PolicyV1alpha1().PropagationPolicies(policyNamespace).Create(context.TODO(), policy, metav1.CreateOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
-		})
-
-		ginkgo.AfterEach(func() {
-			ginkgo.By(fmt.Sprintf("removing policy(%s/%s)", policyNamespace, policyName), func() {
-				err := karmadaClient.PolicyV1alpha1().PropagationPolicies(policyNamespace).Delete(context.TODO(), policyName, metav1.DeleteOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
-		})
-
 		ginkgo.It("deployment status collection testing", func() {
-			ginkgo.By(fmt.Sprintf("creating deployment(%s/%s)", deploymentNamespace, deploymentName), func() {
-				_, err := kubeClient.AppsV1().Deployments(testNamespace).Create(context.TODO(), deployment, metav1.CreateOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
+			framework.CreatePropagationPolicy(karmadaClient, policy)
+			framework.CreateDeployment(kubeClient, deployment)
 
 			ginkgo.By("check whether the deployment status can be correctly collected", func() {
-				wantedReplicas := *deployment.Spec.Replicas * int32(len(clusters))
+				wantedReplicas := *deployment.Spec.Replicas * int32(len(framework.Clusters()))
 
 				klog.Infof("Waiting for deployment(%s/%s) collecting correctly status", deploymentNamespace, deploymentName)
 				err := wait.PollImmediate(pollInterval, pollTimeout, func() (done bool, err error) {
@@ -94,7 +78,7 @@ var _ = ginkgo.Describe("[resource-status collection] resource status collection
 			})
 
 			ginkgo.By("check if deployment status has been update whit new collection", func() {
-				wantedReplicas := updateDeploymentReplicas * int32(len(clusters))
+				wantedReplicas := updateDeploymentReplicas * int32(len(framework.Clusters()))
 
 				klog.Infof("Waiting for deployment(%s/%s) collecting correctly status", deploymentNamespace, deploymentName)
 				err := wait.PollImmediate(pollInterval, pollTimeout, func() (done bool, err error) {
@@ -113,10 +97,8 @@ var _ = ginkgo.Describe("[resource-status collection] resource status collection
 				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
 			})
 
-			ginkgo.By(fmt.Sprintf("removing deployment(%s/%s)", deploymentNamespace, deploymentName), func() {
-				err := kubeClient.AppsV1().Deployments(testNamespace).Delete(context.TODO(), deploymentName, metav1.DeleteOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
+			framework.RemoveDeployment(kubeClient, deployment.Namespace, deployment.Name)
+			framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name)
 		})
 	})
 })
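The status-collection assertion kept inline above is plain arithmetic: the policy places the Deployment on every member cluster, so once aggregation works the control-plane object should report spec.replicas times the number of clusters. A self-contained sketch of that poll; the helper name, signature, and the choice of ReadyReplicas as the collected field are all assumptions:

package e2e

import (
	"context"
	"time"

	"github.com/onsi/gomega"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitDeploymentStatusCollected polls the karmada control plane until the
// aggregated ready-replica count reaches wantedReplicas; e.g. 3 replicas on
// 2 member clusters converge to 6.
func waitDeploymentStatusCollected(client kubernetes.Interface, namespace, name string, wantedReplicas int32, interval, timeout time.Duration) {
	err := wait.PollImmediate(interval, timeout, func() (bool, error) {
		dep, err := client.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		return dep.Status.ReadyReplicas == wantedReplicas, nil
	})
	gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
}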
@@ -26,6 +26,7 @@ import (
 	workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
 	"github.com/karmada-io/karmada/pkg/util"
 	"github.com/karmada-io/karmada/pkg/util/names"
+	"github.com/karmada-io/karmada/test/e2e/framework"
 	"github.com/karmada-io/karmada/test/helper"
 )
@@ -64,27 +65,9 @@ var _ = ginkgo.Describe("propagation with label and group constraints testing",
 			},
 		})
 
-		ginkgo.BeforeEach(func() {
-			ginkgo.By(fmt.Sprintf("creating policy(%s/%s)", policyNamespace, policyName), func() {
-				_, err := karmadaClient.PolicyV1alpha1().PropagationPolicies(policyNamespace).Create(context.TODO(), policy, metav1.CreateOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-				klog.Infof("created policy(%s/%s)", policyNamespace, policyName)
-			})
-		})
-
-		ginkgo.AfterEach(func() {
-			ginkgo.By(fmt.Sprintf("removing policy(%s/%s)", policyNamespace, policyName), func() {
-				err := karmadaClient.PolicyV1alpha1().PropagationPolicies(policyNamespace).Delete(context.TODO(), policyName, metav1.DeleteOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
-		})
-
 		ginkgo.It("deployment propagation with label and group constraints testing", func() {
-			ginkgo.By(fmt.Sprintf("creating deployment(%s/%s)", deployment.Namespace, deployment.Name), func() {
-				_, err := kubeClient.AppsV1().Deployments(testNamespace).Create(context.TODO(), deployment, metav1.CreateOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-				klog.Infof("created deployment(%s/%s)", deployment.Namespace, deployment.Name)
-			})
+			framework.CreatePropagationPolicy(karmadaClient, policy)
+			framework.CreateDeployment(kubeClient, deployment)
 
 			ginkgo.By("collect the target clusters in resource binding", func() {
 				var err error
@@ -104,7 +87,7 @@ var _ = ginkgo.Describe("propagation with label and group constraints testing",
 
 			ginkgo.By("check if deployment present on right clusters", func() {
 				for _, targetClusterName := range targetClusterNames {
-					clusterClient := getClusterClient(targetClusterName)
+					clusterClient := framework.GetClusterClient(targetClusterName)
 					gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())
 
 					klog.Infof("Check whether deployment(%s/%s) is present on cluster(%s)", deploymentNamespace, deploymentName, targetClusterName)
@@ -142,7 +125,7 @@ var _ = ginkgo.Describe("propagation with label and group constraints testing",
 
 			ginkgo.By("check if update has been synced to member clusters", func() {
 				for _, cluster := range groupMatchedClusters {
-					clusterClient := getClusterClient(cluster.Name)
+					clusterClient := framework.GetClusterClient(cluster.Name)
 					gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())
 
 					klog.Infof("Waiting for deployment(%s/%s) synced on cluster(%s)", deploymentNamespace, deploymentName, cluster.Name)
@@ -162,31 +145,8 @@ var _ = ginkgo.Describe("propagation with label and group constraints testing",
 				}
 			})
 
-			ginkgo.By(fmt.Sprintf("removing deployment(%s/%s)", deploymentNamespace, deploymentName), func() {
-				err := kubeClient.AppsV1().Deployments(testNamespace).Delete(context.TODO(), deploymentName, metav1.DeleteOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
-
-			ginkgo.By("check if deployment has been deleted from member clusters", func() {
-				for _, cluster := range groupMatchedClusters {
-					clusterClient := getClusterClient(cluster.Name)
-					gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())
-
-					klog.Infof("Waiting for deployment(%s/%s) disappear on cluster(%s)", deploymentNamespace, deploymentName, cluster.Name)
-					err := wait.PollImmediate(pollInterval, pollTimeout, func() (done bool, err error) {
-						_, err = clusterClient.AppsV1().Deployments(deploymentNamespace).Get(context.TODO(), deploymentName, metav1.GetOptions{})
-						if err != nil {
-							if apierrors.IsNotFound(err) {
-								return true, nil
-							}
-							return false, err
-						}
-
-						return false, nil
-					})
-					gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-				}
-			})
+			framework.RemoveDeployment(kubeClient, deployment.Namespace, deployment.Name)
+			framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name)
 		})
 	})
 	ginkgo.Context("CustomResourceDefinition propagation testing", func() {
@@ -227,33 +187,10 @@ var _ = ginkgo.Describe("propagation with label and group constraints testing",
 		})
-		crdGVR := schema.GroupVersionResource{Group: "apiextensions.k8s.io", Version: "v1", Resource: "customresourcedefinitions"}
 
-		ginkgo.BeforeEach(func() {
-			ginkgo.By(fmt.Sprintf("creating crdPolicy(%s)", crdPolicy.Name), func() {
-				_, err := karmadaClient.PolicyV1alpha1().ClusterPropagationPolicies().Create(context.TODO(), crdPolicy, metav1.CreateOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
-		})
-
-		ginkgo.AfterEach(func() {
-			ginkgo.By(fmt.Sprintf("removing crdPolicy(%s)", crdPolicy.Name), func() {
-				err := karmadaClient.PolicyV1alpha1().ClusterPropagationPolicies().Delete(context.TODO(), crdPolicy.Name, metav1.DeleteOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
-		})
-
 		ginkgo.It("crd with specified label and group constraints propagation testing", func() {
-			ginkgo.By(fmt.Sprintf("creating crd(%s)", crd.Name), func() {
-				unstructObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(crd)
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-
-				_, err = dynamicClient.Resource(crdGVR).Namespace(crd.Namespace).Create(context.TODO(), &unstructured.Unstructured{Object: unstructObj}, metav1.CreateOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
-
-			ginkgo.By(fmt.Sprintf("get crd(%s)", crd.Name), func() {
-				_, err := dynamicClient.Resource(crdGVR).Namespace(crd.Namespace).Get(context.TODO(), crd.Name, metav1.GetOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
+			framework.CreateClusterPropagationPolicy(karmadaClient, crdPolicy)
+			framework.CreateCRD(dynamicClient, crd)
+			framework.GetCRD(dynamicClient, crd.Name)
 
 			ginkgo.By("collect the target clusters in cluster resource binding", func() {
 				bindingName := names.GenerateBindingName(crd.Kind, crd.Name)
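framework.CreateClusterPropagationPolicy and framework.CreateCRD fold the removed BeforeEach bodies into the spec itself. CreateCRD likely keeps the typed-to-unstructured conversion, since CRDs go through the dynamic client; a sketch reconstructed from the deleted lines, where only the function boundary is an assumption:

package framework

import (
	"context"
	"fmt"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
)

var crdGVR = schema.GroupVersionResource{Group: "apiextensions.k8s.io", Version: "v1", Resource: "customresourcedefinitions"}

// CreateCRD converts the typed CRD to unstructured and creates it through the
// dynamic client, as the removed inline block did (CRDs are cluster-scoped,
// so no namespace is involved).
func CreateCRD(client dynamic.Interface, crd *apiextensionsv1.CustomResourceDefinition) {
	ginkgo.By(fmt.Sprintf("creating crd(%s)", crd.Name), func() {
		unstructObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(crd)
		gomega.Expect(err).ShouldNot(gomega.HaveOccurred())

		_, err = client.Resource(crdGVR).Create(context.TODO(), &unstructured.Unstructured{Object: unstructObj}, metav1.CreateOptions{})
		gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
	})
}

GetCRD and RemoveCRD presumably wrap the matching Get and Delete calls, and CreateClusterPropagationPolicy mirrors CreatePropagationPolicy without the namespace argument.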
@@ -287,7 +224,7 @@ var _ = ginkgo.Describe("propagation with label and group constraints testing",
 
 			ginkgo.By("check if crd present on right clusters", func() {
 				for _, targetClusterName := range targetClusterNames {
-					clusterDynamicClient := getClusterDynamicClient(targetClusterName)
+					clusterDynamicClient := framework.GetClusterDynamicClient(targetClusterName)
 					gomega.Expect(clusterDynamicClient).ShouldNot(gomega.BeNil())
 
 					klog.Infof("Waiting for crd(%s) present on cluster(%s)", crd.Name, targetClusterName)
@@ -310,30 +247,9 @@ var _ = ginkgo.Describe("propagation with label and group constraints testing",
 				gomega.Expect(minGroups == len(groupMatchedClusters)).ShouldNot(gomega.BeFalse())
 			})
 
-			ginkgo.By(fmt.Sprintf("removing crd(%s)", crd.Name), func() {
-				err := dynamicClient.Resource(crdGVR).Namespace(crd.Namespace).Delete(context.TODO(), crd.Name, metav1.DeleteOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
-
-			ginkgo.By("check if crd with specified label and group constraints disappeared from member clusters", func() {
-				for _, cluster := range groupMatchedClusters {
-					clusterDynamicClient := getClusterDynamicClient(cluster.Name)
-					gomega.Expect(clusterDynamicClient).ShouldNot(gomega.BeNil())
-
-					klog.Infof("Waiting for crd(%s) disappeared on cluster(%s)\n", crd.Name, cluster.Name)
-					err := wait.PollImmediate(pollInterval, pollTimeout, func() (done bool, err error) {
-						_, err = clusterDynamicClient.Resource(crdGVR).Namespace(crd.Namespace).Get(context.TODO(), crd.Name, metav1.GetOptions{})
-						if err != nil {
-							if apierrors.IsNotFound(err) {
-								return true, nil
-							}
-							return false, err
-						}
-						return false, nil
-					})
-					gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-				}
-			})
+			framework.RemoveCRD(dynamicClient, crd.Name)
+			framework.WaitCRDDisappearedOnClusters(framework.GetClusterNamesFromClusters(groupMatchedClusters), crd.Name)
+			framework.RemoveClusterPropagationPolicy(karmadaClient, crdPolicy.Name)
 		})
 	})
 })
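framework.GetClusterNamesFromClusters and framework.WaitCRDDisappearedOnClusters replace the loop over groupMatchedClusters deleted here. A sketch; the Cluster slice type, the dynamic-client accessor, the GVR and the poll constants are assumptions carried over from the earlier sketches:

package framework

import (
	"context"
	"time"

	"github.com/onsi/gomega"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/dynamic"
	"k8s.io/klog/v2"

	clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
)

const (
	pollInterval = 5 * time.Second // assumed
	pollTimeout  = 5 * time.Minute // assumed
)

var crdGVR = schema.GroupVersionResource{Group: "apiextensions.k8s.io", Version: "v1", Resource: "customresourcedefinitions"}

// GetClusterDynamicClient is sketched elsewhere; stubbed for self-containment.
var GetClusterDynamicClient = func(name string) dynamic.Interface { return nil }

// GetClusterNamesFromClusters lets call sites holding Cluster objects (like
// groupMatchedClusters above) reuse the name-based helpers.
func GetClusterNamesFromClusters(clusters []*clusterv1alpha1.Cluster) []string {
	names := make([]string, 0, len(clusters))
	for _, cluster := range clusters {
		names = append(names, cluster.Name)
	}
	return names
}

// WaitCRDDisappearedOnClusters polls each member cluster until the CRD is gone.
func WaitCRDDisappearedOnClusters(clusterNames []string, crdName string) {
	for _, clusterName := range clusterNames {
		clusterDynamicClient := GetClusterDynamicClient(clusterName)
		gomega.Expect(clusterDynamicClient).ShouldNot(gomega.BeNil())

		klog.Infof("Waiting for crd(%s) disappeared on cluster(%s)", crdName, clusterName)
		err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
			_, err := clusterDynamicClient.Resource(crdGVR).Get(context.TODO(), crdName, metav1.GetOptions{})
			if err != nil {
				if apierrors.IsNotFound(err) {
					return true, nil
				}
				return false, err
			}
			return false, nil
		})
		gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
	}
}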
@@ -372,7 +288,7 @@ var _ = ginkgo.Describe("[ReplicaScheduling] ReplicaSchedulingStrategy testing",
 			},
 		}, policyv1alpha1.Placement{
 			ClusterAffinity: &policyv1alpha1.ClusterAffinity{
-				ClusterNames: clusterNames,
+				ClusterNames: framework.ClusterNames(),
 			},
 			ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{
 				ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDuplicated,
@@ -380,19 +296,12 @@ var _ = ginkgo.Describe("[ReplicaScheduling] ReplicaSchedulingStrategy testing",
 		})
 
 		ginkgo.It("replicas duplicated testing", func() {
-			ginkgo.By(fmt.Sprintf("creating policy(%s/%s)", policyNamespace, policyName), func() {
-				_, err := karmadaClient.PolicyV1alpha1().PropagationPolicies(policyNamespace).Create(context.TODO(), policy, metav1.CreateOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
-
-			ginkgo.By(fmt.Sprintf("creating deployment(%s/%s)", deploymentNamespace, deploymentName), func() {
-				_, err := kubeClient.AppsV1().Deployments(deploymentNamespace).Create(context.TODO(), deployment, metav1.CreateOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
+			framework.CreatePropagationPolicy(karmadaClient, policy)
+			framework.CreateDeployment(kubeClient, deployment)
 
 			ginkgo.By("check if deployment's replicas are duplicate on member clusters", func() {
-				for _, cluster := range clusters {
-					clusterClient := getClusterClient(cluster.Name)
+				for _, cluster := range framework.Clusters() {
+					clusterClient := framework.GetClusterClient(cluster.Name)
 					gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())
 
 					gomega.Eventually(func() *int32 {
@@ -406,15 +315,8 @@ var _ = ginkgo.Describe("[ReplicaScheduling] ReplicaSchedulingStrategy testing",
 				}
 			})
 
-			ginkgo.By(fmt.Sprintf("removing deployment(%s/%s)", deploymentNamespace, deploymentName), func() {
-				err := kubeClient.AppsV1().Deployments(testNamespace).Delete(context.TODO(), deploymentName, metav1.DeleteOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
-
-			ginkgo.By(fmt.Sprintf("removing policy(%s/%s)", policyNamespace, policyName), func() {
-				err := karmadaClient.PolicyV1alpha1().PropagationPolicies(policyNamespace).Delete(context.TODO(), policyName, metav1.DeleteOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
+			framework.RemoveDeployment(kubeClient, deployment.Namespace, deployment.Name)
+			framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name)
 		})
 	})
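framework.CreateDeployment is the creation-side twin of RemoveDeployment sketched earlier; reconstructed from the removed blocks, with the signature assumed:

package framework

import (
	"context"
	"fmt"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// CreateDeployment creates the Deployment in whatever namespace the object
// itself carries, replacing the inline create-and-assert blocks removed above.
func CreateDeployment(client kubernetes.Interface, deployment *appsv1.Deployment) {
	ginkgo.By(fmt.Sprintf("creating deployment(%s/%s)", deployment.Namespace, deployment.Name), func() {
		_, err := client.AppsV1().Deployments(deployment.Namespace).Create(context.TODO(), deployment, metav1.CreateOptions{})
		gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
	})
}

Deriving the namespace from the object is what lets the same helper serve specs that previously mixed deploymentNamespace and testNamespace.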
@@ -434,45 +336,20 @@ var _ = ginkgo.Describe("[ReplicaScheduling] ReplicaSchedulingStrategy testing",
 			},
 		}, policyv1alpha1.Placement{
 			ClusterAffinity: &policyv1alpha1.ClusterAffinity{
-				ClusterNames: clusterNames,
+				ClusterNames: framework.ClusterNames(),
 			},
 			ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{
 				ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDuplicated,
 			},
 		})
 
-		ginkgo.BeforeEach(func() {
-			ginkgo.By(fmt.Sprintf("creating policy(%s/%s)", policyNamespace, policyName), func() {
-				_, err := karmadaClient.PolicyV1alpha1().PropagationPolicies(policyNamespace).Create(context.TODO(), policy, metav1.CreateOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
-		})
-
-		ginkgo.BeforeEach(func() {
-			ginkgo.By(fmt.Sprintf("creating deployment(%s/%s)", deploymentNamespace, deploymentName), func() {
-				_, err := kubeClient.AppsV1().Deployments(deploymentNamespace).Create(context.TODO(), deployment, metav1.CreateOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
-		})
-
-		ginkgo.AfterEach(func() {
-			ginkgo.By(fmt.Sprintf("removing policy(%s/%s)", policyNamespace, policyName), func() {
-				err := karmadaClient.PolicyV1alpha1().PropagationPolicies(policyNamespace).Delete(context.TODO(), policyName, metav1.DeleteOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
-		})
-
-		ginkgo.AfterEach(func() {
-			ginkgo.By(fmt.Sprintf("removing deployment(%s/%s)", deploymentNamespace, deploymentName), func() {
-				err := kubeClient.AppsV1().Deployments(testNamespace).Delete(context.TODO(), deploymentName, metav1.DeleteOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
-		})
-
 		ginkgo.It("replicas duplicated testing when rescheduling", func() {
+			framework.CreatePropagationPolicy(karmadaClient, policy)
+			framework.CreateDeployment(kubeClient, deployment)
+
 			ginkgo.By("make sure deployment has been propagated to member clusters", func() {
-				for _, cluster := range clusters {
-					clusterClient := getClusterClient(cluster.Name)
+				for _, cluster := range framework.Clusters() {
+					clusterClient := framework.GetClusterClient(cluster.Name)
 					gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())
 
 					gomega.Eventually(func() bool {
@@ -484,20 +361,10 @@ var _ = ginkgo.Describe("[ReplicaScheduling] ReplicaSchedulingStrategy testing",
 				}
 			})
 
-			ginkgo.By(fmt.Sprintf("Update deployment(%s/%s)'s replicas", deploymentNamespace, deploymentName), func() {
-				updateReplicas := int32(2)
-				deployment.Spec.Replicas = &updateReplicas
-
-				gomega.Eventually(func() error {
-					_, err := kubeClient.AppsV1().Deployments(deploymentNamespace).Update(context.TODO(), deployment, metav1.UpdateOptions{})
-					return err
-				}, pollTimeout, pollInterval).ShouldNot(gomega.HaveOccurred())
-				klog.Infof("Update deployment(%s/%s)'s replicas to %d", deploymentNamespace, deploymentName, *deployment.Spec.Replicas)
-			})
-
+			framework.UpdateDeploymentReplicas(kubeClient, deployment, updateDeploymentReplicas)
 			ginkgo.By("check if deployment's replicas have been updated on member clusters", func() {
-				for _, cluster := range clusters {
-					clusterClient := getClusterClient(cluster.Name)
+				for _, cluster := range framework.Clusters() {
+					clusterClient := framework.GetClusterClient(cluster.Name)
 					gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())
 
 					gomega.Eventually(func() *int32 {
@@ -510,6 +377,9 @@ var _ = ginkgo.Describe("[ReplicaScheduling] ReplicaSchedulingStrategy testing",
 					}, pollTimeout, pollInterval).Should(gomega.Equal(deployment.Spec.Replicas))
 				}
 			})
+
+			framework.RemoveDeployment(kubeClient, deployment.Namespace, deployment.Name)
+			framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name)
 		})
 	})
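framework.UpdateDeploymentReplicas unifies the two update idioms this commit removes: the strategic-merge patch in the basic propagation spec and the Eventually+Update loop here. A sketch using the retry flavor, with the poll constants assumed:

package framework

import (
	"context"
	"fmt"
	"time"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

const (
	pollInterval = 5 * time.Second // assumed
	pollTimeout  = 5 * time.Minute // assumed
)

// UpdateDeploymentReplicas sets spec.replicas and retries the update until the
// API server accepts it, as the removed inline Eventually block did.
func UpdateDeploymentReplicas(client kubernetes.Interface, deployment *appsv1.Deployment, replicas int32) {
	ginkgo.By(fmt.Sprintf("updating deployment(%s/%s)'s replicas to %d", deployment.Namespace, deployment.Name, replicas), func() {
		deployment.Spec.Replicas = &replicas
		gomega.Eventually(func() error {
			_, err := client.AppsV1().Deployments(deployment.Namespace).Update(context.TODO(), deployment, metav1.UpdateOptions{})
			return err
		}, pollTimeout, pollInterval).ShouldNot(gomega.HaveOccurred())
	})
}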
@@ -530,7 +400,7 @@ var _ = ginkgo.Describe("[ReplicaScheduling] ReplicaSchedulingStrategy testing",
 			},
 		}, policyv1alpha1.Placement{
 			ClusterAffinity: &policyv1alpha1.ClusterAffinity{
-				ClusterNames: clusterNames,
+				ClusterNames: framework.ClusterNames(),
 			},
 			ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{
 				ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided,
@@ -539,22 +409,15 @@ var _ = ginkgo.Describe("[ReplicaScheduling] ReplicaSchedulingStrategy testing",
 		})
 
 		ginkgo.It("replicas divided and weighted testing", func() {
-			ginkgo.By(fmt.Sprintf("creating policy(%s/%s)", policyNamespace, policyName), func() {
-				_, err := karmadaClient.PolicyV1alpha1().PropagationPolicies(policyNamespace).Create(context.TODO(), policy, metav1.CreateOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
-
+			framework.CreatePropagationPolicy(karmadaClient, policy)
 			expectedReplicas := int32(2)
-			ginkgo.By(fmt.Sprintf("creating deployment(%s/%s)", deploymentNamespace, deploymentName), func() {
-				updateReplicas := expectedReplicas * int32(len(clusters))
-				deployment.Spec.Replicas = &updateReplicas
-				_, err := kubeClient.AppsV1().Deployments(deploymentNamespace).Create(context.TODO(), deployment, metav1.CreateOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
+			updateReplicas := expectedReplicas * int32(len(framework.Clusters()))
+			deployment.Spec.Replicas = &updateReplicas
+			framework.CreateDeployment(kubeClient, deployment)
 
 			ginkgo.By("check if deployment's replicas are divided equally on member clusters", func() {
-				for _, cluster := range clusters {
-					clusterClient := getClusterClient(cluster.Name)
+				for _, cluster := range framework.Clusters() {
+					clusterClient := framework.GetClusterClient(cluster.Name)
 					gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())
 
 					gomega.Eventually(func() int32 {
@@ -568,15 +431,8 @@ var _ = ginkgo.Describe("[ReplicaScheduling] ReplicaSchedulingStrategy testing",
 				}
 			})
 
-			ginkgo.By(fmt.Sprintf("removing deployment(%s/%s)", deploymentNamespace, deploymentName), func() {
-				err := kubeClient.AppsV1().Deployments(testNamespace).Delete(context.TODO(), deploymentName, metav1.DeleteOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
-
-			ginkgo.By(fmt.Sprintf("removing policy(%s/%s)", policyNamespace, policyName), func() {
-				err := karmadaClient.PolicyV1alpha1().PropagationPolicies(policyNamespace).Delete(context.TODO(), policyName, metav1.DeleteOptions{})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
+			framework.RemoveDeployment(kubeClient, deployment.Namespace, deployment.Name)
+			framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name)
 		})
 	})
@ -598,7 +454,7 @@ var _ = ginkgo.Describe("[ReplicaScheduling] ReplicaSchedulingStrategy testing",
|
|||
},
|
||||
}, policyv1alpha1.Placement{
|
||||
ClusterAffinity: &policyv1alpha1.ClusterAffinity{
|
||||
ClusterNames: clusterNames,
|
||||
ClusterNames: framework.ClusterNames(),
|
||||
},
|
||||
ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{
|
||||
ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided,
|
||||
|
|
@@ -606,63 +462,18 @@ var _ = ginkgo.Describe("[ReplicaScheduling] ReplicaSchedulingStrategy testing",
},
})

ginkgo.BeforeEach(func() {
ginkgo.By(fmt.Sprintf("creating policy(%s/%s)", policyNamespace, policyName), func() {
_, err := karmadaClient.PolicyV1alpha1().PropagationPolicies(policyNamespace).Create(context.TODO(), policy, metav1.CreateOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
})

ginkgo.BeforeEach(func() {
ginkgo.By(fmt.Sprintf("creating deployment(%s/%s)", deploymentNamespace, deploymentName), func() {
_, err := kubeClient.AppsV1().Deployments(deploymentNamespace).Create(context.TODO(), deployment, metav1.CreateOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
})

ginkgo.AfterEach(func() {
ginkgo.By(fmt.Sprintf("removing policy(%s/%s)", policyNamespace, policyName), func() {
err := karmadaClient.PolicyV1alpha1().PropagationPolicies(policyNamespace).Delete(context.TODO(), policyName, metav1.DeleteOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
})

ginkgo.AfterEach(func() {
ginkgo.By(fmt.Sprintf("removing deployment(%s/%s)", deploymentNamespace, deploymentName), func() {
err := kubeClient.AppsV1().Deployments(testNamespace).Delete(context.TODO(), deploymentName, metav1.DeleteOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
})

ginkgo.It("replicas divided and weighted testing when rescheduling", func() {
ginkgo.By("make sure deployment has been propagated to member clusters", func() {
for _, cluster := range clusters {
clusterClient := getClusterClient(cluster.Name)
gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())

gomega.Eventually(func() bool {
_, err := clusterClient.AppsV1().Deployments(deploymentNamespace).Get(context.TODO(), deploymentName, metav1.GetOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())

return true
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}
})
framework.CreatePropagationPolicy(karmadaClient, policy)
framework.CreateDeployment(kubeClient, deployment)
framework.WaitDeploymentPresentOnClusters(framework.ClusterNames(), deployment.Namespace, deployment.Name)

expectedReplicas := int32(3)
ginkgo.By(fmt.Sprintf("Update deployment(%s/%s)'s replicas to 3*len(clusters)", policyNamespace, policyName), func() {
updateReplicas := expectedReplicas * int32(len(clusters))
deployment.Spec.Replicas = &updateReplicas

gomega.Eventually(func() error {
_, err := kubeClient.AppsV1().Deployments(deploymentNamespace).Update(context.TODO(), deployment, metav1.UpdateOptions{})
return err
}, pollTimeout, pollInterval).ShouldNot(gomega.HaveOccurred())
})
updateReplicas := expectedReplicas * int32(len(framework.Clusters()))
framework.UpdateDeploymentReplicas(kubeClient, deployment, updateReplicas)

ginkgo.By("check if deployment's replicas are divided equally on member clusters", func() {
for _, cluster := range clusters {
clusterClient := getClusterClient(cluster.Name)
for _, cluster := range framework.Clusters() {
clusterClient := framework.GetClusterClient(cluster.Name)
gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())

gomega.Eventually(func() int32 {
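The framework.WaitDeploymentPresentOnClusters call above replaces the hand-rolled Eventually loop the old test body carried. The helper's definition is not part of this diff; what follows is only a minimal sketch of the polling pattern it wraps, assuming a GetClusterClient accessor and poll constants like the ones this suite defines (pollInterval here is an assumed value):

package framework

import (
	"context"
	"time"

	"github.com/onsi/gomega"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Assumed poll settings, mirroring the constants the suite file uses.
const (
	pollInterval = 5 * time.Second
	pollTimeout  = 300 * time.Second
)

// WaitDeploymentPresentOnClusters is a sketch, not the committed code: it
// polls each named member cluster until the deployment can be fetched there.
func WaitDeploymentPresentOnClusters(clusters []string, namespace, name string) {
	for _, clusterName := range clusters {
		clusterClient := GetClusterClient(clusterName) // accessor sketched further down this page
		gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())

		gomega.Eventually(func() bool {
			_, err := clusterClient.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{})
			return err == nil
		}, pollTimeout, pollInterval).Should(gomega.BeTrue())
	}
}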
@@ -675,6 +486,9 @@ var _ = ginkgo.Describe("[ReplicaScheduling] ReplicaSchedulingStrategy testing",
}, pollTimeout, pollInterval).Should(gomega.Equal(expectedReplicas))
}
})

framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name)
framework.RemoveDeployment(kubeClient, deployment.Namespace, deployment.Name)
})
})

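The framework.CreatePropagationPolicy / framework.RemovePropagationPolicy pair used above follows the same shape as the inline blocks it replaces: one ginkgo.By step wrapping one client call plus an assertion. A sketch of that wrapper pattern, assuming only the karmada clientset interface already shown in this diff:

package framework

import (
	"context"
	"fmt"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
	karmada "github.com/karmada-io/karmada/pkg/generated/clientset/versioned"
)

// CreatePropagationPolicy is a sketch of the wrapper pattern: one By step,
// one API call, one assertion, so test bodies shrink to a single line.
func CreatePropagationPolicy(client karmada.Interface, policy *policyv1alpha1.PropagationPolicy) {
	ginkgo.By(fmt.Sprintf("Creating PropagationPolicy(%s/%s)", policy.Namespace, policy.Name), func() {
		_, err := client.PolicyV1alpha1().PropagationPolicies(policy.Namespace).Create(context.TODO(), policy, metav1.CreateOptions{})
		gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
	})
}

// RemovePropagationPolicy mirrors the create helper for cleanup.
func RemovePropagationPolicy(client karmada.Interface, namespace, name string) {
	ginkgo.By(fmt.Sprintf("Removing PropagationPolicy(%s/%s)", namespace, name), func() {
		err := client.PolicyV1alpha1().PropagationPolicies(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
		gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
	})
}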
@@ -695,7 +509,7 @@ var _ = ginkgo.Describe("[ReplicaScheduling] ReplicaSchedulingStrategy testing",
},
}, policyv1alpha1.Placement{
ClusterAffinity: &policyv1alpha1.ClusterAffinity{
ClusterNames: clusterNames,
ClusterNames: framework.ClusterNames(),
},
ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{
ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided,
@@ -707,7 +521,7 @@ var _ = ginkgo.Describe("[ReplicaScheduling] ReplicaSchedulingStrategy testing",
ginkgo.It("replicas divided and weighted testing", func() {
sumWeight := 0
staticWeightLists := make([]policyv1alpha1.StaticClusterWeight, 0)
for index, clusterName := range clusterNames {
for index, clusterName := range framework.ClusterNames() {
staticWeightList := policyv1alpha1.StaticClusterWeight{
TargetCluster: policyv1alpha1.ClusterAffinity{
ClusterNames: []string{clusterName},
@@ -721,24 +535,16 @@ var _ = ginkgo.Describe("[ReplicaScheduling] ReplicaSchedulingStrategy testing",
policy.Spec.Placement.ReplicaScheduling.WeightPreference.StaticWeightList = staticWeightLists
klog.Infof("Sum weight of clusters is %d", sumWeight)

ginkgo.By(fmt.Sprintf("creating policy(%s/%s)", policyNamespace, policyName), func() {
_, err := karmadaClient.PolicyV1alpha1().PropagationPolicies(policyNamespace).Create(context.TODO(), policy, metav1.CreateOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})

ginkgo.By(fmt.Sprintf("creating deployment(%s/%s)", deploymentNamespace, deploymentName), func() {
sumReplicas := int32(sumWeight)
deployment.Spec.Replicas = &sumReplicas

_, err := kubeClient.AppsV1().Deployments(deploymentNamespace).Create(context.TODO(), deployment, metav1.CreateOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
framework.CreatePropagationPolicy(karmadaClient, policy)
sumReplicas := int32(sumWeight)
deployment.Spec.Replicas = &sumReplicas
framework.CreateDeployment(kubeClient, deployment)

ginkgo.By("check if deployment's replicas are divided equally on member clusters", func() {
for index, cluster := range clusters {
for index, cluster := range framework.Clusters() {
expectedReplicas := int32(index + 1)

clusterClient := getClusterClient(cluster.Name)
clusterClient := framework.GetClusterClient(cluster.Name)
gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())

gomega.Eventually(func() int32 {
@@ -752,15 +558,8 @@ var _ = ginkgo.Describe("[ReplicaScheduling] ReplicaSchedulingStrategy testing",
}
})

ginkgo.By(fmt.Sprintf("removing deployment(%s/%s)", deploymentNamespace, deploymentName), func() {
err := kubeClient.AppsV1().Deployments(testNamespace).Delete(context.TODO(), deploymentName, metav1.DeleteOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})

ginkgo.By(fmt.Sprintf("removing policy(%s/%s)", policyNamespace, policyName), func() {
err := karmadaClient.PolicyV1alpha1().PropagationPolicies(policyNamespace).Delete(context.TODO(), policyName, metav1.DeleteOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
framework.RemoveDeployment(kubeClient, deployment.Namespace, deployment.Name)
framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name)
})

})
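The expectations in the weighted cases above follow one formula. With static weights w_i = i+1 and total replicas T, divided scheduling should give cluster i roughly T*w_i/sum(w); the tests pick T = sum(w) (each cluster gets exactly w_i) and, after rescheduling, T = 2*sum(w) (each gets 2*w_i). A tiny self-contained check of that arithmetic, purely illustrative and not part of the commit:

package main

import "fmt"

// Reproduces the replica expectations used by the divided-and-weighted
// tests for n member clusters with weights 1..n.
func main() {
	n := int32(3) // example cluster count; the suite requires at least 2
	var sum int32
	for i := int32(1); i <= n; i++ {
		sum += i
	}
	for _, total := range []int32{sum, 2 * sum} { // initial case, then the rescheduling case
		for i := int32(1); i <= n; i++ {
			expected := total * i / sum // cluster with weight i receives total*i/sum replicas
			fmt.Printf("total=%d weight=%d expected replicas=%d\n", total, i, expected)
		}
	}
}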
@@ -783,7 +582,7 @@ var _ = ginkgo.Describe("[ReplicaScheduling] ReplicaSchedulingStrategy testing",
},
}, policyv1alpha1.Placement{
ClusterAffinity: &policyv1alpha1.ClusterAffinity{
ClusterNames: clusterNames,
ClusterNames: framework.ClusterNames(),
},
ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{
ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided,
@@ -793,81 +592,49 @@ var _ = ginkgo.Describe("[ReplicaScheduling] ReplicaSchedulingStrategy testing",
})

ginkgo.BeforeEach(func() {
ginkgo.By(fmt.Sprintf("creating policy(%s/%s)", policyNamespace, policyName), func() {
staticWeightLists := make([]policyv1alpha1.StaticClusterWeight, 0)
for index, clusterName := range clusterNames {
staticWeightList := policyv1alpha1.StaticClusterWeight{
TargetCluster: policyv1alpha1.ClusterAffinity{
ClusterNames: []string{clusterName},
},
Weight: int64(index + 1),
}
staticWeightLists = append(staticWeightLists, staticWeightList)
staticWeightLists := make([]policyv1alpha1.StaticClusterWeight, 0)
for index, clusterName := range framework.ClusterNames() {
staticWeightList := policyv1alpha1.StaticClusterWeight{
TargetCluster: policyv1alpha1.ClusterAffinity{
ClusterNames: []string{clusterName},
},
Weight: int64(index + 1),
}
klog.Infof("StaticWeightList of policy is %+v", staticWeightLists)
policy.Spec.Placement.ReplicaScheduling.WeightPreference.StaticWeightList = staticWeightLists

_, err := karmadaClient.PolicyV1alpha1().PropagationPolicies(policyNamespace).Create(context.TODO(), policy, metav1.CreateOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
staticWeightLists = append(staticWeightLists, staticWeightList)
}
klog.Infof("StaticWeightList of policy is %+v", staticWeightLists)
policy.Spec.Placement.ReplicaScheduling.WeightPreference.StaticWeightList = staticWeightLists
framework.CreatePropagationPolicy(karmadaClient, policy)
})

ginkgo.BeforeEach(func() {
ginkgo.By(fmt.Sprintf("creating deployment(%s/%s)", deploymentNamespace, deploymentName), func() {
_, err := kubeClient.AppsV1().Deployments(deploymentNamespace).Create(context.TODO(), deployment, metav1.CreateOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
framework.CreateDeployment(kubeClient, deployment)
})

ginkgo.AfterEach(func() {
ginkgo.By(fmt.Sprintf("removing policy(%s/%s)", policyNamespace, policyName), func() {
err := karmadaClient.PolicyV1alpha1().PropagationPolicies(policyNamespace).Delete(context.TODO(), policyName, metav1.DeleteOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name)
})

ginkgo.AfterEach(func() {
ginkgo.By(fmt.Sprintf("removing deployment(%s/%s)", deploymentNamespace, deploymentName), func() {
err := kubeClient.AppsV1().Deployments(testNamespace).Delete(context.TODO(), deploymentName, metav1.DeleteOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
framework.RemoveDeployment(kubeClient, deployment.Namespace, deployment.Name)
})

ginkgo.It("replicas divided and weighted testing when rescheduling", func() {
ginkgo.By("make sure deployment has been propagated to member clusters", func() {
for _, cluster := range clusters {
clusterClient := getClusterClient(cluster.Name)
gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())
framework.WaitDeploymentPresentOnClusters(framework.ClusterNames(), deployment.Namespace, deployment.Name)

gomega.Eventually(func() bool {
_, err := clusterClient.AppsV1().Deployments(deploymentNamespace).Get(context.TODO(), deploymentName, metav1.GetOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())

return true
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}
})

ginkgo.By(fmt.Sprintf("Update deployment(%s/%s)'s replicas to 2*sumWeight", policyNamespace, policyName), func() {
sumWeight := 0
for index := range clusterNames {
sumWeight += index + 1
}
klog.Infof("sumWeight of clusters is %d", sumWeight)
updateReplicas := 2 * int32(sumWeight)
deployment.Spec.Replicas = &updateReplicas

gomega.Eventually(func() error {
_, err := kubeClient.AppsV1().Deployments(deploymentNamespace).Update(context.TODO(), deployment, metav1.UpdateOptions{})
return err
}, pollTimeout, pollInterval).ShouldNot(gomega.HaveOccurred())
})
sumWeight := 0
for index := range framework.ClusterNames() {
sumWeight += index + 1
}
klog.Infof("sumWeight of clusters is %d", sumWeight)
updateReplicas := 2 * int32(sumWeight)
framework.UpdateDeploymentReplicas(kubeClient, deployment, updateReplicas)

ginkgo.By("check if deployment's replicas are divided equally on member clusters", func() {
for index, cluster := range clusters {
for index, cluster := range framework.Clusters() {
expectedReplicas := 2 * int32(index+1)

clusterClient := getClusterClient(cluster.Name)
clusterClient := framework.GetClusterClient(cluster.Name)
gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())

gomega.Eventually(func() int32 {

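framework.UpdateDeploymentReplicas, used in both rescheduling cases, is also defined outside this diff. Its plausible shape simply wraps the retried Update the old code did inline; everything below besides the call-site signature is an assumption:

package framework

import (
	"context"
	"time"

	"github.com/onsi/gomega"
	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// Assumed poll settings, mirroring the suite's constants.
const (
	pollInterval = 5 * time.Second
	pollTimeout  = 300 * time.Second
)

// UpdateDeploymentReplicas is a sketch: set the desired replica count and
// retry the Update until the API server accepts it, as the inline code did.
func UpdateDeploymentReplicas(client kubernetes.Interface, deployment *appsv1.Deployment, replicas int32) {
	deployment.Spec.Replicas = &replicas
	gomega.Eventually(func() error {
		_, err := client.AppsV1().Deployments(deployment.Namespace).Update(context.TODO(), deployment, metav1.UpdateOptions{})
		return err
	}, pollTimeout, pollInterval).ShouldNot(gomega.HaveOccurred())
}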
@@ -5,7 +5,6 @@ import (
"fmt"
"io"
"os"
"strings"
"testing"
"time"

@@ -13,14 +12,12 @@ import (
"github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/kind/pkg/cluster"
"sigs.k8s.io/kind/pkg/exec"

@@ -29,6 +26,7 @@ import (
karmada "github.com/karmada-io/karmada/pkg/generated/clientset/versioned"
"github.com/karmada-io/karmada/pkg/util"
"github.com/karmada-io/karmada/pkg/util/gclient"
"github.com/karmada-io/karmada/test/e2e/framework"
"github.com/karmada-io/karmada/test/helper"
)

@@ -43,9 +41,6 @@ const (
// pollTimeout defines the time after which the poll operation times out.
pollTimeout = 300 * time.Second

// MinimumCluster represents the minimum number of member clusters to run E2E test.
MinimumCluster = 2

// RandomStrLength represents the random string length to combine names.
RandomStrLength = 3
)
@@ -71,13 +66,8 @@ var (
karmadaClient karmada.Interface
dynamicClient dynamic.Interface
controlPlaneClient client.Client
clusters []*clusterv1alpha1.Cluster
clusterNames []string
clusterClients []*util.ClusterClient
clusterDynamicClients []*util.DynamicClusterClient
testNamespace = fmt.Sprintf("karmadatest-%s", rand.String(RandomStrLength))
clusterProvider *cluster.Provider
pullModeClusters map[string]string
clusterLabels = map[string]string{"location": "CHN"}
pushModeClusterLabels = map[string]string{"sync-mode": "Push"}
)
@@ -107,112 +97,28 @@ var _ = ginkgo.BeforeSuite(func() {

controlPlaneClient = gclient.NewForConfigOrDie(restConfig)

pullModeClusters, err = fetchPullBasedClusters()
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
framework.InitClusterInformation(karmadaClient, controlPlaneClient)

clusters, err = fetchClusters(karmadaClient)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())

var meetRequirement bool
meetRequirement, err = isClusterMeetRequirements(clusters)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
gomega.Expect(meetRequirement).Should(gomega.BeTrue())

for _, cluster := range clusters {
clusterClient, clusterDynamicClient, err := newClusterClientSet(cluster)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
clusterNames = append(clusterNames, cluster.Name)
clusterClients = append(clusterClients, clusterClient)
clusterDynamicClients = append(clusterDynamicClients, clusterDynamicClient)

err = SetClusterLabel(controlPlaneClient, cluster.Name)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
}
gomega.Expect(clusterNames).Should(gomega.HaveLen(len(clusters)))

clusters, err = fetchClusters(karmadaClient)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())

fmt.Printf("There are %d clusters\n", len(clusters))

err = setupTestNamespace(testNamespace, kubeClient, clusterClients)
err = setupTestNamespace(testNamespace, kubeClient)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
}, TestSuiteSetupTimeOut.Seconds())

var _ = ginkgo.AfterSuite(func() {
// cleanup clusterLabels set by the E2E test
for _, cluster := range clusters {
err := DeleteClusterLabel(controlPlaneClient, cluster.Name)
for _, cluster := range framework.Clusters() {
err := deleteClusterLabel(controlPlaneClient, cluster.Name)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
}

// cleanup all namespaces we created both in control plane and member clusters.
// It will not return error even if there is no such namespace in there that may happen in case setup failed.
err := cleanupTestNamespace(testNamespace, kubeClient, clusterClients)
err := cleanupTestNamespace(testNamespace, kubeClient)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
}, TestSuiteTeardownTimeOut.Seconds())

func fetchPullBasedClusters() (map[string]string, error) {
pullBasedClusters := os.Getenv("PULL_BASED_CLUSTERS")
if pullBasedClusters == "" {
return nil, nil
}

pullBasedClustersMap := make(map[string]string)
pullBasedClusters = strings.TrimSuffix(pullBasedClusters, ";")
clusterInfo := strings.Split(pullBasedClusters, ";")
for _, cluster := range clusterInfo {
clusterNameAndConfigPath := strings.Split(cluster, ":")
if len(clusterNameAndConfigPath) != 2 {
return nil, fmt.Errorf("failed to parse config path for cluster: %s", cluster)
}
pullBasedClustersMap[clusterNameAndConfigPath[0]] = clusterNameAndConfigPath[1]
}
return pullBasedClustersMap, nil
}

// fetchClusters will fetch all member clusters we have.
func fetchClusters(client karmada.Interface) ([]*clusterv1alpha1.Cluster, error) {
clusterList, err := client.ClusterV1alpha1().Clusters().List(context.TODO(), metav1.ListOptions{})
if err != nil {
return nil, err
}

clusters := make([]*clusterv1alpha1.Cluster, 0, len(clusterList.Items))
for _, cluster := range clusterList.Items {
pinedCluster := cluster
if pinedCluster.Spec.SyncMode == clusterv1alpha1.Pull {
if _, exist := pullModeClusters[cluster.Name]; !exist {
continue
}
}
clusters = append(clusters, &pinedCluster)
}

return clusters, nil
}

// isClusterMeetRequirements checks if the current environment meets the requirements of E2E.
func isClusterMeetRequirements(clusters []*clusterv1alpha1.Cluster) (bool, error) {
// check if member cluster number meets requirements
if len(clusters) < MinimumCluster {
return false, fmt.Errorf("needs at least %d member clusters to run, but got: %d", MinimumCluster, len(clusters))
}

// check if all member cluster status is ready
for _, cluster := range clusters {
if !util.IsClusterReady(&cluster.Status) {
return false, fmt.Errorf("cluster %s not ready", cluster.GetName())
}
}

klog.Infof("Got %d member clusters and all in ready state.", len(clusters))
return true, nil
}

// setupTestNamespace will create a namespace in control plane and all member clusters, most of cases will run against it.
// The reason why we need a separated namespace is it will make it easier to cleanup resources deployed by the testing.
func setupTestNamespace(namespace string, kubeClient kubernetes.Interface, clusterClients []*util.ClusterClient) error {
func setupTestNamespace(namespace string, kubeClient kubernetes.Interface) error {
namespaceObj := helper.NewNamespace(namespace)
_, err := util.CreateNamespace(kubeClient, namespaceObj)
if err != nil {
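framework.InitClusterInformation collapses the cluster discovery, readiness check, per-cluster client construction, and label setup that BeforeSuite used to do inline. Only the call site appears in this diff; below is a rough, assumed sketch of the responsibilities it absorbs, reusing the listing logic from fetchClusters above (the cache variable and both function bodies are assumptions, not the committed code):

package framework

import (
	"context"

	"github.com/onsi/gomega"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
	karmada "github.com/karmada-io/karmada/pkg/generated/clientset/versioned"
)

// Package-level cache behind the accessors used throughout this diff
// (ClusterNames, Clusters, GetClusterClient). Assumed layout.
var clusters []*clusterv1alpha1.Cluster

// InitClusterInformation (sketch): list member clusters once and cache them,
// replacing the fetch/verify/client-building loop BeforeSuite carried inline.
// Readiness checks and per-cluster client setup would follow the same pattern.
func InitClusterInformation(karmadaClient karmada.Interface, controlPlaneClient client.Client) {
	clusterList, err := karmadaClient.ClusterV1alpha1().Clusters().List(context.TODO(), metav1.ListOptions{})
	gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
	for i := range clusterList.Items {
		clusters = append(clusters, &clusterList.Items[i])
	}
}

// ClusterNames (sketch) flattens the cache into the name slice tests pass around.
func ClusterNames() []string {
	names := make([]string, 0, len(clusters))
	for _, c := range clusters {
		names = append(names, c.Name)
	}
	return names
}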
@@ -223,7 +129,7 @@ func setupTestNamespace(namespace string, kubeClient kubernetes.Interface, clust
}

// cleanupTestNamespace will remove the namespace we setup before for the whole testing.
func cleanupTestNamespace(namespace string, kubeClient kubernetes.Interface, clusterClients []*util.ClusterClient) error {
func cleanupTestNamespace(namespace string, kubeClient kubernetes.Interface) error {
err := util.DeleteNamespace(kubeClient, namespace)
if err != nil {
return err
@@ -232,26 +138,6 @@ func cleanupTestNamespace(namespace string, kubeClient kubernetes.Interface, clu
return nil
}

func getClusterClient(clusterName string) kubernetes.Interface {
for _, client := range clusterClients {
if client.ClusterName == clusterName {
return client.KubeClient
}
}

return nil
}

func getClusterDynamicClient(clusterName string) dynamic.Interface {
for _, client := range clusterDynamicClients {
if client.ClusterName == clusterName {
return client.DynamicClientSet
}
}

return nil
}

func createCluster(clusterName, kubeConfigPath, controlPlane, clusterContext string) error {
err := clusterProvider.Create(clusterName, cluster.CreateWithKubeconfigPath(kubeConfigPath))
if err != nil {
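The getClusterClient/getClusterDynamicClient lookups deleted here reappear as framework.GetClusterClient at the call sites earlier in this diff. A sketch of the moved accessor, directly mirroring the deleted code and assuming the framework package owns the same util.ClusterClient cache the old globals held:

package framework

import (
	"k8s.io/client-go/kubernetes"

	"github.com/karmada-io/karmada/pkg/util"
)

// clusterClients would be populated once during suite setup (assumption).
var clusterClients []*util.ClusterClient

// GetClusterClient is a sketch of the relocated lookup: the same linear scan
// as the deleted e2e-local helper, just owned by the framework package now.
func GetClusterClient(clusterName string) kubernetes.Interface {
	for _, c := range clusterClients {
		if c.ClusterName == clusterName {
			return c.KubeClient
		}
	}
	return nil
}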
@@ -289,63 +175,8 @@ func deleteCluster(clusterName, kubeConfigPath string) error {
return clusterProvider.Delete(clusterName, kubeConfigPath)
}

func newClusterClientSet(c *clusterv1alpha1.Cluster) (*util.ClusterClient, *util.DynamicClusterClient, error) {
if c.Spec.SyncMode == clusterv1alpha1.Push {
clusterClient, err := util.NewClusterClientSet(c, controlPlaneClient, nil)
if err != nil {
return nil, nil, err
}
clusterDynamicClient, err := util.NewClusterDynamicClientSet(c, controlPlaneClient)
if err != nil {
return nil, nil, err
}
return clusterClient, clusterDynamicClient, nil
}

clusterConfigPath := pullModeClusters[c.Name]
clusterConfig, err := clientcmd.BuildConfigFromFlags("", clusterConfigPath)
if err != nil {
return nil, nil, err
}

clusterClientSet := util.ClusterClient{ClusterName: c.Name}
clusterDynamicClientSet := util.DynamicClusterClient{ClusterName: c.Name}
clusterClientSet.KubeClient = kubernetes.NewForConfigOrDie(clusterConfig)
clusterDynamicClientSet.DynamicClientSet = dynamic.NewForConfigOrDie(clusterConfig)

return &clusterClientSet, &clusterDynamicClientSet, nil
}

// set cluster label of E2E
func SetClusterLabel(c client.Client, clusterName string) error {
err := wait.PollImmediate(2*time.Second, 10*time.Second, func() (done bool, err error) {
clusterObj := &clusterv1alpha1.Cluster{}
if err := c.Get(context.TODO(), client.ObjectKey{Name: clusterName}, clusterObj); err != nil {
if apierrors.IsConflict(err) {
return false, nil
}
return false, err
}
if clusterObj.Labels == nil {
clusterObj.Labels = make(map[string]string)
}
clusterObj.Labels["location"] = "CHN"
if clusterObj.Spec.SyncMode == clusterv1alpha1.Push {
clusterObj.Labels["sync-mode"] = "Push"
}
if err := c.Update(context.TODO(), clusterObj); err != nil {
if apierrors.IsConflict(err) {
return false, nil
}
return false, err
}
return true, nil
})
return err
}

// delete cluster label of E2E
func DeleteClusterLabel(c client.Client, clusterName string) error {
// deleteClusterLabel deletes the cluster label of E2E
func deleteClusterLabel(c client.Client, clusterName string) error {
err := wait.PollImmediate(2*time.Second, 10*time.Second, func() (done bool, err error) {
clusterObj := &clusterv1alpha1.Cluster{}
if err := c.Get(context.TODO(), client.ObjectKey{Name: clusterName}, clusterObj); err != nil {
@@ -7,13 +7,13 @@ import (
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"

clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
"github.com/karmada-io/karmada/test/e2e/framework"
"github.com/karmada-io/karmada/test/helper"
)

@@ -45,21 +45,14 @@ var _ = ginkgo.Describe("propagation with taint and toleration testing", func()
},
}, policyv1alpha1.Placement{
ClusterAffinity: &policyv1alpha1.ClusterAffinity{
ClusterNames: clusterNames,
ClusterNames: framework.ClusterNames(),
},
ClusterTolerations: clusterTolerations,
})

ginkgo.BeforeEach(func() {
ginkgo.By(fmt.Sprintf("creating policy(%s/%s)", policyNamespace, policyName), func() {
_, err := karmadaClient.PolicyV1alpha1().PropagationPolicies(policyNamespace).Create(context.TODO(), policy, metav1.CreateOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
})

ginkgo.BeforeEach(func() {
ginkgo.By("adding taints to clusters", func() {
for _, clusterName := range clusterNames {
for _, clusterName := range framework.ClusterNames() {
taints := constructAddedTaints(tolerationKey, clusterName)

gomega.Eventually(func() bool {
@@ -81,16 +74,9 @@ var _ = ginkgo.Describe("propagation with taint and toleration testing", func()
})
})

ginkgo.AfterEach(func() {
ginkgo.By(fmt.Sprintf("removing policy(%s/%s)", policyNamespace, policyName), func() {
err := karmadaClient.PolicyV1alpha1().PropagationPolicies(policyNamespace).Delete(context.TODO(), policyName, metav1.DeleteOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
})

ginkgo.AfterEach(func() {
ginkgo.By("removing taints in cluster", func() {
for _, clusterName := range clusterNames {
for _, clusterName := range framework.ClusterNames() {
gomega.Eventually(func() bool {
clusterObj := &clusterv1alpha1.Cluster{}
err := controlPlaneClient.Get(context.TODO(), client.ObjectKey{Name: clusterName}, clusterObj)
@@ -111,10 +97,8 @@ var _ = ginkgo.Describe("propagation with taint and toleration testing", func()
})

ginkgo.It("deployment with cluster tolerations testing", func() {
ginkgo.By(fmt.Sprintf("creating deployment(%s/%s)", deployment.Namespace, deployment.Name), func() {
_, err := kubeClient.AppsV1().Deployments(testNamespace).Create(context.TODO(), deployment, metav1.CreateOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
framework.CreatePropagationPolicy(karmadaClient, policy)
framework.CreateDeployment(kubeClient, deployment)

ginkgo.By(fmt.Sprintf("check if deployment(%s/%s) only scheduled to tolerated cluster(%s)", deploymentNamespace, deploymentName, tolerationValue), func() {
targetClusterNames, err := getTargetClusterNames(deployment)
@@ -123,10 +107,8 @@ var _ = ginkgo.Describe("propagation with taint and toleration testing", func()
gomega.Expect(targetClusterNames[0] == tolerationValue).Should(gomega.BeTrue())
})

ginkgo.By(fmt.Sprintf("removing deployment(%s/%s)", deploymentNamespace, deploymentName), func() {
err := kubeClient.AppsV1().Deployments(testNamespace).Delete(context.TODO(), deploymentName, metav1.DeleteOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
framework.RemoveDeployment(kubeClient, deployment.Namespace, deployment.Name)
framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name)
})
})
})
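The taint-and-toleration case leans on two helpers that sit outside this diff, constructAddedTaints and getTargetClusterNames. The mechanism under test is standard Kubernetes taint semantics: each cluster receives a NoSchedule taint keyed per cluster, the policy tolerates exactly one of them, so the resulting binding should target only that cluster. A hedged sketch of what the taint construction might look like; the real helper is not shown, and the key/value layout is an assumption consistent with the targetClusterNames[0] == tolerationValue check above:

package e2e

import (
	corev1 "k8s.io/api/core/v1"
)

// constructAddedTaints (sketch): a NoSchedule taint whose value carries the
// cluster name, so a toleration matching one value admits exactly one cluster.
func constructAddedTaints(tolerationKey, clusterName string) []corev1.Taint {
	return []corev1.Taint{
		{
			Key:    tolerationKey,
			Value:  clusterName,
			Effect: corev1.TaintEffectNoSchedule,
		},
	}
}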