Improve e2e test framework and add a policy test case (#78)

* Add E2E test case of basic policy propagation

* update vendor
Hongcai Ren 2020-12-19 11:22:02 +08:00 committed by GitHub
parent dc017b0b8c
commit 8c312961f9
8 changed files with 283 additions and 16 deletions

go.mod

@ -15,5 +15,6 @@ require (
k8s.io/code-generator v0.19.3
k8s.io/component-base v0.19.3
k8s.io/klog/v2 v2.2.0
k8s.io/utils v0.0.0-20200729134348-d5654de09c73
sigs.k8s.io/controller-runtime v0.6.4
)


@ -209,6 +209,9 @@ func (c *PropagationPolicyController) claimResources(owner string, workloads []*
return err
}
workloadLabel := workload.GetLabels()
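// GetLabels returns nil when the workload has no labels, so initialize the map before adding the claim label.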
if workloadLabel == nil {
workloadLabel = make(map[string]string, 1)
}
workloadLabel[util.PolicyClaimLabel] = owner
workload.SetLabels(workloadLabel)
_, err = c.DynamicClient.Resource(dynamicResource).Namespace(workload.GetNamespace()).Update(context.TODO(), workload, metav1.UpdateOptions{})

test/e2e/policy_test.go Normal file

@ -0,0 +1,86 @@
package e2e
import (
"context"
"fmt"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/wait"
"github.com/karmada-io/karmada/test/helper"
)
var _ = ginkgo.Describe("[propagation policy] propagation policy functionality testing", func() {
deploymentName := rand.String(6)
deployment := helper.NewDeployment(testNamespace, deploymentName)
var err error
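// create the deployment under test in the control plane before each case.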
ginkgo.BeforeEach(func() {
_, err = kubeClient.AppsV1().Deployments(testNamespace).Create(context.TODO(), deployment, metav1.CreateOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
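// delete the test namespace from the control plane after each case, which also removes the deployment created above.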
ginkgo.AfterEach(func() {
err = kubeClient.CoreV1().Namespaces().Delete(context.TODO(), testNamespace, metav1.DeleteOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
// propagate a single resource to two explicit clusters.
ginkgo.Context("single resource propagation testing", func() {
ginkgo.It("propagate deployment", func() {
policyName := rand.String(6)
policyNamespace := testNamespace // keep the policy in the same namespace as the resource
policy := helper.NewPolicyWithSingleDeployment(policyNamespace, policyName, deployment, memberClusterNames)
ginkgo.By(fmt.Sprintf("creating policy: %s/%s", policyNamespace, policyName), func() {
_, err = karmadaClient.PropagationstrategyV1alpha1().PropagationPolicies(policyNamespace).Create(context.TODO(), policy, metav1.CreateOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
ginkgo.By("check if resource appear in member clusters", func() {
for _, cluster := range memberClusters {
clusterClient := getClusterClient(cluster.Name)
gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())
err = wait.Poll(pollInterval, pollTimeout, func() (done bool, err error) {
_, err = clusterClient.AppsV1().Deployments(deployment.Namespace).Get(context.TODO(), deployment.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
return true, nil
})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
}
})
ginkgo.By(fmt.Sprintf("deleting policy: %s/%s", policyNamespace, policyName), func() {
err = karmadaClient.PropagationstrategyV1alpha1().PropagationPolicies(policyNamespace).Delete(context.TODO(), policyName, metav1.DeleteOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
ginkgo.By("check if resource disappear from member clusters", func() {
for _, cluster := range memberClusters {
clusterClient := getClusterClient(cluster.Name)
gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())
err = wait.Poll(pollInterval, pollTimeout, func() (done bool, err error) {
_, err = clusterClient.AppsV1().Deployments(deployment.Namespace).Get(context.TODO(), deployment.Name, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return true, nil
}
}
return false, nil
})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
}
})
})
})
// propagate two resources to two explicit clusters.
ginkgo.Context("multiple resource propagation testing", func() {
})
})


@ -11,13 +11,16 @@ import (
"github.com/onsi/gomega"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/klog/v2"
clusterapi "github.com/karmada-io/karmada/pkg/apis/membercluster/v1alpha1"
karmada "github.com/karmada-io/karmada/pkg/generated/clientset/versioned"
"github.com/karmada-io/karmada/pkg/util"
"github.com/karmada-io/karmada/test/helper"
)
const (
@ -26,15 +29,24 @@ const (
// TestSuiteTeardownTimeOut defines the time after which the suite tear down times out.
TestSuiteTeardownTimeOut = 300 * time.Second
// pollInterval defines the interval time for a poll operation.
pollInterval = 5 * time.Second
// pollTimeout defines the time after which the poll operation times out.
pollTimeout = 60 * time.Second
// MinimumMemberCluster represents the minimum number of member clusters to run E2E test.
MinimumMemberCluster = 2
)
var (
kubeconfig string
restConfig *rest.Config
kubeClient kubernetes.Interface
karmadaClient karmada.Interface
kubeconfig string
restConfig *rest.Config
kubeClient kubernetes.Interface
karmadaClient karmada.Interface
memberClusters []*clusterapi.MemberCluster
memberClusterNames []string
memberClusterClients []*util.ClusterClient
testNamespace = fmt.Sprintf("karmada-e2e-%s", rand.String(3))
)
func TestE2E(t *testing.T) {
@ -56,35 +68,110 @@ var _ = ginkgo.BeforeSuite(func() {
karmadaClient, err = karmada.NewForConfig(restConfig)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
meetRequirement, err := isMemberClusterMeetRequirements(karmadaClient)
memberClusters, err = fetchMemberClusters(karmadaClient)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
var meetRequirement bool
meetRequirement, err = isMemberClusterMeetRequirements(memberClusters)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
gomega.Expect(meetRequirement).Should(gomega.BeTrue())
for _, cluster := range memberClusters {
memberClusterNames = append(memberClusterNames, cluster.Name)
memberClusterClient, err := util.NewClusterClientSet(cluster, kubeClient)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
memberClusterClients = append(memberClusterClients, memberClusterClient)
}
gomega.Expect(memberClusterNames).Should(gomega.HaveLen(len(memberClusters)))
err = setupTestNamespace(testNamespace, kubeClient, memberClusterClients)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
}, TestSuiteSetupTimeOut.Seconds())
var _ = ginkgo.AfterSuite(func() {
// suite tear down, such as cleaning up the karmada environment.
// clean up all namespaces we created in both the control plane and the member clusters.
// It will not return an error even if the namespace no longer exists, which may happen when the setup failed.
err := cleanupTestNamespace(testNamespace, kubeClient, memberClusterClients)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
}, TestSuiteTeardownTimeOut.Seconds())
// isMemberClusterMeetRequirements checks if the current environment meets the requirements of E2E.
func isMemberClusterMeetRequirements(client karmada.Interface) (bool, error) {
// list all member cluster we have
clusters, err := client.MemberclusterV1alpha1().MemberClusters().List(context.TODO(), v1.ListOptions{})
// fetchMemberClusters will fetch all member clusters we have.
func fetchMemberClusters(client karmada.Interface) ([]*clusterapi.MemberCluster, error) {
clusterList, err := client.MemberclusterV1alpha1().MemberClusters().List(context.TODO(), v1.ListOptions{})
if err != nil {
return false, err
return nil, err
}
clusters := make([]*clusterapi.MemberCluster, 0, len(clusterList.Items))
for _, cluster := range clusterList.Items {
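// copy the loop variable before taking its address so each appended pointer refers to a distinct cluster.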
pinnedCluster := cluster
clusters = append(clusters, &pinnedCluster)
}
return clusters, nil
}
// isMemberClusterMeetRequirements checks if the current environment meets the requirements of E2E.
func isMemberClusterMeetRequirements(clusters []*clusterapi.MemberCluster) (bool, error) {
// check if member cluster number meets requirements
if len(clusters.Items) < MinimumMemberCluster {
return false, fmt.Errorf("needs at least %d member clusters to run, but got: %d", MinimumMemberCluster, len(clusters.Items))
if len(clusters) < MinimumMemberCluster {
return false, fmt.Errorf("needs at least %d member clusters to run, but got: %d", MinimumMemberCluster, len(clusters))
}
// check if all member cluster status is ready
for _, cluster := range clusters.Items {
if !util.IsMemberClusterReady(&cluster) {
for _, cluster := range clusters {
if !util.IsMemberClusterReady(cluster) {
return false, fmt.Errorf("cluster %s not ready", cluster.GetName())
}
}
klog.Infof("Got %d member cluster and all in ready state.", len(clusters.Items))
klog.Infof("Got %d member cluster and all in ready state.", len(clusters))
return true, nil
}
// setupTestNamespace will create a namespace in the control plane and in all member clusters; most cases will run against it.
// The reason we need a separate namespace is that it makes it easier to clean up the resources deployed by the tests.
func setupTestNamespace(namespace string, kubeClient kubernetes.Interface, memberClusterClients []*util.ClusterClient) error {
namespaceObj := helper.NewNamespace(namespace)
_, err := util.CreateNamespace(kubeClient, namespaceObj)
if err != nil {
return err
}
for _, clusterClient := range memberClusterClients {
_, err = util.CreateNamespace(clusterClient.KubeClient, namespaceObj)
if err != nil {
return err
}
}
return nil
}
// cleanupTestNamespace will remove the namespace we set up for the whole test run.
func cleanupTestNamespace(namespace string, kubeClient kubernetes.Interface, memberClusterClients []*util.ClusterClient) error {
err := util.DeleteNamespace(kubeClient, namespace)
if err != nil {
return err
}
for _, clusterClient := range memberClusterClients {
err = util.DeleteNamespace(clusterClient.KubeClient, namespace)
if err != nil {
return err
}
}
return nil
}
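// getClusterClient returns the kubernetes client for the given member cluster name, or nil if no matching cluster is found.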
func getClusterClient(clusterName string) kubernetes.Interface {
for _, client := range memberClusterClients {
if client.ClusterName == clusterName {
return client.KubeClient
}
}
return nil
}

test/helper/deployment.go Normal file

@ -0,0 +1,41 @@
package helper
import (
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/pointer"
)
// NewDeployment will build a deployment object.
func NewDeployment(namespace string, name string) *appsv1.Deployment {
podLabels := map[string]string{"app": "nginx"}
return &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
APIVersion: "apps/v1",
Kind: "Deployment",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: name,
},
Spec: appsv1.DeploymentSpec{
Replicas: pointer.Int32Ptr(3),
Selector: &metav1.LabelSelector{
MatchLabels: podLabels,
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: podLabels,
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{{
Name: "nginx",
Image: "nginx:1.19.0",
}},
},
},
},
}
}

test/helper/namespace.go Normal file

@ -0,0 +1,15 @@
package helper
import (
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// NewNamespace will build a Namespace object.
func NewNamespace(namespace string) *corev1.Namespace {
return &corev1.Namespace{
ObjectMeta: v1.ObjectMeta{
Name: namespace,
},
}
}


@ -0,0 +1,33 @@
package helper
import (
appsv1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
propagationapi "github.com/karmada-io/karmada/pkg/apis/propagationstrategy/v1alpha1"
)
// NewPolicyWithSingleDeployment will build a PropagationPolicy object.
func NewPolicyWithSingleDeployment(namespace string, name string, deployment *appsv1.Deployment, clusters []string) *propagationapi.PropagationPolicy {
return &propagationapi.PropagationPolicy{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: name,
},
Spec: propagationapi.PropagationSpec{
ResourceSelectors: []propagationapi.ResourceSelector{
{
APIVersion: deployment.APIVersion,
Kind: deployment.Kind,
Names: []string{deployment.Name},
Namespaces: []string{deployment.Namespace},
},
},
Placement: propagationapi.Placement{
ClusterAffinity: &propagationapi.ClusterAffinity{
ClusterNames: clusters,
},
},
},
}
}

vendor/modules.txt vendored

@ -794,6 +794,7 @@ k8s.io/kube-openapi/pkg/util
k8s.io/kube-openapi/pkg/util/proto
k8s.io/kube-openapi/pkg/util/sets
# k8s.io/utils v0.0.0-20200729134348-d5654de09c73
## explicit
k8s.io/utils/buffer
k8s.io/utils/integer
k8s.io/utils/net