add script to run e2e tests (#71)

Hongcai Ren 2020-12-14 10:42:01 +08:00 committed by GitHub
parent 06ab939c45
commit 1186eeb9a4
4 changed files with 136 additions and 1 deletion

hack/run-e2e.sh (new executable file, 24 additions)

@@ -0,0 +1,24 @@
#!/usr/bin/env bash
set -o errexit
set -o nounset
set -o pipefail
# This script runs the e2e tests against the karmada control plane.
# You should prepare your environment in advance; the following environment
# variable may need to be set, otherwise the default value is used.
# - CONTROL_PLANE_KUBECONFIG: absolute path of the control plane KUBECONFIG file.
#
# Usage: hack/run-e2e.sh
# Example 1: hack/run-e2e.sh (run e2e with default config)
# Example 2: export CONTROL_PLANE_KUBECONFIG=<KUBECONFIG PATH>; hack/run-e2e.sh (run e2e with your KUBECONFIG)

CONTROL_PLANE_KUBECONFIG=${CONTROL_PLANE_KUBECONFIG:-"${HOME}/.kube/karmada.config"}
export KUBECONFIG=${CONTROL_PLANE_KUBECONFIG}

# Install the ginkgo CLI (the binary lives in the /ginkgo subpackage, not the library root)
GO111MODULE=on go install github.com/onsi/ginkgo/ginkgo

# Run e2e
ginkgo -v -race -failFast ./test/e2e/

pkg/util/cluster.go (new file, 19 additions)

@@ -0,0 +1,19 @@
package util

import (
	"github.com/huawei-cloudnative/karmada/pkg/apis/membercluster/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// IsMemberClusterReady tells whether the cluster is in a ready state by checking its conditions.
func IsMemberClusterReady(cluster *v1alpha1.MemberCluster) bool {
	for _, condition := range cluster.Status.Conditions {
		// TODO(RainbowMango): Condition type should be defined in the API; after that, update this hard-coded value accordingly.
		if condition.Type == "ClusterReady" {
			if condition.Status == metav1.ConditionTrue {
				return true
			}
		}
	}
	return false
}
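
For reference, a minimal sketch of how this helper could be covered by a unit test. It assumes the MemberCluster status holds standard metav1.Condition entries (and that the status type is named MemberClusterStatus); the test file and test name are hypothetical and not part of this commit.

package util

import (
	"testing"

	"github.com/huawei-cloudnative/karmada/pkg/apis/membercluster/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// TestIsMemberClusterReady is a hypothetical test sketch; it assumes
// MemberClusterStatus.Conditions is a []metav1.Condition slice.
func TestIsMemberClusterReady(t *testing.T) {
	cluster := &v1alpha1.MemberCluster{
		Status: v1alpha1.MemberClusterStatus{
			Conditions: []metav1.Condition{
				{Type: "ClusterReady", Status: metav1.ConditionTrue},
			},
		},
	}
	if !IsMemberClusterReady(cluster) {
		t.Errorf("expected cluster with ClusterReady=True to be reported as ready")
	}
}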

test/e2e/cluster_test.go (new file, 31 additions)

@@ -0,0 +1,31 @@
package e2e

import (
	"github.com/onsi/ginkgo"
)

var _ = ginkgo.Describe("[cluster-lifecycle] [cluster-join] cluster lifecycle functionality testing", func() {
	ginkgo.BeforeEach(func() {
		// TODO(RainbowMango): create a new member cluster which will be used by the following tests.
	})

	ginkgo.AfterEach(func() {
		// TODO(RainbowMango): remove member clusters that were created for this test.
	})

	ginkgo.Context("normal cluster join and unjoin functionality", func() {
		ginkgo.It("new cluster could be joined to control plane", func() {
			ginkgo.By("join new member cluster", func() {
				// TODO(RainbowMango): add implementations here
			})

			ginkgo.By("check member cluster status", func() {
				// TODO(RainbowMango): add implementations here
			})

			ginkgo.By("unjoin member cluster")
		})
	})

	ginkgo.Context("abnormal cluster join and unjoin functionality", func() {
		// TODO(RainbowMango): add implementations here
	})
})
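
Since the join and status-check steps above are still TODO, here is a hypothetical sketch of a helper the "check member cluster status" step could call, reusing util.IsMemberClusterReady from this commit. The helper name is made up, and the Get call is assumed to follow the standard generated-clientset signature (only List appears in this commit).

package e2e

import (
	"context"

	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	karmada "github.com/huawei-cloudnative/karmada/pkg/generated/clientset/versioned"
	"github.com/huawei-cloudnative/karmada/pkg/util"
)

// checkMemberClusterReady is a hypothetical helper: it fetches a member cluster
// by name and reports whether it is in a ready state.
func checkMemberClusterReady(client karmada.Interface, name string) (bool, error) {
	// Get is assumed to follow the usual client-gen shape: Get(ctx, name, GetOptions).
	cluster, err := client.MemberclusterV1alpha1().MemberClusters().Get(context.TODO(), name, v1.GetOptions{})
	if err != nil {
		return false, err
	}
	return util.IsMemberClusterReady(cluster), nil
}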

Modified file: e2e test suite (62 additions, 1 deletion)

@@ -1,11 +1,23 @@
package e2e

import (
	"context"
	"fmt"
	"os"
	"testing"
	"time"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/klog/v2"

	karmada "github.com/huawei-cloudnative/karmada/pkg/generated/clientset/versioned"
	"github.com/huawei-cloudnative/karmada/pkg/util"
)

const (
@@ -13,6 +25,16 @@ const (
	TestSuiteSetupTimeOut = 300 * time.Second
	// TestSuiteTeardownTimeOut defines the time after which the suite tear down times out.
	TestSuiteTeardownTimeOut = 300 * time.Second
	// MinimumMemberCluster represents the minimum number of member clusters to run E2E test.
	MinimumMemberCluster = 2
)

var (
	kubeconfig    string
	restConfig    *rest.Config
	kubeClient    kubernetes.Interface
	karmadaClient karmada.Interface
)

func TestE2E(t *testing.T) {
@@ -21,9 +43,48 @@ func TestE2E(t *testing.T) {
}

var _ = ginkgo.BeforeSuite(func() {
	kubeconfig = os.Getenv("KUBECONFIG")
	gomega.Expect(kubeconfig).ShouldNot(gomega.BeEmpty())

	var err error
	restConfig, err = clientcmd.BuildConfigFromFlags("", kubeconfig)
	gomega.Expect(err).ShouldNot(gomega.HaveOccurred())

	kubeClient, err = kubernetes.NewForConfig(restConfig)
	gomega.Expect(err).ShouldNot(gomega.HaveOccurred())

	karmadaClient, err = karmada.NewForConfig(restConfig)
	gomega.Expect(err).ShouldNot(gomega.HaveOccurred())

	meetRequirement, err := isMemberClusterMeetRequirements(karmadaClient)
	gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
	gomega.Expect(meetRequirement).Should(gomega.BeTrue())
}, TestSuiteSetupTimeOut.Seconds())

var _ = ginkgo.AfterSuite(func() {
	// suite tear down, such as cleanup karmada environment.
}, TestSuiteTeardownTimeOut.Seconds())
// isMemberClusterMeetRequirements checks whether the current environment meets the requirements of E2E.
func isMemberClusterMeetRequirements(client karmada.Interface) (bool, error) {
	// list all member clusters we have
	clusters, err := client.MemberclusterV1alpha1().MemberClusters().List(context.TODO(), v1.ListOptions{})
	if err != nil {
		return false, err
	}

	// check if the number of member clusters meets the requirement
	if len(clusters.Items) < MinimumMemberCluster {
		return false, fmt.Errorf("needs at least %d member clusters to run, but got: %d", MinimumMemberCluster, len(clusters.Items))
	}

	// check if all member clusters are in ready state
	for _, cluster := range clusters.Items {
		if !util.IsMemberClusterReady(&cluster) {
			return false, fmt.Errorf("cluster %s not ready", cluster.GetName())
		}
	}

	klog.Infof("Got %d member clusters and all are in ready state.", len(clusters.Items))
	return true, nil
}