Run karmada-interpreter-webhook-example in CI

Signed-off-by: lonelyCZ <531187475@qq.com>
lonelyCZ 2021-12-30 20:46:15 +08:00
parent 71c95f76f5
commit f4c71a6370
8 changed files with 341 additions and 0 deletions

hack/post-run-e2e.sh Executable file

@@ -0,0 +1,35 @@
#!/usr/bin/env bash
set -o errexit
set -o nounset
set -o pipefail
REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
# variable definitions
KUBECONFIG_PATH=${KUBECONFIG_PATH:-"${HOME}/.kube"}
MAIN_KUBECONFIG=${MAIN_KUBECONFIG:-"${KUBECONFIG_PATH}/karmada.config"}
HOST_CLUSTER_NAME=${HOST_CLUSTER_NAME:-"karmada-host"}
KARMADA_APISERVER=${KARMADA_APISERVER:-"karmada-apiserver"}
MEMBER_CLUSTER_KUBECONFIG=${MEMBER_CLUSTER_KUBECONFIG:-"${KUBECONFIG_PATH}/members.config"}
MEMBER_CLUSTER_1_NAME=${MEMBER_CLUSTER_1_NAME:-"member1"}
MEMBER_CLUSTER_2_NAME=${MEMBER_CLUSTER_2_NAME:-"member2"}
PULL_MODE_CLUSTER_NAME=${PULL_MODE_CLUSTER_NAME:-"member3"}
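# NOTE: keep these defaults in sync with hack/pre-run-e2e.sh so cleanup targets the same clusters that setup used.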
# delete interpreter webhook example in karmada-host
export KUBECONFIG="${MAIN_KUBECONFIG}"
kubectl config use-context "${HOST_CLUSTER_NAME}"
kubectl delete -f "${REPO_ROOT}"/examples/customresourceinterpreter/karmada-interpreter-webhook-example.yaml
# delete the interpreter webhook configuration
kubectl config use-context "${KARMADA_APISERVER}"
kubectl delete ResourceInterpreterWebhookConfiguration examples
# delete the interpreter example Workload CRD in karmada-apiserver and member clusters
kubectl delete -f "${REPO_ROOT}/examples/customresourceinterpreter/apis/workload.example.io_workloads.yaml"
export KUBECONFIG="${MEMBER_CLUSTER_KUBECONFIG}"
kubectl config use-context "${MEMBER_CLUSTER_1_NAME}"
kubectl delete -f "${REPO_ROOT}/examples/customresourceinterpreter/apis/workload.example.io_workloads.yaml"
kubectl config use-context "${MEMBER_CLUSTER_2_NAME}"
kubectl delete -f "${REPO_ROOT}/examples/customresourceinterpreter/apis/workload.example.io_workloads.yaml"
kubectl config use-context "${PULL_MODE_CLUSTER_NAME}"
kubectl delete -f "${REPO_ROOT}/examples/customresourceinterpreter/apis/workload.example.io_workloads.yaml"

hack/pre-run-e2e.sh Executable file

@@ -0,0 +1,46 @@
#!/usr/bin/env bash
set -o errexit
set -o nounset
set -o pipefail
REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
source "${REPO_ROOT}"/hack/util.sh
# variable definitions
KUBECONFIG_PATH=${KUBECONFIG_PATH:-"${HOME}/.kube"}
MAIN_KUBECONFIG=${MAIN_KUBECONFIG:-"${KUBECONFIG_PATH}/karmada.config"}
HOST_CLUSTER_NAME=${HOST_CLUSTER_NAME:-"karmada-host"}
KARMADA_APISERVER=${KARMADA_APISERVER:-"karmada-apiserver"}
MEMBER_CLUSTER_KUBECONFIG=${MEMBER_CLUSTER_KUBECONFIG:-"${KUBECONFIG_PATH}/members.config"}
MEMBER_CLUSTER_1_NAME=${MEMBER_CLUSTER_1_NAME:-"member1"}
MEMBER_CLUSTER_2_NAME=${MEMBER_CLUSTER_2_NAME:-"member2"}
PULL_MODE_CLUSTER_NAME=${PULL_MODE_CLUSTER_NAME:-"member3"}
export VERSION="latest"
export REGISTRY="swr.ap-southeast-1.myhuaweicloud.com/karmada"
CERT_DIR=${CERT_DIR:-"${HOME}/.karmada"}
ROOT_CA_FILE=${CERT_DIR}/server-ca.crt
# load the interpreter webhook example image into the host kind cluster (kind nodes cannot pull images from the local docker daemon)
kind load docker-image "${REGISTRY}/karmada-interpreter-webhook-example:${VERSION}" --name="${HOST_CLUSTER_NAME}"
# deploy interpreter webhook example in karmada-host
export KUBECONFIG="${MAIN_KUBECONFIG}"
kubectl config use-context "${HOST_CLUSTER_NAME}"
kubectl apply -f "${REPO_ROOT}"/examples/customresourceinterpreter/karmada-interpreter-webhook-example.yaml
util::wait_pod_ready "${INTERPRETER_WEBHOOK_EXAMPLE_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}"
# deploy the interpreter webhook configuration (webhook-configuration.yaml)
kubectl config use-context "${KARMADA_APISERVER}"
util::deploy_webhook_configuration "${ROOT_CA_FILE}" "${REPO_ROOT}/examples/customresourceinterpreter/webhook-configuration.yaml"
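# (util::deploy_webhook_configuration, defined in hack/util.sh, injects the root CA
# bundle into the webhook configuration template before applying it.)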
# install the interpreter example Workload CRD in karmada-apiserver and member clusters
kubectl apply -f "${REPO_ROOT}/examples/customresourceinterpreter/apis/workload.example.io_workloads.yaml"
export KUBECONFIG="${MEMBER_CLUSTER_KUBECONFIG}"
kubectl config use-context "${MEMBER_CLUSTER_1_NAME}"
kubectl apply -f "${REPO_ROOT}/examples/customresourceinterpreter/apis/workload.example.io_workloads.yaml"
kubectl config use-context "${MEMBER_CLUSTER_2_NAME}"
kubectl apply -f "${REPO_ROOT}/examples/customresourceinterpreter/apis/workload.example.io_workloads.yaml"
kubectl config use-context "${PULL_MODE_CLUSTER_NAME}"
kubectl apply -f "${REPO_ROOT}/examples/customresourceinterpreter/apis/workload.example.io_workloads.yaml"

hack/run-e2e.sh

@@ -29,6 +29,10 @@ mkdir -p "$ARTIFACTS_PATH"
# Install ginkgo
GO111MODULE=on go install github.com/onsi/ginkgo/ginkgo
# Pre-run e2e: install extra components
REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
"${REPO_ROOT}"/hack/pre-run-e2e.sh
# Run e2e
export KUBECONFIG=${KARMADA_APISERVER_KUBECONFIG}
export PULL_BASED_CLUSTERS=${PULL_BASED_CLUSTERS}
@@ -50,4 +54,7 @@ fi
echo "Collected logs at $ARTIFACTS_PATH:"
ls -al "$ARTIFACTS_PATH"
# Post-run e2e: delete extra components
"${REPO_ROOT}"/hack/post-run-e2e.sh
exit $TESTING_RESULT

hack/util.sh

@@ -15,6 +15,7 @@ KARMADA_CONTROLLER_LABEL="karmada-controller-manager"
KARMADA_SCHEDULER_LABEL="karmada-scheduler"
KARMADA_WEBHOOK_LABEL="karmada-webhook"
AGENT_POD_LABEL="karmada-agent"
INTERPRETER_WEBHOOK_EXAMPLE_LABEL="karmada-interpreter-webhook-example"
MIN_Go_VERSION=go1.16.0

test/e2e/framework/workload.go

@@ -0,0 +1,96 @@
package framework
import (
"context"
"encoding/json"
"fmt"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/dynamic"
"k8s.io/klog/v2"
workloadv1alpha1 "github.com/karmada-io/karmada/examples/customresourceinterpreter/apis/workload/v1alpha1"
)
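// workloadGVR identifies the example Workload custom resource for the dynamic client.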
var workloadGVR = workloadv1alpha1.SchemeGroupVersion.WithResource("workloads")
// CreateWorkload creates a Workload with the dynamic client.
func CreateWorkload(client dynamic.Interface, workload *workloadv1alpha1.Workload) {
ginkgo.By(fmt.Sprintf("Creating workload(%s/%s)", workload.Namespace, workload.Name), func() {
unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(workload)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
_, err = client.Resource(workloadGVR).Namespace(workload.Namespace).Create(context.TODO(), &unstructured.Unstructured{Object: unstructuredObj}, metav1.CreateOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
}
// UpdateWorkload updates a Workload with the dynamic client.
func UpdateWorkload(client dynamic.Interface, workload *workloadv1alpha1.Workload, clusterName string) {
ginkgo.By(fmt.Sprintf("Update workload(%s/%s) in cluster(%s)", workload.Namespace, workload.Name, clusterName), func() {
newUnstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(workload)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
gomega.Eventually(func() error {
_, err = client.Resource(workloadGVR).Namespace(workload.Namespace).Update(context.TODO(), &unstructured.Unstructured{Object: newUnstructuredObj}, metav1.UpdateOptions{})
return err
}, pollTimeout, pollInterval).ShouldNot(gomega.HaveOccurred())
})
}
// GetWorkload gets a Workload with the dynamic client.
func GetWorkload(client dynamic.Interface, namespace, name string) *workloadv1alpha1.Workload {
workload := workloadv1alpha1.Workload{}
ginkgo.By(fmt.Sprintf("Get workload(%s/%s)", namespace, name), func() {
var err error
unstructuredObj := &unstructured.Unstructured{}
gomega.Eventually(func() error {
unstructuredObj, err = client.Resource(workloadGVR).Namespace(namespace).Get(context.TODO(), name, metav1.GetOptions{})
return err
}, pollTimeout, pollInterval).ShouldNot(gomega.HaveOccurred())
result, err := unstructuredObj.MarshalJSON()
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
err = json.Unmarshal(result, &workload)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
return &workload
}
// RemoveWorkload deletes a Workload with the dynamic client.
func RemoveWorkload(client dynamic.Interface, namespace, name string) {
ginkgo.By(fmt.Sprintf("Remove workload(%s/%s)", namespace, name), func() {
err := client.Resource(workloadGVR).Namespace(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
}
// WaitWorkloadDisappearOnCluster waits until the workload disappears from the given cluster, or the poll times out.
func WaitWorkloadDisappearOnCluster(cluster, namespace, name string) {
clusterClient := GetClusterDynamicClient(cluster)
gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())
klog.Infof("Waiting for workload disappear on cluster(%s)", cluster)
gomega.Eventually(func() bool {
_, err := clusterClient.Resource(workloadGVR).Namespace(namespace).Get(context.TODO(), name, metav1.GetOptions{})
return apierrors.IsNotFound(err)
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}
// WaitWorkloadDisappearOnClusters waits until the workload disappears from all given member clusters, or the poll times out.
func WaitWorkloadDisappearOnClusters(clusters []string, namespace, name string) {
ginkgo.By(fmt.Sprintf("Check if workload(%s/%s) diappeare on member clusters", namespace, name), func() {
for _, clusterName := range clusters {
WaitWorkloadDisappearOnCluster(clusterName, namespace, name)
}
})
}
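A minimal sketch of how these helpers compose, as it would appear in the e2e package (dynamicClient, testNamespace, and testhelper.NewWorkload come from elsewhere in this change; the workload name and helper function are illustrative):

// Sketch only: create a Workload, read it back, assert the default replicas, clean up.
func exerciseWorkloadHelpers() {
	workload := testhelper.NewWorkload(testNamespace, "workload-demo") // illustrative name
	framework.CreateWorkload(dynamicClient, workload)
	got := framework.GetWorkload(dynamicClient, workload.Namespace, workload.Name)
	gomega.Expect(*got.Spec.Replicas).Should(gomega.Equal(int32(3))) // NewWorkload defaults to 3 replicas
	framework.RemoveWorkload(dynamicClient, workload.Namespace, workload.Name)
}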

test/e2e/resourceinterpreter_test.go

@@ -0,0 +1,124 @@
package e2e
import (
"context"
"fmt"
"time"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/klog/v2"
policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
"github.com/karmada-io/karmada/pkg/util/names"
"github.com/karmada-io/karmada/test/e2e/framework"
testhelper "github.com/karmada-io/karmada/test/helper"
)
var _ = ginkgo.Describe("Resource interpreter webhook testing", func() {
ginkgo.Context("InterpreterOperation InterpretReplica testing", func() {
policyNamespace := testNamespace
policyName := workloadNamePrefix + rand.String(RandomStrLength)
workloadNamespace := testNamespace
workloadName := policyName
workload := testhelper.NewWorkload(workloadNamespace, workloadName)
policy := testhelper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
{
APIVersion: workload.APIVersion,
Kind: workload.Kind,
Name: workload.Name,
},
}, policyv1alpha1.Placement{
ClusterAffinity: &policyv1alpha1.ClusterAffinity{
ClusterNames: framework.ClusterNames(),
},
})
ginkgo.It("InterpretReplica testing", func() {
framework.CreatePropagationPolicy(karmadaClient, policy)
framework.CreateWorkload(dynamicClient, workload)
ginkgo.By("check if workload's replica is interpreted", func() {
resourceBindingName := names.GenerateBindingName(workload.Kind, workload.Name)
expectedReplicas := *workload.Spec.Replicas
gomega.Eventually(func(g gomega.Gomega) (int32, error) {
resourceBinding, err := karmadaClient.WorkV1alpha2().ResourceBindings(workload.Namespace).Get(context.TODO(), resourceBindingName, metav1.GetOptions{})
g.Expect(err).NotTo(gomega.HaveOccurred())
klog.Infof("ResourceBinding(%s/%s)'s replicas is %d, expected: %d.",
resourceBinding.Namespace, resourceBinding.Name, resourceBinding.Spec.Replicas, expectedReplicas)
return resourceBinding.Spec.Replicas, nil
}, pollTimeout, pollInterval).Should(gomega.Equal(expectedReplicas))
})
framework.RemoveWorkload(dynamicClient, workload.Namespace, workload.Name)
framework.WaitWorkloadDisappearOnClusters(framework.ClusterNames(), workload.Namespace, workload.Name)
framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name)
})
})
// For now, only push-mode clusters are supported for Retain testing
// TODO(lonelyCZ): support pull mode cluster
ginkgo.Context("InterpreterOperation Retain testing", func() {
var waitTime = 5 * time.Second
var updatedPaused = true
policyNamespace := testNamespace
policyName := workloadNamePrefix + rand.String(RandomStrLength)
workloadNamespace := testNamespace
workloadName := policyName
pushModeClusters := []string{"member1", "member2"}
workload := testhelper.NewWorkload(workloadNamespace, workloadName)
policy := testhelper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
{
APIVersion: workload.APIVersion,
Kind: workload.Kind,
Name: workload.Name,
},
}, policyv1alpha1.Placement{
ClusterAffinity: &policyv1alpha1.ClusterAffinity{
ClusterNames: pushModeClusters,
},
})
ginkgo.It("Retain testing", func() {
framework.CreatePropagationPolicy(karmadaClient, policy)
framework.CreateWorkload(dynamicClient, workload)
ginkgo.By("update workload's spec.paused to true", func() {
for _, cluster := range pushModeClusters {
clusterDynamicClient := framework.GetClusterDynamicClient(cluster)
gomega.Expect(clusterDynamicClient).ShouldNot(gomega.BeNil())
memberWorkload := framework.GetWorkload(clusterDynamicClient, workloadNamespace, workloadName)
memberWorkload.Spec.Paused = updatedPaused
framework.UpdateWorkload(clusterDynamicClient, memberWorkload, cluster)
}
})
// Wait for the execution controller to reconcile, then check whether the change is retained
time.Sleep(waitTime)
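// The webhook's Retain hook should instruct Karmada to keep the member-side
// change (spec.paused) instead of overwriting it during reconciliation.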
ginkgo.By("check if workload's spec.paused is retained", func() {
for _, cluster := range pushModeClusters {
clusterDynamicClient := framework.GetClusterDynamicClient(cluster)
gomega.Expect(clusterDynamicClient).ShouldNot(gomega.BeNil())
gomega.Eventually(func(g gomega.Gomega) (bool, error) {
memberWorkload := framework.GetWorkload(clusterDynamicClient, workloadNamespace, workloadName)
return memberWorkload.Spec.Paused, nil
}, pollTimeout, pollInterval).Should(gomega.Equal(updatedPaused))
}
})
framework.RemoveWorkload(dynamicClient, workload.Namespace, workload.Name)
framework.WaitWorkloadDisappearOnClusters(pushModeClusters, workload.Namespace, workload.Name)
framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name)
})
})
})

test/e2e/suite_test.go

@@ -51,6 +51,7 @@ const (
podNamePrefix = "pod-"
crdNamePrefix = "cr-"
jobNamePrefix = "job-"
workloadNamePrefix = "workload-"
updateDeploymentReplicas = 6
updateServicePort = 81

test/helper/resource.go

@@ -13,6 +13,7 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/utils/pointer"
workloadv1alpha1 "github.com/karmada-io/karmada/examples/customresourceinterpreter/apis/workload/v1alpha1"
clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
)
@@ -402,3 +403,33 @@ func NewClusterWithResource(name string, allocatable, allocating, allocated core
},
}
}
// NewWorkload will build a workload object.
func NewWorkload(namespace, name string) *workloadv1alpha1.Workload {
podLabels := map[string]string{"app": "nginx"}
return &worklodv1alpha1.Workload{
TypeMeta: metav1.TypeMeta{
APIVersion: "workload.example.io/v1alpha1",
Kind: "Workload",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: name,
},
Spec: workloadv1alpha1.WorkloadSpec{
Replicas: pointer.Int32Ptr(3),
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: podLabels,
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{{
Name: "nginx",
Image: "nginx:1.19.0",
}},
},
},
},
}
}