add e2e test for dependencies distributor

Signed-off-by: lihanbo <lihanbo2@huawei.com>
lihanbo 2022-03-17 14:41:59 +08:00
parent 294d92c69d
commit 2a631017ed
8 changed files with 452 additions and 0 deletions

@@ -29,6 +29,7 @@ spec:
- --bind-address=0.0.0.0
- --cluster-status-update-frequency=10s
- --secure-port=10357
- --feature-gates=PropagateDeps=true
- --v=4
volumeMounts:
- name: kubeconfig
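
The flag above only turns on the PropagateDeps feature gate in karmada-controller-manager; a PropagationPolicy still has to opt in through spec.propagateDeps before the dependencies of its workload are distributed, which is exactly what the new e2e cases below do. A minimal sketch using the helpers from this commit (policyName and deploymentName stand in for real names):

// Build a policy that selects the Deployment and pins it to member1.
policy := testhelper.NewPropagationPolicy(testNamespace, policyName, []policyv1alpha1.ResourceSelector{
	{APIVersion: "apps/v1", Kind: "Deployment", Name: deploymentName},
}, policyv1alpha1.Placement{
	ClusterAffinity: &policyv1alpha1.ClusterAffinity{ClusterNames: []string{"member1"}},
})
// Opt in: ConfigMaps and Secrets referenced by the Deployment follow it to the selected clusters.
policy.Spec.PropagateDeps = true
framework.CreatePropagationPolicy(karmadaClient, policy)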

@@ -0,0 +1,165 @@
package e2e
import (
"github.com/onsi/ginkgo"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/rand"
policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
"github.com/karmada-io/karmada/test/e2e/framework"
testhelper "github.com/karmada-io/karmada/test/helper"
)
var _ = ginkgo.Describe("[DependenciesDistributor] automatic propagation of relevant resources testing", func() {
ginkgo.Context("dependencies propagation testing", func() {
initClusterNames := []string{"member1"}
updateClusterNames := []string{"member2"}
secretName := secretNamePrefix + rand.String(RandomStrLength)
configMapName := configMapNamePrefix + rand.String(RandomStrLength)
secret := testhelper.NewSecret(testNamespace, secretName, map[string][]byte{"user": []byte("karmada")})
configMap := testhelper.NewConfigMap(testNamespace, configMapName, map[string]string{"user": "karmada"})
ginkgo.It("configmap automatically propagation testing", func() {
policyName := deploymentNamePrefix + rand.String(RandomStrLength)
deploymentName := policyName
deployment := testhelper.NewDeploymentReferencesConfigMap(testNamespace, deploymentName, configMapName)
policy := testhelper.NewPropagationPolicy(testNamespace, policyName, []policyv1alpha1.ResourceSelector{
{
APIVersion: deployment.APIVersion,
Kind: deployment.Kind,
Name: deployment.Name,
},
}, policyv1alpha1.Placement{
ClusterAffinity: &policyv1alpha1.ClusterAffinity{
ClusterNames: initClusterNames,
},
})
policy.Spec.PropagateDeps = true
framework.CreatePropagationPolicy(karmadaClient, policy)
framework.CreateDeployment(kubeClient, deployment)
framework.CreateConfigMap(kubeClient, configMap)
ginkgo.By("check if the configmap is propagated automatically", func() {
framework.WaitDeploymentPresentOnClustersFitWith(initClusterNames, deployment.Namespace, deployment.Name,
func(deployment *appsv1.Deployment) bool {
return true
})
framework.WaitConfigMapPresentOnClustersFitWith(initClusterNames, configMap.Namespace, configMapName,
func(configmap *corev1.ConfigMap) bool {
return true
})
})
ginkgo.By("updating propagation policy's clusterNames", func() {
patch := []map[string]interface{}{
{
"op": "replace",
"path": "/spec/placement/clusterAffinity/clusterNames",
"value": updateClusterNames,
},
}
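// Moving the policy from member1 to member2 should carry both the Deployment and its ConfigMap over.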
framework.PatchPropagationPolicy(karmadaClient, policy.Namespace, policyName, patch, types.JSONPatchType)
framework.WaitDeploymentPresentOnClustersFitWith(updateClusterNames, deployment.Namespace, deploymentName,
func(deployment *appsv1.Deployment) bool {
return true
})
framework.WaitConfigMapPresentOnClustersFitWith(updateClusterNames, configMap.Namespace, configMapName,
func(configmap *corev1.ConfigMap) bool {
return true
})
})
ginkgo.By("updating configmap's data", func() {
patch := []map[string]interface{}{
{
"op": "replace",
"path": "/data/user",
"value": "karmada-e2e",
},
}
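// The distributor should resync the updated ConfigMap to the clusters it was propagated to.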
framework.UpdateConfigMapWithPatch(kubeClient, configMap.Namespace, configMapName, patch, types.JSONPatchType)
framework.WaitConfigMapPresentOnClustersFitWith(updateClusterNames, configMap.Namespace, configMapName,
func(configmap *corev1.ConfigMap) bool {
for key, value := range configmap.Data {
if key == "user" && value == "karmada-e2e" {
return true
}
}
return false
})
})
framework.RemoveDeployment(kubeClient, deployment.Namespace, deployment.Name)
framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name)
framework.RemoveConfigMap(kubeClient, configMap.Namespace, configMapName)
})
ginkgo.It("secret automatically propagation testing", func() {
policyName := deploymentNamePrefix + rand.String(RandomStrLength)
deploymentName := policyName
deployment := testhelper.NewDeploymentReferencesSecret(testNamespace, deploymentName, secretName)
policy := testhelper.NewPropagationPolicy(testNamespace, policyName, []policyv1alpha1.ResourceSelector{
{
APIVersion: deployment.APIVersion,
Kind: deployment.Kind,
Name: deployment.Name,
},
}, policyv1alpha1.Placement{
ClusterAffinity: &policyv1alpha1.ClusterAffinity{
ClusterNames: initClusterNames,
},
})
policy.Spec.PropagateDeps = true
framework.CreatePropagationPolicy(karmadaClient, policy)
framework.CreateDeployment(kubeClient, deployment)
framework.CreateSecret(kubeClient, secret)
ginkgo.By("check if the secret is propagated automatically", func() {
framework.WaitDeploymentPresentOnClustersFitWith(initClusterNames, deployment.Namespace, deployment.Name,
func(deployment *appsv1.Deployment) bool {
return true
})
framework.WaitSecretPresentOnClustersFitWith(initClusterNames, secret.Namespace, secretName,
func(secret *corev1.Secret) bool {
return true
})
})
ginkgo.By("make the secret is not referenced by the deployment ", func() {
updateVolumes := []corev1.Volume{
{
Name: "vol-configmap",
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: configMapName,
},
},
},
},
}
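// Swapping the secret volume for a configmap volume removes the Deployment's dependency on the secret, so the distributor should clean the secret up from the member clusters.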
framework.UpdateDeploymentVolumes(kubeClient, deployment, updateVolumes)
framework.WaitSecretDisappearOnClusters(initClusterNames, secret.Namespace, secretName)
})
framework.RemoveDeployment(kubeClient, deployment.Namespace, deployment.Name)
framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name)
framework.RemoveSecret(kubeClient, secret.Namespace, secretName)
})
})
})

@@ -0,0 +1,66 @@
package framework
import (
"context"
"encoding/json"
"fmt"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
)
// CreateConfigMap creates a ConfigMap.
func CreateConfigMap(client kubernetes.Interface, configMap *corev1.ConfigMap) {
ginkgo.By(fmt.Sprintf("Creating ConfigMap(%s/%s)", configMap.Namespace, configMap.Name), func() {
_, err := client.CoreV1().ConfigMaps(configMap.Namespace).Create(context.TODO(), configMap, metav1.CreateOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
}
// RemoveConfigMap deletes a ConfigMap.
func RemoveConfigMap(client kubernetes.Interface, namespace, name string) {
ginkgo.By(fmt.Sprintf("Removing ConfigMap(%s/%s)", namespace, name), func() {
err := client.CoreV1().ConfigMaps(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
}
// WaitConfigMapPresentOnClustersFitWith waits until the configmap is present on every given member cluster and satisfies the fit func.
func WaitConfigMapPresentOnClustersFitWith(clusters []string, namespace, name string, fit func(configmap *corev1.ConfigMap) bool) {
ginkgo.By(fmt.Sprintf("Waiting for configmap(%s/%s) synced on member clusters", namespace, name), func() {
for _, clusterName := range clusters {
WaitConfigMapPresentOnClusterFitWith(clusterName, namespace, name, fit)
}
})
}
// WaitConfigMapPresentOnClusterFitWith waits until the configmap is present on the member cluster and satisfies the fit func.
func WaitConfigMapPresentOnClusterFitWith(cluster, namespace, name string, fit func(configmap *corev1.ConfigMap) bool) {
clusterClient := GetClusterClient(cluster)
gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())
klog.Infof("Waiting for configmap(%s/%s) synced on cluster(%s)", namespace, name, cluster)
gomega.Eventually(func() bool {
configmap, err := clusterClient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
return false
}
return fit(configmap)
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}
// UpdateConfigMapWithPatch updates the configmap with the given patch.
func UpdateConfigMapWithPatch(client kubernetes.Interface, namespace, name string, patch []map[string]interface{}, patchType types.PatchType) {
ginkgo.By(fmt.Sprintf("Updating configmap(%s/%s)", namespace, name), func() {
bytes, err := json.Marshal(patch)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
_, err = client.CoreV1().ConfigMaps(namespace).Patch(context.TODO(), name, patchType, bytes, metav1.PatchOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
}

@@ -8,6 +8,7 @@ import (
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
@@ -102,6 +103,17 @@ func UpdateDeploymentAnnotations(client kubernetes.Interface, deployment *appsv1
})
}
// UpdateDeploymentVolumes updates the Deployment's volumes.
func UpdateDeploymentVolumes(client kubernetes.Interface, deployment *appsv1.Deployment, volumes []corev1.Volume) {
ginkgo.By(fmt.Sprintf("Updating Deployment(%s/%s)'s volumes", deployment.Namespace, deployment.Name), func() {
deployment.Spec.Template.Spec.Volumes = volumes
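// Retry until the update call succeeds so a transient API error does not fail the test.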
gomega.Eventually(func() error {
_, err := client.AppsV1().Deployments(deployment.Namespace).Update(context.TODO(), deployment, metav1.UpdateOptions{})
return err
}, pollTimeout, pollInterval).ShouldNot(gomega.HaveOccurred())
})
}
// ExtractTargetClustersFrom extracts the target cluster names from the deployment's related ResourceBinding information.
func ExtractTargetClustersFrom(c client.Client, deployment *appsv1.Deployment) []string {
bindingName := names.GenerateBindingName(deployment.Kind, deployment.Name)

@@ -2,11 +2,13 @@ package framework
import (
"context"
"encoding/json"
"fmt"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
karmada "github.com/karmada-io/karmada/pkg/generated/clientset/versioned"
@@ -27,3 +29,14 @@ func RemovePropagationPolicy(client karmada.Interface, namespace, name string) {
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
}
// PatchPropagationPolicy patches the PropagationPolicy with the karmada client.
func PatchPropagationPolicy(client karmada.Interface, namespace, name string, patch []map[string]interface{}, patchType types.PatchType) {
ginkgo.By(fmt.Sprintf("Patching PropagationPolicy(%s/%s)", namespace, name), func() {
patchBytes, err := json.Marshal(patch)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
_, err = client.PolicyV1alpha1().PropagationPolicies(namespace).Patch(context.TODO(), name, patchType, patchBytes, metav1.PatchOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
}

@@ -0,0 +1,75 @@
package framework
import (
"context"
"fmt"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
)
// CreateSecret creates a Secret.
func CreateSecret(client kubernetes.Interface, secret *corev1.Secret) {
ginkgo.By(fmt.Sprintf("Creating Secret(%s/%s)", secret.Namespace, secret.Name), func() {
_, err := client.CoreV1().Secrets(secret.Namespace).Create(context.TODO(), secret, metav1.CreateOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
}
// RemoveSecret deletes a Secret.
func RemoveSecret(client kubernetes.Interface, namespace, name string) {
ginkgo.By(fmt.Sprintf("Removing Secret(%s/%s)", namespace, name), func() {
err := client.CoreV1().Secrets(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
}
// WaitSecretPresentOnClustersFitWith waits until the secret is present on every given member cluster and satisfies the fit func.
func WaitSecretPresentOnClustersFitWith(clusters []string, namespace, name string, fit func(secret *corev1.Secret) bool) {
ginkgo.By(fmt.Sprintf("Waiting for secret(%s/%s) synced on member clusters", namespace, name), func() {
for _, clusterName := range clusters {
WaitSecretPresentOnClusterFitWith(clusterName, namespace, name, fit)
}
})
}
// WaitSecretPresentOnClusterFitWith waits until the secret is present on the member cluster and satisfies the fit func.
func WaitSecretPresentOnClusterFitWith(cluster, namespace, name string, fit func(secret *corev1.Secret) bool) {
clusterClient := GetClusterClient(cluster)
gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())
klog.Infof("Waiting for secret(%s/%s) synced on cluster(%s)", namespace, name, cluster)
gomega.Eventually(func() bool {
secret, err := clusterClient.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
return false
}
return fit(secret)
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}
// WaitSecretDisappearOnCluster waits for the secret to disappear on the cluster until timeout.
func WaitSecretDisappearOnCluster(cluster, namespace, name string) {
clusterClient := GetClusterClient(cluster)
gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())
klog.Infof("Waiting for secret disappear on cluster(%s)", cluster)
gomega.Eventually(func() bool {
_, err := clusterClient.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{})
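// A NotFound error means the secret has been removed from the member cluster, which is what we are waiting for.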
return apierrors.IsNotFound(err)
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
}
// WaitSecretDisappearOnClusters waits for the secret to disappear on the member clusters until timeout.
func WaitSecretDisappearOnClusters(clusters []string, namespace, name string) {
ginkgo.By(fmt.Sprintf("Check if secret(%s/%s) diappeare on member clusters", namespace, name), func() {
for _, clusterName := range clusters {
WaitSecretDisappearOnCluster(clusterName, namespace, name)
}
})
}

@@ -53,6 +53,8 @@ const (
jobNamePrefix = "job-"
workloadNamePrefix = "workload-"
federatedResourceQuotaPrefix = "frq-"
configMapNamePrefix = "configmap-"
secretNamePrefix = "secret-"
updateDeploymentReplicas = 6
updateServicePort = 81

@@ -433,3 +433,121 @@ func NewWorkload(namespace string, name string) *worklodv1alpha1.Workload {
},
}
}
// NewDeploymentReferencesConfigMap will build a deployment object that references a configmap.
func NewDeploymentReferencesConfigMap(namespace, deploymentName, configMapName string) *appsv1.Deployment {
podLabels := map[string]string{"app": "nginx"}
return &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
APIVersion: "apps/v1",
Kind: "Deployment",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: deploymentName,
},
Spec: appsv1.DeploymentSpec{
Replicas: pointer.Int32Ptr(3),
Selector: &metav1.LabelSelector{
MatchLabels: podLabels,
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: podLabels,
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{{
Name: "nginx",
Image: "nginx:1.19.0",
}},
Volumes: []corev1.Volume{
{
Name: "vol-configmap",
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: configMapName,
},
},
},
},
},
},
},
},
}
}
// NewDeploymentReferencesSecret will build a deployment object that references a secret.
func NewDeploymentReferencesSecret(namespace, deploymentName, secretName string) *appsv1.Deployment {
podLabels := map[string]string{"app": "nginx"}
return &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
APIVersion: "apps/v1",
Kind: "Deployment",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: deploymentName,
},
Spec: appsv1.DeploymentSpec{
Replicas: pointer.Int32Ptr(3),
Selector: &metav1.LabelSelector{
MatchLabels: podLabels,
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: podLabels,
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{{
Name: "nginx",
Image: "nginx:1.19.0",
}},
Volumes: []corev1.Volume{
{
Name: "vol-secret",
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: secretName,
},
},
},
},
},
},
},
}
}
// NewSecret will build a secret object.
func NewSecret(namespace string, name string, data map[string][]byte) *corev1.Secret {
return &corev1.Secret{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "Secret",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: name,
},
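// Type is left empty, so the API server defaults it to corev1.SecretTypeOpaque.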
Data: data,
}
}
// NewConfigMap will build a configmap object.
func NewConfigMap(namespace string, name string, data map[string]string) *corev1.ConfigMap {
return &corev1.ConfigMap{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "ConfigMap",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: name,
},
Data: data,
}
}