From 695c29fe61ee172574ffd15776052e571acc28c3 Mon Sep 17 00:00:00 2001 From: changzhen Date: Thu, 28 Apr 2022 10:24:34 +0800 Subject: [PATCH] add e2e for ingressStatus collection Signed-off-by: changzhen --- .../defaultinterpreter/aggregatestatus.go | 8 +- .../aggregatestatus_test.go | 6 +- .../defaultinterpreter/reflectstatus.go | 4 +- pkg/util/helper/unstructured.go | 6 +- test/e2e/framework/ingress.go | 28 ++++++ test/e2e/resource_test.go | 94 +++++++++++++++++-- test/e2e/suite_test.go | 1 + test/helper/resource.go | 35 +++++++ 8 files changed, 160 insertions(+), 22 deletions(-) create mode 100644 test/e2e/framework/ingress.go diff --git a/pkg/resourceinterpreter/defaultinterpreter/aggregatestatus.go b/pkg/resourceinterpreter/defaultinterpreter/aggregatestatus.go index 4ef7565ef..6a714f67e 100644 --- a/pkg/resourceinterpreter/defaultinterpreter/aggregatestatus.go +++ b/pkg/resourceinterpreter/defaultinterpreter/aggregatestatus.go @@ -7,7 +7,7 @@ import ( appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" - extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + networkingv1 "k8s.io/api/networking/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/klog/v2" @@ -23,7 +23,7 @@ func getAllDefaultAggregateStatusInterpreter() map[schema.GroupVersionKind]aggre s := make(map[schema.GroupVersionKind]aggregateStatusInterpreter) s[appsv1.SchemeGroupVersion.WithKind(util.DeploymentKind)] = aggregateDeploymentStatus s[corev1.SchemeGroupVersion.WithKind(util.ServiceKind)] = aggregateServiceStatus - s[extensionsv1beta1.SchemeGroupVersion.WithKind(util.IngressKind)] = aggregateIngressStatus + s[networkingv1.SchemeGroupVersion.WithKind(util.IngressKind)] = aggregateIngressStatus s[batchv1.SchemeGroupVersion.WithKind(util.JobKind)] = aggregateJobStatus s[appsv1.SchemeGroupVersion.WithKind(util.DaemonSetKind)] = aggregateDaemonSetStatus s[appsv1.SchemeGroupVersion.WithKind(util.StatefulSetKind)] = 
aggregateStatefulSetStatus @@ -122,12 +122,12 @@ func aggregateIngressStatus(object *unstructured.Unstructured, aggregatedStatusI return nil, err } - newStatus := &extensionsv1beta1.IngressStatus{} + newStatus := &networkingv1.IngressStatus{} for _, item := range aggregatedStatusItems { if item.Status == nil { continue } - temp := &extensionsv1beta1.IngressStatus{} + temp := &networkingv1.IngressStatus{} if err := json.Unmarshal(item.Status.Raw, temp); err != nil { klog.Errorf("Failed to unmarshal status ingress(%s/%s): %v", ingress.Namespace, ingress.Name, err) return nil, err diff --git a/pkg/resourceinterpreter/defaultinterpreter/aggregatestatus_test.go b/pkg/resourceinterpreter/defaultinterpreter/aggregatestatus_test.go index 0fdeb8517..e9f2649d8 100644 --- a/pkg/resourceinterpreter/defaultinterpreter/aggregatestatus_test.go +++ b/pkg/resourceinterpreter/defaultinterpreter/aggregatestatus_test.go @@ -7,7 +7,7 @@ import ( appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" - extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + networkingv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -140,8 +140,8 @@ func TestAggregateIngressStatus(t *testing.T) { {ClusterName: "member1", Status: raw, Applied: true}, } - oldIngress := &extensionsv1beta1.Ingress{} - newIngress := &extensionsv1beta1.Ingress{Status: extensionsv1beta1.IngressStatus{LoadBalancer: corev1.LoadBalancerStatus{Ingress: []corev1.LoadBalancerIngress{{IP: "8.8.8.8", Hostname: "member1"}}}}} + oldIngress := &networkingv1.Ingress{} + newIngress := &networkingv1.Ingress{Status: networkingv1.IngressStatus{LoadBalancer: corev1.LoadBalancerStatus{Ingress: []corev1.LoadBalancerIngress{{IP: "8.8.8.8", Hostname: "member1"}}}}} oldObj, _ := helper.ToUnstructured(oldIngress) newObj, _ := helper.ToUnstructured(newIngress) diff --git a/pkg/resourceinterpreter/defaultinterpreter/reflectstatus.go 
b/pkg/resourceinterpreter/defaultinterpreter/reflectstatus.go index c3e366568..1cfd9979e 100644 --- a/pkg/resourceinterpreter/defaultinterpreter/reflectstatus.go +++ b/pkg/resourceinterpreter/defaultinterpreter/reflectstatus.go @@ -6,7 +6,7 @@ import ( appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" - extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + networkingv1 "k8s.io/api/networking/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -22,7 +22,7 @@ func getAllDefaultReflectStatusInterpreter() map[schema.GroupVersionKind]reflect s := make(map[schema.GroupVersionKind]reflectStatusInterpreter) s[appsv1.SchemeGroupVersion.WithKind(util.DeploymentKind)] = reflectDeploymentStatus s[corev1.SchemeGroupVersion.WithKind(util.ServiceKind)] = reflectServiceStatus - s[extensionsv1beta1.SchemeGroupVersion.WithKind(util.IngressKind)] = reflectIngressStatus + s[networkingv1.SchemeGroupVersion.WithKind(util.IngressKind)] = reflectIngressStatus s[batchv1.SchemeGroupVersion.WithKind(util.JobKind)] = reflectJobStatus s[appsv1.SchemeGroupVersion.WithKind(util.DaemonSetKind)] = reflectDaemonSetStatus s[appsv1.SchemeGroupVersion.WithKind(util.StatefulSetKind)] = reflectStatefulSetStatus diff --git a/pkg/util/helper/unstructured.go b/pkg/util/helper/unstructured.go index cd8eb9157..baeaf91f3 100644 --- a/pkg/util/helper/unstructured.go +++ b/pkg/util/helper/unstructured.go @@ -5,7 +5,7 @@ import ( batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" discoveryv1 "k8s.io/api/discovery/v1" - extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + networkingv1 "k8s.io/api/networking/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" @@ -196,8 +196,8 @@ func ConvertToServiceStatus(obj map[string]interface{}) (*corev1.ServiceStatus, } // ConvertToIngress converts an Ingress object from unstructured to typed. 
-func ConvertToIngress(obj *unstructured.Unstructured) (*extensionsv1beta1.Ingress, error) { - typedObj := &extensionsv1beta1.Ingress{} +func ConvertToIngress(obj *unstructured.Unstructured) (*networkingv1.Ingress, error) { + typedObj := &networkingv1.Ingress{} if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), typedObj); err != nil { return nil, err } diff --git a/test/e2e/framework/ingress.go b/test/e2e/framework/ingress.go new file mode 100644 index 000000000..1febb158d --- /dev/null +++ b/test/e2e/framework/ingress.go @@ -0,0 +1,28 @@ +package framework + +import ( + "context" + "fmt" + + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + networkingv1 "k8s.io/api/networking/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" +) + +// CreateIngress creates an Ingress. +func CreateIngress(client kubernetes.Interface, ingress *networkingv1.Ingress) { + ginkgo.By(fmt.Sprintf("Creating Ingress(%s/%s)", ingress.Namespace, ingress.Name), func() { + _, err := client.NetworkingV1().Ingresses(ingress.Namespace).Create(context.TODO(), ingress, metav1.CreateOptions{}) + gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) + }) +} + +// RemoveIngress deletes an Ingress. 
+func RemoveIngress(client kubernetes.Interface, namespace, name string) { + ginkgo.By(fmt.Sprintf("Removing Ingress(%s/%s)", namespace, name), func() { + err := client.NetworkingV1().Ingresses(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}) + gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) + }) +} diff --git a/test/e2e/resource_test.go b/test/e2e/resource_test.go index 0d5374f2f..073f5b6dd 100644 --- a/test/e2e/resource_test.go +++ b/test/e2e/resource_test.go @@ -9,6 +9,7 @@ import ( "github.com/onsi/gomega" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/rand" "k8s.io/apimachinery/pkg/util/wait" @@ -20,12 +21,19 @@ import ( ) var _ = ginkgo.Describe("[resource-status collection] resource status collection testing", func() { + var policyNamespace, policyName string + var policy *policyv1alpha1.PropagationPolicy + + ginkgo.JustBeforeEach(func() { + framework.CreatePropagationPolicy(karmadaClient, policy) + ginkgo.DeferCleanup(func() { + framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name) + }) + }) ginkgo.Context("DeploymentStatus collection testing", func() { - var policyNamespace, policyName string var deploymentNamespace, deploymentName string var deployment *appsv1.Deployment - var policy *policyv1alpha1.PropagationPolicy ginkgo.BeforeEach(func() { policyNamespace = testNamespace @@ -48,11 +56,9 @@ var _ = ginkgo.Describe("[resource-status collection] resource status collection }) ginkgo.BeforeEach(func() { - framework.CreatePropagationPolicy(karmadaClient, policy) framework.CreateDeployment(kubeClient, deployment) ginkgo.DeferCleanup(func() { framework.RemoveDeployment(kubeClient, deployment.Namespace, deployment.Name) - framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name) }) }) @@ -80,7 +86,7 @@ var _ = ginkgo.Describe("[resource-status collection] resource 
status collection framework.UpdateDeploymentReplicas(kubeClient, deployment, updateDeploymentReplicas) - ginkgo.By("check if deployment status has been update whit new collection", func() { + ginkgo.By("check if deployment status has been update with new collection", func() { wantedReplicas := updateDeploymentReplicas * int32(len(framework.Clusters())) klog.Infof("Waiting for deployment(%s/%s) collecting correctly status", deploymentNamespace, deploymentName) @@ -103,10 +109,8 @@ var _ = ginkgo.Describe("[resource-status collection] resource status collection }) ginkgo.Context("ServiceStatus collection testing", func() { - var policyNamespace, policyName string var serviceNamespace, serviceName string var service *corev1.Service - var policy *policyv1alpha1.PropagationPolicy ginkgo.BeforeEach(func() { policyNamespace = testNamespace @@ -130,11 +134,9 @@ var _ = ginkgo.Describe("[resource-status collection] resource status collection }) ginkgo.BeforeEach(func() { - framework.CreatePropagationPolicy(karmadaClient, policy) framework.CreateService(kubeClient, service) ginkgo.DeferCleanup(func() { framework.RemoveService(kubeClient, serviceNamespace, serviceName) - framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name) }) }) @@ -167,7 +169,7 @@ var _ = ginkgo.Describe("[resource-status collection] resource status collection }) klog.Infof("svcLoadBalancer: %v", svcLoadBalancer) - ginkgo.By("check if service status has been update whit collection", func() { + ginkgo.By("check if service status has been update with collection", func() { gomega.Eventually(func(g gomega.Gomega) (bool, error) { latestSvc, err := kubeClient.CoreV1().Services(serviceNamespace).Get(context.TODO(), serviceName, metav1.GetOptions{}) g.Expect(err).NotTo(gomega.HaveOccurred()) @@ -178,4 +180,76 @@ var _ = ginkgo.Describe("[resource-status collection] resource status collection }) }) }) + + ginkgo.Context("IngressStatus collection testing", func() { + var ingNamespace, 
ingName string + var ingress *networkingv1.Ingress + + ginkgo.BeforeEach(func() { + policyNamespace = testNamespace + policyName = ingressNamePrefix + rand.String(RandomStrLength) + ingNamespace = testNamespace + ingName = policyName + + ingress = helper.NewIngress(ingNamespace, ingName) + policy = helper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{ + { + APIVersion: ingress.APIVersion, + Kind: ingress.Kind, + Name: ingress.Name, + }, + }, policyv1alpha1.Placement{ + ClusterAffinity: &policyv1alpha1.ClusterAffinity{ + ClusterNames: framework.ClusterNames(), + }, + }) + }) + + ginkgo.BeforeEach(func() { + framework.CreateIngress(kubeClient, ingress) + ginkgo.DeferCleanup(func() { + framework.RemoveIngress(kubeClient, ingNamespace, ingName) + }) + }) + + ginkgo.It("ingress status collection testing", func() { + ingLoadBalancer := corev1.LoadBalancerStatus{} + + // simulate the update of the ingress status in member clusters. + ginkgo.By("Update ingress status in member clusters", func() { + for index, clusterName := range framework.ClusterNames() { + clusterClient := framework.GetClusterClient(clusterName) + gomega.Expect(clusterClient).ShouldNot(gomega.BeNil()) + + ingresses := []corev1.LoadBalancerIngress{{IP: fmt.Sprintf("172.19.2.%d", index+6)}} + for _, ingress := range ingresses { + ingLoadBalancer.Ingress = append(ingLoadBalancer.Ingress, corev1.LoadBalancerIngress{ + IP: ingress.IP, + Hostname: clusterName, + }) + } + + gomega.Eventually(func(g gomega.Gomega) { + memberIng, err := clusterClient.NetworkingV1().Ingresses(ingNamespace).Get(context.TODO(), ingName, metav1.GetOptions{}) + g.Expect(err).NotTo(gomega.HaveOccurred()) + + memberIng.Status.LoadBalancer = corev1.LoadBalancerStatus{Ingress: ingresses} + _, err = clusterClient.NetworkingV1().Ingresses(ingNamespace).UpdateStatus(context.TODO(), memberIng, metav1.UpdateOptions{}) + g.Expect(err).NotTo(gomega.HaveOccurred()) + }, pollTimeout, 
pollInterval).Should(gomega.Succeed()) + } + }) + klog.Infof("ingLoadBalancer: %v", ingLoadBalancer) + + ginkgo.By("check if ingress status has been update with collection", func() { + gomega.Eventually(func(g gomega.Gomega) (bool, error) { + latestIng, err := kubeClient.NetworkingV1().Ingresses(ingNamespace).Get(context.TODO(), ingName, metav1.GetOptions{}) + g.Expect(err).NotTo(gomega.HaveOccurred()) + + klog.Infof("the latest ingressStatus loadBalancer: %v", latestIng.Status.LoadBalancer) + return reflect.DeepEqual(latestIng.Status.LoadBalancer, ingLoadBalancer), nil + }, pollTimeout, pollInterval).Should(gomega.Equal(true)) + }) + }) + }) }) diff --git a/test/e2e/suite_test.go b/test/e2e/suite_test.go index 6c994b45b..2d257ba9c 100644 --- a/test/e2e/suite_test.go +++ b/test/e2e/suite_test.go @@ -50,6 +50,7 @@ const ( federatedResourceQuotaPrefix = "frq-" configMapNamePrefix = "configmap-" secretNamePrefix = "secret-" + ingressNamePrefix = "ingress-" updateDeploymentReplicas = 6 updateServicePort = 81 diff --git a/test/helper/resource.go b/test/helper/resource.go index 26a40c078..c53ad4c5f 100644 --- a/test/helper/resource.go +++ b/test/helper/resource.go @@ -6,6 +6,7 @@ import ( appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" rbacv1 "k8s.io/api/rbac/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -550,3 +551,37 @@ func NewClusterRoleBinding(name, roleRefName string, subject []rbacv1.Subject) * }, } } + +// NewIngress will build a new ingress object. 
+func NewIngress(namespace, name string) *networkingv1.Ingress { + nginxIngressClassName := "nginx" + pathTypePrefix := networkingv1.PathTypePrefix + return &networkingv1.Ingress{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "networking.k8s.io/v1", + Kind: "Ingress", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + }, + Spec: networkingv1.IngressSpec{ + IngressClassName: &nginxIngressClassName, + Rules: []networkingv1.IngressRule{ + { + Host: "www.demo.com", + IngressRuleValue: networkingv1.IngressRuleValue{ + HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: []networkingv1.HTTPIngressPath{ + { + Path: "/testpath", + PathType: &pathTypePrefix, + Backend: networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: "test", + Port: networkingv1.ServiceBackendPort{ + Number: 80, + }, + }, + }}}}}}}}} +}