Merge pull request #1665 from XiShanYongYe-Chang/add-e2e-for-resource-status-collection

[E2E] add e2e for serviceStatus collection
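
The new case creates a LoadBalancer Service together with a PropagationPolicy that places it on all member clusters, fakes a distinct LoadBalancer ingress IP in each member cluster's Service status, and then checks that the Karmada control plane collects the per-cluster statuses back onto the resource template, with each collected ingress carrying the source cluster name as its Hostname.
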
karmada-bot 2022-04-27 16:36:15 +08:00 committed by GitHub
commit 9d0c1fb679
1 changed file with 80 additions and 0 deletions

@@ -2,10 +2,13 @@ package e2e
import (
	"context"
	"fmt"
	"reflect"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/rand"
	"k8s.io/apimachinery/pkg/util/wait"
@@ -98,4 +101,81 @@ var _ = ginkgo.Describe("[resource-status collection] resource status collection
			})
		})
	})
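
	// ServiceStatus collection: propagate a LoadBalancer Service to every member
	// cluster, fake a LoadBalancer ingress in each member's status, then verify
	// that the control plane collects the per-cluster statuses back onto the
	// resource template.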
	ginkgo.Context("ServiceStatus collection testing", func() {
		var policyNamespace, policyName string
		var serviceNamespace, serviceName string
		var service *corev1.Service
		var policy *policyv1alpha1.PropagationPolicy
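
		// Build a LoadBalancer Service and a PropagationPolicy that selects it
		// and places it on all member clusters.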
		ginkgo.BeforeEach(func() {
			policyNamespace = testNamespace
			policyName = deploymentNamePrefix + rand.String(RandomStrLength)
			serviceNamespace = testNamespace
			serviceName = policyName

			service = helper.NewService(serviceNamespace, serviceName)
			service.Spec.Type = corev1.ServiceTypeLoadBalancer
			policy = helper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
				{
					APIVersion: service.APIVersion,
					Kind:       service.Kind,
					Name:       service.Name,
				},
			}, policyv1alpha1.Placement{
				ClusterAffinity: &policyv1alpha1.ClusterAffinity{
					ClusterNames: framework.ClusterNames(),
				},
			})
		})
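
		// Create the policy and the Service on the Karmada control plane, and
		// remove both when the spec finishes.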
		ginkgo.BeforeEach(func() {
			framework.CreatePropagationPolicy(karmadaClient, policy)
			framework.CreateService(kubeClient, service)
			ginkgo.DeferCleanup(func() {
				framework.RemoveService(kubeClient, serviceNamespace, serviceName)
				framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name)
			})
		})

		ginkgo.It("service status collection testing", func() {
			svcLoadBalancer := corev1.LoadBalancerStatus{}

			// simulate the update of the service status in member clusters.
			ginkgo.By("Update service status in member clusters", func() {
				for index, clusterName := range framework.ClusterNames() {
					clusterClient := framework.GetClusterClient(clusterName)
					gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())
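
					// Give each member cluster a distinct fake ingress IP, and
					// record the entry the control plane is expected to collect:
					// the collected ingress carries the source cluster name as
					// its Hostname.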
					ingresses := []corev1.LoadBalancerIngress{{IP: fmt.Sprintf("172.19.1.%d", index+6)}}
					for _, ingress := range ingresses {
						svcLoadBalancer.Ingress = append(svcLoadBalancer.Ingress, corev1.LoadBalancerIngress{
							IP:       ingress.IP,
							Hostname: clusterName,
						})
					}
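
					// Write the fake status inside Eventually so transient
					// failures (e.g. update conflicts) are retried.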
					gomega.Eventually(func(g gomega.Gomega) {
						memberSvc, err := clusterClient.CoreV1().Services(serviceNamespace).Get(context.TODO(), serviceName, metav1.GetOptions{})
						g.Expect(err).NotTo(gomega.HaveOccurred())

						memberSvc.Status.LoadBalancer = corev1.LoadBalancerStatus{Ingress: ingresses}
						_, err = clusterClient.CoreV1().Services(serviceNamespace).UpdateStatus(context.TODO(), memberSvc, metav1.UpdateOptions{})
						g.Expect(err).NotTo(gomega.HaveOccurred())
					}, pollTimeout, pollInterval).Should(gomega.Succeed())
				}
			})
			klog.Infof("svcLoadBalancer: %v", svcLoadBalancer)
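
			// The status collected on the Karmada control plane should
			// eventually equal the union of the per-cluster entries built above.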
ginkgo.By("check if service status has been update whit collection", func() {
gomega.Eventually(func(g gomega.Gomega) (bool, error) {
latestSvc, err := kubeClient.CoreV1().Services(serviceNamespace).Get(context.TODO(), serviceName, metav1.GetOptions{})
g.Expect(err).NotTo(gomega.HaveOccurred())
klog.Infof("the latest serviceStatus loadBalancer: %v", latestSvc.Status.LoadBalancer)
return reflect.DeepEqual(latestSvc.Status.LoadBalancer, svcLoadBalancer), nil
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
})
})
	})
})