Merge pull request #1679 from XiShanYongYe-Chang/add-e2e-for-resource-status-collection
[E2E] add e2e for ingressStatus collection
commit 40d61d2053
@@ -7,7 +7,7 @@ import (
 	appsv1 "k8s.io/api/apps/v1"
 	batchv1 "k8s.io/api/batch/v1"
 	corev1 "k8s.io/api/core/v1"
-	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
+	networkingv1 "k8s.io/api/networking/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/klog/v2"
@@ -23,7 +23,7 @@ func getAllDefaultAggregateStatusInterpreter() map[schema.GroupVersionKind]aggre
 	s := make(map[schema.GroupVersionKind]aggregateStatusInterpreter)
 	s[appsv1.SchemeGroupVersion.WithKind(util.DeploymentKind)] = aggregateDeploymentStatus
 	s[corev1.SchemeGroupVersion.WithKind(util.ServiceKind)] = aggregateServiceStatus
-	s[extensionsv1beta1.SchemeGroupVersion.WithKind(util.IngressKind)] = aggregateIngressStatus
+	s[networkingv1.SchemeGroupVersion.WithKind(util.IngressKind)] = aggregateIngressStatus
 	s[batchv1.SchemeGroupVersion.WithKind(util.JobKind)] = aggregateJobStatus
 	s[appsv1.SchemeGroupVersion.WithKind(util.DaemonSetKind)] = aggregateDaemonSetStatus
 	s[appsv1.SchemeGroupVersion.WithKind(util.StatefulSetKind)] = aggregateStatefulSetStatus
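
The registration above matters because the default interpreter is looked up by the object's GroupVersionKind, so moving the map key from extensions/v1beta1 to networking/v1 changes which Ingress objects get the handler. A minimal lookup sketch; the function, map type, and error text are simplified assumptions, not code copied from this PR:

// Sketch only: how a GVK-keyed interpreter map like the one above is consulted.
package sketch

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// aggregateFn stands in for the package's aggregateStatusInterpreter signature,
// with the aggregated-status items reduced to raw JSON for illustration.
type aggregateFn func(object *unstructured.Unstructured, items [][]byte) (*unstructured.Unstructured, error)

// aggregateStatus resolves the handler registered for the object's GroupVersionKind
// and delegates to it; unregistered kinds surface an explicit error.
func aggregateStatus(handlers map[schema.GroupVersionKind]aggregateFn, object *unstructured.Unstructured, items [][]byte) (*unstructured.Unstructured, error) {
	handler, ok := handlers[object.GroupVersionKind()]
	if !ok {
		return nil, fmt.Errorf("no aggregate status handler registered for %s", object.GroupVersionKind())
	}
	return handler(object, items)
}
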
@@ -122,12 +122,12 @@ func aggregateIngressStatus(object *unstructured.Unstructured, aggregatedStatusI
 		return nil, err
 	}
 
-	newStatus := &extensionsv1beta1.IngressStatus{}
+	newStatus := &networkingv1.IngressStatus{}
 	for _, item := range aggregatedStatusItems {
 		if item.Status == nil {
 			continue
 		}
-		temp := &extensionsv1beta1.IngressStatus{}
+		temp := &networkingv1.IngressStatus{}
 		if err := json.Unmarshal(item.Status.Raw, temp); err != nil {
 			klog.Errorf("Failed to unmarshal status ingress(%s/%s): %v", ingress.Namespace, ingress.Name, err)
 			return nil, err
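
The hunk ends at the unmarshalling step; the rest of aggregateIngressStatus is not shown in this diff. A sketch of the merge it presumably performs next, under the assumption that each member cluster's load-balancer entries are appended into a single status (the function and variable names below are illustrative only):

// Sketch of the append-based merge assumed to follow the hunk above.
package sketch

import (
	networkingv1 "k8s.io/api/networking/v1"
)

// mergeIngressStatuses folds per-cluster Ingress statuses into one, keeping
// every cluster's load-balancer ingress entry rather than overwriting them.
func mergeIngressStatuses(perCluster []networkingv1.IngressStatus) networkingv1.IngressStatus {
	merged := networkingv1.IngressStatus{}
	for _, s := range perCluster {
		merged.LoadBalancer.Ingress = append(merged.LoadBalancer.Ingress, s.LoadBalancer.Ingress...)
	}
	return merged
}
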
@@ -7,7 +7,7 @@ import (
 	appsv1 "k8s.io/api/apps/v1"
 	batchv1 "k8s.io/api/batch/v1"
 	corev1 "k8s.io/api/core/v1"
-	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
+	networkingv1 "k8s.io/api/networking/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 
@@ -140,8 +140,8 @@ func TestAggregateIngressStatus(t *testing.T) {
 		{ClusterName: "member1", Status: raw, Applied: true},
 	}
 
-	oldIngress := &extensionsv1beta1.Ingress{}
-	newIngress := &extensionsv1beta1.Ingress{Status: extensionsv1beta1.IngressStatus{LoadBalancer: corev1.LoadBalancerStatus{Ingress: []corev1.LoadBalancerIngress{{IP: "8.8.8.8", Hostname: "member1"}}}}}
+	oldIngress := &networkingv1.Ingress{}
+	newIngress := &networkingv1.Ingress{Status: networkingv1.IngressStatus{LoadBalancer: corev1.LoadBalancerStatus{Ingress: []corev1.LoadBalancerIngress{{IP: "8.8.8.8", Hostname: "member1"}}}}}
 	oldObj, _ := helper.ToUnstructured(oldIngress)
 	newObj, _ := helper.ToUnstructured(newIngress)
 
@@ -6,7 +6,7 @@ import (
 	appsv1 "k8s.io/api/apps/v1"
 	batchv1 "k8s.io/api/batch/v1"
 	corev1 "k8s.io/api/core/v1"
-	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
+	networkingv1 "k8s.io/api/networking/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
@@ -22,7 +22,7 @@ func getAllDefaultReflectStatusInterpreter() map[schema.GroupVersionKind]reflect
 	s := make(map[schema.GroupVersionKind]reflectStatusInterpreter)
 	s[appsv1.SchemeGroupVersion.WithKind(util.DeploymentKind)] = reflectDeploymentStatus
 	s[corev1.SchemeGroupVersion.WithKind(util.ServiceKind)] = reflectServiceStatus
-	s[extensionsv1beta1.SchemeGroupVersion.WithKind(util.IngressKind)] = reflectIngressStatus
+	s[networkingv1.SchemeGroupVersion.WithKind(util.IngressKind)] = reflectIngressStatus
 	s[batchv1.SchemeGroupVersion.WithKind(util.JobKind)] = reflectJobStatus
 	s[appsv1.SchemeGroupVersion.WithKind(util.DaemonSetKind)] = reflectDaemonSetStatus
 	s[appsv1.SchemeGroupVersion.WithKind(util.StatefulSetKind)] = reflectStatefulSetStatus
@@ -5,7 +5,7 @@ import (
 	batchv1 "k8s.io/api/batch/v1"
 	corev1 "k8s.io/api/core/v1"
 	discoveryv1 "k8s.io/api/discovery/v1"
-	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
+	networkingv1 "k8s.io/api/networking/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
 
@@ -196,8 +196,8 @@ func ConvertToServiceStatus(obj map[string]interface{}) (*corev1.ServiceStatus,
 }
 
 // ConvertToIngress converts an Ingress object from unstructured to typed.
-func ConvertToIngress(obj *unstructured.Unstructured) (*extensionsv1beta1.Ingress, error) {
-	typedObj := &extensionsv1beta1.Ingress{}
+func ConvertToIngress(obj *unstructured.Unstructured) (*networkingv1.Ingress, error) {
+	typedObj := &networkingv1.Ingress{}
 	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), typedObj); err != nil {
 		return nil, err
 	}
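
A hypothetical caller of the updated helper: convert an unstructured Ingress (for example, one collected from a member cluster) into the typed networking/v1 form before reading its load-balancer status. The helper import path and the surrounding function are assumptions for illustration, not part of this change:

// Illustrative usage of ConvertToIngress after the v1beta1 -> v1 switch.
package sketch

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

	"github.com/karmada-io/karmada/pkg/util/helper"
)

func printIngressLoadBalancer(obj *unstructured.Unstructured) error {
	ing, err := helper.ConvertToIngress(obj)
	if err != nil {
		return fmt.Errorf("failed to convert ingress %s/%s: %v", obj.GetNamespace(), obj.GetName(), err)
	}
	fmt.Printf("ingress %s/%s load balancer: %v\n", ing.Namespace, ing.Name, ing.Status.LoadBalancer)
	return nil
}
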
@@ -0,0 +1,28 @@
+package framework
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
+	networkingv1 "k8s.io/api/networking/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
+)
+
+// CreateIngress create Ingress.
+func CreateIngress(client kubernetes.Interface, ingress *networkingv1.Ingress) {
+	ginkgo.By(fmt.Sprintf("Creating Ingress(%s/%s)", ingress.Namespace, ingress.Name), func() {
+		_, err := client.NetworkingV1().Ingresses(ingress.Namespace).Create(context.TODO(), ingress, metav1.CreateOptions{})
+		gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+	})
+}
+
+// RemoveIngress delete Ingress.
+func RemoveIngress(client kubernetes.Interface, namespace, name string) {
+	ginkgo.By(fmt.Sprintf("Removing Ingress(%s/%s)", namespace, name), func() {
+		err := client.NetworkingV1().Ingresses(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
+		gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+	})
+}
@@ -9,6 +9,7 @@ import (
 	"github.com/onsi/gomega"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
+	networkingv1 "k8s.io/api/networking/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/rand"
 	"k8s.io/apimachinery/pkg/util/wait"
|
@ -20,12 +21,19 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
var _ = ginkgo.Describe("[resource-status collection] resource status collection testing", func() {
|
var _ = ginkgo.Describe("[resource-status collection] resource status collection testing", func() {
|
||||||
|
var policyNamespace, policyName string
|
||||||
|
var policy *policyv1alpha1.PropagationPolicy
|
||||||
|
|
||||||
|
ginkgo.JustBeforeEach(func() {
|
||||||
|
framework.CreatePropagationPolicy(karmadaClient, policy)
|
||||||
|
ginkgo.DeferCleanup(func() {
|
||||||
|
framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
ginkgo.Context("DeploymentStatus collection testing", func() {
|
ginkgo.Context("DeploymentStatus collection testing", func() {
|
||||||
var policyNamespace, policyName string
|
|
||||||
var deploymentNamespace, deploymentName string
|
var deploymentNamespace, deploymentName string
|
||||||
var deployment *appsv1.Deployment
|
var deployment *appsv1.Deployment
|
||||||
var policy *policyv1alpha1.PropagationPolicy
|
|
||||||
|
|
||||||
ginkgo.BeforeEach(func() {
|
ginkgo.BeforeEach(func() {
|
||||||
policyNamespace = testNamespace
|
policyNamespace = testNamespace
|
||||||
|
@ -48,11 +56,9 @@ var _ = ginkgo.Describe("[resource-status collection] resource status collection
|
||||||
})
|
})
|
||||||
|
|
||||||
ginkgo.BeforeEach(func() {
|
ginkgo.BeforeEach(func() {
|
||||||
framework.CreatePropagationPolicy(karmadaClient, policy)
|
|
||||||
framework.CreateDeployment(kubeClient, deployment)
|
framework.CreateDeployment(kubeClient, deployment)
|
||||||
ginkgo.DeferCleanup(func() {
|
ginkgo.DeferCleanup(func() {
|
||||||
framework.RemoveDeployment(kubeClient, deployment.Namespace, deployment.Name)
|
framework.RemoveDeployment(kubeClient, deployment.Namespace, deployment.Name)
|
||||||
framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name)
|
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
|
@@ -80,7 +86,7 @@ var _ = ginkgo.Describe("[resource-status collection] resource status collection
 
 			framework.UpdateDeploymentReplicas(kubeClient, deployment, updateDeploymentReplicas)
 
-			ginkgo.By("check if deployment status has been update whit new collection", func() {
+			ginkgo.By("check if deployment status has been update with new collection", func() {
 				wantedReplicas := updateDeploymentReplicas * int32(len(framework.Clusters()))
 
 				klog.Infof("Waiting for deployment(%s/%s) collecting correctly status", deploymentNamespace, deploymentName)
@@ -103,10 +109,8 @@ var _ = ginkgo.Describe("[resource-status collection] resource status collection
 	})
 
 	ginkgo.Context("ServiceStatus collection testing", func() {
-		var policyNamespace, policyName string
 		var serviceNamespace, serviceName string
 		var service *corev1.Service
-		var policy *policyv1alpha1.PropagationPolicy
 
 		ginkgo.BeforeEach(func() {
 			policyNamespace = testNamespace
|
@ -130,11 +134,9 @@ var _ = ginkgo.Describe("[resource-status collection] resource status collection
|
||||||
})
|
})
|
||||||
|
|
||||||
ginkgo.BeforeEach(func() {
|
ginkgo.BeforeEach(func() {
|
||||||
framework.CreatePropagationPolicy(karmadaClient, policy)
|
|
||||||
framework.CreateService(kubeClient, service)
|
framework.CreateService(kubeClient, service)
|
||||||
ginkgo.DeferCleanup(func() {
|
ginkgo.DeferCleanup(func() {
|
||||||
framework.RemoveService(kubeClient, serviceNamespace, serviceName)
|
framework.RemoveService(kubeClient, serviceNamespace, serviceName)
|
||||||
framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name)
|
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
|
@@ -167,7 +169,7 @@ var _ = ginkgo.Describe("[resource-status collection] resource status collection
 			})
 			klog.Infof("svcLoadBalancer: %v", svcLoadBalancer)
 
-			ginkgo.By("check if service status has been update whit collection", func() {
+			ginkgo.By("check if service status has been update with collection", func() {
 				gomega.Eventually(func(g gomega.Gomega) (bool, error) {
 					latestSvc, err := kubeClient.CoreV1().Services(serviceNamespace).Get(context.TODO(), serviceName, metav1.GetOptions{})
 					g.Expect(err).NotTo(gomega.HaveOccurred())
@@ -178,4 +180,76 @@ var _ = ginkgo.Describe("[resource-status collection] resource status collection
 			})
 		})
 	})
+
+	ginkgo.Context("IngressStatus collection testing", func() {
+		var ingNamespace, ingName string
+		var ingress *networkingv1.Ingress
+
+		ginkgo.BeforeEach(func() {
+			policyNamespace = testNamespace
+			policyName = ingressNamePrefix + rand.String(RandomStrLength)
+			ingNamespace = testNamespace
+			ingName = policyName
+
+			ingress = helper.NewIngress(ingNamespace, ingName)
+			policy = helper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
+				{
+					APIVersion: ingress.APIVersion,
+					Kind: ingress.Kind,
+					Name: ingress.Name,
+				},
+			}, policyv1alpha1.Placement{
+				ClusterAffinity: &policyv1alpha1.ClusterAffinity{
+					ClusterNames: framework.ClusterNames(),
+				},
+			})
+		})
+
+		ginkgo.BeforeEach(func() {
+			framework.CreateIngress(kubeClient, ingress)
+			ginkgo.DeferCleanup(func() {
+				framework.RemoveIngress(kubeClient, ingNamespace, ingName)
+			})
+		})
+
+		ginkgo.It("ingress status collection testing", func() {
+			ingLoadBalancer := corev1.LoadBalancerStatus{}
+
+			// simulate the update of the ingress status in member clusters.
+			ginkgo.By("Update ingress status in member clusters", func() {
+				for index, clusterName := range framework.ClusterNames() {
+					clusterClient := framework.GetClusterClient(clusterName)
+					gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())
+
+					ingresses := []corev1.LoadBalancerIngress{{IP: fmt.Sprintf("172.19.2.%d", index+6)}}
+					for _, ingress := range ingresses {
+						ingLoadBalancer.Ingress = append(ingLoadBalancer.Ingress, corev1.LoadBalancerIngress{
+							IP: ingress.IP,
+							Hostname: clusterName,
+						})
+					}
+
+					gomega.Eventually(func(g gomega.Gomega) {
+						memberIng, err := clusterClient.NetworkingV1().Ingresses(ingNamespace).Get(context.TODO(), ingName, metav1.GetOptions{})
+						g.Expect(err).NotTo(gomega.HaveOccurred())
+
+						memberIng.Status.LoadBalancer = corev1.LoadBalancerStatus{Ingress: ingresses}
+						_, err = clusterClient.NetworkingV1().Ingresses(ingNamespace).UpdateStatus(context.TODO(), memberIng, metav1.UpdateOptions{})
+						g.Expect(err).NotTo(gomega.HaveOccurred())
+					}, pollTimeout, pollInterval).Should(gomega.Succeed())
+				}
+			})
+			klog.Infof("ingLoadBalancer: %v", ingLoadBalancer)
+
+			ginkgo.By("check if ingress status has been update with collection", func() {
+				gomega.Eventually(func(g gomega.Gomega) (bool, error) {
+					latestIng, err := kubeClient.NetworkingV1().Ingresses(ingNamespace).Get(context.TODO(), ingName, metav1.GetOptions{})
+					g.Expect(err).NotTo(gomega.HaveOccurred())
+
+					klog.Infof("the latest ingressStatus loadBalancer: %v", latestIng.Status.LoadBalancer)
+					return reflect.DeepEqual(latestIng.Status.LoadBalancer, ingLoadBalancer), nil
+				}, pollTimeout, pollInterval).Should(gomega.Equal(true))
+			})
+		})
+	})
 })
@@ -50,6 +50,7 @@ const (
 	federatedResourceQuotaPrefix = "frq-"
 	configMapNamePrefix = "configmap-"
 	secretNamePrefix = "secret-"
+	ingressNamePrefix = "ingress-"
 
 	updateDeploymentReplicas = 6
 	updateServicePort = 81
@@ -6,6 +6,7 @@ import (
 	appsv1 "k8s.io/api/apps/v1"
 	batchv1 "k8s.io/api/batch/v1"
 	corev1 "k8s.io/api/core/v1"
+	networkingv1 "k8s.io/api/networking/v1"
 	rbacv1 "k8s.io/api/rbac/v1"
 	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
@@ -550,3 +551,37 @@ func NewClusterRoleBinding(name, roleRefName string, subject []rbacv1.Subject) *
 		},
 	}
 }
+
+// NewIngress will build a new ingress object.
+func NewIngress(namespace, name string) *networkingv1.Ingress {
+	nginxIngressClassName := "nginx"
+	pathTypePrefix := networkingv1.PathTypePrefix
+	return &networkingv1.Ingress{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "networking.k8s.io/v1",
+			Kind: "Ingress",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: namespace,
+			Name: name,
+		},
+		Spec: networkingv1.IngressSpec{
+			IngressClassName: &nginxIngressClassName,
+			Rules: []networkingv1.IngressRule{
+				{
+					Host: "www.demo.com",
+					IngressRuleValue: networkingv1.IngressRuleValue{
+						HTTP: &networkingv1.HTTPIngressRuleValue{
+							Paths: []networkingv1.HTTPIngressPath{
+								{
+									Path: "/testpath",
+									PathType: &pathTypePrefix,
+									Backend: networkingv1.IngressBackend{
+										Service: &networkingv1.IngressServiceBackend{
+											Name: "test",
+											Port: networkingv1.ServiceBackendPort{
+												Number: 80,
+											},
+										},
+									},
+								}}}}}}}}}
+}
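
One detail worth noting about the builder above: it sets TypeMeta explicitly, which is what lets the e2e test earlier in this diff fill its ResourceSelector from ingress.APIVersion and ingress.Kind, since objects constructed directly in Go have no apiVersion/kind unless set by hand. A small illustrative sketch (not part of the change):

// Illustrative only: the fields the e2e ResourceSelector is built from.
package sketch

import (
	networkingv1 "k8s.io/api/networking/v1"
)

// selectorFieldsFor returns the values the test reads from the built Ingress;
// with NewIngress they resolve to "networking.k8s.io/v1", "Ingress", and the name.
func selectorFieldsFor(ing *networkingv1.Ingress) (apiVersion, kind, name string) {
	return ing.APIVersion, ing.Kind, ing.Name
}
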