Merge pull request #1737 from Poor12/e2e
[e2e] add e2e for jobStatus, daemonSetStatus, statefulSetStatus collection
commit 4ea28ff236
@ -0,0 +1,28 @@
package framework

import (
    "context"
    "fmt"

    "github.com/onsi/ginkgo/v2"
    "github.com/onsi/gomega"
    appsv1 "k8s.io/api/apps/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
)

// CreateDaemonSet creates a DaemonSet.
func CreateDaemonSet(client kubernetes.Interface, daemonSet *appsv1.DaemonSet) {
    ginkgo.By(fmt.Sprintf("Creating DaemonSet(%s/%s)", daemonSet.Namespace, daemonSet.Name), func() {
        _, err := client.AppsV1().DaemonSets(daemonSet.Namespace).Create(context.TODO(), daemonSet, metav1.CreateOptions{})
        gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
    })
}

// RemoveDaemonSet deletes a DaemonSet.
func RemoveDaemonSet(client kubernetes.Interface, namespace, name string) {
    ginkgo.By(fmt.Sprintf("Removing DaemonSet(%s/%s)", namespace, name), func() {
        err := client.AppsV1().DaemonSets(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
        gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
    })
}
@ -0,0 +1,39 @@
package framework

import (
    "context"
    "fmt"

    "github.com/onsi/ginkgo/v2"
    "github.com/onsi/gomega"
    appsv1 "k8s.io/api/apps/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
)

// CreateStatefulSet creates a StatefulSet.
func CreateStatefulSet(client kubernetes.Interface, statefulSet *appsv1.StatefulSet) {
    ginkgo.By(fmt.Sprintf("Creating StatefulSet(%s/%s)", statefulSet.Namespace, statefulSet.Name), func() {
        _, err := client.AppsV1().StatefulSets(statefulSet.Namespace).Create(context.TODO(), statefulSet, metav1.CreateOptions{})
        gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
    })
}

// RemoveStatefulSet deletes a StatefulSet.
func RemoveStatefulSet(client kubernetes.Interface, namespace, name string) {
    ginkgo.By(fmt.Sprintf("Removing StatefulSet(%s/%s)", namespace, name), func() {
        err := client.AppsV1().StatefulSets(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
        gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
    })
}

// UpdateStatefulSetReplicas updates a StatefulSet's replicas.
func UpdateStatefulSetReplicas(client kubernetes.Interface, statefulSet *appsv1.StatefulSet, replicas int32) {
    ginkgo.By(fmt.Sprintf("Updating StatefulSet(%s/%s)'s replicas to %d", statefulSet.Namespace, statefulSet.Name, replicas), func() {
        statefulSet.Spec.Replicas = &replicas
        gomega.Eventually(func() error {
            _, err := client.AppsV1().StatefulSets(statefulSet.Namespace).Update(context.TODO(), statefulSet, metav1.UpdateOptions{})
            return err
        }, pollTimeout, pollInterval).ShouldNot(gomega.HaveOccurred())
    })
}
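Taken together, the two new framework files give specs a create/clean-up/scale workflow. Below is a minimal sketch of how they compose inside a spec, mirroring how the tests later in this diff use them; the import paths, the "statefulset-demo" name, and the replica count are illustrative assumptions, while kubeClient and testNamespace are the suite-level variables the diff itself references.

package e2e

import (
    "github.com/onsi/ginkgo/v2"

    "github.com/karmada-io/karmada/test/e2e/framework" // assumed import path
    "github.com/karmada-io/karmada/test/helper"        // assumed import path
)

// Illustrative sketch only; relies on the e2e suite's kubeClient and testNamespace.
var _ = ginkgo.Describe("framework helper usage (sketch)", func() {
    ginkgo.It("creates, scales, and cleans up a StatefulSet", func() {
        statefulSet := helper.NewStatefulSet(testNamespace, "statefulset-demo") // hypothetical name
        framework.CreateStatefulSet(kubeClient, statefulSet)
        ginkgo.DeferCleanup(func() {
            framework.RemoveStatefulSet(kubeClient, statefulSet.Namespace, statefulSet.Name)
        })

        // UpdateStatefulSetReplicas mutates statefulSet.Spec.Replicas and retries the
        // Update call via gomega.Eventually until it stops returning an error.
        framework.UpdateStatefulSetReplicas(kubeClient, statefulSet, 6)
    })
})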
@ -8,9 +8,11 @@ import (
    "github.com/onsi/ginkgo/v2"
    "github.com/onsi/gomega"
    appsv1 "k8s.io/api/apps/v1"
    batchv1 "k8s.io/api/batch/v1"
    corev1 "k8s.io/api/core/v1"
    networkingv1 "k8s.io/api/networking/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/rand"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/klog/v2"
@ -252,4 +254,248 @@ var _ = ginkgo.Describe("[resource-status collection] resource status collection
            })
        })
    })

    ginkgo.Context("JobStatus collection testing", func() {
        var jobNamespace, jobName string
        var job *batchv1.Job
        var patch []map[string]interface{}

        ginkgo.BeforeEach(func() {
            policyNamespace = testNamespace
            policyName = jobNamePrefix + rand.String(RandomStrLength)
            jobNamespace = testNamespace
            jobName = policyName

            job = helper.NewJob(jobNamespace, jobName)
            policy = helper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
                {
                    APIVersion: job.APIVersion,
                    Kind:       job.Kind,
                    Name:       job.Name,
                },
            }, policyv1alpha1.Placement{
                ClusterAffinity: &policyv1alpha1.ClusterAffinity{
                    ClusterNames: framework.ClusterNames(),
                },
            })

            patch = []map[string]interface{}{
                {
                    "op":    "replace",
                    "path":  "/spec/placement/clusterAffinity/clusterNames",
                    "value": framework.ClusterNames()[0 : len(framework.ClusterNames())-1],
                },
            }
        })

        ginkgo.BeforeEach(func() {
            framework.CreateJob(kubeClient, job)
            ginkgo.DeferCleanup(func() {
                framework.RemoveJob(kubeClient, jobNamespace, jobName)
            })
        })

        ginkgo.It("job status collection testing", func() {
            ginkgo.By("check whether the job status can be correctly collected", func() {
                wantedSucceedPods := int32(len(framework.Clusters()))

                klog.Infof("Waiting for job(%s/%s) status to be collected correctly", jobNamespace, jobName)
                err := wait.PollImmediate(pollInterval, pollTimeout, func() (done bool, err error) {
                    currentJob, err := kubeClient.BatchV1().Jobs(jobNamespace).Get(context.TODO(), jobName, metav1.GetOptions{})
                    gomega.Expect(err).ShouldNot(gomega.HaveOccurred())

                    klog.Infof("job(%s/%s) succeedPods: %d, wanted succeedPods: %d", jobNamespace, jobName, currentJob.Status.Succeeded, wantedSucceedPods)
                    if currentJob.Status.Succeeded == wantedSucceedPods {
                        return true, nil
                    }

                    return false, nil
                })
                gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
            })

            framework.PatchPropagationPolicy(karmadaClient, policy.Namespace, policyName, patch, types.JSONPatchType)

            ginkgo.By("check if the job status has been updated with the new collection", func() {
                wantedSucceedPods := int32(len(framework.Clusters()) - 1)

                klog.Infof("Waiting for job(%s/%s) status to be collected correctly", jobNamespace, jobName)
                err := wait.PollImmediate(pollInterval, pollTimeout, func() (done bool, err error) {
                    currentJob, err := kubeClient.BatchV1().Jobs(jobNamespace).Get(context.TODO(), jobName, metav1.GetOptions{})
                    gomega.Expect(err).ShouldNot(gomega.HaveOccurred())

                    if currentJob.Status.Succeeded == wantedSucceedPods {
                        return true, nil
                    }

                    return false, nil
                })
                gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
            })
        })
    })

    ginkgo.Context("DaemonSetStatus collection testing", func() {
        var daemonSetNamespace, daemonSetName string
        var daemonSet *appsv1.DaemonSet
        var patch []map[string]interface{}

        ginkgo.BeforeEach(func() {
            policyNamespace = testNamespace
            policyName = daemonSetNamePrefix + rand.String(RandomStrLength)
            daemonSetNamespace = testNamespace
            daemonSetName = policyName

            daemonSet = helper.NewDaemonSet(daemonSetNamespace, daemonSetName)
            policy = helper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
                {
                    APIVersion: daemonSet.APIVersion,
                    Kind:       daemonSet.Kind,
                    Name:       daemonSet.Name,
                },
            }, policyv1alpha1.Placement{
                ClusterAffinity: &policyv1alpha1.ClusterAffinity{
                    ClusterNames: framework.ClusterNames(),
                },
            })

            patch = []map[string]interface{}{
                {
                    "op":    "replace",
                    "path":  "/spec/placement/clusterAffinity/clusterNames",
                    "value": framework.ClusterNames()[0 : len(framework.ClusterNames())-1],
                },
            }
        })

        ginkgo.BeforeEach(func() {
            framework.CreateDaemonSet(kubeClient, daemonSet)
            ginkgo.DeferCleanup(func() {
                framework.RemoveDaemonSet(kubeClient, daemonSetNamespace, daemonSetName)
            })
        })

        ginkgo.It("daemonSet status collection testing", func() {
            ginkgo.By("check whether the daemonSet status can be correctly collected", func() {
                wantedReplicas := int32(len(framework.Clusters()))

                klog.Infof("Waiting for daemonSet(%s/%s) status to be collected correctly", daemonSetNamespace, daemonSetName)
                err := wait.PollImmediate(pollInterval, pollTimeout, func() (done bool, err error) {
                    currentDaemonSet, err := kubeClient.AppsV1().DaemonSets(daemonSetNamespace).Get(context.TODO(), daemonSetName, metav1.GetOptions{})
                    gomega.Expect(err).ShouldNot(gomega.HaveOccurred())

                    klog.Infof("daemonSet(%s/%s) replicas: %d, wanted replicas: %d", daemonSetNamespace, daemonSetName, currentDaemonSet.Status.NumberReady, wantedReplicas)
                    if currentDaemonSet.Status.NumberReady == wantedReplicas &&
                        currentDaemonSet.Status.CurrentNumberScheduled == wantedReplicas &&
                        currentDaemonSet.Status.DesiredNumberScheduled == wantedReplicas &&
                        currentDaemonSet.Status.UpdatedNumberScheduled == wantedReplicas &&
                        currentDaemonSet.Status.NumberAvailable == wantedReplicas {
                        return true, nil
                    }

                    return false, nil
                })
                gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
            })

            framework.PatchPropagationPolicy(karmadaClient, policy.Namespace, policyName, patch, types.JSONPatchType)

            ginkgo.By("check if the daemonSet status has been updated with the new collection", func() {
                wantedReplicas := int32(len(framework.Clusters()) - 1)

                klog.Infof("Waiting for daemonSet(%s/%s) status to be collected correctly", daemonSetNamespace, daemonSetName)
                err := wait.PollImmediate(pollInterval, pollTimeout, func() (done bool, err error) {
                    currentDaemonSet, err := kubeClient.AppsV1().DaemonSets(daemonSetNamespace).Get(context.TODO(), daemonSetName, metav1.GetOptions{})
                    gomega.Expect(err).ShouldNot(gomega.HaveOccurred())

                    if currentDaemonSet.Status.NumberReady == wantedReplicas &&
                        currentDaemonSet.Status.CurrentNumberScheduled == wantedReplicas &&
                        currentDaemonSet.Status.DesiredNumberScheduled == wantedReplicas &&
                        currentDaemonSet.Status.UpdatedNumberScheduled == wantedReplicas &&
                        currentDaemonSet.Status.NumberAvailable == wantedReplicas {
                        return true, nil
                    }

                    return false, nil
                })
                gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
            })
        })
    })

    ginkgo.Context("StatefulSetStatus collection testing", func() {
        var statefulSetNamespace, statefulSetName string
        var statefulSet *appsv1.StatefulSet

        ginkgo.BeforeEach(func() {
            policyNamespace = testNamespace
            policyName = statefulSetNamePrefix + rand.String(RandomStrLength)
            statefulSetNamespace = testNamespace
            statefulSetName = policyName

            statefulSet = helper.NewStatefulSet(statefulSetNamespace, statefulSetName)
            policy = helper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
                {
                    APIVersion: statefulSet.APIVersion,
                    Kind:       statefulSet.Kind,
                    Name:       statefulSet.Name,
                },
            }, policyv1alpha1.Placement{
                ClusterAffinity: &policyv1alpha1.ClusterAffinity{
                    ClusterNames: framework.ClusterNames(),
                },
            })
        })

        ginkgo.BeforeEach(func() {
            framework.CreateStatefulSet(kubeClient, statefulSet)
            ginkgo.DeferCleanup(func() {
                framework.RemoveStatefulSet(kubeClient, statefulSetNamespace, statefulSetName)
            })
        })

        ginkgo.It("statefulSet status collection testing", func() {
            ginkgo.By("check whether the statefulSet status can be correctly collected", func() {
                wantedReplicas := *statefulSet.Spec.Replicas * int32(len(framework.Clusters()))
                klog.Infof("Waiting for statefulSet(%s/%s) status to be collected correctly", statefulSetNamespace, statefulSetName)
                err := wait.PollImmediate(pollInterval, pollTimeout, func() (done bool, err error) {
                    currentStatefulSet, err := kubeClient.AppsV1().StatefulSets(statefulSetNamespace).Get(context.TODO(), statefulSetName, metav1.GetOptions{})
                    gomega.Expect(err).ShouldNot(gomega.HaveOccurred())

                    klog.Infof("statefulSet(%s/%s) replicas: %d, wanted replicas: %d", statefulSetNamespace, statefulSetName, currentStatefulSet.Status.Replicas, wantedReplicas)
                    if currentStatefulSet.Status.Replicas == wantedReplicas &&
                        currentStatefulSet.Status.ReadyReplicas == wantedReplicas &&
                        currentStatefulSet.Status.CurrentReplicas == wantedReplicas &&
                        currentStatefulSet.Status.UpdatedReplicas == wantedReplicas {
                        return true, nil
                    }

                    return false, nil
                })
                gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
            })

            framework.UpdateStatefulSetReplicas(kubeClient, statefulSet, updateStatefulSetReplicas)

            ginkgo.By("check if the statefulSet status has been updated with the new collection", func() {
                wantedReplicas := updateStatefulSetReplicas * int32(len(framework.Clusters()))

                klog.Infof("Waiting for statefulSet(%s/%s) status to be collected correctly", statefulSetNamespace, statefulSetName)
                err := wait.PollImmediate(pollInterval, pollTimeout, func() (done bool, err error) {
                    currentStatefulSet, err := kubeClient.AppsV1().StatefulSets(statefulSetNamespace).Get(context.TODO(), statefulSetName, metav1.GetOptions{})
                    gomega.Expect(err).ShouldNot(gomega.HaveOccurred())

                    if currentStatefulSet.Status.Replicas == wantedReplicas &&
                        currentStatefulSet.Status.ReadyReplicas == wantedReplicas &&
                        currentStatefulSet.Status.CurrentReplicas == wantedReplicas &&
                        currentStatefulSet.Status.UpdatedReplicas == wantedReplicas {
                        return true, nil
                    }

                    return false, nil
                })
                gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
            })
        })
    })
})
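All three specs above apply the same rule: the aggregated status on the resource template should equal the per-cluster contribution multiplied by the number of clusters the policy still targets. A worked sketch of the arithmetic follows; the three-member-cluster figure is an assumption about the e2e environment, not something stated in this diff.

package main

import "fmt"

func main() {
    clusters := 3 // assumed len(framework.ClusterNames()) in the default e2e setup

    // Job and DaemonSet specs expect one succeeded/ready pod per targeted cluster,
    // so the patch that drops one cluster lowers the expectation by one.
    fmt.Println(1*clusters, 1*(clusters-1)) // 3 2

    // The StatefulSet spec starts from the helper default of 3 replicas and later
    // scales to updateStatefulSetReplicas (6), so the aggregated status should read:
    fmt.Println(3*clusters, 6*clusters) // 9 18
}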
@ -51,13 +51,16 @@ const (
    configMapNamePrefix   = "configmap-"
    secretNamePrefix      = "secret-"
    ingressNamePrefix     = "ingress-"
    daemonSetNamePrefix   = "daemonset-"
    statefulSetNamePrefix = "statefulset-"

    updateDeploymentReplicas  = 6
    updateStatefulSetReplicas = 6
    updateServicePort         = 81
    updatePodImage            = "nginx:latest"
    updateCRnamespace         = "e2e-test"
    updateBackoffLimit        = 3
    updateParallelism         = 3
)

var (
@ -62,6 +62,71 @@ func NewDeployment(namespace string, name string) *appsv1.Deployment {
    }
}

// NewDaemonSet will build a daemonSet object.
func NewDaemonSet(namespace string, name string) *appsv1.DaemonSet {
    podLabels := map[string]string{"app": "nginx"}

    return &appsv1.DaemonSet{
        TypeMeta: metav1.TypeMeta{
            APIVersion: "apps/v1",
            Kind:       "DaemonSet",
        },
        ObjectMeta: metav1.ObjectMeta{
            Namespace: namespace,
            Name:      name,
        },
        Spec: appsv1.DaemonSetSpec{
            Selector: &metav1.LabelSelector{
                MatchLabels: podLabels,
            },
            Template: corev1.PodTemplateSpec{
                ObjectMeta: metav1.ObjectMeta{
                    Labels: podLabels,
                },
                Spec: corev1.PodSpec{
                    Containers: []corev1.Container{{
                        Name:  "nginx",
                        Image: "nginx:1.19.0",
                    }},
                },
            },
        },
    }
}

// NewStatefulSet will build a statefulSet object.
func NewStatefulSet(namespace string, name string) *appsv1.StatefulSet {
    podLabels := map[string]string{"app": "nginx"}

    return &appsv1.StatefulSet{
        TypeMeta: metav1.TypeMeta{
            APIVersion: "apps/v1",
            Kind:       "StatefulSet",
        },
        ObjectMeta: metav1.ObjectMeta{
            Namespace: namespace,
            Name:      name,
        },
        Spec: appsv1.StatefulSetSpec{
            Replicas: pointer.Int32Ptr(3),
            Selector: &metav1.LabelSelector{
                MatchLabels: podLabels,
            },
            Template: corev1.PodTemplateSpec{
                ObjectMeta: metav1.ObjectMeta{
                    Labels: podLabels,
                },
                Spec: corev1.PodSpec{
                    Containers: []corev1.Container{{
                        Name:  "nginx",
                        Image: "nginx:1.19.0",
                    }},
                },
            },
        },
    }
}

// NewService will build a service object.
func NewService(namespace string, name string) *corev1.Service {
    return &corev1.Service{
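The new constructors return complete objects, so they can also be exercised outside the e2e suite. Below is a minimal sketch against a fake clientset; the helper import path is an assumption inferred from the repository layout and is not shown in this diff.

package helper_test

import (
    "context"
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes/fake"

    "github.com/karmada-io/karmada/test/helper" // assumed import path
)

// ExampleNewStatefulSet creates the helper-built StatefulSet against an
// in-memory fake clientset and prints its name and default replica count.
func ExampleNewStatefulSet() {
    client := fake.NewSimpleClientset()
    statefulSet := helper.NewStatefulSet("default", "demo")

    created, err := client.AppsV1().StatefulSets(statefulSet.Namespace).Create(context.TODO(), statefulSet, metav1.CreateOptions{})
    if err != nil {
        fmt.Println(err)
        return
    }
    fmt.Println(created.Name, *created.Spec.Replicas)
    // Output: demo 3
}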