Add label and spread-constraints E2E for CRD and Deployment (#245)

Add E2E tests for label and group constraints

Signed-off-by: mabotao <1397247577@qq.com>
tinyma123 committed 2021-04-09 16:12:20 +08:00 (via GitHub)
commit 46974327e8, parent f3de793adc
5 changed files with 451 additions and 11 deletions
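In short: the suite setup now labels every member cluster with location=CHN; a new test/e2e/scheduling_test.go propagates a Deployment (via a PropagationPolicy) and a CRD (via a ClusterPropagationPolicy) under that label selector plus a SpreadByField=Cluster constraint with MaxGroups == MinGroups; and the test helpers gain constructors for building such constrained policies.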


@@ -28,6 +28,7 @@ spec:
- --kubeconfig=/etc/kubeconfig
- --bind-address=0.0.0.0
- --secure-port=10351
- --failover=true
volumeMounts:
- name: kubeconfig
subPath: kubeconfig
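The added --failover=true argument presumably switches on the component's failover (rescheduling) feature; that is an assumption from the flag name, since the manifest's filename is not preserved in this hunk.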

test/e2e/scheduling_test.go (new file)

@@ -0,0 +1,301 @@
package e2e
import (
"context"
"encoding/json"
"fmt"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog/v2"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
"github.com/karmada-io/karmada/pkg/util"
"github.com/karmada-io/karmada/pkg/util/names"
"github.com/karmada-io/karmada/test/helper"
)
// The following cases focus on propagation with label and group constraints.
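// Both contexts below follow the same flow: create a policy that selects
// clusters by the location=CHN label and carries a SpreadByField=Cluster
// constraint with MaxGroups == MinGroups, create the resource, then verify
// that the (Cluster)ResourceBinding and the member clusters contain exactly
// minGroups matched clusters.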
var _ = ginkgo.Describe("propagation with label and group constraints testing", func() {
ginkgo.Context("Deployment propagation testing", func() {
var groupMatchedClusters []*clusterv1alpha1.Cluster
var targetClusterNames []string
policyNamespace := testNamespace
policyName := deploymentNamePrefix + rand.String(RandomStrLength)
deploymentNamespace := testNamespace
deploymentName := policyName
deployment := helper.NewDeployment(deploymentNamespace, deploymentName)
maxGroups := rand.Intn(2) + 1
minGroups := maxGroups
// Set MaxGroups = MinGroups (randomly 1 or 2); the cluster label is location=CHN.
policy := helper.NewPolicyWithGroupsDeployment(policyNamespace, policyName, deployment, maxGroups, minGroups, clusterLabels)
ginkgo.BeforeEach(func() {
ginkgo.By(fmt.Sprintf("creating policy(%s/%s)", policyNamespace, policyName), func() {
_, err := karmadaClient.PolicyV1alpha1().PropagationPolicies(policyNamespace).Create(context.TODO(), policy, metav1.CreateOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
})
ginkgo.AfterEach(func() {
ginkgo.By(fmt.Sprintf("removing policy(%s/%s)", policyNamespace, policyName), func() {
err := karmadaClient.PolicyV1alpha1().PropagationPolicies(policyNamespace).Delete(context.TODO(), policyName, metav1.DeleteOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
})
ginkgo.It("deployment propagation with label and group constraints testing", func() {
ginkgo.By(fmt.Sprintf("creating deployment(%s/%s)", deploymentNamespace, deploymentName), func() {
_, err := kubeClient.AppsV1().Deployments(testNamespace).Create(context.TODO(), deployment, metav1.CreateOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
ginkgo.By("collect the target clusters in resource binding", func() {
bindingName := names.GenerateBindingName(deployment.Kind, deployment.Name)
fmt.Printf("deploy kind is %s, name is %s\n", deployment.Kind, deployment.Name)
binding := &workv1alpha1.ResourceBinding{}
err := wait.Poll(pollInterval, pollTimeout, func() (done bool, err error) {
err = controlPlaneClient.Get(context.TODO(), client.ObjectKey{Namespace: deployment.Namespace, Name: bindingName}, binding)
if err != nil {
if errors.IsNotFound(err) {
return false, nil
}
return false, err
}
return true, nil
})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
fmt.Printf("MaxGroups= %v, MinGroups= %v\n", maxGroups, minGroups)
for _, cluster := range binding.Spec.Clusters {
targetClusterNames = append(targetClusterNames, cluster.Name)
}
fmt.Printf("target clusters in resource binding are %s\n", targetClusterNames)
gomega.Expect(targetClusterNames).Should(gomega.HaveLen(minGroups))
})
ginkgo.By("check if deployment present on right clusters", func() {
for _, targetClusterName := range targetClusterNames {
clusterClient := getClusterClient(targetClusterName)
gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())
klog.Infof("Check whether deployment(%s/%s) is present on cluster(%s)", deploymentNamespace, deploymentName, targetClusterName)
err := wait.Poll(pollInterval, pollTimeout, func() (done bool, err error) {
_, err = clusterClient.AppsV1().Deployments(deploymentNamespace).Get(context.TODO(), deploymentName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return false, nil
}
return false, err
}
targetCluster, err := util.GetCluster(controlPlaneClient, targetClusterName)
if err != nil {
return false, err
}
groupMatchedClusters = append(groupMatchedClusters, targetCluster)
fmt.Printf("Deployment(%s/%s) is present on cluster(%s).\n", deploymentNamespace, deploymentName, targetClusterName)
return true, nil
})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
}
fmt.Printf("there are %d target clusters\n", len(groupMatchedClusters))
gomega.Expect(groupMatchedClusters).Should(gomega.HaveLen(minGroups))
})
ginkgo.By("updating deployment", func() {
patch := map[string]interface{}{
"spec": map[string]interface{}{
"replicas": pointer.Int32Ptr(updateDeploymentReplicas),
},
}
bytes, err := json.Marshal(patch)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
_, err = kubeClient.AppsV1().Deployments(deploymentNamespace).Patch(context.TODO(), deploymentName, types.StrategicMergePatchType, bytes, metav1.PatchOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
ginkgo.By("check if update has been synced to member clusters", func() {
for _, cluster := range groupMatchedClusters {
clusterClient := getClusterClient(cluster.Name)
gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())
klog.Infof("Waiting for deployment(%s/%s) synced on cluster(%s)", deploymentNamespace, deploymentName, cluster.Name)
err := wait.Poll(pollInterval, pollTimeout, func() (done bool, err error) {
dep, err := clusterClient.AppsV1().Deployments(deploymentNamespace).Get(context.TODO(), deploymentName, metav1.GetOptions{})
if err != nil {
return false, err
}
if *dep.Spec.Replicas == updateDeploymentReplicas {
return true, nil
}
return false, nil
})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
}
})
ginkgo.By(fmt.Sprintf("removing deployment(%s/%s)", deploymentNamespace, deploymentName), func() {
err := kubeClient.AppsV1().Deployments(testNamespace).Delete(context.TODO(), deploymentName, metav1.DeleteOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
ginkgo.By("check if deployment has been deleted from member clusters", func() {
for _, cluster := range groupMatchedClusters {
clusterClient := getClusterClient(cluster.Name)
gomega.Expect(clusterClient).ShouldNot(gomega.BeNil())
klog.Infof("Waiting for deployment(%s/%s) disappear on cluster(%s)", deploymentNamespace, deploymentName, cluster.Name)
err := wait.Poll(pollInterval, pollTimeout, func() (done bool, err error) {
_, err = clusterClient.AppsV1().Deployments(deploymentNamespace).Get(context.TODO(), deploymentName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return true, nil
}
return false, err
}
return false, nil
})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
}
})
})
})
ginkgo.Context("CustomResourceDefinition propagation testing", func() {
var groupMatchedClusters []*clusterv1alpha1.Cluster
var targetClusterNames []string
crdGroup := fmt.Sprintf("example-%s.karmada.io", rand.String(RandomStrLength))
randStr := rand.String(RandomStrLength)
crdSpecNames := apiextensionsv1.CustomResourceDefinitionNames{
Kind: fmt.Sprintf("Foo%s", randStr),
ListKind: fmt.Sprintf("Foo%sList", randStr),
Plural: fmt.Sprintf("foo%ss", randStr),
Singular: fmt.Sprintf("foo%s", randStr),
}
crd := helper.NewCustomResourceDefinition(crdGroup, crdSpecNames, apiextensionsv1.NamespaceScoped)
maxGroups := rand.Intn(2) + 1
minGroups := maxGroups
// Set MaxGroups = MinGroups (randomly 1 or 2); the cluster label is location=CHN.
crdPolicy := helper.NewConstraintsPolicyWithSingleCRD(crd.Name, crd, maxGroups, minGroups, clusterLabels)
crdGVR := schema.GroupVersionResource{Group: "apiextensions.k8s.io", Version: "v1", Resource: "customresourcedefinitions"}
ginkgo.BeforeEach(func() {
ginkgo.By(fmt.Sprintf("creating crdPolicy(%s)", crdPolicy.Name), func() {
_, err := karmadaClient.PolicyV1alpha1().ClusterPropagationPolicies().Create(context.TODO(), crdPolicy, metav1.CreateOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
})
ginkgo.AfterEach(func() {
ginkgo.By(fmt.Sprintf("removing crdPolicy(%s)", crdPolicy.Name), func() {
err := karmadaClient.PolicyV1alpha1().ClusterPropagationPolicies().Delete(context.TODO(), crdPolicy.Name, metav1.DeleteOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
})
ginkgo.It("crd with specified label and group constraints propagation testing", func() {
ginkgo.By(fmt.Sprintf("creating crd(%s)", crd.Name), func() {
unstructObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(crd)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
_, err = dynamicClient.Resource(crdGVR).Namespace(crd.Namespace).Create(context.TODO(), &unstructured.Unstructured{Object: unstructObj}, metav1.CreateOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
ginkgo.By(fmt.Sprintf("get crd(%s)", crd.Name), func() {
_, err := dynamicClient.Resource(crdGVR).Namespace(crd.Namespace).Get(context.TODO(), crd.Name, metav1.GetOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
ginkgo.By("collect the target clusters in cluster resource binding", func() {
bindingName := names.GenerateBindingName(crd.Kind, crd.Name)
fmt.Printf("crd kind is %s, name is %s\n", crd.Kind, crd.Name)
binding := &workv1alpha1.ClusterResourceBinding{}
fmt.Printf("MaxGroups= %v, MinGroups= %v\n", maxGroups, minGroups)
err := wait.Poll(pollInterval, pollTimeout, func() (done bool, err error) {
err = controlPlaneClient.Get(context.TODO(), client.ObjectKey{Name: bindingName}, binding)
if err != nil {
if errors.IsNotFound(err) {
return false, nil
}
return false, err
}
return true, nil
})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
for _, cluster := range binding.Spec.Clusters {
targetClusterNames = append(targetClusterNames, cluster.Name)
}
fmt.Printf("target clusters in cluster resource binding are %s\n", targetClusterNames)
gomega.Expect(targetClusterNames).Should(gomega.HaveLen(minGroups))
})
ginkgo.By("check if crd present on right clusters", func() {
for _, targetClusterName := range targetClusterNames {
clusterDynamicClient := getClusterDynamicClient(targetClusterName)
gomega.Expect(clusterDynamicClient).ShouldNot(gomega.BeNil())
klog.Infof("Waiting for crd(%s) present on cluster(%s)", crd.Name, targetClusterName)
err := wait.Poll(pollInterval, pollTimeout, func() (done bool, err error) {
_, err = clusterDynamicClient.Resource(crdGVR).Namespace(crd.Namespace).Get(context.TODO(), crd.Name, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return false, nil
}
return false, err
}
targetCluster, err := util.GetCluster(controlPlaneClient, targetClusterName)
if err != nil {
return false, err
}
groupMatchedClusters = append(groupMatchedClusters, targetCluster)
fmt.Printf("Crd(%s) is present on cluster(%s).\n", crd.Name, targetClusterName)
return true, nil
})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
}
fmt.Printf("there are %d target clusters\n", len(groupMatchedClusters))
gomega.Expect(groupMatchedClusters).Should(gomega.HaveLen(minGroups))
})
ginkgo.By(fmt.Sprintf("removing crd(%s)", crd.Name), func() {
err := dynamicClient.Resource(crdGVR).Namespace(crd.Namespace).Delete(context.TODO(), crd.Name, metav1.DeleteOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
ginkgo.By("check if crd with specified label and group constraints disappeared from member clusters", func() {
for _, cluster := range groupMatchedClusters {
clusterDynamicClient := getClusterDynamicClient(cluster.Name)
gomega.Expect(clusterDynamicClient).ShouldNot(gomega.BeNil())
klog.Infof("Waiting for crd(%s) disappeared on cluster(%s)\n", crd.Name, cluster.Name)
err := wait.Poll(pollInterval, pollTimeout, func() (done bool, err error) {
_, err = clusterDynamicClient.Resource(crdGVR).Namespace(crd.Namespace).Get(context.TODO(), crd.Name, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return true, nil
}
return false, err
}
return false, nil
})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
}
})
})
})
})


@@ -10,8 +10,10 @@ import (
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
@@ -21,9 +23,10 @@ import (
"sigs.k8s.io/kind/pkg/cluster"
"sigs.k8s.io/kind/pkg/exec"
clusterapi "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
karmada "github.com/karmada-io/karmada/pkg/generated/clientset/versioned"
"github.com/karmada-io/karmada/pkg/util"
"github.com/karmada-io/karmada/pkg/util/gclient"
"github.com/karmada-io/karmada/test/helper"
)
@@ -64,13 +67,14 @@ var (
karmadaClient karmada.Interface
dynamicClient dynamic.Interface
controlPlaneClient client.Client
clusters []*clusterapi.Cluster
clusters []*clusterv1alpha1.Cluster
clusterNames []string
clusterClients []*util.ClusterClient
clusterDynamicClients []*util.DynamicClusterClient
testNamespace = fmt.Sprintf("karmadatest-%s", rand.String(RandomStrLength))
clusterProvider *cluster.Provider
pullModeClusters map[string]string
clusterLabels = map[string]string{"location": "CHN"}
)
func TestE2E(t *testing.T) {
@@ -96,8 +100,7 @@ var _ = ginkgo.BeforeSuite(func() {
dynamicClient, err = dynamic.NewForConfig(restConfig)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
controlPlaneClient, err = client.New(restConfig, client.Options{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
controlPlaneClient = gclient.NewForConfigOrDie(restConfig)
pullModeClusters, err = fetchPullBasedClusters()
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
@@ -116,15 +119,28 @@ var _ = ginkgo.BeforeSuite(func() {
clusterNames = append(clusterNames, cluster.Name)
clusterClients = append(clusterClients, clusterClient)
clusterDynamicClients = append(clusterDynamicClients, clusterDynamicClient)
}
err = SetClusterLabel(controlPlaneClient, cluster.Name)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
}
gomega.Expect(clusterNames).Should(gomega.HaveLen(len(clusters)))
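// re-fetch the clusters (presumably so the cached objects reflect the labels just applied above)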
clusters, err = fetchClusters(karmadaClient)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
fmt.Printf("There are %d clusters\n", len(clusters))
err = setupTestNamespace(testNamespace, kubeClient, clusterClients)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
}, TestSuiteSetupTimeOut.Seconds())
var _ = ginkgo.AfterSuite(func() {
// clean up the cluster labels set by the E2E tests
for _, cluster := range clusters {
err := DeleteClusterLabel(controlPlaneClient, cluster.Name)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
}
// clean up all namespaces we created in both the control plane and the member clusters.
// It will not return an error even if the namespace no longer exists, which may happen if setup failed.
err := cleanupTestNamespace(testNamespace, kubeClient, clusterClients)
@@ -151,16 +167,16 @@ func fetchPullBasedClusters() (map[string]string, error) {
}
// fetchClusters will fetch all member clusters we have.
func fetchClusters(client karmada.Interface) ([]*clusterapi.Cluster, error) {
func fetchClusters(client karmada.Interface) ([]*clusterv1alpha1.Cluster, error) {
clusterList, err := client.ClusterV1alpha1().Clusters().List(context.TODO(), metav1.ListOptions{})
if err != nil {
return nil, err
}
clusters := make([]*clusterapi.Cluster, 0, len(clusterList.Items))
clusters := make([]*clusterv1alpha1.Cluster, 0, len(clusterList.Items))
for _, cluster := range clusterList.Items {
pinedCluster := cluster
if pinedCluster.Spec.SyncMode == clusterapi.Pull {
if pinedCluster.Spec.SyncMode == clusterv1alpha1.Pull {
if _, exist := pullModeClusters[cluster.Name]; !exist {
continue
}
@@ -172,7 +188,7 @@ func fetchClusters(client karmada.Interface) ([]*clusterapi.Cluster, error) {
}
// isClusterMeetRequirements checks if current environment meet the requirements of E2E.
func isClusterMeetRequirements(clusters []*clusterapi.Cluster) (bool, error) {
func isClusterMeetRequirements(clusters []*clusterv1alpha1.Cluster) (bool, error) {
// check if member cluster number meets requirements
if len(clusters) < MinimumCluster {
return false, fmt.Errorf("needs at least %d member clusters to run, but got: %d", MinimumCluster, len(clusters))
@@ -282,8 +298,8 @@ func deleteCluster(clusterName, kubeConfigPath string) error {
return clusterProvider.Delete(clusterName, kubeConfigPath)
}
func newClusterClientSet(c *clusterapi.Cluster) (*util.ClusterClient, *util.DynamicClusterClient, error) {
if c.Spec.SyncMode == clusterapi.Push {
func newClusterClientSet(c *clusterv1alpha1.Cluster) (*util.ClusterClient, *util.DynamicClusterClient, error) {
if c.Spec.SyncMode == clusterv1alpha1.Push {
clusterClient, err := util.NewClusterClientSet(c, controlPlaneClient)
if err != nil {
return nil, nil, err
@@ -308,3 +324,50 @@ func newClusterClientSet(c *clusterapi.Cluster) (*util.ClusterClient, *util.Dyna
return &clusterClientSet, &clusterDynamicClientSet, nil
}
// SetClusterLabel sets the location=CHN label on the named cluster for the E2E tests.
func SetClusterLabel(c client.Client, clusterName string) error {
err := wait.Poll(2*time.Second, 10*time.Second, func() (done bool, err error) {
clusterObj := &clusterv1alpha1.Cluster{}
if err := c.Get(context.TODO(), client.ObjectKey{Name: clusterName}, clusterObj); err != nil {
if errors.IsConflict(err) {
return false, nil
}
return false, err
}
if clusterObj.Labels == nil {
clusterObj.Labels = make(map[string]string)
}
clusterObj.Labels["location"] = "CHN"
if err := c.Update(context.TODO(), clusterObj); err != nil {
if errors.IsConflict(err) {
return false, nil
}
return false, err
}
return true, nil
})
return err
}
// DeleteClusterLabel removes the location label that the E2E tests set on the named cluster.
func DeleteClusterLabel(c client.Client, clusterName string) error {
err := wait.Poll(2*time.Second, 10*time.Second, func() (done bool, err error) {
clusterObj := &clusterv1alpha1.Cluster{}
if err := c.Get(context.TODO(), client.ObjectKey{Name: clusterName}, clusterObj); err != nil {
if errors.IsConflict(err) {
return false, nil
}
return false, err
}
delete(clusterObj.Labels, "location")
if err := c.Update(context.TODO(), clusterObj); err != nil {
if errors.IsConflict(err) {
return false, nil
}
return false, err
}
return true, nil
})
return err
}
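The poll-on-conflict loops above can also be written with client-go's retry helper. A minimal sketch, assuming the same controller-runtime client and an extra "k8s.io/client-go/util/retry" import (not part of this commit):

func setClusterLabelWithRetry(c client.Client, clusterName string) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		clusterObj := &clusterv1alpha1.Cluster{}
		// re-read the object on every attempt so the update applies to the latest version
		if err := c.Get(context.TODO(), client.ObjectKey{Name: clusterName}, clusterObj); err != nil {
			return err
		}
		if clusterObj.Labels == nil {
			clusterObj.Labels = make(map[string]string)
		}
		clusterObj.Labels["location"] = "CHN"
		// RetryOnConflict retries only when Update returns a Conflict error
		return c.Update(context.TODO(), clusterObj)
	})
}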


@@ -12,6 +12,11 @@ func NewPolicyWithSingleCRD(name string, crd *apiextensionsv1.CustomResourceDefi
return newClusterPolicy(name, crd.APIVersion, crd.Kind, crd.Name, clusters)
}
// NewConstraintsPolicyWithSingleCRD will build a ClusterPropagationPolicy object with specified label and group constraints.
func NewConstraintsPolicyWithSingleCRD(name string, crd *apiextensionsv1.CustomResourceDefinition, maxGroups, minGroups int, clusterLabels map[string]string) *policyv1alpha1.ClusterPropagationPolicy {
return newConstraintsClusterPolicy(name, crd.APIVersion, crd.Kind, crd.Name, maxGroups, minGroups, clusterLabels)
}
// newClusterPolicy will build a ClusterPropagationPolicy object.
func newClusterPolicy(policyName, apiVersion, kind, resourceName string, clusters []string) *policyv1alpha1.ClusterPropagationPolicy {
return &policyv1alpha1.ClusterPropagationPolicy{
@@ -34,3 +39,35 @@ func newClusterPolicy(policyName, apiVersion, kind, resourceName string, cluster
},
}
}
// newConstraintsClusterPolicy will build a ClusterPropagationPolicy object with a cluster label selector and spread constraints.
func newConstraintsClusterPolicy(policyName, apiVersion, kind, resourceName string, maxGroups, minGroups int, clusterLabels map[string]string) *policyv1alpha1.ClusterPropagationPolicy {
return &policyv1alpha1.ClusterPropagationPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: policyName,
},
Spec: policyv1alpha1.PropagationSpec{
ResourceSelectors: []policyv1alpha1.ResourceSelector{
{
APIVersion: apiVersion,
Kind: kind,
Name: resourceName,
},
},
Placement: policyv1alpha1.Placement{
ClusterAffinity: &policyv1alpha1.ClusterAffinity{
LabelSelector: &metav1.LabelSelector{
MatchLabels: clusterLabels,
},
},
SpreadConstraints: []policyv1alpha1.SpreadConstraint{
{
SpreadByField: policyv1alpha1.SpreadByFieldCluster,
MaxGroups: maxGroups,
MinGroups: minGroups,
},
},
},
},
}
}
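For illustration, a caller could ask for a CRD to be spread across exactly two location=CHN clusters (a sketch; crd stands for any *apiextensionsv1.CustomResourceDefinition, as in the E2E test above):

policy := helper.NewConstraintsPolicyWithSingleCRD(crd.Name, crd, 2, 2, map[string]string{"location": "CHN"})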


@@ -28,6 +28,11 @@ func NewPolicyWithSingleCR(namespace, name, crAPIVersion, crKind, crName string,
return newPolicy(namespace, name, crAPIVersion, crKind, crName, clusters)
}
// NewPolicyWithGroupsDeployment will build a PropagationPolicy object with label and group constraints for a Deployment.
func NewPolicyWithGroupsDeployment(namespace, name string, deployment *appsv1.Deployment, maxGroups, minGroups int, clusterLabels map[string]string) *policyv1alpha1.PropagationPolicy {
return newGroupsConstraintsPolicy(namespace, name, deployment.APIVersion, deployment.Kind, deployment.Name, maxGroups, minGroups, clusterLabels)
}
// newPolicy will build a PropagationPolicy object.
func newPolicy(namespace, policyName, apiVersion, kind, resourceName string, clusters []string) *policyv1alpha1.PropagationPolicy {
return &policyv1alpha1.PropagationPolicy{
@@ -51,3 +56,36 @@ func newPolicy(namespace, policyName, apiVersion, kind, resourceName string, clu
},
}
}
// newGroupsConstraintsPolicy will build a PropagationPolicy object with a cluster label selector and spread constraints.
func newGroupsConstraintsPolicy(namespace, policyName, apiVersion, kind, resourceName string, maxGroups, minGroups int, clusterLabels map[string]string) *policyv1alpha1.PropagationPolicy {
return &policyv1alpha1.PropagationPolicy{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: policyName,
},
Spec: policyv1alpha1.PropagationSpec{
ResourceSelectors: []policyv1alpha1.ResourceSelector{
{
APIVersion: apiVersion,
Kind: kind,
Name: resourceName,
},
},
Placement: policyv1alpha1.Placement{
ClusterAffinity: &policyv1alpha1.ClusterAffinity{
LabelSelector: &metav1.LabelSelector{
MatchLabels: clusterLabels,
},
},
SpreadConstraints: []policyv1alpha1.SpreadConstraint{
{
SpreadByField: policyv1alpha1.SpreadByFieldCluster,
MaxGroups: maxGroups,
MinGroups: minGroups,
},
},
},
},
}
}