add e2e test for taint toleration schedule
Signed-off-by: lihanbo <lihanbo2@huawei.com>
commit 9834f8b981 (parent 08fdfc134a)
@@ -33,10 +33,11 @@ func (p *TaintToleration) Name() string {
 	return Name
 }

-// Filter checks if the propagation policy tolerates a cluster's taints.
+// Filter checks if the given tolerations in placement tolerate cluster's taints.
 func (p *TaintToleration) Filter(ctx context.Context, placement *v1alpha1.Placement, cluster *cluster.Cluster) *framework.Result {
 	filterPredicate := func(t *v1.Taint) bool {
-		// only interested in NoSchedule taints.
+		// now only interested in the NoSchedule taint, which means new resources are not allowed to schedule onto the cluster unless they tolerate the taint
+		// TODO: support NoExecute taint
 		return t.Effect == v1.TaintEffectNoSchedule
 	}
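The predicate above only narrows which taint effects the plugin considers; a cluster then passes Filter only if every remaining NoSchedule taint is matched by some toleration in the placement. Below is a minimal, self-contained sketch of that matching rule (a re-implementation of the standard Kubernetes semantics, which corev1.Toleration.ToleratesTaint also encodes; it is not karmada's actual Filter body), using the key/value scheme this commit's e2e test sets up:

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
)

// tolerates reports whether a toleration matches a taint, following the usual
// Kubernetes rules: an empty effect on the toleration matches any effect, an
// empty key matches any key (only valid with Exists), Exists ignores the
// value, and Equal (the default) compares values.
func tolerates(tol corev1.Toleration, taint corev1.Taint) bool {
    if tol.Effect != "" && tol.Effect != taint.Effect {
        return false
    }
    if tol.Key != "" && tol.Key != taint.Key {
        return false
    }
    switch tol.Operator {
    case corev1.TolerationOpExists:
        return true
    case corev1.TolerationOpEqual, "":
        return tol.Value == taint.Value
    default:
        return false
    }
}

func main() {
    // The toleration from the e2e test below: Equal on value "member1".
    tol := corev1.Toleration{
        Key:      "cluster-toleration.karmada.io",
        Operator: corev1.TolerationOpEqual,
        Value:    "member1",
        Effect:   corev1.TaintEffectNoSchedule,
    }
    // The test taints each member cluster with its own name as the value, so
    // only member1's taint is tolerated and every other cluster is filtered out.
    for _, member := range []string{"member1", "member2"} {
        taint := corev1.Taint{Key: "cluster-toleration.karmada.io", Value: member, Effect: corev1.TaintEffectNoSchedule}
        fmt.Printf("taint on %s tolerated: %v\n", member, tolerates(tol, taint))
    }
}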
@@ -141,7 +141,7 @@ var _ = ginkgo.Describe("failover testing", func() {
 	})
 })

-// invalidateCluster will set wrong API endpoint of current cluster
+// disableCluster will set a wrong API endpoint for the given cluster
 func disableCluster(c client.Client, clusterName string) error {
 	err := wait.Poll(pollInterval, pollTimeout, func() (done bool, err error) {
 		clusterObj := &clusterv1alpha1.Cluster{}
@@ -0,0 +1,109 @@
+package e2e
+
+import (
+    "context"
+    "fmt"
+
+    "github.com/onsi/ginkgo"
+    "github.com/onsi/gomega"
+    corev1 "k8s.io/api/core/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/util/rand"
+    "sigs.k8s.io/controller-runtime/pkg/client"
+
+    clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
+    "github.com/karmada-io/karmada/test/helper"
+)
+
+var _ = ginkgo.Describe("propagation with taint and toleration testing", func() {
+    ginkgo.Context("Deployment propagation testing", func() {
+        policyNamespace := testNamespace
+        policyName := deploymentNamePrefix + rand.String(RandomStrLength)
+        deploymentNamespace := testNamespace
+        deploymentName := policyName
+        deployment := helper.NewDeployment(deploymentNamespace, deploymentName)
+        tolerationKey := "cluster-toleration.karmada.io"
+        tolerationValue := "member1"
+
+        // set clusterTolerations to tolerate taints in member1.
+        clusterTolerations := []corev1.Toleration{
+            {
+                Key:      tolerationKey,
+                Operator: corev1.TolerationOpEqual,
+                Value:    tolerationValue,
+                Effect:   corev1.TaintEffectNoSchedule,
+            },
+        }
+
+        policy := helper.NewPolicyWithClusterToleration(policyNamespace, policyName, deployment, clusterNames, clusterTolerations)
+
+        ginkgo.BeforeEach(func() {
+            ginkgo.By(fmt.Sprintf("creating policy(%s/%s)", policyNamespace, policyName), func() {
+                _, err := karmadaClient.PolicyV1alpha1().PropagationPolicies(policyNamespace).Create(context.TODO(), policy, metav1.CreateOptions{})
+                gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+            })
+        })
+
+        ginkgo.BeforeEach(func() {
+            ginkgo.By("adding taints to clusters", func() {
+                for _, cluster := range clusterNames {
+                    fmt.Printf("add taints to cluster %v\n", cluster)
+                    clusterObj := &clusterv1alpha1.Cluster{}
+                    err := controlPlaneClient.Get(context.TODO(), client.ObjectKey{Name: cluster}, clusterObj)
+                    gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+
+                    clusterObj.Spec.Taints = []corev1.Taint{
+                        {
+                            Key:    tolerationKey,
+                            Value:  clusterObj.Name,
+                            Effect: corev1.TaintEffectNoSchedule,
+                        },
+                    }
+
+                    err = controlPlaneClient.Update(context.TODO(), clusterObj)
+                    gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+                }
+            })
+        })
+
+        ginkgo.AfterEach(func() {
+            ginkgo.By(fmt.Sprintf("removing policy(%s/%s)", policyNamespace, policyName), func() {
+                err := karmadaClient.PolicyV1alpha1().PropagationPolicies(policyNamespace).Delete(context.TODO(), policyName, metav1.DeleteOptions{})
+                gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+            })
+        })
+
+        ginkgo.AfterEach(func() {
+            ginkgo.By("removing taints in clusters", func() {
+                for _, cluster := range clusterNames {
+                    clusterObj := &clusterv1alpha1.Cluster{}
+                    err := controlPlaneClient.Get(context.TODO(), client.ObjectKey{Name: cluster}, clusterObj)
+                    gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+
+                    clusterObj.Spec.Taints = nil
+                    err = controlPlaneClient.Update(context.TODO(), clusterObj)
+                    gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+                }
+            })
+        })
+
+        ginkgo.It("deployment with cluster tolerations testing", func() {
+            ginkgo.By(fmt.Sprintf("creating deployment(%s/%s)", deploymentNamespace, deploymentName), func() {
+                _, err := kubeClient.AppsV1().Deployments(testNamespace).Create(context.TODO(), deployment, metav1.CreateOptions{})
+                gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+            })
+
+            ginkgo.By(fmt.Sprintf("check if deployment(%s/%s) is only scheduled to the tolerated cluster(%s)", deploymentNamespace, deploymentName, tolerationValue), func() {
+                targetClusterNames, err := getTargetClusterNames(deployment)
+                gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+                gomega.Expect(len(targetClusterNames) == 1).Should(gomega.BeTrue())
+                gomega.Expect(targetClusterNames[0] == tolerationValue).Should(gomega.BeTrue())
+            })
+
+            ginkgo.By(fmt.Sprintf("removing deployment(%s/%s)", deploymentNamespace, deploymentName), func() {
+                err := kubeClient.AppsV1().Deployments(testNamespace).Delete(context.TODO(), deploymentName, metav1.DeleteOptions{})
+                gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+            })
+        })
+    })
+})
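The test calls getTargetClusterNames, a helper defined elsewhere in the e2e suite and not part of this diff. A hypothetical sketch of that kind of lookup is shown below, assuming the scheduling result is read from the deployment's ResourceBinding; the "<name>-deployment" binding-name scheme and the work/v1alpha1 types are assumptions for illustration, not the suite's actual helper:

package e2e

import (
    "context"
    "fmt"

    appsv1 "k8s.io/api/apps/v1"
    "sigs.k8s.io/controller-runtime/pkg/client"

    workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
)

// getTargetClusterNamesSketch is a hypothetical stand-in for the suite's
// getTargetClusterNames helper: it reads the deployment's ResourceBinding and
// returns the clusters the scheduler picked for it.
func getTargetClusterNamesSketch(c client.Client, deployment *appsv1.Deployment) ([]string, error) {
    binding := &workv1alpha1.ResourceBinding{}
    // Assumption: bindings are named "<resource-name>-<lowercase-kind>".
    bindingName := fmt.Sprintf("%s-deployment", deployment.Name)
    if err := c.Get(context.TODO(), client.ObjectKey{Namespace: deployment.Namespace, Name: bindingName}, binding); err != nil {
        return nil, err
    }
    names := make([]string, 0, len(binding.Spec.Clusters))
    for _, target := range binding.Spec.Clusters {
        names = append(names, target.Name)
    }
    return names, nil
}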
@@ -33,6 +33,11 @@ func NewPolicyWithGroupsDeployment(namespace, name string, deployment *appsv1.De
 	return newGroupsConstraintsPolicy(namespace, name, deployment.APIVersion, deployment.Kind, deployment.Name, maxGroups, minGroups, clusterLabels)
 }

+// NewPolicyWithClusterToleration will build a PropagationPolicy object.
+func NewPolicyWithClusterToleration(namespace, name string, deployment *appsv1.Deployment, clusters []string, clusterTolerations []corev1.Toleration) *policyv1alpha1.PropagationPolicy {
+	return newPolicyWithClusterToleration(namespace, name, deployment.APIVersion, deployment.Kind, deployment.Name, clusters, clusterTolerations)
+}
+
 // newPolicy will build a PropagationPolicy object.
 func newPolicy(namespace, policyName, apiVersion, kind, resourceName string, clusters []string) *policyv1alpha1.PropagationPolicy {
 	return &policyv1alpha1.PropagationPolicy{

@@ -89,3 +94,28 @@ func newGroupsConstraintsPolicy(namespace, policyName, apiVersion, kind, resourc
 		},
 	}
 }
+
+// newPolicyWithClusterToleration will build a PropagationPolicy object with cluster toleration.
+func newPolicyWithClusterToleration(namespace, policyName, apiVersion, kind, resourceName string, clusters []string, clusterTolerations []corev1.Toleration) *policyv1alpha1.PropagationPolicy {
+	return &policyv1alpha1.PropagationPolicy{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: namespace,
+			Name:      policyName,
+		},
+		Spec: policyv1alpha1.PropagationSpec{
+			ResourceSelectors: []policyv1alpha1.ResourceSelector{
+				{
+					APIVersion: apiVersion,
+					Kind:       kind,
+					Name:       resourceName,
+				},
+			},
+			Placement: policyv1alpha1.Placement{
+				ClusterAffinity: &policyv1alpha1.ClusterAffinity{
+					ClusterNames: clusters,
+				},
+				ClusterTolerations: clusterTolerations,
+			},
+		},
+	}
+}
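Because the tolerations flow straight into Placement.ClusterTolerations, variants of this builder are cheap to express. As a usage sketch, a hypothetical Exists-operator toleration (not part of this commit) would match the taint key regardless of value, so none of the tainted member clusters would be filtered:

// NewPolicyWithBlanketToleration is a hypothetical variant: with the Exists
// operator the value is ignored, so the policy tolerates every cluster's
// "cluster-toleration.karmada.io" NoSchedule taint and the scheduler filters
// out no member cluster. It reuses the imports and the unexported builder
// from the helper file above.
func NewPolicyWithBlanketToleration(namespace, name string, deployment *appsv1.Deployment, clusters []string) *policyv1alpha1.PropagationPolicy {
    blanket := []corev1.Toleration{
        {
            Key:      "cluster-toleration.karmada.io",
            Operator: corev1.TolerationOpExists,
            Effect:   corev1.TaintEffectNoSchedule,
        },
    }
    return newPolicyWithClusterToleration(namespace, name, deployment.APIVersion, deployment.Kind, deployment.Name, clusters, blanket)
}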