add e2e for WorkloadRebalancer

Signed-off-by: chaosi-zju <chaosi@zju.edu.cn>
chaosi-zju 2024-04-25 09:46:00 +08:00
parent 6cfed59332
commit 9211d27973
5 changed files with 338 additions and 0 deletions


@@ -18,7 +18,11 @@ package framework
import (
"context"
"fmt"
"reflect"
"sort"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega" "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -36,3 +40,43 @@ func WaitResourceBindingFitWith(client karmada.Interface, namespace, name string
return fit(resourceBinding) return fit(resourceBinding)
}, pollTimeout, pollInterval).Should(gomega.Equal(true)) }, pollTimeout, pollInterval).Should(gomega.Equal(true))
} }
// AssertBindingScheduledClusters waits until the ResourceBinding's scheduled clusters match one of the expected results.
// @expectedResults contains multiple possible sets of expected clusters, each given in sorted order.
func AssertBindingScheduledClusters(client karmada.Interface, namespace, name string, expectedResults [][]string) {
ginkgo.By(fmt.Sprintf("Check ResourceBinding(%s/%s)'s target clusters is as expected", namespace, name), func() {
gomega.Eventually(func() error {
binding, err := client.WorkV1alpha2().ResourceBindings(namespace).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
return err
}
scheduledClusters := make([]string, 0, len(binding.Spec.Clusters))
for _, scheduledCluster := range binding.Spec.Clusters {
scheduledClusters = append(scheduledClusters, scheduledCluster.Name)
}
sort.Strings(scheduledClusters)
for _, expectedClusters := range expectedResults {
if reflect.DeepEqual(scheduledClusters, expectedClusters) {
return nil
}
}
return fmt.Errorf("scheduled clusters: %+v, expected possible results: %+v", scheduledClusters, expectedResults)
}, pollTimeout, pollInterval).ShouldNot(gomega.HaveOccurred())
})
}
// WaitGracefulEvictionTasksDone waits until the binding's GracefulEvictionTasks are done.
func WaitGracefulEvictionTasksDone(client karmada.Interface, namespace, name string) {
ginkgo.By(fmt.Sprintf("Check ResourceBinding(%s/%s)'s GracefulEvictionTasks has been done", namespace, name), func() {
gomega.Eventually(func() error {
binding, err := client.WorkV1alpha2().ResourceBindings(namespace).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
return err
}
if len(binding.Spec.GracefulEvictionTasks) > 0 {
return fmt.Errorf("%d GracefulEvictionTasks is being precessing", len(binding.Spec.GracefulEvictionTasks))
}
return nil
}, pollTimeout, pollInterval).ShouldNot(gomega.HaveOccurred())
})
}
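
For illustration, a hypothetical caller can pass several acceptable cluster sets, since dynamic scheduling may validly produce more than one placement; each expected set must be pre-sorted because the helper sorts the scheduled clusters before comparing. The client variable, namespace, binding name and cluster names below are assumptions for this sketch, not part of the change:

// Sketch: accept either the full placement or the post-eviction placement.
framework.AssertBindingScheduledClusters(karmadaClient, "default", "nginx-deployment",
[][]string{
{"member1", "member2"}, // replicas spread across both clusters
{"member2"}, // replicas drained off the tainted cluster
})
// Then block until graceful eviction tasks triggered by the reschedule have finished.
framework.WaitGracefulEvictionTasksDone(karmadaClient, "default", "nginx-deployment")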


@@ -0,0 +1,78 @@
/*
Copyright 2021 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"context"
"fmt"
"reflect"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
appsv1alpha1 "github.com/karmada-io/karmada/pkg/apis/apps/v1alpha1"
karmada "github.com/karmada-io/karmada/pkg/generated/clientset/versioned"
)
// CreateWorkloadRebalancer creates a WorkloadRebalancer with the karmada client.
func CreateWorkloadRebalancer(client karmada.Interface, rebalancer *appsv1alpha1.WorkloadRebalancer) {
ginkgo.By(fmt.Sprintf("Creating WorkloadRebalancer(%s)", rebalancer.Name), func() {
newRebalancer, err := client.AppsV1alpha1().WorkloadRebalancers().Create(context.TODO(), rebalancer, metav1.CreateOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
// write the created object back so callers see server-populated fields such as creationTimestamp.
*rebalancer = *newRebalancer
})
}
// RemoveWorkloadRebalancer deletes the WorkloadRebalancer.
func RemoveWorkloadRebalancer(client karmada.Interface, name string) {
ginkgo.By(fmt.Sprintf("Removing WorkloadRebalancer(%s)", name), func() {
err := client.AppsV1alpha1().WorkloadRebalancers().Delete(context.TODO(), name, metav1.DeleteOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
}
// UpdateWorkloadRebalancer updates the WorkloadRebalancer's workloads with the karmada client.
func UpdateWorkloadRebalancer(client karmada.Interface, name string, workloads []appsv1alpha1.ObjectReference) {
ginkgo.By(fmt.Sprintf("Updating WorkloadRebalancer(%s)'s workloads", name), func() {
gomega.Eventually(func() error {
rebalancer, err := client.AppsV1alpha1().WorkloadRebalancers().Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
return err
}
rebalancer.Spec.Workloads = workloads
_, err = client.AppsV1alpha1().WorkloadRebalancers().Update(context.TODO(), rebalancer, metav1.UpdateOptions{})
return err
}, pollTimeout, pollInterval).ShouldNot(gomega.HaveOccurred())
})
}
// WaitRebalancerObservedWorkloads waits until the WorkloadRebalancer's observedWorkloads matches the expected workloads, or the poll times out.
func WaitRebalancerObservedWorkloads(client karmada.Interface, name string, expectedWorkloads []appsv1alpha1.ObservedWorkload) {
ginkgo.By(fmt.Sprintf("Waiting for WorkloadRebalancer(%s) observedWorkload match to expected result", name), func() {
gomega.Eventually(func() error {
rebalancer, err := client.AppsV1alpha1().WorkloadRebalancers().Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
return err
}
if !reflect.DeepEqual(rebalancer.Status.ObservedWorkloads, expectedWorkloads) {
return fmt.Errorf("observedWorkloads: %+v, expectedWorkloads: %+v", rebalancer.Status.ObservedWorkloads, expectedWorkloads)
}
return nil
}, pollTimeout, pollInterval).ShouldNot(gomega.HaveOccurred())
})
}
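
Taken together, a test built on these helpers follows a create / wait / update / remove lifecycle. A minimal sketch, assuming a karmadaClient plus pre-built references deployRef and newRef (all hypothetical names, not part of this change):

rebalancer := helper.NewWorkloadRebalancer("rebalancer-demo", []appsv1alpha1.ObjectReference{deployRef})
framework.CreateWorkloadRebalancer(karmadaClient, rebalancer) // writes server-populated fields back into rebalancer
framework.WaitRebalancerObservedWorkloads(karmadaClient, rebalancer.Name, []appsv1alpha1.ObservedWorkload{
{Workload: deployRef, Result: appsv1alpha1.RebalanceSuccessful},
})
framework.UpdateWorkloadRebalancer(karmadaClient, rebalancer.Name, []appsv1alpha1.ObjectReference{newRef})
framework.RemoveWorkloadRebalancer(karmadaClient, rebalancer.Name) // cleanup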


@@ -80,6 +80,7 @@ const (
mcsNamePrefix = "mcs-"
ppNamePrefix = "pp-"
cppNamePrefix = "cpp-"
workloadRebalancerPrefix = "rebalancer-"
updateDeploymentReplicas = 2
updateStatefulSetReplicas = 2


@@ -0,0 +1,202 @@
/*
Copyright 2023 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"sort"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/klog/v2"
"k8s.io/utils/pointer"
appsv1alpha1 "github.com/karmada-io/karmada/pkg/apis/apps/v1alpha1"
policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
"github.com/karmada-io/karmada/pkg/util"
"github.com/karmada-io/karmada/pkg/util/names"
"github.com/karmada-io/karmada/test/e2e/framework"
"github.com/karmada-io/karmada/test/helper"
)
// test case dimensions:
//
// schedule strategy: static weight, dynamic weight, aggregated
// resource type: workload types like Deployment, non-workload types like ClusterRole
// expected result: success, not-found failure
var _ = ginkgo.Describe("workload rebalancer testing", func() {
var namespace string
var deployName, newAddedDeployName, notExistDeployName, clusterroleName string
var deployObjRef, newAddedDeployObjRef, notExistDeployObjRef, clusterroleObjRef appsv1alpha1.ObjectReference
var deployBindingName, clusterroleBindingName string
var cppName string
var rebalancerName string
var deploy, newAddedDeploy, notExistDeploy *appsv1.Deployment
var clusterrole *rbacv1.ClusterRole
var policy *policyv1alpha1.ClusterPropagationPolicy
var rebalancer *appsv1alpha1.WorkloadRebalancer
var targetClusters []string
var taint corev1.Taint
ginkgo.BeforeEach(func() {
namespace = testNamespace
randomStr := rand.String(RandomStrLength)
deployName = deploymentNamePrefix + randomStr
notExistDeployName = deployName + "-2"
newAddedDeployName = deployName + "-3"
clusterroleName = clusterRoleNamePrefix + rand.String(RandomStrLength)
deployBindingName = names.GenerateBindingName(util.DeploymentKind, deployName)
clusterroleBindingName = names.GenerateBindingName(util.ClusterRoleKind, clusterroleName)
cppName = cppNamePrefix + randomStr
rebalancerName = workloadRebalancerPrefix + randomStr
// select the first two member clusters and sort them in increasing order
targetClusters = framework.ClusterNames()[0:2]
sort.Strings(targetClusters)
taint = corev1.Taint{Key: "workload-rebalancer-test", Effect: corev1.TaintEffectNoExecute}
deploy = helper.NewDeployment(namespace, deployName)
notExistDeploy = helper.NewDeployment(namespace, notExistDeployName)
newAddedDeploy = helper.NewDeployment(namespace, newAddedDeployName)
clusterrole = helper.NewClusterRole(clusterroleName, nil)
policy = helper.NewClusterPropagationPolicy(cppName, []policyv1alpha1.ResourceSelector{
{APIVersion: deploy.APIVersion, Kind: deploy.Kind, Name: deploy.Name, Namespace: deploy.Namespace},
{APIVersion: newAddedDeploy.APIVersion, Kind: newAddedDeploy.Kind, Name: newAddedDeploy.Name, Namespace: newAddedDeploy.Namespace},
{APIVersion: clusterrole.APIVersion, Kind: clusterrole.Kind, Name: clusterrole.Name},
}, policyv1alpha1.Placement{
ClusterAffinity: &policyv1alpha1.ClusterAffinity{ClusterNames: targetClusters},
})
deployObjRef = appsv1alpha1.ObjectReference{APIVersion: deploy.APIVersion, Kind: deploy.Kind, Name: deploy.Name, Namespace: deploy.Namespace}
notExistDeployObjRef = appsv1alpha1.ObjectReference{APIVersion: notExistDeploy.APIVersion, Kind: notExistDeploy.Kind, Name: notExistDeploy.Name, Namespace: notExistDeploy.Namespace}
newAddedDeployObjRef = appsv1alpha1.ObjectReference{APIVersion: newAddedDeploy.APIVersion, Kind: newAddedDeploy.Kind, Name: newAddedDeploy.Name, Namespace: newAddedDeploy.Namespace}
clusterroleObjRef = appsv1alpha1.ObjectReference{APIVersion: clusterrole.APIVersion, Kind: clusterrole.Kind, Name: clusterrole.Name}
rebalancer = helper.NewWorkloadRebalancer(rebalancerName, []appsv1alpha1.ObjectReference{deployObjRef, clusterroleObjRef, notExistDeployObjRef})
})
ginkgo.JustBeforeEach(func() {
framework.CreateClusterPropagationPolicy(karmadaClient, policy)
framework.CreateDeployment(kubeClient, deploy)
framework.CreateDeployment(kubeClient, newAddedDeploy)
framework.CreateClusterRole(kubeClient, clusterrole)
ginkgo.DeferCleanup(func() {
framework.RemoveDeployment(kubeClient, deploy.Namespace, deploy.Name)
framework.RemoveDeployment(kubeClient, newAddedDeploy.Namespace, newAddedDeploy.Name)
framework.RemoveClusterRole(kubeClient, clusterrole.Name)
framework.RemoveClusterPropagationPolicy(karmadaClient, policy.Name)
})
})
var checkWorkloadRebalancerResult = func(expectedWorkloads []appsv1alpha1.ObservedWorkload) {
// 1. check rebalancer status: match to `expectedWorkloads`.
framework.WaitRebalancerObservedWorkloads(karmadaClient, rebalancerName, expectedWorkloads)
// 2. check deploy: referenced binding's `spec.rescheduleTriggeredAt` and `status.lastScheduledTime` should be updated.
framework.WaitResourceBindingFitWith(karmadaClient, namespace, deployBindingName, func(rb *workv1alpha2.ResourceBinding) bool {
return bindingHasRescheduled(rb.Spec, rb.Status, rebalancer.CreationTimestamp)
})
// 3. check clusterrole: referenced binding's `spec.rescheduleTriggeredAt` and `status.lastScheduledTime` should be updated.
framework.WaitClusterResourceBindingFitWith(karmadaClient, clusterroleBindingName, func(crb *workv1alpha2.ClusterResourceBinding) bool {
return bindingHasRescheduled(crb.Spec, crb.Status, rebalancer.CreationTimestamp)
})
}
// 1. dynamic weight scheduling
ginkgo.Context("dynamic weight schedule type", func() {
ginkgo.BeforeEach(func() {
policy.Spec.Placement.ReplicaScheduling = &policyv1alpha1.ReplicaSchedulingStrategy{
ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided,
ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceWeighted,
WeightPreference: &policyv1alpha1.ClusterPreferences{
DynamicWeight: policyv1alpha1.DynamicWeightByAvailableReplicas,
},
}
policy.Spec.Placement.ClusterTolerations = []corev1.Toleration{{
Key: taint.Key,
Effect: taint.Effect,
Operator: corev1.TolerationOpExists,
// a TolerationSeconds of 0 evicts workloads as soon as the taint appears.
TolerationSeconds: pointer.Int64(0),
}}
})
ginkgo.It("reschedule when policy is dynamic weight schedule type", func() {
ginkgo.By("step1: check first schedule result", func() {
// after the first schedule, the deployment's replicas are split 1:2 or 2:1 across the target clusters, and the clusterrole is propagated to each cluster.
framework.AssertBindingScheduledClusters(karmadaClient, namespace, deployBindingName, [][]string{targetClusters})
framework.WaitClusterRolePresentOnClustersFitWith(targetClusters, clusterroleName, func(_ *rbacv1.ClusterRole) bool { return true })
})
ginkgo.By("step2: add taints to cluster to mock cluster failure", func() {
err := taintCluster(controlPlaneClient, targetClusters[0], taint)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
framework.AssertBindingScheduledClusters(karmadaClient, namespace, deployBindingName, [][]string{targetClusters[1:]})
framework.WaitGracefulEvictionTasksDone(karmadaClient, namespace, deployBindingName)
err = recoverTaintedCluster(controlPlaneClient, targetClusters[0], taint)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
ginkgo.By("step3: trigger a reschedule by WorkloadRebalancer", func() {
framework.CreateWorkloadRebalancer(karmadaClient, rebalancer)
ginkgo.DeferCleanup(func() {
framework.RemoveWorkloadRebalancer(karmadaClient, rebalancerName)
})
// the deployment's replicas should be rescheduled back across `targetClusters`,
// which shows that the rebalancer changed the deployment's replica propagation.
framework.AssertBindingScheduledClusters(karmadaClient, namespace, deployBindingName, [][]string{targetClusters})
expectedWorkloads := []appsv1alpha1.ObservedWorkload{
{Workload: deployObjRef, Result: appsv1alpha1.RebalanceSuccessful},
{Workload: notExistDeployObjRef, Result: appsv1alpha1.RebalanceFailed, Reason: appsv1alpha1.RebalanceObjectNotFound},
{Workload: clusterroleObjRef, Result: appsv1alpha1.RebalanceSuccessful},
}
checkWorkloadRebalancerResult(expectedWorkloads)
})
ginkgo.By("step4: udpate WorkloadRebalancer spec workloads", func() {
// update workload list from {deploy, clusterrole, notExistDeployObjRef} to {clusterroleObjRef, newAddedDeployObjRef}
updatedWorkloads := []appsv1alpha1.ObjectReference{clusterroleObjRef, newAddedDeployObjRef}
framework.UpdateWorkloadRebalancer(karmadaClient, rebalancerName, updatedWorkloads)
// a workload removed from spec keeps its previously successful result in observedWorkloads.
expectedWorkloads := []appsv1alpha1.ObservedWorkload{
{Workload: deployObjRef, Result: appsv1alpha1.RebalanceSuccessful},
{Workload: newAddedDeployObjRef, Result: appsv1alpha1.RebalanceSuccessful},
{Workload: clusterroleObjRef, Result: appsv1alpha1.RebalanceSuccessful},
}
framework.WaitRebalancerObservedWorkloads(karmadaClient, rebalancerName, expectedWorkloads)
})
})
})
})
func bindingHasRescheduled(spec workv1alpha2.ResourceBindingSpec, status workv1alpha2.ResourceBindingStatus, rebalancerCreationTime metav1.Time) bool {
// guard against nil pointers before the binding has ever been rescheduled.
if spec.RescheduleTriggeredAt == nil || status.LastScheduledTime == nil {
return false
}
if *spec.RescheduleTriggeredAt != rebalancerCreationTime || status.LastScheduledTime.Before(spec.RescheduleTriggeredAt) {
klog.Errorf("rebalancerCreationTime: %+v, rescheduleTriggeredAt / lastScheduledTime: %+v / %+v",
rebalancerCreationTime, *spec.RescheduleTriggeredAt, status.LastScheduledTime)
return false
}
return true
}


@@ -36,6 +36,7 @@ import (
"k8s.io/utils/pointer"
workloadv1alpha1 "github.com/karmada-io/karmada/examples/customresourceinterpreter/apis/workload/v1alpha1"
appsv1alpha1 "github.com/karmada-io/karmada/pkg/apis/apps/v1alpha1"
autoscalingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/autoscaling/v1alpha1"
clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
networkingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/networking/v1alpha1"
@@ -978,3 +979,15 @@ func NewWork(workName, workNs, workUID string, raw []byte) *workv1alpha1.Work {
return work
}
// NewWorkloadRebalancer will build a new WorkloadRebalancer object.
func NewWorkloadRebalancer(name string, objectReferences []appsv1alpha1.ObjectReference) *appsv1alpha1.WorkloadRebalancer {
return &appsv1alpha1.WorkloadRebalancer{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: appsv1alpha1.WorkloadRebalancerSpec{
Workloads: objectReferences,
},
}
}
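
As a usage sketch, a caller builds plain ObjectReferences and passes them to the constructor; the workload coordinates here are hypothetical:

deployRef := appsv1alpha1.ObjectReference{
APIVersion: "apps/v1",
Kind: "Deployment",
Namespace: "default", // hypothetical coordinates
Name: "nginx",
}
rebalancer := NewWorkloadRebalancer("rebalancer-demo", []appsv1alpha1.ObjectReference{deployRef})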