Merge pull request #4764 from chaosi-zju/hpa-e2e
add e2e test for deployment replicas syncer
Commit: 57c1989667
@@ -18,6 +18,7 @@ package e2e
 import (
 	"context"
+	"sort"
 	"time"
 
 	"github.com/onsi/ginkgo/v2"
@@ -30,81 +31,218 @@ import (
 	"k8s.io/utils/pointer"
 
 	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
+	workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
+	"github.com/karmada-io/karmada/pkg/util"
+	"github.com/karmada-io/karmada/pkg/util/names"
 	"github.com/karmada-io/karmada/test/e2e/framework"
 	"github.com/karmada-io/karmada/test/helper"
 )
 
-var _ = ginkgo.Describe("hpa replicas synchronization testing", func() {
-	ginkgo.Context("Replicas synchronization testing", func() {
-		var initReplicas = int32(1)
-		var policyNamespace, policyName string
-		var namespace, deploymentName, hpaName string
+var _ = ginkgo.Describe("deployment replicas syncer testing", func() {
+	var namespace string
+	var deploymentName, hpaName, policyName, bindingName string
 	var deployment *appsv1.Deployment
 	var hpa *autoscalingv2.HorizontalPodAutoscaler
 	var policy *policyv1alpha1.PropagationPolicy
+	var targetClusters []string
 
 	ginkgo.BeforeEach(func() {
-		policyNamespace = testNamespace
 		namespace = testNamespace
-		policyName = deploymentNamePrefix + rand.String(RandomStrLength)
-		deploymentName = policyName
-		hpaName = policyName
+		deploymentName = deploymentNamePrefix + rand.String(RandomStrLength)
+		hpaName = deploymentName
+		policyName = deploymentName
+		bindingName = names.GenerateBindingName(util.DeploymentKind, deploymentName)
+
+		// sort member clusters in increasing order
+		targetClusters = framework.ClusterNames()[0:2]
+		sort.Strings(targetClusters)
 
 		deployment = helper.NewDeployment(namespace, deploymentName)
-		deployment.Spec.Replicas = pointer.Int32(initReplicas)
 		hpa = helper.NewHPA(namespace, hpaName, deploymentName)
 		hpa.Spec.MinReplicas = pointer.Int32(2)
-		policy = helper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
-			{
-				APIVersion: deployment.APIVersion,
-				Kind:       deployment.Kind,
-				Name:       deployment.Name,
-			},
-			{
-				APIVersion: hpa.APIVersion,
-				Kind:       hpa.Kind,
-				Name:       hpa.Name,
-			},
+		policy = helper.NewPropagationPolicy(namespace, policyName, []policyv1alpha1.ResourceSelector{
+			{APIVersion: deployment.APIVersion, Kind: deployment.Kind, Name: deployment.Name},
+			{APIVersion: hpa.APIVersion, Kind: hpa.Kind, Name: hpa.Name},
 		}, policyv1alpha1.Placement{
 			ClusterAffinity: &policyv1alpha1.ClusterAffinity{
-				ClusterNames: framework.ClusterNames(),
+				ClusterNames: targetClusters,
 			},
 		})
 	})
 
-	ginkgo.BeforeEach(func() {
+	ginkgo.JustBeforeEach(func() {
 		framework.CreatePropagationPolicy(karmadaClient, policy)
 		framework.CreateDeployment(kubeClient, deployment)
 		framework.CreateHPA(kubeClient, hpa)
 
 		ginkgo.DeferCleanup(func() {
 			framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name)
 			framework.RemoveDeployment(kubeClient, deployment.Namespace, deployment.Name)
 			framework.RemoveHPA(kubeClient, namespace, hpa.Name)
-			framework.WaitDeploymentDisappearOnClusters(framework.ClusterNames(), deployment.Namespace, deployment.Name)
+			framework.WaitDeploymentDisappearOnClusters(targetClusters, deployment.Namespace, deployment.Name)
 		})
 	})
 
-	ginkgo.It("deployment has been scaled up and synchronized to Karmada", func() {
-		framework.WaitDeploymentPresentOnClustersFitWith(framework.ClusterNames(), deployment.Namespace, deployment.Name,
-			func(deployment *appsv1.Deployment) bool {
-				return true
-			})
-
-		framework.WaitDeploymentPresentOnClustersFitWith(framework.ClusterNames(), deployment.Namespace, deployment.Name,
-			func(deployment *appsv1.Deployment) bool {
-				return *deployment.Spec.Replicas == initReplicas
-			})
-
-		expectedReplicas := initReplicas
+	ginkgo.Context("when policy is Duplicated schedule type", func() {
+		ginkgo.BeforeEach(func() {
+			deployment.Spec.Replicas = pointer.Int32(2)
+		})
+
+		// Case 1: Deployment(replicas=2) | Policy(Duplicated, two clusters) | HPA(minReplicas=2)
+		// Expected result: hpa scaling not take effect in updating spec, manually modify spec have no action.
+		ginkgo.It("general case combined hpa scaling and manually modify in Duplicated type", func() {
+			ginkgo.By("step1: propagate each 2 replicas to two clusters", func() {
+				assertDeploymentWorkloadReplicas(namespace, deploymentName, targetClusters, []int32{2, 2})
+				assertDeploymentTemplateReplicas(namespace, deploymentName, 2)
+			})
+
+			ginkgo.By("step2: hpa scale each member cluster replicas from 2 to 3", func() {
+				framework.UpdateHPAWithMinReplicas(kubeClient, namespace, hpa.Name, 3)
+				assertDeploymentWorkloadReplicas(namespace, deploymentName, targetClusters, []int32{3, 3})
+				assertDeploymentTemplateReplicas(namespace, deploymentName, 2)
+			})
+
+			ginkgo.By("step3: manually add deployment template replicas from 2 to 4", func() {
+				framework.UpdateDeploymentReplicas(kubeClient, deployment, 4)
+				assertDeploymentWorkloadReplicas(namespace, deploymentName, targetClusters, []int32{3, 3})
+				assertDeploymentTemplateReplicas(namespace, deploymentName, 4)
+			})
+
+			ginkgo.By("step4: manually decrease deployment template replicas from 2 to 1", func() {
+				framework.UpdateDeploymentReplicas(kubeClient, deployment, 1)
+				assertDeploymentWorkloadReplicas(namespace, deploymentName, targetClusters, []int32{3, 3})
+				assertDeploymentTemplateReplicas(namespace, deploymentName, 1)
+			})
+		})
+	})
+
+	ginkgo.Context("when policy is Divided schedule type, each cluster have more that one replica", func() {
+		ginkgo.BeforeEach(func() {
+			policy.Spec.Placement.ReplicaScheduling = helper.NewStaticWeightPolicyStrategy(targetClusters, []int64{1, 1})
+			deployment.Spec.Replicas = pointer.Int32(4)
+		})
+
+		// Case 2: Deployment(replicas=4) | Policy(Divided, two clusters 1:1) | HPA(minReplicas=2)
+		// Expected result: hpa scaling can take effect in updating spec, while manually modify not.
+		ginkgo.It("general case combined hpa scaling and manually modify in Divided type", func() {
+			ginkgo.By("step1: propagate 4 replicas to two clusters", func() {
+				assertDeploymentWorkloadReplicas(namespace, deploymentName, targetClusters, []int32{2, 2})
+				assertDeploymentTemplateReplicas(namespace, deploymentName, 4)
+			})
+
+			ginkgo.By("step2: hpa scale each member cluster replicas from 2 to 3", func() {
+				framework.UpdateHPAWithMinReplicas(kubeClient, namespace, hpa.Name, 3)
+				assertDeploymentWorkloadReplicas(namespace, deploymentName, targetClusters, []int32{3, 3})
+				assertDeploymentTemplateReplicas(namespace, deploymentName, 6)
+			})
+
+			ginkgo.By("step3: manually add deployment template replicas from 6 to 10", func() {
+				framework.UpdateDeploymentReplicas(kubeClient, deployment, 10)
+				assertDeploymentWorkloadReplicas(namespace, deploymentName, targetClusters, []int32{3, 3})
+				assertDeploymentTemplateReplicas(namespace, deploymentName, 6)
+			})
+
+			ginkgo.By("step4: manually decrease deployment template replicas from 6 to 2", func() {
+				framework.UpdateDeploymentReplicas(kubeClient, deployment, 2)
+				assertDeploymentWorkloadReplicas(namespace, deploymentName, targetClusters, []int32{3, 3})
+				assertDeploymentTemplateReplicas(namespace, deploymentName, 6)
+			})
+		})
+	})
+
+	ginkgo.Context("when policy is Divided schedule type, one cluster have no replica", func() {
+		ginkgo.BeforeEach(func() {
+			policy.Spec.Placement.ReplicaScheduling = helper.NewStaticWeightPolicyStrategy(targetClusters, []int64{1, 1})
+			deployment.Spec.Replicas = pointer.Int32(1)
+			hpa.Spec.MinReplicas = pointer.Int32(1)
+		})
+
+		// Case 3: Deployment(replicas=1) | Policy(Divided, two clusters 1:1) | HPA(minReplicas=1)
+		// Expected result: manually modify can take effect in updating spec.
+		ginkgo.It("0/1 case, manually modify replicas from 1 to 2", func() {
+			ginkgo.By("step1: propagate 1 replicas to two clusters", func() {
+				assertDeploymentTemplateReplicas(namespace, deploymentName, 1)
+			})
+
+			ginkgo.By("step2: manually add deployment template replicas from 1 to 2", func() {
+				framework.UpdateDeploymentReplicas(kubeClient, deployment, 2)
+				assertDeploymentWorkloadReplicas(namespace, deploymentName, targetClusters, []int32{1, 1})
+				assertDeploymentTemplateReplicas(namespace, deploymentName, 2)
+			})
+		})
+	})
+
+	ginkgo.Context("when policy is Divided schedule type, remove one cluster's replicas", func() {
+		ginkgo.BeforeEach(func() {
+			policy.Spec.Placement.ReplicaScheduling = helper.NewStaticWeightPolicyStrategy(targetClusters, []int64{1, 1})
+			deployment.Spec.Replicas = pointer.Int32(2)
+			hpa.Spec.MinReplicas = pointer.Int32(1)
+		})
+
+		// Case 4: Deployment(replicas=2) | Policy(Divided, two clusters 1:1) | HPA(minReplicas=1)
+		// Expected result: manually modify can take effect in updating spec.
+		ginkgo.It("0/1 case, manually modify replicas from 2 to 1", func() {
+			ginkgo.By("step1: propagate 2 replicas to two clusters", func() {
+				assertDeploymentWorkloadReplicas(namespace, deploymentName, targetClusters, []int32{1, 1})
+				assertDeploymentTemplateReplicas(namespace, deploymentName, 2)
+			})
+
+			ginkgo.By("step2: manually add deployment template replicas from 2 to 1", func() {
+				framework.UpdateDeploymentReplicas(kubeClient, deployment, 1)
+				framework.WaitResourceBindingFitWith(karmadaClient, namespace, bindingName, func(rb *workv1alpha2.ResourceBinding) bool {
+					return len(rb.Status.AggregatedStatus) == 1
+				})
+				assertDeploymentTemplateReplicas(namespace, deploymentName, 1)
+			})
+		})
+	})
+
+	ginkgo.Context("when policy is Divided schedule type, propagate 1 replica but hpa minReplicas is 2", func() {
+		ginkgo.BeforeEach(func() {
+			policy.Spec.Placement.ReplicaScheduling = helper.NewStaticWeightPolicyStrategy(targetClusters, []int64{1, 1})
+			deployment.Spec.Replicas = pointer.Int32(1)
+			hpa.Spec.MinReplicas = pointer.Int32(2)
+		})
+
+		// Case 5: Deployment(replicas=1) | Policy(Divided, two clusters 1:1) | HPA(minReplicas=2)
+		// Expected result: it will go through such a process:
+		// 1. deployment.spec.replicas=1, actual replicas in member1:member2 = 1:0
+		// 2. hpa take effect in member1, so actual replicas in member1:member2 = 2:0
+		// 3. deployment template updated to 2/2
+		// 4. reschedule, assign replicas to member1:member2 = 1:1
+		// 5. member1 replicas is retained, so actual replicas in member1:member2 = 2:1
+		// 6. hpa take effect in member2, so replicas becomes member1:member2 = 2:2
+		// 7. deployment template updated to 4/4
+		ginkgo.It("propagate 1 replica but hpa minReplicas is 2", func() {
+			assertDeploymentWorkloadReplicas(namespace, deploymentName, targetClusters, []int32{2, 2})
+			assertDeploymentTemplateReplicas(namespace, deploymentName, 4)
+		})
+	})
+})
+
+// assertDeploymentWorkloadReplicas assert replicas in each member cluster eventually equal to @expectedReplicas
+func assertDeploymentWorkloadReplicas(namespace, name string, clusters []string, expectedReplicas []int32) {
+	gomega.Expect(len(clusters)).Should(gomega.Equal(len(expectedReplicas)))
+	for i, cluster := range clusters {
+		if expectedReplicas[i] == 0 {
+			framework.WaitDeploymentDisappearOnCluster(cluster, namespace, name)
+			return
+		}
+		framework.WaitDeploymentPresentOnClustersFitWith([]string{cluster}, namespace, name, func(deployment *appsv1.Deployment) bool {
+			klog.Infof("in %s cluster, got: %d, expect: %d", cluster, *deployment.Spec.Replicas, expectedReplicas[i])
+			return *deployment.Spec.Replicas == expectedReplicas[i]
+		})
+	}
+}
+
+// assertDeploymentTemplateReplicas assert replicas in template spec eventually equal to @expectedSpecReplicas
+func assertDeploymentTemplateReplicas(namespace, name string, expectedSpecReplicas int32) {
 	gomega.Eventually(func() bool {
-		deploymentExist, err := kubeClient.AppsV1().Deployments(deployment.Namespace).Get(context.TODO(), deployment.Name, metav1.GetOptions{})
+		deploymentExist, err := kubeClient.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{})
 		if err != nil {
 			return false
 		}
-		klog.Infof("got: %d, expect: %d", *deploymentExist.Spec.Replicas, expectedReplicas)
-		return (*deploymentExist.Spec.Replicas == expectedReplicas) && (deploymentExist.Generation == deploymentExist.Status.ObservedGeneration)
+		klog.Infof("template spec replicas, got: %d, expect: %d", *deploymentExist.Spec.Replicas, expectedSpecReplicas)
+		return (*deploymentExist.Spec.Replicas == expectedSpecReplicas) && (deploymentExist.Generation == deploymentExist.Status.ObservedGeneration)
 	}, time.Minute, pollInterval).Should(gomega.Equal(true))
-	})
-	})
-})
+}
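Editor's note on the numbers asserted above: for the Divided contexts, the expected values follow from simple static-weight division plus the syncer writing the aggregated member total back to the template. The sketch below is illustrative arithmetic only, not Karmada's scheduler or the deployment replicas syncer; splitByStaticWeight is a made-up helper for this note.

package main

import "fmt"

// splitByStaticWeight divides total replicas across clusters in proportion to
// the given weights, handing any remainder to the earlier clusters. This mirrors
// how the test cases above reason about a 1:1 static-weight split.
func splitByStaticWeight(total int32, weights []int64) []int32 {
	var sum int64
	for _, w := range weights {
		sum += w
	}
	assigned := int32(0)
	result := make([]int32, len(weights))
	for i, w := range weights {
		result[i] = int32(int64(total) * w / sum)
		assigned += result[i]
	}
	for i := 0; assigned < total; i = (i + 1) % len(weights) {
		result[i]++
		assigned++
	}
	return result
}

func main() {
	// Case 2, step1: template replicas 4 with weights 1:1 -> each member gets 2.
	fmt.Println(splitByStaticWeight(4, []int64{1, 1})) // [2 2]

	// Case 2, step2: HPA minReplicas=3 raises each member to 3; the syncer is then
	// expected to write the aggregated total back to the template: 3 + 3 = 6.
	members := []int32{3, 3}
	total := int32(0)
	for _, r := range members {
		total += r
	}
	fmt.Println(total) // 6

	// Case 5, step1: 1 replica with weights 1:1 initially lands as member1:member2 = 1:0.
	fmt.Println(splitByStaticWeight(1, []int64{1, 1})) // [1 0]
}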
@@ -42,3 +42,15 @@ func RemoveHPA(client kubernetes.Interface, namespace, name string) {
 		gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
 	})
 }
+
+// UpdateHPAWithMinReplicas update HPA with replicas.
+func UpdateHPAWithMinReplicas(client kubernetes.Interface, namespace, name string, minReplicas int32) {
+	ginkgo.By(fmt.Sprintf("Updating HPA(%s/%s)", namespace, name), func() {
+		newHPA, err := client.AutoscalingV2().HorizontalPodAutoscalers(namespace).Get(context.TODO(), name, metav1.GetOptions{})
+		gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+
+		newHPA.Spec.MinReplicas = &minReplicas
+		_, err = client.AutoscalingV2().HorizontalPodAutoscalers(namespace).Update(context.TODO(), newHPA, metav1.UpdateOptions{})
+		gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+	})
+}
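Editor's note: stripped of the Ginkgo wrapping, the new framework helper is the ordinary get-mutate-update flow against the autoscaling/v2 API. The standalone sketch below shows that flow with plain client-go; the kubeconfig loading, namespace, and HPA name are placeholders for illustration and are not part of this PR.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Load a kubeconfig from the default location (placeholder wiring).
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	namespace, name := "default", "nginx" // placeholder HPA coordinates
	minReplicas := int32(3)

	// Get the current HPA, bump minReplicas, and write it back.
	hpa, err := client.AutoscalingV2().HorizontalPodAutoscalers(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	hpa.Spec.MinReplicas = &minReplicas
	if _, err := client.AutoscalingV2().HorizontalPodAutoscalers(namespace).Update(context.TODO(), hpa, metav1.UpdateOptions{}); err != nil {
		panic(err)
	}
	fmt.Printf("updated %s/%s minReplicas to %d\n", namespace, name, minReplicas)
}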
@@ -17,6 +17,7 @@ limitations under the License.
 package helper
 
 import (
+	"github.com/onsi/gomega"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -97,6 +98,26 @@ func NewExplicitPriorityClusterPropagationPolicy(policyName string, rsSelectors
 	}
 }
+
+// NewStaticWeightPolicyStrategy create static weight policy strategy with specific weights
+// e.g: @clusters=[member1, member2], @weights=[1, 1], means static weight `member1:member2=1:1`
+func NewStaticWeightPolicyStrategy(clusters []string, weights []int64) *policyv1alpha1.ReplicaSchedulingStrategy {
+	gomega.Expect(len(clusters)).Should(gomega.Equal(len(weights)))
+	staticWeightList := make([]policyv1alpha1.StaticClusterWeight, 0)
+	for i, clusterName := range clusters {
+		staticWeightList = append(staticWeightList, policyv1alpha1.StaticClusterWeight{
+			TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{clusterName}},
+			Weight:        weights[i],
+		})
+	}
+	return &policyv1alpha1.ReplicaSchedulingStrategy{
+		ReplicaSchedulingType:     policyv1alpha1.ReplicaSchedulingTypeDivided,
+		ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceWeighted,
+		WeightPreference: &policyv1alpha1.ClusterPreferences{
+			StaticWeightList: staticWeightList,
+		},
+	}
+}
 
 // NewOverridePolicy will build a OverridePolicy object.
 func NewOverridePolicy(namespace, policyName string, rsSelectors []policyv1alpha1.ResourceSelector, clusterAffinity policyv1alpha1.ClusterAffinity, overriders policyv1alpha1.Overriders) *policyv1alpha1.OverridePolicy {
 	return &policyv1alpha1.OverridePolicy{
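Editor's usage note: for a 1:1 split between two clusters, the strategy the new helper builds is equivalent to the literal spelled out below (the cluster names are placeholders taken from the helper's own doc-comment example). The e2e specs above attach the result via policy.Spec.Placement.ReplicaScheduling; this snippet is only an illustration of the produced value, not additional PR code.

package main

import (
	"fmt"

	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
)

func main() {
	// Equivalent to helper.NewStaticWeightPolicyStrategy([]string{"member1", "member2"}, []int64{1, 1}).
	strategy := &policyv1alpha1.ReplicaSchedulingStrategy{
		ReplicaSchedulingType:     policyv1alpha1.ReplicaSchedulingTypeDivided,
		ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceWeighted,
		WeightPreference: &policyv1alpha1.ClusterPreferences{
			StaticWeightList: []policyv1alpha1.StaticClusterWeight{
				{TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{"member1"}}, Weight: 1},
				{TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{"member2"}}, Weight: 1},
			},
		},
	}
	fmt.Printf("%+v\n", strategy)
	// In the specs above this is assigned via:
	//   policy.Spec.Placement.ReplicaScheduling = strategy
}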