e2e: Add e2e for CronFederatedHPA
Signed-off-by: jwcesign <jiangwei115@huawei.com>

parent 2a314eb46e
commit 7ac69c1864
@@ -231,6 +231,9 @@ util::wait_pod_ready "${HOST_CLUSTER_NAME}" "${KARMADA_AGGREGATION_APISERVER_LAB
 # deploy karmada-search on host cluster
 kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${REPO_ROOT}/artifacts/deploy/karmada-search.yaml"
 util::wait_pod_ready "${HOST_CLUSTER_NAME}" "${KARMADA_SEARCH_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}"
+# deploy karmada-metrics-adapter on host cluster
+kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${REPO_ROOT}/artifacts/deploy/karmada-metrics-adapter.yaml"
+util::wait_pod_ready "${HOST_CLUSTER_NAME}" "${KARMADA_METRICS_ADAPTER_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}"

 # install CRD APIs on karmada apiserver.
 if ! kubectl config get-contexts "karmada-apiserver" > /dev/null 2>&1;
@@ -260,6 +263,11 @@ kubectl --context="karmada-apiserver" apply -f "${REPO_ROOT}/artifacts/deploy/ka
 # make sure apiservice for v1alpha1.search.karmada.io is Available
 util::wait_apiservice_ready "karmada-apiserver" "${KARMADA_SEARCH_LABEL}"

+# deploy APIService on karmada apiserver for karmada-metrics-adapter
+kubectl --context="karmada-apiserver" apply -f "${REPO_ROOT}/artifacts/deploy/karmada-metrics-adapter-apiservice.yaml"
+# make sure apiservice for karmada metrics adapter is Available
+util::wait_apiservice_ready "karmada-apiserver" "${KARMADA_METRICS_ADAPTER_LABEL}"
+
 # deploy cluster proxy rbac for admin
 kubectl --context="karmada-apiserver" apply -f "${REPO_ROOT}/artifacts/deploy/cluster-proxy-admin-rbac.yaml"
@@ -135,6 +135,7 @@ kind load docker-image "${REGISTRY}/karmada-webhook:${VERSION}" --name="${HOST_C
 kind load docker-image "${REGISTRY}/karmada-scheduler-estimator:${VERSION}" --name="${HOST_CLUSTER_NAME}"
 kind load docker-image "${REGISTRY}/karmada-aggregated-apiserver:${VERSION}" --name="${HOST_CLUSTER_NAME}"
 kind load docker-image "${REGISTRY}/karmada-search:${VERSION}" --name="${HOST_CLUSTER_NAME}"
+kind load docker-image "${REGISTRY}/karmada-metrics-adapter:${VERSION}" --name="${HOST_CLUSTER_NAME}"

 #step5. install karmada control plane components
 "${REPO_ROOT}"/hack/deploy-karmada.sh "${MAIN_KUBECONFIG}" "${HOST_CLUSTER_NAME}"
@@ -169,12 +170,17 @@ kind load docker-image "${REGISTRY}/karmada-agent:${VERSION}" --name="${PULL_MOD
 #step7. deploy karmada agent in pull mode member clusters
 "${REPO_ROOT}"/hack/deploy-agent-and-estimator.sh "${MAIN_KUBECONFIG}" "${HOST_CLUSTER_NAME}" "${MAIN_KUBECONFIG}" "${KARMADA_APISERVER_CLUSTER_NAME}" "${PULL_MODE_CLUSTER_TMP_CONFIG}" "${PULL_MODE_CLUSTER_NAME}"

+#step8. deploy metrics server in member clusters
+"${REPO_ROOT}"/hack/deploy-k8s-metrics-server.sh "${MEMBER_CLUSTER_1_TMP_CONFIG}" "${MEMBER_CLUSTER_1_NAME}"
+"${REPO_ROOT}"/hack/deploy-k8s-metrics-server.sh "${MEMBER_CLUSTER_2_TMP_CONFIG}" "${MEMBER_CLUSTER_2_NAME}"
+"${REPO_ROOT}"/hack/deploy-k8s-metrics-server.sh "${PULL_MODE_CLUSTER_TMP_CONFIG}" "${PULL_MODE_CLUSTER_NAME}"
+
 # wait all of clusters member1, member2 and member3 status is ready
 util::wait_cluster_ready "${KARMADA_APISERVER_CLUSTER_NAME}" "${MEMBER_CLUSTER_1_NAME}"
 util::wait_cluster_ready "${KARMADA_APISERVER_CLUSTER_NAME}" "${MEMBER_CLUSTER_2_NAME}"
 util::wait_cluster_ready "${KARMADA_APISERVER_CLUSTER_NAME}" "${PULL_MODE_CLUSTER_NAME}"

-#step8. merge temporary kubeconfig of member clusters by kubectl
+#step9. merge temporary kubeconfig of member clusters by kubectl
 export KUBECONFIG=$(find ${KUBECONFIG_PATH} -maxdepth 1 -type f | grep ${MEMBER_TMP_CONFIG_PREFIX} | tr '\n' ':')
 kubectl config view --flatten > ${MEMBER_CLUSTER_KUBECONFIG}
 rm $(find ${KUBECONFIG_PATH} -maxdepth 1 -type f | grep ${MEMBER_TMP_CONFIG_PREFIX})
@@ -0,0 +1,199 @@
+/*
+Copyright 2023 The Karmada Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package e2e
+
+import (
+    "time"
+
+    "github.com/onsi/ginkgo/v2"
+    appsv1 "k8s.io/api/apps/v1"
+    "k8s.io/apimachinery/pkg/util/rand"
+    "k8s.io/utils/pointer"
+
+    autoscalingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/autoscaling/v1alpha1"
+    policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
+    "github.com/karmada-io/karmada/test/e2e/framework"
+    "github.com/karmada-io/karmada/test/helper"
+)
+
+/*
+CronFederatedHPA focuses on scaling a FederatedHPA or another resource that has a scale subresource (e.g. Deployment, StatefulSet).
+Test Case Overview:
+    case 1:
+        Scale FederatedHPA.
+    case 2:
+        Scale Deployment.
+    case 3:
+        Test a suspended rule in CronFederatedHPA.
+    case 4:
+        Test an unsuspended rule, then suspend it, in CronFederatedHPA.
+*/
+var _ = ginkgo.Describe("[CronFederatedHPA] CronFederatedHPA testing", func() {
+    var cronFHPAName, fhpaName, policyName, deploymentName string
+    var cronFHPA *autoscalingv1alpha1.CronFederatedHPA
+    var fhpa *autoscalingv1alpha1.FederatedHPA
+    var deployment *appsv1.Deployment
+    var policy *policyv1alpha1.PropagationPolicy
+
+    ginkgo.BeforeEach(func() {
+        cronFHPAName = cronFederatedHPANamePrefix + rand.String(RandomStrLength)
+        fhpaName = federatedHPANamePrefix + rand.String(RandomStrLength)
+        policyName = deploymentNamePrefix + rand.String(RandomStrLength)
+        deploymentName = policyName
+
+        deployment = helper.NewDeployment(testNamespace, deploymentName)
+        policy = helper.NewPropagationPolicy(testNamespace, policyName, []policyv1alpha1.ResourceSelector{
+            {
+                APIVersion: deployment.APIVersion,
+                Kind:       deployment.Kind,
+                Name:       deploymentName,
+            },
+        }, policyv1alpha1.Placement{
+            ClusterAffinity: &policyv1alpha1.ClusterAffinity{
+                ClusterNames: framework.ClusterNames(),
+            },
+            ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{
+                ReplicaSchedulingType:     policyv1alpha1.ReplicaSchedulingTypeDivided,
+                ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceAggregated,
+            },
+        })
+    })
+
+    ginkgo.JustBeforeEach(func() {
+        framework.CreatePropagationPolicy(karmadaClient, policy)
+        framework.CreateDeployment(kubeClient, deployment)
+        ginkgo.DeferCleanup(func() {
+            framework.RemoveDeployment(kubeClient, deployment.Namespace, deployment.Name)
+            framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name)
+        })
+    })
+
+    // case 1: Scale FederatedHPA.
+    ginkgo.Context("Scale FederatedHPA", func() {
+        targetMinReplicas := pointer.Int32(2)
+        targetMaxReplicas := pointer.Int32(100)
+
+        ginkgo.BeforeEach(func() {
+            // */1 * * * * means the rule will be triggered every 1 minute
+            rule := helper.NewCronFederatedHPARule("scale-up", "*/1 * * * *", false, nil, targetMinReplicas, targetMaxReplicas)
+            fhpa = helper.NewFederatedHPA(testNamespace, fhpaName, deploymentName)
+            cronFHPA = helper.NewCronFederatedHPAWithScalingFHPA(testNamespace, cronFHPAName, fhpaName, rule)
+
+            framework.CreateFederatedHPA(karmadaClient, fhpa)
+        })
+
+        ginkgo.AfterEach(func() {
+            framework.RemoveFederatedHPA(karmadaClient, testNamespace, fhpaName)
+            framework.RemoveCronFederatedHPA(karmadaClient, testNamespace, cronFHPAName)
+        })
+
+        ginkgo.It("Scale FederatedHPA testing", func() {
+            framework.WaitDeploymentReplicasFitWith(framework.ClusterNames(), testNamespace, deploymentName, int(*fhpa.Spec.MinReplicas))
+
+            // Create CronFederatedHPA to scale FederatedHPA
+            framework.CreateCronFederatedHPA(karmadaClient, cronFHPA)
+
+            // Wait for CronFederatedHPA to raise the FederatedHPA's minReplicas, which in turn scales the deployment's replicas to minReplicas
+            framework.WaitDeploymentReplicasFitWith(framework.ClusterNames(), testNamespace, deploymentName, int(*targetMinReplicas))
+        })
+    })
+
+    // case 2: Scale Deployment.
+    ginkgo.Context("Scale Deployment", func() {
+        targetReplicas := pointer.Int32(4)
+
+        ginkgo.BeforeEach(func() {
+            // */1 * * * * means the rule will be executed every 1 minute
+            rule := helper.NewCronFederatedHPARule("scale-up", "*/1 * * * *", false, targetReplicas, nil, nil)
+            cronFHPA = helper.NewCronFederatedHPAWithScalingDeployment(testNamespace, cronFHPAName, deploymentName, rule)
+        })
+
+        ginkgo.AfterEach(func() {
+            framework.RemoveCronFederatedHPA(karmadaClient, testNamespace, cronFHPAName)
+        })
+
+        ginkgo.It("Scale Deployment testing", func() {
+            framework.WaitDeploymentReplicasFitWith(framework.ClusterNames(), testNamespace, deploymentName, int(*deployment.Spec.Replicas))
+
+            // Create CronFederatedHPA to scale Deployment
+            framework.CreateCronFederatedHPA(karmadaClient, cronFHPA)
+
+            framework.WaitDeploymentReplicasFitWith(framework.ClusterNames(), testNamespace, deploymentName, int(*targetReplicas))
+        })
+    })
+
+    // case 3: Test a suspended rule in CronFederatedHPA.
+    ginkgo.Context("Test suspend rule in CronFederatedHPA", func() {
+        ginkgo.BeforeEach(func() {
+            // */1 * * * * means the rule will be executed every 1 minute
+            rule := helper.NewCronFederatedHPARule("scale-up", "*/1 * * * *", true, pointer.Int32(30), nil, nil)
+            cronFHPA = helper.NewCronFederatedHPAWithScalingDeployment(testNamespace, cronFHPAName, deploymentName, rule)
+        })
+
+        ginkgo.AfterEach(func() {
+            framework.RemoveCronFederatedHPA(karmadaClient, testNamespace, cronFHPAName)
+        })
+
+        ginkgo.It("Test suspend rule with CronFederatedHPA", func() {
+            framework.WaitDeploymentReplicasFitWith(framework.ClusterNames(), testNamespace, deploymentName, int(*deployment.Spec.Replicas))
+
+            // Create CronFederatedHPA to scale Deployment
+            framework.CreateCronFederatedHPA(karmadaClient, cronFHPA)
+
+            // */1 * * * * means the rule would be triggered every 1 minute,
+            // so wait 1m30s, then check that the replicas did not change, proving the suspend field works
+            time.Sleep(time.Minute*1 + time.Second*30)
+            framework.WaitDeploymentReplicasFitWith(framework.ClusterNames(), testNamespace, deploymentName, int(*deployment.Spec.Replicas))
+        })
+    })
+
+    // case 4: Test an unsuspended rule, then suspend it, in CronFederatedHPA.
+    ginkgo.Context("Test unsuspend rule then suspend it in CronFederatedHPA", func() {
+        rule := autoscalingv1alpha1.CronFederatedHPARule{}
+        targetReplicas := pointer.Int32(4)
+
+        ginkgo.BeforeEach(func() {
+            // */1 * * * * means the rule will be executed every 1 minute
+            rule = helper.NewCronFederatedHPARule("scale-up", "*/1 * * * *", false, targetReplicas, nil, nil)
+            cronFHPA = helper.NewCronFederatedHPAWithScalingDeployment(testNamespace, cronFHPAName, deploymentName, rule)
+        })
+
+        ginkgo.AfterEach(func() {
+            framework.RemoveCronFederatedHPA(karmadaClient, testNamespace, cronFHPAName)
+        })
+
+        ginkgo.It("Test unsuspend rule then suspend it in CronFederatedHPA", func() {
+            // Step 1. Check the initial replicas, which should be 3 (deployment.Spec.Replicas)
+            framework.WaitDeploymentReplicasFitWith(framework.ClusterNames(), testNamespace, deploymentName, int(*deployment.Spec.Replicas))
+
+            // Step 2. Create CronFederatedHPA to scale Deployment
+            framework.CreateCronFederatedHPA(karmadaClient, cronFHPA)
+            framework.WaitDeploymentReplicasFitWith(framework.ClusterNames(), testNamespace, deploymentName, int(*targetReplicas))
+
+            // Step 3. Update replicas back to 3 (deployment.Spec.Replicas)
+            framework.UpdateDeploymentReplicas(kubeClient, deployment, *deployment.Spec.Replicas)
+
+            // Step 4. Suspend the rule
+            rule.Suspend = pointer.Bool(true)
+            framework.UpdateCronFederatedHPAWithRule(karmadaClient, testNamespace, cronFHPAName, []autoscalingv1alpha1.CronFederatedHPARule{rule})
+
+            // Step 5. Check the replicas, which should not change:
+            // */1 * * * * means the rule would be triggered every 1 minute,
+            // so wait 1m30s, then check that the replicas did not change, proving the suspend field works
+            time.Sleep(time.Minute*1 + time.Second*30)
+            framework.WaitDeploymentReplicasFitWith(framework.ClusterNames(), testNamespace, deploymentName, int(*deployment.Spec.Replicas))
+        })
+    })
+})
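
The suspend cases above sleep for 1m30s before re-checking replicas. A standalone sketch of why that window suffices: with the "*/1 * * * *" spec, a rule can fire at most once per minute, so 90 seconds always spans at least one would-be trigger. This snippet is illustrative only (not part of the commit) and uses the github.com/robfig/cron/v3 parser purely to show the bound:

package main

import (
    "fmt"
    "time"

    "github.com/robfig/cron/v3"
)

func main() {
    // ParseStandard accepts the same 5-field spec used in the e2e rules.
    sched, err := cron.ParseStandard("*/1 * * * *")
    if err != nil {
        panic(err)
    }
    now := time.Now()
    // The next trigger is always within one minute, so sleeping 90s after
    // suspending a rule guarantees at least one tick the rule must skip.
    fmt.Printf("next trigger in %v\n", sched.Next(now).Sub(now))
}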
@@ -0,0 +1,54 @@
+/*
+Copyright 2023 The Karmada Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package framework
+
+import (
+    "context"
+    "fmt"
+
+    "github.com/onsi/ginkgo/v2"
+    "github.com/onsi/gomega"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+    autoscalingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/autoscaling/v1alpha1"
+    karmada "github.com/karmada-io/karmada/pkg/generated/clientset/versioned"
+)
+
+// CreateCronFederatedHPA creates a CronFederatedHPA with the karmada client.
+func CreateCronFederatedHPA(client karmada.Interface, cronFHPA *autoscalingv1alpha1.CronFederatedHPA) {
+    ginkgo.By(fmt.Sprintf("Create CronFederatedHPA(%s/%s)", cronFHPA.Namespace, cronFHPA.Name), func() {
+        _, err := client.AutoscalingV1alpha1().CronFederatedHPAs(cronFHPA.Namespace).Create(context.TODO(), cronFHPA, metav1.CreateOptions{})
+        gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+    })
+}
+
+// RemoveCronFederatedHPA deletes a CronFederatedHPA with the karmada client.
+func RemoveCronFederatedHPA(client karmada.Interface, namespace, name string) {
+    ginkgo.By(fmt.Sprintf("Remove CronFederatedHPA(%s/%s)", namespace, name), func() {
+        err := client.AutoscalingV1alpha1().CronFederatedHPAs(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
+        gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+    })
+}
+
+// UpdateCronFederatedHPAWithRule updates a CronFederatedHPA's rules with the karmada client.
+func UpdateCronFederatedHPAWithRule(client karmada.Interface, namespace, name string, rule []autoscalingv1alpha1.CronFederatedHPARule) {
+    ginkgo.By(fmt.Sprintf("Updating CronFederatedHPA(%s/%s)", namespace, name), func() {
+        newCronFederatedHPA, err := client.AutoscalingV1alpha1().CronFederatedHPAs(namespace).Get(context.TODO(), name, metav1.GetOptions{})
+        gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+
+        newCronFederatedHPA.Spec.Rules = rule
+        _, err = client.AutoscalingV1alpha1().CronFederatedHPAs(namespace).Update(context.TODO(), newCronFederatedHPA, metav1.UpdateOptions{})
+        gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+    })
+}
@@ -223,3 +223,25 @@ func WaitDeploymentGetByClientFitWith(client kubernetes.Interface, namespace, na
         }, pollTimeout, pollInterval).Should(gomega.Equal(true))
     })
 }
+
+// WaitDeploymentReplicasFitWith waits until the deployment's replicas, summed across the given clusters, equal expectReplicas.
+func WaitDeploymentReplicasFitWith(clusters []string, namespace, name string, expectReplicas int) {
+    ginkgo.By(fmt.Sprintf("Check deployment(%s/%s) replicas fit with expecting", namespace, name), func() {
+        gomega.Eventually(func() bool {
+            totalReplicas := 0
+            for _, cluster := range clusters {
+                clusterClient := GetClusterClient(cluster)
+                if clusterClient == nil {
+                    continue
+                }
+
+                dep, err := clusterClient.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{})
+                if err != nil {
+                    continue
+                }
+                totalReplicas += int(*dep.Spec.Replicas)
+            }
+            klog.Infof("The total replicas of deployment(%s/%s) is %d", namespace, name, totalReplicas)
+            return totalReplicas == expectReplicas
+        }, pollTimeout, pollInterval).Should(gomega.Equal(true))
+    })
+}
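
WaitDeploymentReplicasFitWith treats the federated deployment as the sum of its per-cluster slices, which is why the divided replica scheduling in the test's PropagationPolicy makes the expected total meaningful. A minimal sketch of the aggregation idea, runnable against fake clientsets (illustrative only, not part of the commit):

package main

import (
    "context"
    "fmt"

    appsv1 "k8s.io/api/apps/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/kubernetes/fake"
    "k8s.io/utils/pointer"
)

func main() {
    // Two "member clusters", each holding a slice of the deployment's replicas.
    newCluster := func(replicas int32) kubernetes.Interface {
        return fake.NewSimpleClientset(&appsv1.Deployment{
            ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "nginx"},
            Spec:       appsv1.DeploymentSpec{Replicas: pointer.Int32(replicas)},
        })
    }
    clusters := []kubernetes.Interface{newCluster(1), newCluster(3)}

    // Sum spec.replicas across clusters, as the framework helper does.
    total := 0
    for _, c := range clusters {
        dep, err := c.AppsV1().Deployments("default").Get(context.TODO(), "nginx", metav1.GetOptions{})
        if err != nil {
            continue
        }
        total += int(*dep.Spec.Replicas)
    }
    fmt.Println("total replicas:", total) // 4
}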
@@ -0,0 +1,42 @@
+/*
+Copyright 2023 The Karmada Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package framework
+
+import (
+    "context"
+    "fmt"
+
+    "github.com/onsi/ginkgo/v2"
+    "github.com/onsi/gomega"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+    autoscalingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/autoscaling/v1alpha1"
+    karmada "github.com/karmada-io/karmada/pkg/generated/clientset/versioned"
+)
+
+// CreateFederatedHPA creates a FederatedHPA with the karmada client.
+func CreateFederatedHPA(client karmada.Interface, fhpa *autoscalingv1alpha1.FederatedHPA) {
+    ginkgo.By(fmt.Sprintf("Create FederatedHPA(%s/%s)", fhpa.Namespace, fhpa.Name), func() {
+        _, err := client.AutoscalingV1alpha1().FederatedHPAs(fhpa.Namespace).Create(context.TODO(), fhpa, metav1.CreateOptions{})
+        gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+    })
+}
+
+// RemoveFederatedHPA deletes a FederatedHPA with the karmada client.
+func RemoveFederatedHPA(client karmada.Interface, namespace, name string) {
+    ginkgo.By(fmt.Sprintf("Remove FederatedHPA(%s/%s)", namespace, name), func() {
+        err := client.AutoscalingV1alpha1().FederatedHPAs(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
+        gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+    })
+}
@@ -56,6 +56,8 @@ const (
     roleBindingNamePrefix         = "rolebinding-"
     clusterRoleBindingNamePrefix  = "clusterrolebinding-"
     podDisruptionBudgetNamePrefix = "poddisruptionbudget-"
+    federatedHPANamePrefix        = "fhpa-"
+    cronFederatedHPANamePrefix    = "cronfhpa-"

     updateDeploymentReplicas  = 2
     updateStatefulSetReplicas = 2
@@ -4,6 +4,7 @@ import (
     "fmt"

     appsv1 "k8s.io/api/apps/v1"
+    autoscalingv2 "k8s.io/api/autoscaling/v2"
     batchv1 "k8s.io/api/batch/v1"
     corev1 "k8s.io/api/core/v1"
     networkingv1 "k8s.io/api/networking/v1"
@@ -18,6 +19,7 @@ import (
     "k8s.io/utils/pointer"

     workloadv1alpha1 "github.com/karmada-io/karmada/examples/customresourceinterpreter/apis/workload/v1alpha1"
+    autoscalingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/autoscaling/v1alpha1"
     clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
 )
@@ -31,6 +33,104 @@ const (
     ResourceUnitGPU int64 = 1
 )

+// NewCronFederatedHPAWithScalingDeployment will build a CronFederatedHPA object that scales a Deployment.
+func NewCronFederatedHPAWithScalingDeployment(namespace, name, deploymentName string,
+    rule autoscalingv1alpha1.CronFederatedHPARule) *autoscalingv1alpha1.CronFederatedHPA {
+    return &autoscalingv1alpha1.CronFederatedHPA{
+        TypeMeta: metav1.TypeMeta{
+            APIVersion: "autoscaling.karmada.io/v1alpha1",
+            Kind:       "CronFederatedHPA",
+        },
+        ObjectMeta: metav1.ObjectMeta{
+            Namespace: namespace,
+            Name:      name,
+        },
+        Spec: autoscalingv1alpha1.CronFederatedHPASpec{
+            ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{
+                APIVersion: "apps/v1",
+                Kind:       "Deployment",
+                Name:       deploymentName,
+            },
+            Rules: []autoscalingv1alpha1.CronFederatedHPARule{rule},
+        },
+    }
+}
+
+// NewCronFederatedHPAWithScalingFHPA will build a CronFederatedHPA object that scales a FederatedHPA.
+func NewCronFederatedHPAWithScalingFHPA(namespace, name, fhpaName string,
+    rule autoscalingv1alpha1.CronFederatedHPARule) *autoscalingv1alpha1.CronFederatedHPA {
+    return &autoscalingv1alpha1.CronFederatedHPA{
+        TypeMeta: metav1.TypeMeta{
+            APIVersion: "autoscaling.karmada.io/v1alpha1",
+            Kind:       "CronFederatedHPA",
+        },
+        ObjectMeta: metav1.ObjectMeta{
+            Namespace: namespace,
+            Name:      name,
+        },
+        Spec: autoscalingv1alpha1.CronFederatedHPASpec{
+            ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{
+                APIVersion: "autoscaling.karmada.io/v1alpha1",
+                Kind:       "FederatedHPA",
+                Name:       fhpaName,
+            },
+            Rules: []autoscalingv1alpha1.CronFederatedHPARule{rule},
+        },
+    }
+}
+
+// NewCronFederatedHPARule will build a CronFederatedHPARule object.
+func NewCronFederatedHPARule(name, cron string, suspend bool, targetReplicas, targetMinReplicas, targetMaxReplicas *int32) autoscalingv1alpha1.CronFederatedHPARule {
+    return autoscalingv1alpha1.CronFederatedHPARule{
+        Name:              name,
+        Schedule:          cron,
+        TargetReplicas:    targetReplicas,
+        TargetMinReplicas: targetMinReplicas,
+        TargetMaxReplicas: targetMaxReplicas,
+        Suspend:           pointer.Bool(suspend),
+    }
+}
+
+// NewFederatedHPA will build a FederatedHPA object.
+func NewFederatedHPA(namespace, name, scaleTargetDeployment string) *autoscalingv1alpha1.FederatedHPA {
+    return &autoscalingv1alpha1.FederatedHPA{
+        TypeMeta: metav1.TypeMeta{
+            APIVersion: "autoscaling.karmada.io/v1alpha1",
+            Kind:       "FederatedHPA",
+        },
+        ObjectMeta: metav1.ObjectMeta{
+            Namespace: namespace,
+            Name:      name,
+        },
+        Spec: autoscalingv1alpha1.FederatedHPASpec{
+            ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{
+                APIVersion: "apps/v1",
+                Kind:       "Deployment",
+                Name:       scaleTargetDeployment,
+            },
+            Behavior: &autoscalingv2.HorizontalPodAutoscalerBehavior{
+                ScaleDown: &autoscalingv2.HPAScalingRules{
+                    StabilizationWindowSeconds: pointer.Int32(10),
+                },
+            },
+            MinReplicas: pointer.Int32(1),
+            MaxReplicas: 1,
+            Metrics: []autoscalingv2.MetricSpec{
+                {
+                    Type: autoscalingv2.ResourceMetricSourceType,
+                    Resource: &autoscalingv2.ResourceMetricSource{
+                        Name: corev1.ResourceCPU,
+                        Target: autoscalingv2.MetricTarget{
+                            Type:               autoscalingv2.UtilizationMetricType,
+                            AverageUtilization: pointer.Int32(80),
+                        },
+                    },
+                },
+            },
+        },
+    }
+}
+
 // NewDeployment will build a deployment object.
 func NewDeployment(namespace string, name string) *appsv1.Deployment {
     podLabels := map[string]string{"app": "nginx"}
@@ -58,6 +158,9 @@ func NewDeployment(namespace string, name string) *appsv1.Deployment {
                             Name:  "nginx",
                             Image: "nginx:1.19.0",
                             Resources: corev1.ResourceRequirements{
+                                Requests: map[corev1.ResourceName]resource.Quantity{
+                                    corev1.ResourceCPU: resource.MustParse("10m"),
+                                },
                                 Limits: map[corev1.ResourceName]resource.Quantity{
                                     corev1.ResourceCPU: resource.MustParse("100m"),
                                 },
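
For reference, the new helpers compose as follows; a small sketch (illustrative only, assuming the karmada module is on the import path) that builds the same objects the e2e cases create:

package main

import (
    "fmt"

    "k8s.io/utils/pointer"

    "github.com/karmada-io/karmada/test/helper"
)

func main() {
    // A rule that pins the target to 4 replicas, firing every minute.
    rule := helper.NewCronFederatedHPARule("scale-up", "*/1 * * * *", false, pointer.Int32(4), nil, nil)

    // Case 2's shape: the CronFederatedHPA scales a Deployment directly.
    cronFHPA := helper.NewCronFederatedHPAWithScalingDeployment("default", "cronfhpa-demo", "nginx", rule)
    fmt.Printf("%s -> %s/%s\n", cronFHPA.Name, cronFHPA.Spec.ScaleTargetRef.Kind, cronFHPA.Spec.ScaleTargetRef.Name)

    // Case 1's shape: the CronFederatedHPA scales a FederatedHPA, which in turn owns the Deployment.
    fhpa := helper.NewFederatedHPA("default", "fhpa-demo", "nginx")
    cronOverFHPA := helper.NewCronFederatedHPAWithScalingFHPA("default", "cronfhpa-demo2", fhpa.Name, rule)
    fmt.Printf("%s -> %s/%s\n", cronOverFHPA.Name, cronOverFHPA.Spec.ScaleTargetRef.Kind, cronOverFHPA.Spec.ScaleTargetRef.Name)
}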