Merge ff7874dff3 into a9292351c3
This commit is contained in: commit 3340cc7b1c
@@ -0,0 +1,47 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package integration
+
+import (
+    "testing"
+
+    ginkgo "github.com/onsi/ginkgo/v2"
+    "github.com/onsi/gomega"
+
+    runtimeutils "k8s.io/apimachinery/pkg/util/runtime"
+    "k8s.io/component-base/logs"
+    "k8s.io/kubernetes/test/e2e/framework"
+)
+
+// RunE2ETests checks configuration parameters (specified through flags) and then runs
+// E2E tests using the Ginkgo runner.
+// If a "report directory" is specified, one or more JUnit test reports will be
+// generated in this directory, and cluster logs will also be saved.
+// This function is called on each Ginkgo node in parallel mode.
+func RunE2ETests(t *testing.T) {
+    runtimeutils.ReallyCrash = true
+    logs.InitLogs()
+    defer logs.FlushLogs()
+
+    gomega.RegisterFailHandler(framework.Fail)
+    suiteConfig, _ := ginkgo.GinkgoConfiguration()
+    // Skip [Flaky] and [Feature:...] tests unless they are explicitly requested.
+    if len(suiteConfig.FocusStrings) == 0 && len(suiteConfig.SkipStrings) == 0 {
+        suiteConfig.SkipStrings = []string{`\[Flaky\]|\[Feature:.+\]`}
+    }
+    ginkgo.RunSpecs(t, "Kubernetes e2e suite", suiteConfig)
+}
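The default skip expression above filters specs by their full Ginkgo description string. As an illustration only (a standalone sketch, not part of this commit), here is how that regex classifies typical spec names:

package main

import (
    "fmt"
    "regexp"
)

func main() {
    // The same expression RunE2ETests installs as the default SkipStrings.
    skip := regexp.MustCompile(`\[Flaky\]|\[Feature:.+\]`)
    for _, name := range []string{
        "[sig-autoscaling] [VPA] [recommender] [v1] Flags starts recommender with --vpa-object-namespace parameter",
        "[sig-autoscaling] [VPA] [v1] [Flaky] occasionally failing spec",
        "[sig-autoscaling] [Feature:InPlacePodVerticalScaling] feature-gated spec",
    } {
        fmt.Printf("skipped=%-5v %s\n", skip.MatchString(name), name)
    }
}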
@@ -0,0 +1,47 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package integration
+
+import (
+    "flag"
+    "os"
+    "testing"
+
+    "k8s.io/kubernetes/test/e2e/framework"
+    "k8s.io/kubernetes/test/e2e/framework/config"
+)
+
+// handleFlags sets up all flags and parses the command line.
+func handleFlags() {
+    config.CopyFlags(config.Flags, flag.CommandLine)
+    framework.RegisterCommonFlags(flag.CommandLine)
+    framework.RegisterClusterFlags(flag.CommandLine)
+    flag.Parse()
+}
+
+func TestMain(m *testing.M) {
+    // Register test flags, then parse flags.
+    handleFlags()
+
+    framework.AfterReadingAllFlags(&framework.TestContext)
+
+    os.Exit(m.Run())
+}
+
+func TestIntegration(t *testing.T) {
+    RunE2ETests(t)
+}
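handleFlags and TestMain make the suite an ordinary go test binary: framework and cluster flags are registered on flag.CommandLine and parsed once before any spec runs. Assuming a reachable cluster, a typical invocation might look like this (illustrative only; exact flags depend on the framework version):

    go test ./e2e/integration/... -v -timeout=60m \
        -kubeconfig=$HOME/.kube/config \
        -ginkgo.focus=\[recommender\]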
@@ -0,0 +1,138 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package integration
+
+import (
+    "context"
+    "fmt"
+
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/autoscaler/vertical-pod-autoscaler/e2e/utils"
+
+    vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1"
+    vpa_clientset "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned"
+    "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/test"
+    "k8s.io/kubernetes/test/e2e/framework"
+    podsecurity "k8s.io/pod-security-admission/api"
+
+    ginkgo "github.com/onsi/ginkgo/v2"
+    "github.com/onsi/gomega"
+)
+
+var _ = utils.RecommenderE2eDescribe("Flags", func() {
+    f := framework.NewDefaultFramework("vertical-pod-autoscaling")
+    f.NamespacePodSecurityEnforceLevel = podsecurity.LevelBaseline
+
+    var vpaClientSet vpa_clientset.Interface
+    var hamsterNamespace string
+
+    ginkgo.BeforeEach(func() {
+        vpaClientSet = utils.GetVpaClientSet(f)
+        hamsterNamespace = f.Namespace.Name
+    })
+
+    ginkgo.AfterEach(func() {
+        f.ClientSet.AppsV1().Deployments(utils.RecommenderNamespace).Delete(context.TODO(), utils.RecommenderDeploymentName, metav1.DeleteOptions{})
+    })
+
+    ginkgo.It("starts recommender with --vpa-object-namespace parameter", func() {
+        ginkgo.By("Setting up VPA deployment")
+        ignoredNamespace, err := f.CreateNamespace(context.TODO(), "ignored-namespace", nil)
+        gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+        f.Namespace.Name = utils.RecommenderNamespace
+        vpaDeployment := utils.NewVPADeployment(f, []string{
+            "--recommender-interval=10s",
+            fmt.Sprintf("--vpa-object-namespace=%s", hamsterNamespace),
+        })
+        utils.StartDeploymentPods(f, vpaDeployment)
+
+        testIncludedAndIgnoredNamespaces(f, vpaClientSet, hamsterNamespace, ignoredNamespace.Name)
+    })
+
+    ginkgo.It("starts recommender with --ignored-vpa-object-namespaces parameter", func() {
+        ginkgo.By("Setting up VPA deployment")
+        ignoredNamespace, err := f.CreateNamespace(context.TODO(), "ignored-namespace", nil)
+        gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+        f.Namespace.Name = utils.RecommenderNamespace
+        vpaDeployment := utils.NewVPADeployment(f, []string{
+            "--recommender-interval=10s",
+            fmt.Sprintf("--ignored-vpa-object-namespaces=%s", ignoredNamespace.Name),
+        })
+        utils.StartDeploymentPods(f, vpaDeployment)
+
+        testIncludedAndIgnoredNamespaces(f, vpaClientSet, hamsterNamespace, ignoredNamespace.Name)
+    })
+})
+
+// testIncludedAndIgnoredNamespaces creates a VPA and a deployment in two
+// namespaces, one of which should be ignored. The ignored namespace's VPA and
+// deployment are intentionally created first, so that by the time the included
+// namespace has a recommendation generated, the ignored one has waited long enough.
+func testIncludedAndIgnoredNamespaces(f *framework.Framework, vpaClientSet vpa_clientset.Interface, includedNamespace, ignoredNamespace string) {
+    ginkgo.By("Setting up a hamster deployment in ignored namespace")
+    f.Namespace.Name = ignoredNamespace
+    d := utils.NewNHamstersDeployment(f, 2)
+    _ = utils.StartDeploymentPods(f, d)
+
+    ginkgo.By("Setting up VPA for ignored namespace")
+    container1Name := utils.GetHamsterContainerNameByIndex(0)
+    container2Name := utils.GetHamsterContainerNameByIndex(1)
+    ignoredVpaCRD := test.VerticalPodAutoscaler().
+        WithName("hamster-vpa").
+        WithNamespace(ignoredNamespace).
+        WithTargetRef(utils.HamsterTargetRef).
+        WithContainer(container1Name).
+        WithScalingMode(container1Name, vpa_types.ContainerScalingModeOff).
+        WithContainer(container2Name).
+        Get()
+    f.Namespace.Name = ignoredNamespace
+    utils.InstallVPA(f, ignoredVpaCRD)
+
+    ginkgo.By("Setting up a hamster deployment in included namespace")
+    f.Namespace.Name = includedNamespace
+    d = utils.NewNHamstersDeployment(f, 2)
+    _ = utils.StartDeploymentPods(f, d)
+
+    ginkgo.By("Setting up VPA for included namespace")
+    vpaCRD := test.VerticalPodAutoscaler().
+        WithName("hamster-vpa").
+        WithNamespace(includedNamespace).
+        WithTargetRef(utils.HamsterTargetRef).
+        WithContainer(container1Name).
+        WithScalingMode(container1Name, vpa_types.ContainerScalingModeOff).
+        WithContainer(container2Name).
+        Get()
+
+    f.Namespace.Name = includedNamespace
+    utils.InstallVPA(f, vpaCRD)
+
+    ginkgo.By("Waiting for recommendation to be filled for just one container")
+    vpa, err := utils.WaitForRecommendationPresent(vpaClientSet, vpaCRD)
+    gomega.Expect(err).NotTo(gomega.HaveOccurred())
+    errMsg := fmt.Sprintf("%s container has recommendations turned off. We expect only recommendations for %s",
+        utils.GetHamsterContainerNameByIndex(0),
+        utils.GetHamsterContainerNameByIndex(1))
+    gomega.Expect(vpa.Status.Recommendation.ContainerRecommendations).Should(gomega.HaveLen(1), errMsg)
+    gomega.Expect(vpa.Status.Recommendation.ContainerRecommendations[0].ContainerName).To(gomega.Equal(utils.GetHamsterContainerNameByIndex(1)), errMsg)
+
+    ginkgo.By("Ignored namespace should not be recommended")
+    ignoredVpa, err := vpaClientSet.AutoscalingV1().VerticalPodAutoscalers(ignoredNamespace).Get(context.TODO(), ignoredVpaCRD.Name, metav1.GetOptions{})
+    gomega.Expect(err).NotTo(gomega.HaveOccurred())
+    gomega.Expect(ignoredVpa.Status.Conditions).Should(gomega.HaveLen(0))
+}
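The two specs above exercise complementary namespace filters; schematically, the recommender flag sets under test are:

    --recommender-interval=10s --vpa-object-namespace=<hamster-ns>            watch exactly one namespace
    --recommender-interval=10s --ignored-vpa-object-namespaces=<ignored-ns>   watch all namespaces except those listed

In both cases testIncludedAndIgnoredNamespaces then asserts that only the included namespace ends up with a recommendation.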
@@ -0,0 +1,325 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package utils
+
+import (
+    "context"
+    "encoding/json"
+    "fmt"
+    "time"
+
+    ginkgo "github.com/onsi/ginkgo/v2"
+    "github.com/onsi/gomega"
+    appsv1 "k8s.io/api/apps/v1"
+    autoscaling "k8s.io/api/autoscaling/v1"
+    apiv1 "k8s.io/api/core/v1"
+    "k8s.io/apimachinery/pkg/api/resource"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/types"
+    "k8s.io/apimachinery/pkg/util/intstr"
+    "k8s.io/apimachinery/pkg/util/wait"
+    "k8s.io/kubernetes/test/e2e/framework"
+    framework_deployment "k8s.io/kubernetes/test/e2e/framework/deployment"
+
+    vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1"
+    vpa_clientset "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned"
+)
+
+const (
+    recommenderComponent = "recommender"
+
+    // RecommenderDeploymentName is the name of the VPA recommender deployment.
+    RecommenderDeploymentName = "vpa-recommender"
+    // RecommenderNamespace is the namespace the VPA recommender is deployed to.
+    RecommenderNamespace = "kube-system"
+    // PollInterval is the default interval for polling.
+    PollInterval = 10 * time.Second
+    // PollTimeout is the default timeout for polling.
+    PollTimeout = 15 * time.Minute
+
+    // DefaultHamsterReplicas is the default replica count of the hamster deployment.
+    DefaultHamsterReplicas = int32(3)
+    // DefaultHamsterBackoffLimit is the default BackoffLimit of the hamster app.
+    DefaultHamsterBackoffLimit = int32(10)
+)
+
+// HamsterTargetRef is the CrossVersionObjectReference of the hamster app.
+var HamsterTargetRef = &autoscaling.CrossVersionObjectReference{
+    APIVersion: "apps/v1",
+    Kind:       "Deployment",
+    Name:       "hamster-deployment",
+}
+
+// RecommenderLabels are the labels of the VPA recommender.
+var RecommenderLabels = map[string]string{"app": "vpa-recommender"}
+
+// HamsterLabels are the labels of the hamster app.
+var HamsterLabels = map[string]string{"app": "hamster"}
+
+// SIGDescribe adds the sig-autoscaling tag to a test description.
+// Takes args that are passed on to ginkgo.Describe.
+func SIGDescribe(scenario, name string, args ...interface{}) bool {
+    full := fmt.Sprintf("[sig-autoscaling] [VPA] [%s] [v1] %s", scenario, name)
+    return ginkgo.Describe(full, args...)
+}
+
+// RecommenderE2eDescribe describes a VPA recommender e2e test.
+func RecommenderE2eDescribe(name string, args ...interface{}) bool {
+    return SIGDescribe(recommenderComponent, name, args...)
+}
+
+// GetHamsterContainerNameByIndex returns the name of the i-th hamster container ("hamster", then "hamster2", "hamster3", ...).
+func GetHamsterContainerNameByIndex(i int) string {
+    switch {
+    case i < 0:
+        panic("negative index")
+    case i == 0:
+        return "hamster"
+    default:
+        return fmt.Sprintf("hamster%d", i+1)
+    }
+}
+
+// GetVpaClientSet returns a VPA clientset for the test cluster.
+func GetVpaClientSet(f *framework.Framework) vpa_clientset.Interface {
+    config, err := framework.LoadConfig()
+    gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error loading framework")
+    return vpa_clientset.NewForConfigOrDie(config)
+}
+
+// InstallVPA installs a VPA object in the test cluster.
+func InstallVPA(f *framework.Framework, vpa *vpa_types.VerticalPodAutoscaler) {
+    vpaClientSet := GetVpaClientSet(f)
+    _, err := vpaClientSet.AutoscalingV1().VerticalPodAutoscalers(f.Namespace.Name).Create(context.TODO(), vpa, metav1.CreateOptions{})
+    gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error creating VPA")
+    // The apiserver ignores the status on VPA create, so a non-empty status has to be patched in separately.
+    if !isStatusEmpty(&vpa.Status) {
+        if vpa.Status.Recommendation != nil {
+            PatchVpaRecommendation(f, vpa, vpa.Status.Recommendation)
+        }
+    }
+}
+
+func isStatusEmpty(status *vpa_types.VerticalPodAutoscalerStatus) bool {
+    if status == nil {
+        return true
+    }
+
+    if len(status.Conditions) == 0 && status.Recommendation == nil {
+        return true
+    }
+    return false
+}
+
+// PatchRecord represents a single JSON patch operation.
+type PatchRecord struct {
+    Op    string      `json:"op,inline"`
+    Path  string      `json:"path,inline"`
+    Value interface{} `json:"value"`
+}
+
+// PatchVpaRecommendation installs a new recommendation for a VPA object.
+func PatchVpaRecommendation(f *framework.Framework, vpa *vpa_types.VerticalPodAutoscaler,
+    recommendation *vpa_types.RecommendedPodResources) {
+    newStatus := vpa.Status.DeepCopy()
+    newStatus.Recommendation = recommendation
+    bytes, err := json.Marshal([]PatchRecord{{
+        Op:    "replace",
+        Path:  "/status",
+        Value: *newStatus,
+    }})
+    gomega.Expect(err).NotTo(gomega.HaveOccurred())
+    _, err = GetVpaClientSet(f).AutoscalingV1().VerticalPodAutoscalers(f.Namespace.Name).Patch(context.TODO(), vpa.Name, types.JSONPatchType, bytes, metav1.PatchOptions{}, "status")
+    gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to patch VPA.")
+}
+
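For intuition, the marshalled patch body is a one-element JSON-patch array. With a single-container recommendation it would serialize to roughly the following (illustrative values; field names per the v1 VPA API):

    [{"op":"replace","path":"/status","value":{"recommendation":{"containerRecommendations":[{"containerName":"hamster","target":{"cpu":"250m","memory":"200Mi"}}]}}}]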
+// NewVPADeployment creates a VPA recommender deployment running with the
+// given flags, for e2e test purposes.
+func NewVPADeployment(f *framework.Framework, flags []string) *appsv1.Deployment {
+    d := framework_deployment.NewDeployment(
+        RecommenderDeploymentName,                  /*deploymentName*/
+        1,                                          /*replicas*/
+        RecommenderLabels,                          /*podLabels*/
+        "recommender",                              /*imageName*/
+        "localhost:5001/vpa-recommender",           /*image*/
+        appsv1.RollingUpdateDeploymentStrategyType, /*strategyType*/
+    )
+    d.ObjectMeta.Namespace = f.Namespace.Name
+    d.Spec.Template.Spec.Containers[0].ImagePullPolicy = apiv1.PullNever // Image must be loaded first
+    d.Spec.Template.Spec.ServiceAccountName = "vpa-recommender"
+    d.Spec.Template.Spec.Containers[0].Command = []string{"/recommender"}
+    d.Spec.Template.Spec.Containers[0].Args = flags
+
+    runAsNonRoot := true
+    var runAsUser int64 = 65534 // nobody
+    d.Spec.Template.Spec.SecurityContext = &apiv1.PodSecurityContext{
+        RunAsNonRoot: &runAsNonRoot,
+        RunAsUser:    &runAsUser,
+    }
+
+    // Same as deploy/recommender-deployment.yaml
+    d.Spec.Template.Spec.Containers[0].Resources = apiv1.ResourceRequirements{
+        Limits: apiv1.ResourceList{
+            apiv1.ResourceCPU:    resource.MustParse("200m"),
+            apiv1.ResourceMemory: resource.MustParse("1000Mi"),
+        },
+        Requests: apiv1.ResourceList{
+            apiv1.ResourceCPU:    resource.MustParse("50m"),
+            apiv1.ResourceMemory: resource.MustParse("500Mi"),
+        },
+    }
+
+    d.Spec.Template.Spec.Containers[0].Ports = []apiv1.ContainerPort{{
+        Name:          "prometheus",
+        ContainerPort: 8942,
+    }}
+
+    d.Spec.Template.Spec.Containers[0].LivenessProbe = &apiv1.Probe{
+        ProbeHandler: apiv1.ProbeHandler{
+            HTTPGet: &apiv1.HTTPGetAction{
+                Path:   "/health-check",
+                Port:   intstr.FromString("prometheus"),
+                Scheme: apiv1.URISchemeHTTP,
+            },
+        },
+        InitialDelaySeconds: 5,
+        PeriodSeconds:       10,
+        FailureThreshold:    3,
+    }
+    d.Spec.Template.Spec.Containers[0].ReadinessProbe = &apiv1.Probe{
+        ProbeHandler: apiv1.ProbeHandler{
+            HTTPGet: &apiv1.HTTPGetAction{
+                Path:   "/health-check",
+                Port:   intstr.FromString("prometheus"),
+                Scheme: apiv1.URISchemeHTTP,
+            },
+        },
+        PeriodSeconds:    10,
+        FailureThreshold: 3,
+    }
+
+    return d
+}
+
+// NewNHamstersDeployment creates a simple hamster deployment with n containers
+// for e2e test purposes.
+func NewNHamstersDeployment(f *framework.Framework, n int) *appsv1.Deployment {
+    if n < 1 {
+        panic("container count should be greater than 0")
+    }
+    d := framework_deployment.NewDeployment(
+        "hamster-deployment",                       /*deploymentName*/
+        DefaultHamsterReplicas,                     /*replicas*/
+        HamsterLabels,                              /*podLabels*/
+        GetHamsterContainerNameByIndex(0),          /*imageName*/
+        "registry.k8s.io/ubuntu-slim:0.14",         /*image*/
+        appsv1.RollingUpdateDeploymentStrategyType, /*strategyType*/
+    )
+    d.ObjectMeta.Namespace = f.Namespace.Name
+    d.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh"}
+    d.Spec.Template.Spec.Containers[0].Args = []string{"-c", "/usr/bin/yes >/dev/null"}
+    for i := 1; i < n; i++ {
+        d.Spec.Template.Spec.Containers = append(d.Spec.Template.Spec.Containers, d.Spec.Template.Spec.Containers[0])
+        d.Spec.Template.Spec.Containers[i].Name = GetHamsterContainerNameByIndex(i)
+    }
+    return d
+}
+
+// StartDeploymentPods starts a deployment and waits for it to complete.
+func StartDeploymentPods(f *framework.Framework, deployment *appsv1.Deployment) *apiv1.PodList {
+    // Apiserver watch can lag depending on cached object count and apiserver resource usage.
+    // We assume that watch can lag up to 5 seconds.
+    const apiserverWatchLag = 5 * time.Second
+    // In admission controller e2e tests a recommendation is created before the deployment.
+    // Creating the deployment with a size greater than 0 would create a race between
+    // information about the pods and information about the deployment reaching the
+    // admission controller. Any pods that get processed by the admission controller
+    // before it receives information about the deployment don't get a recommendation.
+    // To avoid this, create the deployment with size 0, then scale it up to the desired size.
+    desiredPodCount := *deployment.Spec.Replicas
+    zero := int32(0)
+    deployment.Spec.Replicas = &zero
+    c, ns := f.ClientSet, f.Namespace.Name
+    deployment, err := c.AppsV1().Deployments(ns).Create(context.TODO(), deployment, metav1.CreateOptions{})
+    gomega.Expect(err).NotTo(gomega.HaveOccurred(), "when creating deployment with size 0")
+
+    err = framework_deployment.WaitForDeploymentComplete(c, deployment)
+    gomega.Expect(err).NotTo(gomega.HaveOccurred(), "when waiting for empty deployment to create")
+    // If the admission controller receives a pod before its controller, it will not apply the recommendation and the test will fail.
+    // Wait after creating the deployment to ensure VPA knows about it, then scale up.
+    // Normally watch lag is not a problem in terms of correctness:
+    // - Mode "Auto": a created pod without assigned resources will be handled by the eviction loop.
+    // - Mode "Initial": calculating recommendations takes longer than the potential etcd lag.
+    // - Mode "Off": pods are not handled by the admission controller.
+    // In e2e admission controller tests we want to focus on these scenarios without considering watch lag.
+    // TODO(#2631): Remove sleep when issue is fixed.
+    time.Sleep(apiserverWatchLag)
+
+    scale := autoscaling.Scale{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      deployment.ObjectMeta.Name,
+            Namespace: deployment.ObjectMeta.Namespace,
+        },
+        Spec: autoscaling.ScaleSpec{
+            Replicas: desiredPodCount,
+        },
+    }
+    afterScale, err := c.AppsV1().Deployments(ns).UpdateScale(context.TODO(), deployment.Name, &scale, metav1.UpdateOptions{})
+    gomega.Expect(err).NotTo(gomega.HaveOccurred())
+    gomega.Expect(afterScale.Spec.Replicas).To(gomega.Equal(desiredPodCount), fmt.Sprintf("expected %d replicas after scaling", desiredPodCount))
+
+    // After scaling the deployment we need to retrieve the current version with the updated replica count.
+    deployment, err = c.AppsV1().Deployments(ns).Get(context.TODO(), deployment.Name, metav1.GetOptions{})
+    gomega.Expect(err).NotTo(gomega.HaveOccurred(), "when getting scaled deployment")
+    err = framework_deployment.WaitForDeploymentComplete(c, deployment)
+    gomega.Expect(err).NotTo(gomega.HaveOccurred(), "when waiting for deployment to resize")
+
+    podList, err := framework_deployment.GetPodsForDeployment(context.TODO(), c, deployment)
+    gomega.Expect(err).NotTo(gomega.HaveOccurred(), "when listing pods after deployment resize")
+    return podList
+}
+
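A note on the design: scaling through the scale subresource, rather than updating the whole Deployment object, touches only spec.replicas and so likely avoids resourceVersion conflicts with the object created just above.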
+// WaitForRecommendationPresent polls the VPA object until recommendations are not empty. Returns
+// the polled VPA object. On timeout returns an error.
+func WaitForRecommendationPresent(c vpa_clientset.Interface, vpa *vpa_types.VerticalPodAutoscaler) (*vpa_types.VerticalPodAutoscaler, error) {
+    return WaitForVPAMatch(c, vpa, func(vpa *vpa_types.VerticalPodAutoscaler) bool {
+        return vpa.Status.Recommendation != nil && len(vpa.Status.Recommendation.ContainerRecommendations) != 0
+    })
+}
+
+// WaitForVPAMatch polls the VPA object until the match function returns true. Returns
+// the polled VPA object. On timeout returns an error.
+func WaitForVPAMatch(c vpa_clientset.Interface, vpa *vpa_types.VerticalPodAutoscaler, match func(vpa *vpa_types.VerticalPodAutoscaler) bool) (*vpa_types.VerticalPodAutoscaler, error) {
+    var polledVpa *vpa_types.VerticalPodAutoscaler
+    err := wait.PollUntilContextTimeout(context.Background(), PollInterval, PollTimeout, true, func(ctx context.Context) (done bool, err error) {
+        polledVpa, err = c.AutoscalingV1().VerticalPodAutoscalers(vpa.Namespace).Get(context.TODO(), vpa.Name, metav1.GetOptions{})
+        if err != nil {
+            return false, err
+        }
+
+        if match(polledVpa) {
+            return true, nil
+        }
+
+        return false, nil
+    })
+
+    if err != nil {
+        return nil, fmt.Errorf("error waiting for VPA %v to match: %v", vpa.Name, err)
+    }
+    return polledVpa, nil
+}
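WaitForVPAMatch is deliberately generic over the match predicate. A sketch of a hypothetical helper built on top of it (not part of this commit) that waits for a VPA condition to become true:

// WaitForVPACondition is a hypothetical helper; not part of this commit.
func WaitForVPACondition(c vpa_clientset.Interface, vpa *vpa_types.VerticalPodAutoscaler,
    condType vpa_types.VerticalPodAutoscalerConditionType) (*vpa_types.VerticalPodAutoscaler, error) {
    return WaitForVPAMatch(c, vpa, func(polled *vpa_types.VerticalPodAutoscaler) bool {
        for _, cond := range polled.Status.Conditions {
            // Condition types include e.g. RecommendationProvided in the v1 API.
            if cond.Type == condType && cond.Status == apiv1.ConditionTrue {
                return true
            }
        }
        return false
    })
}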
@@ -58,17 +58,17 @@ var _ = ActuationSuiteE2eDescribe("Actuation", func() {
 
     ginkgo.It("still applies recommendations on restart when update mode is InPlaceOrRecreate", func() {
         ginkgo.By("Setting up a hamster deployment")
-        SetupHamsterDeployment(f, "100m", "100Mi", defaultHamsterReplicas)
+        SetupHamsterDeployment(f, "100m", "100Mi", utils.DefaultHamsterReplicas)
         podList, err := GetHamsterPods(f)
         gomega.Expect(err).NotTo(gomega.HaveOccurred())
         podSet := MakePodSet(podList)
 
         ginkgo.By("Setting up a VPA CRD in mode InPlaceOrRecreate")
-        containerName := GetHamsterContainerNameByIndex(0)
+        containerName := utils.GetHamsterContainerNameByIndex(0)
         vpaCRD := test.VerticalPodAutoscaler().
             WithName("hamster-vpa").
             WithNamespace(f.Namespace.Name).
-            WithTargetRef(hamsterTargetRef).
+            WithTargetRef(utils.HamsterTargetRef).
             WithUpdateMode(vpa_types.UpdateModeInPlaceOrRecreate).
             WithContainer(containerName).
             AppendRecommendation(
@@ -80,7 +80,7 @@ var _ = ActuationSuiteE2eDescribe("Actuation", func() {
                 GetContainerResources()).
             Get()
 
-        InstallVPA(f, vpaCRD)
+        utils.InstallVPA(f, vpaCRD)
         updatedCPURequest := ParseQuantityOrDie("200m")
 
         ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String()))
@@ -98,13 +98,13 @@ var _ = ActuationSuiteE2eDescribe("Actuation", func() {
                 foundUpdated += 1
             }
         }
-        gomega.Expect(foundUpdated).To(gomega.Equal(defaultHamsterReplicas))
+        gomega.Expect(foundUpdated).To(gomega.Equal(utils.DefaultHamsterReplicas))
     })
 
     // TODO: add e2e test to verify metrics are getting updated
     ginkgo.It("applies in-place updates to all containers when update mode is InPlaceOrRecreate", func() {
         ginkgo.By("Setting up a hamster deployment")
-        d := NewNHamstersDeployment(f, 2 /*number of containers*/)
+        d := utils.NewNHamstersDeployment(f, 2 /*number of containers*/)
         d.Spec.Template.Spec.Containers[0].Resources.Requests = apiv1.ResourceList{
             apiv1.ResourceCPU:    ParseQuantityOrDie("100m"),
             apiv1.ResourceMemory: ParseQuantityOrDie("100Mi"),
@@ -115,9 +115,9 @@ var _ = ActuationSuiteE2eDescribe("Actuation", func() {
         }
         targetCPU := "200m"
         targetMemory := "200Mi"
-        _ = startDeploymentPods(f, d) // 3 replicas
-        container1Name := GetHamsterContainerNameByIndex(0)
-        container2Name := GetHamsterContainerNameByIndex(1)
+        _ = utils.StartDeploymentPods(f, d) // 3 replicas
+        container1Name := utils.GetHamsterContainerNameByIndex(0)
+        container2Name := utils.GetHamsterContainerNameByIndex(1)
         podList, err := GetHamsterPods(f)
         gomega.Expect(err).NotTo(gomega.HaveOccurred())
 
@@ -125,7 +125,7 @@ var _ = ActuationSuiteE2eDescribe("Actuation", func() {
         vpaCRD := test.VerticalPodAutoscaler().
             WithName("hamster-vpa").
             WithNamespace(f.Namespace.Name).
-            WithTargetRef(hamsterTargetRef).
+            WithTargetRef(utils.HamsterTargetRef).
             WithContainer(container1Name).
             WithContainer(container2Name).
             WithUpdateMode(vpa_types.UpdateModeInPlaceOrRecreate).
@@ -145,7 +145,7 @@ var _ = ActuationSuiteE2eDescribe("Actuation", func() {
                 GetContainerResources()).
             Get()
 
-        InstallVPA(f, vpaCRD)
+        utils.InstallVPA(f, vpaCRD)
 
         ginkgo.By("Checking that resources were modified due to in-place update, not due to evictions")
         err = WaitForPodsUpdatedWithoutEviction(f, podList)
@@ -183,12 +183,12 @@ var _ = ActuationSuiteE2eDescribe("Actuation", func() {
         gomega.Expect(err).NotTo(gomega.HaveOccurred())
 
         ginkgo.By("Setting up a VPA CRD")
-        containerName := GetHamsterContainerNameByIndex(0)
+        containerName := utils.GetHamsterContainerNameByIndex(0)
         updatedCPU := "999" // infeasible target
         vpaCRD := test.VerticalPodAutoscaler().
             WithName("hamster-vpa").
             WithNamespace(f.Namespace.Name).
-            WithTargetRef(hamsterTargetRef).
+            WithTargetRef(utils.HamsterTargetRef).
             WithContainer(containerName).
             WithUpdateMode(vpa_types.UpdateModeInPlaceOrRecreate).
             AppendRecommendation(
@@ -200,7 +200,7 @@ var _ = ActuationSuiteE2eDescribe("Actuation", func() {
                 GetContainerResources()).
             Get()
 
-        InstallVPA(f, vpaCRD)
+        utils.InstallVPA(f, vpaCRD)
 
         ginkgo.By("Waiting for pods to be evicted")
         err = WaitForPodsEvicted(f, podList)
@@ -215,7 +215,7 @@ var _ = ActuationSuiteE2eDescribe("Actuation", func() {
         gomega.Expect(err).NotTo(gomega.HaveOccurred())
 
         ginkgo.By("Setting up a VPA CRD")
-        containerName := GetHamsterContainerNameByIndex(0)
+        containerName := utils.GetHamsterContainerNameByIndex(0)
 
         // we can force deferred resize by setting the target CPU to the allocatable CPU of the node
         // it will be close enough to the node capacity, such that the kubelet defers instead of marking it infeasible
@@ -228,7 +228,7 @@ var _ = ActuationSuiteE2eDescribe("Actuation", func() {
         vpaCRD := test.VerticalPodAutoscaler().
             WithName("hamster-vpa").
             WithNamespace(f.Namespace.Name).
-            WithTargetRef(hamsterTargetRef).
+            WithTargetRef(utils.HamsterTargetRef).
             WithContainer(containerName).
             WithUpdateMode(vpa_types.UpdateModeInPlaceOrRecreate).
             AppendRecommendation(
@@ -240,7 +240,7 @@ var _ = ActuationSuiteE2eDescribe("Actuation", func() {
                 GetContainerResources()).
             Get()
 
-        InstallVPA(f, vpaCRD)
+        utils.InstallVPA(f, vpaCRD)
 
         ginkgo.By("Waiting for status to be Deferred")
         gomega.Eventually(func() error {
@@ -283,14 +283,14 @@ var _ = ActuationSuiteE2eDescribe("Actuation", func() {
     ginkgo.It("stops when pods get pending", func() {
 
         ginkgo.By("Setting up a hamster deployment")
-        d := SetupHamsterDeployment(f, "100m", "100Mi", defaultHamsterReplicas)
+        d := SetupHamsterDeployment(f, "100m", "100Mi", utils.DefaultHamsterReplicas)
 
         ginkgo.By("Setting up a VPA CRD with ridiculous request")
-        containerName := GetHamsterContainerNameByIndex(0)
+        containerName := utils.GetHamsterContainerNameByIndex(0)
         vpaCRD := test.VerticalPodAutoscaler().
             WithName("hamster-vpa").
             WithNamespace(f.Namespace.Name).
-            WithTargetRef(hamsterTargetRef).
+            WithTargetRef(utils.HamsterTargetRef).
             WithContainer(containerName).
             AppendRecommendation(
                 test.Recommendation().
@@ -301,7 +301,7 @@ var _ = ActuationSuiteE2eDescribe("Actuation", func() {
                 GetContainerResources()).
             Get()
 
-        InstallVPA(f, vpaCRD)
+        utils.InstallVPA(f, vpaCRD)
 
         ginkgo.By("Waiting for pods to be restarted and stuck pending")
         err := assertPodsPendingForDuration(f.ClientSet, d, 1, 2*time.Minute)
@@ -311,18 +311,18 @@ var _ = ActuationSuiteE2eDescribe("Actuation", func() {
 
     ginkgo.It("never applies recommendations when update mode is Off", func() {
         ginkgo.By("Setting up a hamster deployment")
-        d := SetupHamsterDeployment(f, "100m", "100Mi", defaultHamsterReplicas)
+        d := SetupHamsterDeployment(f, "100m", "100Mi", utils.DefaultHamsterReplicas)
         cpuRequest := getCPURequest(d.Spec.Template.Spec)
         podList, err := GetHamsterPods(f)
         gomega.Expect(err).NotTo(gomega.HaveOccurred())
         podSet := MakePodSet(podList)
 
         ginkgo.By("Setting up a VPA CRD in mode Off")
-        containerName := GetHamsterContainerNameByIndex(0)
+        containerName := utils.GetHamsterContainerNameByIndex(0)
         vpaCRD := test.VerticalPodAutoscaler().
             WithName("hamster-vpa").
             WithNamespace(f.Namespace.Name).
-            WithTargetRef(hamsterTargetRef).
+            WithTargetRef(utils.HamsterTargetRef).
             WithUpdateMode(vpa_types.UpdateModeOff).
             WithContainer(containerName).
             AppendRecommendation(
@@ -334,7 +334,7 @@ var _ = ActuationSuiteE2eDescribe("Actuation", func() {
                 GetContainerResources()).
             Get()
 
-        InstallVPA(f, vpaCRD)
+        utils.InstallVPA(f, vpaCRD)
 
         ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String()))
         CheckNoPodsEvicted(f, podSet)
@@ -350,17 +350,17 @@ var _ = ActuationSuiteE2eDescribe("Actuation", func() {
 
     ginkgo.It("applies recommendations only on restart when update mode is Initial", func() {
         ginkgo.By("Setting up a hamster deployment")
-        SetupHamsterDeployment(f, "100m", "100Mi", defaultHamsterReplicas)
+        SetupHamsterDeployment(f, "100m", "100Mi", utils.DefaultHamsterReplicas)
         podList, err := GetHamsterPods(f)
         gomega.Expect(err).NotTo(gomega.HaveOccurred())
         podSet := MakePodSet(podList)
 
         ginkgo.By("Setting up a VPA CRD in mode Initial")
-        containerName := GetHamsterContainerNameByIndex(0)
+        containerName := utils.GetHamsterContainerNameByIndex(0)
         vpaCRD := test.VerticalPodAutoscaler().
             WithName("hamster-vpa").
             WithNamespace(f.Namespace.Name).
-            WithTargetRef(hamsterTargetRef).
+            WithTargetRef(utils.HamsterTargetRef).
             WithUpdateMode(vpa_types.UpdateModeInitial).
             WithContainer(containerName).
             AppendRecommendation(
@@ -372,7 +372,7 @@ var _ = ActuationSuiteE2eDescribe("Actuation", func() {
                 GetContainerResources()).
             Get()
 
-        InstallVPA(f, vpaCRD)
+        utils.InstallVPA(f, vpaCRD)
         updatedCPURequest := ParseQuantityOrDie("200m")
 
         ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String()))
@@ -468,11 +468,11 @@ var _ = ActuationSuiteE2eDescribe("Actuation", func() {
         pdb := setupPDB(f, "hamster-pdb", 0 /* maxUnavailable */)
 
         ginkgo.By("Setting up a VPA CRD")
-        containerName := GetHamsterContainerNameByIndex(0)
+        containerName := utils.GetHamsterContainerNameByIndex(0)
         vpaCRD := test.VerticalPodAutoscaler().
             WithName("hamster-vpa").
             WithNamespace(f.Namespace.Name).
-            WithTargetRef(hamsterTargetRef).
+            WithTargetRef(utils.HamsterTargetRef).
             WithContainer(containerName).
             AppendRecommendation(
                 test.Recommendation().
@@ -483,7 +483,7 @@ var _ = ActuationSuiteE2eDescribe("Actuation", func() {
                 GetContainerResources()).
             Get()
 
-        InstallVPA(f, vpaCRD)
+        utils.InstallVPA(f, vpaCRD)
 
         ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String()))
         CheckNoPodsEvicted(f, podSet)
@@ -513,14 +513,14 @@ var _ = ActuationSuiteE2eDescribe("Actuation", func() {
         d := NewHamsterDeploymentWithResourcesAndLimits(f,
             ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/
             ParseQuantityOrDie("300m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)
-        podList := startDeploymentPods(f, d)
+        podList := utils.StartDeploymentPods(f, d)
 
         ginkgo.By("Setting up a VPA CRD")
-        containerName := GetHamsterContainerNameByIndex(0)
+        containerName := utils.GetHamsterContainerNameByIndex(0)
         vpaCRD := test.VerticalPodAutoscaler().
             WithName("hamster-vpa").
             WithNamespace(f.Namespace.Name).
-            WithTargetRef(hamsterTargetRef).
+            WithTargetRef(utils.HamsterTargetRef).
             WithContainer(containerName).
             AppendRecommendation(
                 test.Recommendation().
@@ -531,7 +531,7 @@ var _ = ActuationSuiteE2eDescribe("Actuation", func() {
                 GetContainerResources()).
             Get()
 
-        InstallVPA(f, vpaCRD)
+        utils.InstallVPA(f, vpaCRD)
 
         // Max CPU limit is 300m and ratio is 3., so max request is 100m, while
         // recommendation is 200m
@@ -547,14 +547,14 @@ var _ = ActuationSuiteE2eDescribe("Actuation", func() {
         d := NewHamsterDeploymentWithResourcesAndLimits(f,
             ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/
            ParseQuantityOrDie("300m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)
-        podList := startDeploymentPods(f, d)
+        podList := utils.StartDeploymentPods(f, d)
 
         ginkgo.By("Setting up a VPA CRD")
-        containerName := GetHamsterContainerNameByIndex(0)
+        containerName := utils.GetHamsterContainerNameByIndex(0)
         vpaCRD := test.VerticalPodAutoscaler().
             WithName("hamster-vpa").
             WithNamespace(f.Namespace.Name).
-            WithTargetRef(hamsterTargetRef).
+            WithTargetRef(utils.HamsterTargetRef).
             WithContainer(containerName).
             AppendRecommendation(
                 test.Recommendation().
@@ -565,7 +565,7 @@ var _ = ActuationSuiteE2eDescribe("Actuation", func() {
                 GetContainerResources()).
             Get()
 
-        InstallVPA(f, vpaCRD)
+        utils.InstallVPA(f, vpaCRD)
 
         // Min CPU from limit range is 100m and ratio is 3. Min applies both to limit and request so min
         // request is 100m request and 300m limit
@@ -583,15 +583,15 @@ var _ = ActuationSuiteE2eDescribe("Actuation", func() {
             ParseQuantityOrDie("300m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)
         d.Spec.Template.Spec.Containers = append(d.Spec.Template.Spec.Containers, d.Spec.Template.Spec.Containers[0])
         d.Spec.Template.Spec.Containers[1].Name = "hamster2"
-        podList := startDeploymentPods(f, d)
+        podList := utils.StartDeploymentPods(f, d)
 
         ginkgo.By("Setting up a VPA CRD")
-        container1Name := GetHamsterContainerNameByIndex(0)
-        container2Name := GetHamsterContainerNameByIndex(1)
+        container1Name := utils.GetHamsterContainerNameByIndex(0)
+        container2Name := utils.GetHamsterContainerNameByIndex(1)
         vpaCRD := test.VerticalPodAutoscaler().
             WithName("hamster-vpa").
             WithNamespace(f.Namespace.Name).
-            WithTargetRef(hamsterTargetRef).
+            WithTargetRef(utils.HamsterTargetRef).
             WithContainer(container1Name).
             AppendRecommendation(
                 test.Recommendation().
@@ -610,7 +610,7 @@ var _ = ActuationSuiteE2eDescribe("Actuation", func() {
                 GetContainerResources()).
             Get()
 
-        InstallVPA(f, vpaCRD)
+        utils.InstallVPA(f, vpaCRD)
 
         // Max CPU limit is 600m per pod, 300m per container and ratio is 3., so max request is 100m,
         // while recommendation is 200m
@@ -629,14 +629,14 @@ var _ = ActuationSuiteE2eDescribe("Actuation", func() {
         d.Spec.Template.Spec.Containers = append(d.Spec.Template.Spec.Containers, d.Spec.Template.Spec.Containers[0])
         container2Name := "hamster2"
         d.Spec.Template.Spec.Containers[1].Name = container2Name
-        podList := startDeploymentPods(f, d)
+        podList := utils.StartDeploymentPods(f, d)
 
         ginkgo.By("Setting up a VPA CRD")
-        container1Name := GetHamsterContainerNameByIndex(0)
+        container1Name := utils.GetHamsterContainerNameByIndex(0)
         vpaCRD := test.VerticalPodAutoscaler().
             WithName("hamster-vpa").
             WithNamespace(f.Namespace.Name).
-            WithTargetRef(hamsterTargetRef).
+            WithTargetRef(utils.HamsterTargetRef).
             WithContainer(container1Name).
             AppendRecommendation(
                 test.Recommendation().
@@ -655,7 +655,7 @@ var _ = ActuationSuiteE2eDescribe("Actuation", func() {
                 GetContainerResources()).
             Get()
 
-        InstallVPA(f, vpaCRD)
+        utils.InstallVPA(f, vpaCRD)
 
         // Min CPU from limit range is 200m per pod, 100m per container and ratio is 3. Min applies both
         // to limit and request so min request is 100m request and 300m limit
@@ -696,12 +696,12 @@ var _ = ActuationSuiteE2eDescribe("Actuation", func() {
         defer webhookCleanup()
 
         ginkgo.By("Setting up a hamster vpa")
-        container1Name := GetHamsterContainerNameByIndex(0)
-        container2Name := GetHamsterContainerNameByIndex(1)
+        container1Name := utils.GetHamsterContainerNameByIndex(0)
+        container2Name := utils.GetHamsterContainerNameByIndex(1)
         vpaCRD := test.VerticalPodAutoscaler().
             WithName("hamster-vpa").
             WithNamespace(f.Namespace.Name).
-            WithTargetRef(hamsterTargetRef).
+            WithTargetRef(utils.HamsterTargetRef).
             WithContainer(container1Name).
             AppendRecommendation(
                 test.Recommendation().
@@ -720,12 +720,12 @@ var _ = ActuationSuiteE2eDescribe("Actuation", func() {
                 GetContainerResources()).
             Get()
 
-        InstallVPA(f, vpaCRD)
+        utils.InstallVPA(f, vpaCRD)
 
         ginkgo.By("Setting up a hamster deployment")
 
         d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m"), ParseQuantityOrDie("100Mi"))
-        podList := startDeploymentPods(f, d)
+        podList := utils.StartDeploymentPods(f, d)
         for _, pod := range podList.Items {
             observedContainers, ok := pod.GetAnnotations()[annotations.VpaObservedContainersLabel]
             gomega.Expect(ok).To(gomega.Equal(true))
@@ -760,7 +760,7 @@ func assertPodsPendingForDuration(c clientset.Interface, deployment *appsv1.Depl
 
     pendingPods := make(map[string]time.Time)
 
-    err := wait.PollUntilContextTimeout(context.Background(), pollInterval, pollTimeout, true, func(ctx context.Context) (done bool, err error) {
+    err := wait.PollUntilContextTimeout(context.Background(), utils.PollInterval, utils.PollTimeout, true, func(ctx context.Context) (done bool, err error) {
         currentPodList, err := framework_deployment.GetPodsForDeployment(ctx, c, deployment)
         if err != nil {
             return false, err
@@ -815,12 +815,12 @@ func assertPodsPendingForDuration(c clientset.Interface, deployment *appsv1.Depl
 
 func testEvictsReplicatedPods(f *framework.Framework, controller *autoscaling.CrossVersionObjectReference) {
     ginkgo.By(fmt.Sprintf("Setting up a hamster %v", controller.Kind))
-    setupHamsterController(f, controller.Kind, "100m", "100Mi", defaultHamsterReplicas)
+    setupHamsterController(f, controller.Kind, "100m", "100Mi", utils.DefaultHamsterReplicas)
     podList, err := GetHamsterPods(f)
     gomega.Expect(err).NotTo(gomega.HaveOccurred())
 
     ginkgo.By("Setting up a VPA CRD")
-    containerName := GetHamsterContainerNameByIndex(0)
+    containerName := utils.GetHamsterContainerNameByIndex(0)
     vpaCRD := test.VerticalPodAutoscaler().
         WithName("hamster-vpa").
         WithNamespace(f.Namespace.Name).
@@ -835,7 +835,7 @@ func testEvictsReplicatedPods(f *framework.Framework, controller *autoscaling.Cr
             GetContainerResources()).
         Get()
 
-    InstallVPA(f, vpaCRD)
+    utils.InstallVPA(f, vpaCRD)
 
     ginkgo.By("Waiting for pods to be evicted")
     err = WaitForPodsEvicted(f, podList)
@@ -849,7 +849,7 @@ func testDoesNotEvictSingletonPodByDefault(f *framework.Framework, controller *a
     gomega.Expect(err).NotTo(gomega.HaveOccurred())
 
     ginkgo.By("Setting up a VPA CRD")
-    containerName := GetHamsterContainerNameByIndex(0)
+    containerName := utils.GetHamsterContainerNameByIndex(0)
     vpaCRD := test.VerticalPodAutoscaler().
         WithName("hamster-vpa").
         WithNamespace(f.Namespace.Name).
@@ -864,7 +864,7 @@ func testDoesNotEvictSingletonPodByDefault(f *framework.Framework, controller *a
             GetContainerResources()).
         Get()
 
-    InstallVPA(f, vpaCRD)
+    utils.InstallVPA(f, vpaCRD)
 
     // No eviction is expected with the default settings of VPA object
     ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String()))
@@ -879,7 +879,7 @@ func testEvictsSingletonPodWhenConfigured(f *framework.Framework, controller *au
 
     // Prepare the VPA to allow single-Pod eviction.
     ginkgo.By("Setting up a VPA CRD")
-    containerName := GetHamsterContainerNameByIndex(0)
+    containerName := utils.GetHamsterContainerNameByIndex(0)
     vpaCRD := test.VerticalPodAutoscaler().
         WithName("hamster-vpa").
         WithNamespace(f.Namespace.Name).
@@ -895,7 +895,7 @@ func testEvictsSingletonPodWhenConfigured(f *framework.Framework, controller *au
             GetContainerResources()).
         Get()
 
-    InstallVPA(f, vpaCRD)
+    utils.InstallVPA(f, vpaCRD)
 
     ginkgo.By("Waiting for pods to be evicted")
     err = WaitForPodsEvicted(f, podList)
@@ -927,7 +927,7 @@ func setupHamsterController(f *framework.Framework, controllerKind, cpu, memory
 
 func setupHamsterReplicationController(f *framework.Framework, cpu, memory string, replicas int32) {
     hamsterContainer := SetupHamsterContainer(cpu, memory)
-    rc := framework_rc.ByNameContainer("hamster-rc", replicas, hamsterLabels, hamsterContainer, nil)
+    rc := framework_rc.ByNameContainer("hamster-rc", replicas, utils.HamsterLabels, hamsterContainer, nil)
 
     rc.Namespace = f.Namespace.Name
     err := testutils.CreateRCWithRetries(f.ClientSet, f.Namespace.Name, rc)
@@ -937,7 +937,7 @@ func setupHamsterReplicationController(f *framework.Framework, cpu, memory strin
 }
 
 func waitForRCPodsRunning(f *framework.Framework, rc *apiv1.ReplicationController) error {
-    return wait.PollUntilContextTimeout(context.Background(), pollInterval, pollTimeout, true, func(ctx context.Context) (done bool, err error) {
+    return wait.PollUntilContextTimeout(context.Background(), utils.PollInterval, utils.PollTimeout, true, func(ctx context.Context) (done bool, err error) {
         podList, err := GetHamsterPods(f)
         if err != nil {
             framework.Logf("Error listing pods, retrying: %v", err)
@@ -957,7 +957,7 @@ func setupHamsterJob(f *framework.Framework, cpu, memory string, replicas int32)
     job := framework_job.NewTestJob("notTerminate", "hamster-job", apiv1.RestartPolicyOnFailure,
         replicas, replicas, nil, 10)
     job.Spec.Template.Spec.Containers[0] = SetupHamsterContainer(cpu, memory)
-    for label, value := range hamsterLabels {
+    for label, value := range utils.HamsterLabels {
         job.Spec.Template.Labels[label] = value
     }
     _, err := framework_job.CreateJob(context.TODO(), f.ClientSet, f.Namespace.Name, job)
@@ -967,7 +967,7 @@ func setupHamsterJob(f *framework.Framework, cpu, memory string, replicas int32)
 }
 
 func setupHamsterRS(f *framework.Framework, cpu, memory string, replicas int32) {
-    rs := newReplicaSet("hamster-rs", f.Namespace.Name, replicas, hamsterLabels, "", "")
+    rs := newReplicaSet("hamster-rs", f.Namespace.Name, replicas, utils.HamsterLabels, "", "")
     rs.Spec.Template.Spec.Containers[0] = SetupHamsterContainer(cpu, memory)
     err := createReplicaSetWithRetries(f.ClientSet, f.Namespace.Name, rs)
     gomega.Expect(err).NotTo(gomega.HaveOccurred())
@@ -977,7 +977,7 @@ func setupHamsterRS(f *framework.Framework, cpu, memory string, replicas int32)
 
 func setupHamsterStateful(f *framework.Framework, cpu, memory string, replicas int32) {
     stateful := framework_ss.NewStatefulSet("hamster-stateful", f.Namespace.Name,
-        "hamster-service", replicas, nil, nil, hamsterLabels)
+        "hamster-service", replicas, nil, nil, utils.HamsterLabels)
 
     stateful.Spec.Template.Spec.Containers[0] = SetupHamsterContainer(cpu, memory)
     err := createStatefulSetSetWithRetries(f.ClientSet, f.Namespace.Name, stateful)
@@ -994,7 +994,7 @@ func setupPDB(f *framework.Framework, name string, maxUnavailable int) *policyv1
         Spec: policyv1.PodDisruptionBudgetSpec{
             MaxUnavailable: &maxUnavailableIntstr,
             Selector: &metav1.LabelSelector{
-                MatchLabels: hamsterLabels,
+                MatchLabels: utils.HamsterLabels,
             },
         },
     }
@@ -23,14 +23,12 @@ import (
     "strings"
     "time"
 
-    appsv1 "k8s.io/api/apps/v1"
-    autoscalingv1 "k8s.io/api/autoscaling/v1"
     apiv1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/autoscaler/vertical-pod-autoscaler/e2e/utils"
     vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1"
     "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/test"
     "k8s.io/kubernetes/test/e2e/framework"
-    framework_deployment "k8s.io/kubernetes/test/e2e/framework/deployment"
     podsecurity "k8s.io/pod-security-admission/api"
 
     ginkgo "github.com/onsi/ginkgo/v2"
@@ -54,11 +52,11 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", ginkgo.Label("FG:
         d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)
 
         ginkgo.By("Setting up a VPA CRD")
-        containerName := GetHamsterContainerNameByIndex(0)
+        containerName := utils.GetHamsterContainerNameByIndex(0)
         vpaCRD := test.VerticalPodAutoscaler().
             WithName("hamster-vpa").
             WithNamespace(f.Namespace.Name).
-            WithTargetRef(hamsterTargetRef).
+            WithTargetRef(utils.HamsterTargetRef).
             WithContainer(containerName).
             WithUpdateMode(vpa_types.UpdateModeInPlaceOrRecreate).
             AppendRecommendation(
@@ -70,10 +68,10 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", ginkgo.Label("FG:
                 GetContainerResources()).
             Get()
 
-        InstallVPA(f, vpaCRD)
+        utils.InstallVPA(f, vpaCRD)
 
         ginkgo.By("Setting up a hamster deployment")
-        podList := startDeploymentPods(f, d)
+        podList := utils.StartDeploymentPods(f, d)
 
         // Originally Pods had 100m CPU, 100Mi of memory, but admission controller
         // should change it to recommended 250m CPU and 200Mi of memory.
@@ -87,11 +85,11 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", ginkgo.Label("FG:
         d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)
 
         ginkgo.By("Setting up a VPA CRD")
-        containerName := GetHamsterContainerNameByIndex(0)
+        containerName := utils.GetHamsterContainerNameByIndex(0)
         vpaCRD := test.VerticalPodAutoscaler().
             WithName("hamster-vpa").
             WithNamespace(f.Namespace.Name).
-            WithTargetRef(hamsterTargetRef).
+            WithTargetRef(utils.HamsterTargetRef).
             WithContainer(containerName).
             AppendRecommendation(
                 test.Recommendation().
@@ -102,10 +100,10 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", ginkgo.Label("FG:
                 GetContainerResources()).
             Get()
 
-        InstallVPA(f, vpaCRD)
+        utils.InstallVPA(f, vpaCRD)
 
         ginkgo.By("Setting up a hamster deployment")
-        podList := startDeploymentPods(f, d)
+        podList := utils.StartDeploymentPods(f, d)
 
         // Originally Pods had 100m CPU, 100Mi of memory, but admission controller
         // should change it to recommended 250m CPU and 200Mi of memory.
@@ -120,11 +118,11 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", ginkgo.Label("FG:
 
         ginkgo.By("Setting up a VPA CRD")
         removedContainerName := "removed"
-        container1Name := GetHamsterContainerNameByIndex(0)
+        container1Name := utils.GetHamsterContainerNameByIndex(0)
         vpaCRD := test.VerticalPodAutoscaler().
             WithName("hamster-vpa").
             WithNamespace(f.Namespace.Name).
-            WithTargetRef(hamsterTargetRef).
+            WithTargetRef(utils.HamsterTargetRef).
             WithContainer(removedContainerName).
             AppendRecommendation(
                 test.Recommendation().
@@ -143,10 +141,10 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", ginkgo.Label("FG:
                 GetContainerResources()).
             Get()
 
-        InstallVPA(f, vpaCRD)
+        utils.InstallVPA(f, vpaCRD)
 
         ginkgo.By("Setting up a hamster deployment")
-        podList := startDeploymentPods(f, d)
+        podList := utils.StartDeploymentPods(f, d)
 
         // Originally Pods had 100m CPU, 100Mi of memory, but admission controller
         // should change it to recommended 250m CPU and 200Mi of memory.
@@ -164,7 +162,7 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", ginkgo.Label("FG:
         vpaCRD := test.VerticalPodAutoscaler().
             WithName("hamster-vpa").
             WithNamespace(f.Namespace.Name).
-            WithTargetRef(hamsterTargetRef).
+            WithTargetRef(utils.HamsterTargetRef).
             WithContainer(removedContainerName).
             AppendRecommendation(
                 test.Recommendation().
@@ -175,10 +173,10 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", ginkgo.Label("FG:
                 GetContainerResources()).
             Get()
 
-        InstallVPA(f, vpaCRD)
+        utils.InstallVPA(f, vpaCRD)
 
         ginkgo.By("Setting up a hamster deployment")
-        podList := startDeploymentPods(f, d)
+        podList := utils.StartDeploymentPods(f, d)
 
         // Originally Pods had 100m CPU, 100Mi of memory, but admission controller
         // should change it to recommended 250m CPU and 200Mi of memory.
@@ -189,7 +187,7 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", ginkgo.Label("FG:
     })
 
     ginkgo.It("starts pod with recommendation when one container has a recommendation and one other one doesn't", func() {
-        d := NewNHamstersDeployment(f, 2)
+        d := utils.NewNHamstersDeployment(f, 2)
         d.Spec.Template.Spec.Containers[0].Resources.Requests = apiv1.ResourceList{
             apiv1.ResourceCPU:    ParseQuantityOrDie("100m"),
             apiv1.ResourceMemory: ParseQuantityOrDie("100Mi"),
@@ -200,11 +198,11 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", ginkgo.Label("FG:
         }
         framework.Logf("Created hamster deployment %v", d)
         ginkgo.By("Setting up a VPA CRD")
-        containerName := GetHamsterContainerNameByIndex(0)
+        containerName := utils.GetHamsterContainerNameByIndex(0)
         vpaCRD := test.VerticalPodAutoscaler().
             WithName("hamster-vpa").
             WithNamespace(f.Namespace.Name).
-            WithTargetRef(hamsterTargetRef).
+            WithTargetRef(utils.HamsterTargetRef).
             WithContainer(containerName).
             AppendRecommendation(
                 test.Recommendation().
@@ -215,10 +213,10 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", ginkgo.Label("FG:
                 GetContainerResources()).
             Get()
 
-        InstallVPA(f, vpaCRD)
+        utils.InstallVPA(f, vpaCRD)
 
         ginkgo.By("Setting up a hamster deployment")
-        podList := startDeploymentPods(f, d)
+        podList := utils.StartDeploymentPods(f, d)
 
         // Originally Pods had 100m CPU, 100Mi of memory, but admission controller
         // should change it to recommended 250m CPU and 200Mi of memory.
@@ -235,12 +233,12 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", ginkgo.Label("FG:
         InstallLimitRangeWithMax(f, "300m", "1Gi", apiv1.LimitTypeContainer)
 
         ginkgo.By("Setting up a VPA CRD")
-        container1Name := GetHamsterContainerNameByIndex(0)
-        container2Name := GetHamsterContainerNameByIndex(1)
+        container1Name := utils.GetHamsterContainerNameByIndex(0)
+        container2Name := utils.GetHamsterContainerNameByIndex(1)
         vpaCRD := test.VerticalPodAutoscaler().
             WithName("hamster-vpa").
             WithNamespace(f.Namespace.Name).
-            WithTargetRef(hamsterTargetRef).
+            WithTargetRef(utils.HamsterTargetRef).
             WithContainer(container1Name).
             AppendRecommendation(
                 test.Recommendation().
@@ -259,10 +257,10 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", ginkgo.Label("FG:
                 GetContainerResources()).
             Get()
 
-        InstallVPA(f, vpaCRD)
+        utils.InstallVPA(f, vpaCRD)
 
         ginkgo.By("Setting up a hamster deployment")
-        podList := startDeploymentPods(f, d)
+        podList := utils.StartDeploymentPods(f, d)
 
         // Originally Pods had 100m CPU, 100Mi of memory, but admission controller
         // should change it to recommended 250m CPU and 200Mi of memory.
@@ -279,11 +277,11 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", ginkgo.Label("FG:
         InstallLimitRangeWithMax(f, "300m", "1Gi", apiv1.LimitTypeContainer)
 
         ginkgo.By("Setting up a VPA CRD")
-        containerName := GetHamsterContainerNameByIndex(0)
+        containerName := utils.GetHamsterContainerNameByIndex(0)
         vpaCRD := test.VerticalPodAutoscaler().
             WithName("hamster-vpa").
             WithNamespace(f.Namespace.Name).
-            WithTargetRef(hamsterTargetRef).
+            WithTargetRef(utils.HamsterTargetRef).
             WithContainer(containerName).
             AppendRecommendation(
                 test.Recommendation().
@@ -294,10 +292,10 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", ginkgo.Label("FG:
                 GetContainerResources()).
             Get()
 
-        InstallVPA(f, vpaCRD)
+        utils.InstallVPA(f, vpaCRD)
 
         ginkgo.By("Setting up a hamster deployment")
-        podList := startDeploymentPods(f, d)
+        podList := utils.StartDeploymentPods(f, d)
 
         // Originally Pods had 100m CPU, 100Mi of memory, but admission controller
         // should change it to recommended 250m CPU and 200Mi of memory.
@@ -308,7 +306,7 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", ginkgo.Label("FG:
     })
 
     ginkgo.It("starts pod with default request when one container has a recommendation and one other one doesn't when a limit range applies", func() {
-        d := NewNHamstersDeployment(f, 2)
+        d := utils.NewNHamstersDeployment(f, 2)
         InstallLimitRangeWithMax(f, "400m", "1Gi", apiv1.LimitTypePod)
 
         d.Spec.Template.Spec.Containers[0].Resources.Requests = apiv1.ResourceList{
@@ -329,11 +327,11 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", ginkgo.Label("FG:
         }
         framework.Logf("Created hamster deployment %v", d)
         ginkgo.By("Setting up a VPA CRD")
-        containerName := GetHamsterContainerNameByIndex(0)
+        containerName := utils.GetHamsterContainerNameByIndex(0)
         vpaCRD := test.VerticalPodAutoscaler().
             WithName("hamster-vpa").
             WithNamespace(f.Namespace.Name).
-            WithTargetRef(hamsterTargetRef).
+            WithTargetRef(utils.HamsterTargetRef).
             WithContainer(containerName).
             AppendRecommendation(
                 test.Recommendation().
@@ -344,10 +342,10 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", ginkgo.Label("FG:
                 GetContainerResources()).
             Get()
 
-        InstallVPA(f, vpaCRD)
+        utils.InstallVPA(f, vpaCRD)
 
         ginkgo.By("Setting up a hamster deployment")
-        podList := startDeploymentPods(f, d)
+        podList := utils.StartDeploymentPods(f, d)
 
         // Originally both containers in each Pod had 400m CPU (one from
|
||||
// recommendation the other one from request), 600Mi of memory (similarly),
|
||||
|
|
@ -365,11 +363,11 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", ginkgo.Label("FG:
|
|||
d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)
|
||||
|
||||
ginkgo.By("Setting up a VPA CRD")
|
||||
containerName := GetHamsterContainerNameByIndex(0)
|
||||
containerName := utils.GetHamsterContainerNameByIndex(0)
|
||||
vpaCRD := test.VerticalPodAutoscaler().
|
||||
WithName("hamster-vpa").
|
||||
WithNamespace(f.Namespace.Name).
|
||||
WithTargetRef(hamsterTargetRef).
|
||||
WithTargetRef(utils.HamsterTargetRef).
|
||||
WithContainer(containerName).
|
||||
AppendRecommendation(
|
||||
test.Recommendation().
|
||||
|
|
@ -380,10 +378,10 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", ginkgo.Label("FG:
|
|||
GetContainerResources()).
|
||||
Get()
|
||||
|
||||
InstallVPA(f, vpaCRD)
|
||||
utils.InstallVPA(f, vpaCRD)
|
||||
|
||||
ginkgo.By("Setting up a hamster deployment")
|
||||
podList := startDeploymentPods(f, d)
|
||||
podList := utils.StartDeploymentPods(f, d)
|
||||
|
||||
ginkgo.By("Verifying hamster deployment")
|
||||
for i, pod := range podList.Items {
|
||||
|
|
@ -395,7 +393,7 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", ginkgo.Label("FG:
|
|||
}
|
||||
|
||||
ginkgo.By("Modifying recommendation.")
|
||||
PatchVpaRecommendation(f, vpaCRD, &vpa_types.RecommendedPodResources{
|
||||
utils.PatchVpaRecommendation(f, vpaCRD, &vpa_types.RecommendedPodResources{
|
||||
ContainerRecommendations: []vpa_types.RecommendedContainerResources{{
|
||||
ContainerName: "hamster",
|
||||
Target: apiv1.ResourceList{
|
||||
|
|
@ -414,11 +412,11 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", ginkgo.Label("FG:
|
|||
d := NewHamsterDeploymentWithGuaranteedResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)
|
||||
|
||||
ginkgo.By("Setting up a VPA CRD")
|
||||
containerName := GetHamsterContainerNameByIndex(0)
|
||||
containerName := utils.GetHamsterContainerNameByIndex(0)
|
||||
vpaCRD := test.VerticalPodAutoscaler().
|
||||
WithName("hamster-vpa").
|
||||
WithNamespace(f.Namespace.Name).
|
||||
WithTargetRef(hamsterTargetRef).
|
||||
WithTargetRef(utils.HamsterTargetRef).
|
||||
WithContainer(containerName).
|
||||
AppendRecommendation(
|
||||
test.Recommendation().
|
||||
|
|
@ -429,10 +427,10 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", ginkgo.Label("FG:
|
|||
GetContainerResources()).
|
||||
Get()
|
||||
|
||||
InstallVPA(f, vpaCRD)
|
||||
utils.InstallVPA(f, vpaCRD)
|
||||
|
||||
ginkgo.By("Setting up a hamster deployment")
|
||||
podList := startDeploymentPods(f, d)
|
||||
podList := utils.StartDeploymentPods(f, d)
|
||||
|
||||
// Originally Pods had 100m CPU, 100Mi of memory, but admission controller
|
||||
// should change it to 250m CPU and 200Mi of memory. Limits and requests should stay equal.
|
||||
|
|
@ -450,11 +448,11 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", ginkgo.Label("FG:
|
|||
ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("200Mi") /*memory limit*/)
|
||||
|
||||
ginkgo.By("Setting up a VPA CRD")
|
||||
containerName := GetHamsterContainerNameByIndex(0)
|
||||
containerName := utils.GetHamsterContainerNameByIndex(0)
|
||||
vpaCRD := test.VerticalPodAutoscaler().
|
||||
WithName("hamster-vpa").
|
||||
WithNamespace(f.Namespace.Name).
|
||||
WithTargetRef(hamsterTargetRef).
|
||||
WithTargetRef(utils.HamsterTargetRef).
|
||||
WithContainer(containerName).
|
||||
AppendRecommendation(
|
||||
test.Recommendation().
|
||||
|
|
@ -465,10 +463,10 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", ginkgo.Label("FG:
|
|||
GetContainerResources()).
|
||||
Get()
|
||||
|
||||
InstallVPA(f, vpaCRD)
|
||||
utils.InstallVPA(f, vpaCRD)
|
||||
|
||||
ginkgo.By("Setting up a hamster deployment")
|
||||
podList := startDeploymentPods(f, d)
|
||||
podList := utils.StartDeploymentPods(f, d)
|
||||
|
||||
// Originally Pods had 100m CPU, 100Mi of memory, but admission controller
|
||||
// should change it to 250m CPU and 200Mi of memory. Limits to request ratio should stay unchanged.
|
||||
|
|
@ -486,11 +484,11 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", ginkgo.Label("FG:
|
|||
ParseQuantityOrDie("500m") /*cpu limit*/, ParseQuantityOrDie("500Mi") /*memory limit*/)
|
||||
|
||||
ginkgo.By("Setting up a VPA CRD")
|
||||
containerName := GetHamsterContainerNameByIndex(0)
|
||||
containerName := utils.GetHamsterContainerNameByIndex(0)
|
||||
vpaCRD := test.VerticalPodAutoscaler().
|
||||
WithName("hamster-vpa").
|
||||
WithNamespace(f.Namespace.Name).
|
||||
WithTargetRef(hamsterTargetRef).
|
||||
WithTargetRef(utils.HamsterTargetRef).
|
||||
WithContainer(containerName).
|
||||
WithControlledValues(containerName, vpa_types.ContainerControlledValuesRequestsOnly).
|
||||
AppendRecommendation(
|
||||
|
|
@ -502,10 +500,10 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", ginkgo.Label("FG:
|
|||
GetContainerResources()).
|
||||
Get()
|
||||
|
||||
InstallVPA(f, vpaCRD)
|
||||
utils.InstallVPA(f, vpaCRD)
|
||||
|
||||
ginkgo.By("Setting up a hamster deployment")
|
||||
podList := startDeploymentPods(f, d)
|
||||
podList := utils.StartDeploymentPods(f, d)
|
||||
|
||||
// Originally Pods had 100m CPU, 100Mi of memory, but admission controller
|
||||
// should change it to 250m CPU and 200Mi of memory. Limits should stay unchanged.
|
||||
|
|
@ -527,11 +525,11 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", ginkgo.Label("FG:
|
|||
d := NewHamsterDeploymentWithResourcesAndLimits(f, startCpuRequest, startMemRequest, startCpuLimit, startMemLimit)
|
||||
|
||||
ginkgo.By("Setting up a VPA CRD")
|
||||
containerName := GetHamsterContainerNameByIndex(0)
|
||||
containerName := utils.GetHamsterContainerNameByIndex(0)
|
||||
vpaCRD := test.VerticalPodAutoscaler().
|
||||
WithName("hamster-vpa").
|
||||
WithNamespace(f.Namespace.Name).
|
||||
WithTargetRef(hamsterTargetRef).
|
||||
WithTargetRef(utils.HamsterTargetRef).
|
||||
WithContainer(containerName).
|
||||
AppendRecommendation(
|
||||
test.Recommendation().
|
||||
|
|
@ -542,7 +540,7 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", ginkgo.Label("FG:
|
|||
GetContainerResources()).
|
||||
Get()
|
||||
|
||||
InstallVPA(f, vpaCRD)
|
||||
utils.InstallVPA(f, vpaCRD)
|
||||
|
||||
// Max CPU limit is 300m and ratio is 1.5, so max request is 200m, while
|
||||
// recommendation is 250m
|
||||
|
|
@ -551,7 +549,7 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", ginkgo.Label("FG:
|
|||
InstallLimitRangeWithMax(f, maxCpu.String(), "1Gi", apiv1.LimitTypeContainer)
|
||||
|
||||
ginkgo.By("Setting up a hamster deployment")
|
||||
podList := startDeploymentPods(f, d)
|
||||
podList := utils.StartDeploymentPods(f, d)
|
||||
|
||||
ginkgo.By("Verifying hamster deployment")
|
||||
for i, pod := range podList.Items {
|
||||
|
|
@ -585,11 +583,11 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", ginkgo.Label("FG:
|
|||
ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)
|
||||
|
||||
ginkgo.By("Setting up a VPA CRD")
|
||||
containerName := GetHamsterContainerNameByIndex(0)
|
||||
containerName := utils.GetHamsterContainerNameByIndex(0)
|
||||
vpaCRD := test.VerticalPodAutoscaler().
|
||||
WithName("hamster-vpa").
|
||||
WithNamespace(f.Namespace.Name).
|
||||
WithTargetRef(hamsterTargetRef).
|
||||
WithTargetRef(utils.HamsterTargetRef).
|
||||
WithContainer(containerName).
|
||||
AppendRecommendation(
|
||||
test.Recommendation().
|
||||
|
|
@ -600,7 +598,7 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", ginkgo.Label("FG:
|
|||
GetContainerResources()).
|
||||
Get()
|
||||
|
||||
InstallVPA(f, vpaCRD)
|
||||
utils.InstallVPA(f, vpaCRD)
|
||||
|
||||
// Min CPU from limit range is 50m and ratio is 1.5. Min applies to both limit and request so min
|
||||
// request is 50m and min limit is 75
|
||||
|
|
@ -609,7 +607,7 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", ginkgo.Label("FG:
|
|||
InstallLimitRangeWithMin(f, "50m", "250Mi", apiv1.LimitTypeContainer)
|
||||
|
||||
ginkgo.By("Setting up a hamster deployment")
|
||||
podList := startDeploymentPods(f, d)
|
||||
podList := utils.StartDeploymentPods(f, d)
|
||||
|
||||
// Originally Pods had 100m CPU, 200Mi of memory, but admission controller
|
||||
// should change it to 250m CPU and 125Mi of memory, since this is the lowest
|
||||
|
|
@ -634,11 +632,11 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", ginkgo.Label("FG:
|
|||
d.Spec.Template.Spec.Containers[1].Name = container2Name
|
||||
|
||||
ginkgo.By("Setting up a VPA CRD")
|
||||
container1Name := GetHamsterContainerNameByIndex(0)
|
||||
container1Name := utils.GetHamsterContainerNameByIndex(0)
|
||||
vpaCRD := test.VerticalPodAutoscaler().
|
||||
WithName("hamster-vpa").
|
||||
WithNamespace(f.Namespace.Name).
|
||||
WithTargetRef(hamsterTargetRef).
|
||||
WithTargetRef(utils.HamsterTargetRef).
|
||||
WithContainer(container1Name).
|
||||
AppendRecommendation(
|
||||
test.Recommendation().
|
||||
|
|
@ -657,7 +655,7 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", ginkgo.Label("FG:
|
|||
GetContainerResources()).
|
||||
Get()
|
||||
|
||||
InstallVPA(f, vpaCRD)
|
||||
utils.InstallVPA(f, vpaCRD)
|
||||
|
||||
// Max CPU limit is 600m for pod, 300 per container and ratio is 1.5, so max request is 200m,
|
||||
// while recommendation is 250m
|
||||
|
|
@ -665,7 +663,7 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", ginkgo.Label("FG:
|
|||
InstallLimitRangeWithMax(f, "600m", "1Gi", apiv1.LimitTypePod)
|
||||
|
||||
ginkgo.By("Setting up a hamster deployment")
|
||||
podList := startDeploymentPods(f, d)
|
||||
podList := utils.StartDeploymentPods(f, d)
|
||||
|
||||
// Originally Pods had 100m CPU, 100Mi of memory, but admission controller
|
||||
// should change it to 200m CPU (as this is the recommendation
|
||||
|
|
@ -690,11 +688,11 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", ginkgo.Label("FG:
|
|||
d.Spec.Template.Spec.Containers[1].Name = container2Name
|
||||
|
||||
ginkgo.By("Setting up a VPA CRD")
|
||||
container1Name := GetHamsterContainerNameByIndex(0)
|
||||
container1Name := utils.GetHamsterContainerNameByIndex(0)
|
||||
vpaCRD := test.VerticalPodAutoscaler().
|
||||
WithName("hamster-vpa").
|
||||
WithNamespace(f.Namespace.Name).
|
||||
WithTargetRef(hamsterTargetRef).
|
||||
WithTargetRef(utils.HamsterTargetRef).
|
||||
WithContainer(container1Name).
|
||||
AppendRecommendation(
|
||||
test.Recommendation().
|
||||
|
|
@ -713,7 +711,7 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", ginkgo.Label("FG:
|
|||
GetContainerResources()).
|
||||
Get()
|
||||
|
||||
InstallVPA(f, vpaCRD)
|
||||
utils.InstallVPA(f, vpaCRD)
|
||||
|
||||
// Min CPU from limit range is 100m, 50m per pod and ratio is 1.5. Min applies to both limit and
|
||||
// request so min request is 50m and min limit is 75
|
||||
|
|
@ -722,7 +720,7 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", ginkgo.Label("FG:
|
|||
InstallLimitRangeWithMin(f, "100m", "500Mi", apiv1.LimitTypePod)
|
||||
|
||||
ginkgo.By("Setting up a hamster deployment")
|
||||
podList := startDeploymentPods(f, d)
|
||||
podList := utils.StartDeploymentPods(f, d)
|
||||
|
||||
// Originally Pods had 100m CPU, 200Mi of memory, but admission controller
|
||||
// should change it to 250m CPU and 125Mi of memory, since this is the lowest
|
||||
|
|
@ -742,11 +740,11 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", ginkgo.Label("FG:
|
|||
d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)
|
||||
|
||||
ginkgo.By("Setting up a VPA CRD")
|
||||
containerName := GetHamsterContainerNameByIndex(0)
|
||||
containerName := utils.GetHamsterContainerNameByIndex(0)
|
||||
vpaCRD := test.VerticalPodAutoscaler().
|
||||
WithName("hamster-vpa").
|
||||
WithNamespace(f.Namespace.Name).
|
||||
WithTargetRef(hamsterTargetRef).
|
||||
WithTargetRef(utils.HamsterTargetRef).
|
||||
WithContainer(containerName).
|
||||
AppendRecommendation(
|
||||
test.Recommendation().
|
||||
|
|
@ -758,10 +756,10 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", ginkgo.Label("FG:
|
|||
WithMaxAllowed(containerName, "233m", "150Mi").
|
||||
Get()
|
||||
|
||||
InstallVPA(f, vpaCRD)
|
||||
utils.InstallVPA(f, vpaCRD)
|
||||
|
||||
ginkgo.By("Setting up a hamster deployment")
|
||||
podList := startDeploymentPods(f, d)
|
||||
podList := utils.StartDeploymentPods(f, d)
|
||||
|
||||
// Originally Pods had 100m CPU, 100Mi of memory, but admission controller
|
||||
// should change it to 233m CPU and 150Mi of memory (as this is the recommendation
|
||||
|
|
@ -776,11 +774,11 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", ginkgo.Label("FG:
|
|||
d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)
|
||||
|
||||
ginkgo.By("Setting up a VPA CRD")
|
||||
containerName := GetHamsterContainerNameByIndex(0)
|
||||
containerName := utils.GetHamsterContainerNameByIndex(0)
|
||||
vpaCRD := test.VerticalPodAutoscaler().
|
||||
WithName("hamster-vpa").
|
||||
WithNamespace(f.Namespace.Name).
|
||||
WithTargetRef(hamsterTargetRef).
|
||||
WithTargetRef(utils.HamsterTargetRef).
|
||||
WithContainer(containerName).
|
||||
AppendRecommendation(
|
||||
test.Recommendation().
|
||||
|
|
@@ -792,10 +790,10 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", ginkgo.Label("FG:
 			WithMinAllowed(containerName, "90m", "80Mi").
 			Get()

-		InstallVPA(f, vpaCRD)
+		utils.InstallVPA(f, vpaCRD)

 		ginkgo.By("Setting up a hamster deployment")
-		podList := startDeploymentPods(f, d)
+		podList := utils.StartDeploymentPods(f, d)

 		// Originally Pods had 100m CPU, 100Mi of memory, but admission controller
 		// should change it to recommended 90m CPU and 800Mi of memory (as this is the
@@ -810,18 +808,18 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", ginkgo.Label("FG:
 		d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)

 		ginkgo.By("Setting up a VPA CRD")
-		containerName := GetHamsterContainerNameByIndex(0)
+		containerName := utils.GetHamsterContainerNameByIndex(0)
 		vpaCRD := test.VerticalPodAutoscaler().
 			WithName("hamster-vpa").
 			WithNamespace(f.Namespace.Name).
-			WithTargetRef(hamsterTargetRef).
+			WithTargetRef(utils.HamsterTargetRef).
 			WithContainer(containerName).
 			Get()

-		InstallVPA(f, vpaCRD)
+		utils.InstallVPA(f, vpaCRD)

 		ginkgo.By("Setting up a hamster deployment")
-		podList := startDeploymentPods(f, d)
+		podList := utils.StartDeploymentPods(f, d)

 		// VPA has no recommendation, so user's request is passed through
 		for _, pod := range podList.Items {
@@ -834,18 +832,18 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", ginkgo.Label("FG:
 		d := NewHamsterDeployment(f)

 		ginkgo.By("Setting up a VPA CRD")
-		containerName := GetHamsterContainerNameByIndex(0)
+		containerName := utils.GetHamsterContainerNameByIndex(0)
 		vpaCRD := test.VerticalPodAutoscaler().
 			WithName("hamster-vpa").
 			WithNamespace(f.Namespace.Name).
-			WithTargetRef(hamsterTargetRef).
+			WithTargetRef(utils.HamsterTargetRef).
 			WithContainer(containerName).
 			Get()

-		InstallVPA(f, vpaCRD)
+		utils.InstallVPA(f, vpaCRD)

 		ginkgo.By("Setting up a hamster deployment")
-		podList := startDeploymentPods(f, d)
+		podList := utils.StartDeploymentPods(f, d)

 		// VPA has no recommendation, deployment has no request specified
 		for _, pod := range podList.Items {
@@ -951,59 +949,6 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", ginkgo.Label("FG:
 	})
 })

-func startDeploymentPods(f *framework.Framework, deployment *appsv1.Deployment) *apiv1.PodList {
-	// Apiserver watch can lag depending on cached object count and apiserver resource usage.
-	// We assume that watch can lag up to 5 seconds.
-	const apiserverWatchLag = 5 * time.Second
-	// In admission controller e2e tests a recommendation is created before deployment.
-	// Creating deployment with size greater than 0 would create a race between information
-	// about pods and information about deployment getting to the admission controller.
-	// Any pods that get processed by AC before it receives information about the deployment
-	// don't receive recommendation.
-	// To avoid this create deployment with size 0, then scale it up to the desired size.
-	desiredPodCount := *deployment.Spec.Replicas
-	zero := int32(0)
-	deployment.Spec.Replicas = &zero
-	c, ns := f.ClientSet, f.Namespace.Name
-	deployment, err := c.AppsV1().Deployments(ns).Create(context.TODO(), deployment, metav1.CreateOptions{})
-	gomega.Expect(err).NotTo(gomega.HaveOccurred(), "when creating deployment with size 0")
-
-	err = framework_deployment.WaitForDeploymentComplete(c, deployment)
-	gomega.Expect(err).NotTo(gomega.HaveOccurred(), "when waiting for empty deployment to create")
-	// If admission controller receives pod before controller it will not apply recommendation and test will fail.
-	// Wait after creating deployment to ensure VPA knows about it, then scale up.
-	// Normally watch lag is not a problem in terms of correctness:
-	// - Mode "Auto": created pod without assigned resources will be handled by the eviction loop.
-	// - Mode "Initial": calculating recommendations takes more than potential ectd lag.
-	// - Mode "Off": pods are not handled by the admission controller.
-	// In e2e admission controller tests we want to focus on scenarios without considering watch lag.
-	// TODO(#2631): Remove sleep when issue is fixed.
-	time.Sleep(apiserverWatchLag)
-
-	scale := autoscalingv1.Scale{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      deployment.ObjectMeta.Name,
-			Namespace: deployment.ObjectMeta.Namespace,
-		},
-		Spec: autoscalingv1.ScaleSpec{
-			Replicas: desiredPodCount,
-		},
-	}
-	afterScale, err := c.AppsV1().Deployments(ns).UpdateScale(context.TODO(), deployment.Name, &scale, metav1.UpdateOptions{})
-	gomega.Expect(err).NotTo(gomega.HaveOccurred())
-	gomega.Expect(afterScale.Spec.Replicas).To(gomega.Equal(desiredPodCount), fmt.Sprintf("expected %d replicas after scaling", desiredPodCount))
-
-	// After scaling deployment we need to retrieve current version with updated replicas count.
-	deployment, err = c.AppsV1().Deployments(ns).Get(context.TODO(), deployment.Name, metav1.GetOptions{})
-	gomega.Expect(err).NotTo(gomega.HaveOccurred(), "when getting scaled deployment")
-	err = framework_deployment.WaitForDeploymentComplete(c, deployment)
-	gomega.Expect(err).NotTo(gomega.HaveOccurred(), "when waiting for deployment to resize")
-
-	podList, err := framework_deployment.GetPodsForDeployment(context.TODO(), c, deployment)
-	gomega.Expect(err).NotTo(gomega.HaveOccurred(), "when listing pods after deployment resize")
-	return podList
-}
-
 func waitForVpaWebhookRegistration(f *framework.Framework) {
 	ginkgo.By("Waiting for VPA webhook registration")
 	gomega.Eventually(func() bool {
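The deleted startDeploymentPods above (now utils.StartDeploymentPods) encodes the one non-obvious trick in these tests: create the Deployment at zero replicas so the admission controller learns about the controller object before any pod exists, then grow it through the scale subresource. A condensed sketch of that scaling step, with names shortened from the deleted body; UpdateScale writes only the replica count rather than doing a read-modify-write of the whole Deployment object:

// Condensed from the deleted helper: grow a deployment via the scale subresource.
scale := autoscalingv1.Scale{
	ObjectMeta: metav1.ObjectMeta{Name: d.Name, Namespace: ns},
	Spec:       autoscalingv1.ScaleSpec{Replicas: desired}, // replica count saved before zeroing
}
_, err := c.AppsV1().Deployments(ns).UpdateScale(context.TODO(), d.Name, &scale, metav1.UpdateOptions{})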
@@ -26,7 +26,6 @@ import (
 	ginkgo "github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 	appsv1 "k8s.io/api/apps/v1"
-	autoscaling "k8s.io/api/autoscaling/v1"
 	batchv1 "k8s.io/api/batch/v1"
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
@@ -35,6 +34,7 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/autoscaler/vertical-pod-autoscaler/e2e/utils"
 	vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1"
 	vpa_clientset "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned"
 	clientset "k8s.io/client-go/kubernetes"
@@ -43,13 +43,10 @@ import (
 )

 const (
-	recommenderComponent         = "recommender"
 	updateComponent              = "updater"
 	admissionControllerComponent = "admission-controller"
 	fullVpaSuite                 = "full-vpa"
 	actuationSuite               = "actuation"
-	pollInterval                 = 10 * time.Second
-	pollTimeout                  = 15 * time.Minute
 	cronJobsWaitTimeout          = 15 * time.Minute
 	// VpaEvictionTimeout is a timeout for VPA to restart a pod if there are no
 	// mechanisms blocking it (for example PDB).
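The constants dropped here do not disappear: judging by the utils.PollInterval, utils.PollTimeout, utils.DefaultHamsterReplicas, and utils.DefaultHamsterBackoffLimit call sites later in this diff, they reappear exported in e2e/utils with the same values. The declarations below are an inference from those call sites, not lines shown in this diff:

// Assumed e2e/utils declarations; values carried over from the removed lines.
const (
	PollInterval = 10 * time.Second
	PollTimeout  = 15 * time.Minute

	DefaultHamsterReplicas     = int32(3)
	DefaultHamsterBackoffLimit = int32(10)
)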
@@ -58,63 +55,28 @@
 	// pod, if there are no mechanisms blocking it.
 	VpaInPlaceTimeout = 2 * time.Minute

-	defaultHamsterReplicas     = int32(3)
-	defaultHamsterBackoffLimit = int32(10)
-
 	// VpaNamespace is the default namespace that holds all the VPA components.
 	VpaNamespace = "kube-system"
 )

-var hamsterTargetRef = &autoscaling.CrossVersionObjectReference{
-	APIVersion: "apps/v1",
-	Kind:       "Deployment",
-	Name:       "hamster-deployment",
-}
-
-var hamsterLabels = map[string]string{"app": "hamster"}
-
-// SIGDescribe adds sig-autoscaling tag to test description.
-// Takes args that are passed to ginkgo.Describe.
-func SIGDescribe(scenario, name string, args ...interface{}) bool {
-	full := fmt.Sprintf("[sig-autoscaling] [VPA] [%s] [v1] %s", scenario, name)
-	return ginkgo.Describe(full, args...)
-}
-
-// RecommenderE2eDescribe describes a VPA recommender e2e test.
-func RecommenderE2eDescribe(name string, args ...interface{}) bool {
-	return SIGDescribe(recommenderComponent, name, args...)
-}
-
 // UpdaterE2eDescribe describes a VPA updater e2e test.
 func UpdaterE2eDescribe(name string, args ...interface{}) bool {
-	return SIGDescribe(updateComponent, name, args...)
+	return utils.SIGDescribe(updateComponent, name, args...)
 }

 // AdmissionControllerE2eDescribe describes a VPA admission controller e2e test.
 func AdmissionControllerE2eDescribe(name string, args ...interface{}) bool {
-	return SIGDescribe(admissionControllerComponent, name, args...)
+	return utils.SIGDescribe(admissionControllerComponent, name, args...)
 }

 // FullVpaE2eDescribe describes a VPA full stack e2e test.
 func FullVpaE2eDescribe(name string, args ...interface{}) bool {
-	return SIGDescribe(fullVpaSuite, name, args...)
+	return utils.SIGDescribe(fullVpaSuite, name, args...)
 }

 // ActuationSuiteE2eDescribe describes a VPA actuation e2e test.
 func ActuationSuiteE2eDescribe(name string, args ...interface{}) bool {
-	return SIGDescribe(actuationSuite, name, args...)
-}
-
-// GetHamsterContainerNameByIndex returns name of i-th hamster container.
-func GetHamsterContainerNameByIndex(i int) string {
-	switch {
-	case i < 0:
-		panic("negative index")
-	case i == 0:
-		return "hamster"
-	default:
-		return fmt.Sprintf("hamster%d", i+1)
-	}
+	return utils.SIGDescribe(actuationSuite, name, args...)
 }

 // SetupHamsterDeployment creates and installs a simple hamster deployment
@@ -134,31 +96,7 @@ func SetupHamsterDeployment(f *framework.Framework, cpu, memory string, replicas

 // NewHamsterDeployment creates a simple hamster deployment for e2e test purposes.
 func NewHamsterDeployment(f *framework.Framework) *appsv1.Deployment {
-	return NewNHamstersDeployment(f, 1)
-}
-
-// NewNHamstersDeployment creates a simple hamster deployment with n containers
-// for e2e test purposes.
-func NewNHamstersDeployment(f *framework.Framework, n int) *appsv1.Deployment {
-	if n < 1 {
-		panic("container count should be greater than 0")
-	}
-	d := framework_deployment.NewDeployment(
-		"hamster-deployment",                       /*deploymentName*/
-		defaultHamsterReplicas,                     /*replicas*/
-		hamsterLabels,                              /*podLabels*/
-		GetHamsterContainerNameByIndex(0),          /*imageName*/
-		"registry.k8s.io/ubuntu-slim:0.14",         /*image*/
-		appsv1.RollingUpdateDeploymentStrategyType, /*strategyType*/
-	)
-	d.ObjectMeta.Namespace = f.Namespace.Name
-	d.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh"}
-	d.Spec.Template.Spec.Containers[0].Args = []string{"-c", "/usr/bin/yes >/dev/null"}
-	for i := 1; i < n; i++ {
-		d.Spec.Template.Spec.Containers = append(d.Spec.Template.Spec.Containers, d.Spec.Template.Spec.Containers[0])
-		d.Spec.Template.Spec.Containers[i].Name = GetHamsterContainerNameByIndex(i)
-	}
-	return d
+	return utils.NewNHamstersDeployment(f, 1)
 }

 // NewHamsterDeploymentWithResources creates a simple hamster deployment with specific
@@ -203,16 +141,16 @@ func getPodSelectorExcludingDonePodsOrDie() string
 	return selector.String()
 }

-// GetHamsterPods returns running hamster pods (matched by hamsterLabels)
+// GetHamsterPods returns running hamster pods (matched by utils.HamsterLabels)
 func GetHamsterPods(f *framework.Framework) (*apiv1.PodList, error) {
-	label := labels.SelectorFromSet(labels.Set(hamsterLabels))
+	label := labels.SelectorFromSet(labels.Set(utils.HamsterLabels))
 	options := metav1.ListOptions{LabelSelector: label.String(), FieldSelector: getPodSelectorExcludingDonePodsOrDie()}
 	return f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), options)
 }

 // NewTestCronJob returns a CronJob for test purposes.
 func NewTestCronJob(name, schedule string, replicas int32) *batchv1.CronJob {
-	backoffLimit := defaultHamsterBackoffLimit
+	backoffLimit := utils.DefaultHamsterBackoffLimit
 	sj := &batchv1.CronJob{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: name,
@@ -266,7 +204,7 @@ func getCronJob(c clientset.Interface, ns, name string) (*batchv1.CronJob, error
 func SetupHamsterCronJob(f *framework.Framework, schedule, cpu, memory string, replicas int32) {
 	cronJob := NewTestCronJob("hamster-cronjob", schedule, replicas)
 	cronJob.Spec.JobTemplate.Spec.Template.Spec.Containers = []apiv1.Container{SetupHamsterContainer(cpu, memory)}
-	for label, value := range hamsterLabels {
+	for label, value := range utils.HamsterLabels {
 		cronJob.Spec.JobTemplate.Spec.Template.Labels[label] = value
 	}
 	cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
@@ -294,45 +232,9 @@ func SetupHamsterContainer(cpu, memory string) apiv1.Container {
 	}
 }

-type patchRecord struct {
-	Op    string      `json:"op,inline"`
-	Path  string      `json:"path,inline"`
-	Value interface{} `json:"value"`
-}
-
-func getVpaClientSet(f *framework.Framework) vpa_clientset.Interface {
-	config, err := framework.LoadConfig()
-	gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error loading framework")
-	return vpa_clientset.NewForConfigOrDie(config)
-}
-
-// InstallVPA installs a VPA object in the test cluster.
-func InstallVPA(f *framework.Framework, vpa *vpa_types.VerticalPodAutoscaler) {
-	vpaClientSet := getVpaClientSet(f)
-	_, err := vpaClientSet.AutoscalingV1().VerticalPodAutoscalers(f.Namespace.Name).Create(context.TODO(), vpa, metav1.CreateOptions{})
-	gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error creating VPA")
-	// apiserver ignore status in vpa create, so need to update status
-	if !isStatusEmpty(&vpa.Status) {
-		if vpa.Status.Recommendation != nil {
-			PatchVpaRecommendation(f, vpa, vpa.Status.Recommendation)
-		}
-	}
-}
-
-func isStatusEmpty(status *vpa_types.VerticalPodAutoscalerStatus) bool {
-	if status == nil {
-		return true
-	}
-
-	if len(status.Conditions) == 0 && status.Recommendation == nil {
-		return true
-	}
-	return false
-}
-
 // InstallRawVPA installs a VPA object passed in as raw json in the test cluster.
 func InstallRawVPA(f *framework.Framework, obj interface{}) error {
-	vpaClientSet := getVpaClientSet(f)
+	vpaClientSet := utils.GetVpaClientSet(f)
 	err := vpaClientSet.AutoscalingV1().RESTClient().Post().
 		Namespace(f.Namespace.Name).
 		Resource("verticalpodautoscalers").
@@ -341,24 +243,9 @@ func InstallRawVPA(f *framework.Framework, obj interface{}) error {
 	return err.Error()
 }

-// PatchVpaRecommendation installs a new recommendation for VPA object.
-func PatchVpaRecommendation(f *framework.Framework, vpa *vpa_types.VerticalPodAutoscaler,
-	recommendation *vpa_types.RecommendedPodResources) {
-	newStatus := vpa.Status.DeepCopy()
-	newStatus.Recommendation = recommendation
-	bytes, err := json.Marshal([]patchRecord{{
-		Op:    "replace",
-		Path:  "/status",
-		Value: *newStatus,
-	}})
-	gomega.Expect(err).NotTo(gomega.HaveOccurred())
-	_, err = getVpaClientSet(f).AutoscalingV1().VerticalPodAutoscalers(f.Namespace.Name).Patch(context.TODO(), vpa.Name, types.JSONPatchType, bytes, metav1.PatchOptions{}, "status")
-	gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to patch VPA.")
-}
-
 // AnnotatePod adds annotation for an existing pod.
 func AnnotatePod(f *framework.Framework, podName, annotationName, annotationValue string) {
-	bytes, err := json.Marshal([]patchRecord{{
+	bytes, err := json.Marshal([]utils.PatchRecord{{
 		Op:    "add",
 		Path:  fmt.Sprintf("/metadata/annotations/%v", annotationName),
 		Value: annotationValue,
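AnnotatePod now marshals utils.PatchRecord values, but the wire format is unchanged: a one-element JSON Patch. With hypothetical inputs annotationName "foo" and annotationValue "bar", the marshaled bytes and their application would look like the sketch below; the Patch call itself is an assumption based on the function's purpose, since it is not part of this hunk. Note the path is built with plain Sprintf, so annotation names containing '/' or '~' would need JSON-Pointer escaping:

// Hypothetical illustration of AnnotatePod's payload and its application.
patch := []byte(`[{"op":"add","path":"/metadata/annotations/foo","value":"bar"}]`)
_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).
	Patch(context.TODO(), podName, types.JSONPatchType, patch, metav1.PatchOptions{})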
@@ -395,7 +282,7 @@ func MakePodSet(pods *apiv1.PodList) PodSet {
 func WaitForPodsRestarted(f *framework.Framework, podList *apiv1.PodList) error {
 	initialPodSet := MakePodSet(podList)

-	return wait.PollUntilContextTimeout(context.Background(), pollInterval, pollTimeout, true, func(ctx context.Context) (done bool, err error) {
+	return wait.PollUntilContextTimeout(context.Background(), utils.PollInterval, utils.PollTimeout, true, func(ctx context.Context) (done bool, err error) {
 		currentPodList, err := GetHamsterPods(f)
 		if err != nil {
 			return false, err
@@ -409,7 +296,7 @@ func WaitForPodsRestarted(f *framework.Framework, podList *apiv1.PodList) error
 func WaitForPodsEvicted(f *framework.Framework, podList *apiv1.PodList) error {
 	initialPodSet := MakePodSet(podList)

-	return wait.PollUntilContextTimeout(context.Background(), pollInterval, pollTimeout, true, func(ctx context.Context) (done bool, err error) {
+	return wait.PollUntilContextTimeout(context.Background(), utils.PollInterval, utils.PollTimeout, true, func(ctx context.Context) (done bool, err error) {
 		currentPodList, err := GetHamsterPods(f)
 		if err != nil {
 			return false, err
@@ -459,41 +346,10 @@ func CheckNoPodsEvicted(f *framework.Framework, initialPodSet PodSet) {
 	gomega.Expect(restarted).To(gomega.Equal(0), "there should be no pod evictions")
 }

-// WaitForVPAMatch polls the VPA object until the match function returns true. Returns
-// the polled vpa object. On timeout returns error.
-func WaitForVPAMatch(c vpa_clientset.Interface, vpa *vpa_types.VerticalPodAutoscaler, match func(vpa *vpa_types.VerticalPodAutoscaler) bool) (*vpa_types.VerticalPodAutoscaler, error) {
-	var polledVpa *vpa_types.VerticalPodAutoscaler
-	err := wait.PollUntilContextTimeout(context.Background(), pollInterval, pollTimeout, true, func(ctx context.Context) (done bool, err error) {
-		polledVpa, err = c.AutoscalingV1().VerticalPodAutoscalers(vpa.Namespace).Get(context.TODO(), vpa.Name, metav1.GetOptions{})
-		if err != nil {
-			return false, err
-		}
-
-		if match(polledVpa) {
-			return true, nil
-		}
-
-		return false, nil
-	})
-
-	if err != nil {
-		return nil, fmt.Errorf("error waiting for recommendation present in %v: %v", vpa.Name, err)
-	}
-	return polledVpa, nil
-}
-
-// WaitForRecommendationPresent polls the VPA object until recommendations are not empty. Returns
-// the polled vpa object. On timeout returns error.
-func WaitForRecommendationPresent(c vpa_clientset.Interface, vpa *vpa_types.VerticalPodAutoscaler) (*vpa_types.VerticalPodAutoscaler, error) {
-	return WaitForVPAMatch(c, vpa, func(vpa *vpa_types.VerticalPodAutoscaler) bool {
-		return vpa.Status.Recommendation != nil && len(vpa.Status.Recommendation.ContainerRecommendations) != 0
-	})
-}
-
 // WaitForUncappedCPURecommendationAbove polls the VPA object until the uncapped recommendation is above the specified value.
 // Returns the polled VPA object. On timeout returns error.
 func WaitForUncappedCPURecommendationAbove(c vpa_clientset.Interface, vpa *vpa_types.VerticalPodAutoscaler, minMilliCPU int64) (*vpa_types.VerticalPodAutoscaler, error) {
-	return WaitForVPAMatch(c, vpa, func(vpa *vpa_types.VerticalPodAutoscaler) bool {
+	return utils.WaitForVPAMatch(c, vpa, func(vpa *vpa_types.VerticalPodAutoscaler) bool {
 		if vpa.Status.Recommendation == nil || len(vpa.Status.Recommendation.ContainerRecommendations) == 0 {
 			return false
 		}
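WaitForVPAMatch and WaitForRecommendationPresent move to utils with their signatures intact, which keeps the match-function pattern available to every suite: poll the VPA until an arbitrary predicate over its status holds. A minimal usage sketch against the exported helper; the predicate here is hypothetical:

// Wait until the VPA reports at least one status condition (hypothetical predicate).
polled, err := utils.WaitForVPAMatch(vpaClientSet, vpaCRD, func(v *vpa_types.VerticalPodAutoscaler) bool {
	return len(v.Status.Conditions) > 0
})
if err != nil {
	framework.Failf("VPA never matched: %v", err)
}
framework.Logf("matched VPA %s", polled.Name)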
@@ -565,7 +421,7 @@ func InstallLimitRangeWithMin(f *framework.Framework, minCpuLimit, minMemoryLimi
 // TODO: Use events to track in-place resizes instead of polling when ready: https://github.com/kubernetes/kubernetes/issues/127172
 func WaitForPodsUpdatedWithoutEviction(f *framework.Framework, initialPods *apiv1.PodList) error {
 	framework.Logf("waiting for at least one pod to be updated without eviction")
-	err := wait.PollUntilContextTimeout(context.TODO(), pollInterval, VpaInPlaceTimeout, false, func(context.Context) (bool, error) {
+	err := wait.PollUntilContextTimeout(context.TODO(), utils.PollInterval, VpaInPlaceTimeout, false, func(context.Context) (bool, error) {
 		podList, err := GetHamsterPods(f)
 		if err != nil {
 			return false, err
@@ -26,6 +26,7 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/autoscaler/vertical-pod-autoscaler/e2e/utils"
 	vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1"
 	"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/test"
 	"k8s.io/kubernetes/test/e2e/framework"
@@ -82,7 +83,7 @@ var _ = FullVpaE2eDescribe("Pods under VPA", func() {
 			Name:       "hamster",
 		}

-		containerName := GetHamsterContainerNameByIndex(0)
+		containerName := utils.GetHamsterContainerNameByIndex(0)
 		vpaCRD := test.VerticalPodAutoscaler().
 			WithName("hamster-vpa").
 			WithNamespace(f.Namespace.Name).
@@ -98,20 +99,20 @@ var _ = FullVpaE2eDescribe("Pods under VPA", func() {
 				GetContainerResources()).
 			Get()

-		InstallVPA(f, vpaCRD)
+		utils.InstallVPA(f, vpaCRD)
 	})

 	ginkgo.It("have cpu requests growing with usage", func() {
 		// initial CPU usage is low so a minimal recommendation is expected
 		err := waitForResourceRequestInRangeInPods(
-			f, pollTimeout, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceCPU,
+			f, utils.PollTimeout, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceCPU,
 			ParseQuantityOrDie(minimalCPULowerBound), ParseQuantityOrDie(minimalCPUUpperBound))
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())

 		// consume more CPU to get a higher recommendation
 		rc.ConsumeCPU(600 * replicas)
 		err = waitForResourceRequestInRangeInPods(
-			f, pollTimeout, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceCPU,
+			f, utils.PollTimeout, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceCPU,
 			ParseQuantityOrDie("500m"), ParseQuantityOrDie("1300m"))
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 	})
@@ -119,7 +120,7 @@ var _ = FullVpaE2eDescribe("Pods under VPA", func() {
 	ginkgo.It("have memory requests growing with usage", func() {
 		// initial memory usage is low so a minimal recommendation is expected
 		err := waitForResourceRequestInRangeInPods(
-			f, pollTimeout, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceMemory,
+			f, utils.PollTimeout, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceMemory,
 			ParseQuantityOrDie(minimalMemoryLowerBound), ParseQuantityOrDie(minimalMemoryUpperBound))
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())

@@ -127,7 +128,7 @@ var _ = FullVpaE2eDescribe("Pods under VPA", func() {
 		// NOTE: large range given due to unpredictability of actual memory usage
 		rc.ConsumeMem(1024 * replicas)
 		err = waitForResourceRequestInRangeInPods(
-			f, pollTimeout, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceMemory,
+			f, utils.PollTimeout, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceMemory,
 			ParseQuantityOrDie("900Mi"), ParseQuantityOrDie("4000Mi"))
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 	})
@@ -154,7 +155,7 @@ var _ = FullVpaE2eDescribe("Pods under VPA", func() {
 			Name:       "hamster",
 		}

-		containerName := GetHamsterContainerNameByIndex(0)
+		containerName := utils.GetHamsterContainerNameByIndex(0)
 		vpaCRD := test.VerticalPodAutoscaler().
 			WithName("hamster-vpa").
 			WithNamespace(f.Namespace.Name).
@@ -169,20 +170,20 @@ var _ = FullVpaE2eDescribe("Pods under VPA", func() {
 				GetContainerResources()).
 			Get()

-		InstallVPA(f, vpaCRD)
+		utils.InstallVPA(f, vpaCRD)
 	})

 	ginkgo.It("have cpu requests growing with usage", func() {
 		// initial CPU usage is low so a minimal recommendation is expected
 		err := waitForResourceRequestInRangeInPods(
-			f, pollTimeout, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceCPU,
+			f, utils.PollTimeout, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceCPU,
 			ParseQuantityOrDie(minimalCPULowerBound), ParseQuantityOrDie(minimalCPUUpperBound))
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())

 		// consume more CPU to get a higher recommendation
 		rc.ConsumeCPU(600 * replicas)
 		err = waitForResourceRequestInRangeInPods(
-			f, pollTimeout, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceCPU,
+			f, utils.PollTimeout, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceCPU,
 			ParseQuantityOrDie("500m"), ParseQuantityOrDie("1300m"))
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 	})
@@ -190,7 +191,7 @@ var _ = FullVpaE2eDescribe("Pods under VPA", func() {
 	ginkgo.It("have memory requests growing with usage", func() {
 		// initial memory usage is low so a minimal recommendation is expected
 		err := waitForResourceRequestInRangeInPods(
-			f, pollTimeout, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceMemory,
+			f, utils.PollTimeout, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceMemory,
 			ParseQuantityOrDie(minimalMemoryLowerBound), ParseQuantityOrDie(minimalMemoryUpperBound))
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())

@@ -198,7 +199,7 @@ var _ = FullVpaE2eDescribe("Pods under VPA", func() {
 		// NOTE: large range given due to unpredictability of actual memory usage
 		rc.ConsumeMem(1024 * replicas)
 		err = waitForResourceRequestInRangeInPods(
-			f, pollTimeout, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceMemory,
+			f, utils.PollTimeout, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceMemory,
 			ParseQuantityOrDie("900Mi"), ParseQuantityOrDie("4000Mi"))
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 	})
@@ -240,7 +241,7 @@ var _ = FullVpaE2eDescribe("Pods under VPA with default recommender explicitly c
 			Name:       "hamster",
 		}

-		containerName := GetHamsterContainerNameByIndex(0)
+		containerName := utils.GetHamsterContainerNameByIndex(0)
 		vpaCRD := test.VerticalPodAutoscaler().
 			WithName("hamster-vpa").
 			WithNamespace(f.Namespace.Name).
@@ -255,21 +256,21 @@ var _ = FullVpaE2eDescribe("Pods under VPA with default recommender explicitly c
 				GetContainerResources()).
 			Get()

-		InstallVPA(f, vpaCRD)
+		utils.InstallVPA(f, vpaCRD)

 	})

 	ginkgo.It("have cpu requests growing with usage", func() {
 		// initial CPU usage is low so a minimal recommendation is expected
 		err := waitForResourceRequestInRangeInPods(
-			f, pollTimeout, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceCPU,
+			f, utils.PollTimeout, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceCPU,
 			ParseQuantityOrDie(minimalCPULowerBound), ParseQuantityOrDie(minimalCPUUpperBound))
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())

 		// consume more CPU to get a higher recommendation
 		rc.ConsumeCPU(600 * replicas)
 		err = waitForResourceRequestInRangeInPods(
-			f, pollTimeout, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceCPU,
+			f, utils.PollTimeout, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceCPU,
 			ParseQuantityOrDie("500m"), ParseQuantityOrDie("1300m"))
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 	})
@@ -310,7 +311,7 @@ var _ = FullVpaE2eDescribe("Pods under VPA with non-recognized recommender expli
 			Name:       "hamster",
 		}

-		containerName := GetHamsterContainerNameByIndex(0)
+		containerName := utils.GetHamsterContainerNameByIndex(0)
 		vpaCRD := test.VerticalPodAutoscaler().
 			WithName("hamster-vpa").
 			WithRecommender("non-recognized").
@@ -326,20 +327,20 @@ var _ = FullVpaE2eDescribe("Pods under VPA with non-recognized recommender expli
 				GetContainerResources()).
 			Get()

-		InstallVPA(f, vpaCRD)
+		utils.InstallVPA(f, vpaCRD)

 	})

 	ginkgo.It("deployment not updated by non-recognized recommender", func() {
 		err := waitForResourceRequestInRangeInPods(
-			f, pollTimeout, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceCPU,
+			f, utils.PollTimeout, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceCPU,
 			ParseQuantityOrDie(minimalCPULowerBound), ParseQuantityOrDie(minimalCPUUpperBound))
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())

 		// consume more CPU to get a higher recommendation
 		rc.ConsumeCPU(600 * replicas)
 		err = waitForResourceRequestInRangeInPods(
-			f, pollTimeout, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceCPU,
+			f, utils.PollTimeout, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceCPU,
 			ParseQuantityOrDie("500m"), ParseQuantityOrDie("1000m"))
 		gomega.Expect(err).To(gomega.HaveOccurred())
 	})
@@ -367,7 +368,7 @@ var _ = FullVpaE2eDescribe("OOMing pods under VPA", func() {
 			Name:       "hamster",
 		}

-		containerName := GetHamsterContainerNameByIndex(0)
+		containerName := utils.GetHamsterContainerNameByIndex(0)
 		vpaCRD := test.VerticalPodAutoscaler().
 			WithName("hamster-vpa").
 			WithNamespace(f.Namespace.Name).
@@ -375,7 +376,7 @@ var _ = FullVpaE2eDescribe("OOMing pods under VPA", func() {
 			WithContainer(containerName).
 			Get()

-		InstallVPA(f, vpaCRD)
+		utils.InstallVPA(f, vpaCRD)
 	})

 	ginkgo.It("have memory requests growing with OOMs", func() {
@@ -391,7 +392,7 @@ var _ = FullVpaE2eDescribe("OOMing pods under VPA", func() {
 	})

 func waitForPodsMatch(f *framework.Framework, timeout time.Duration, listOptions metav1.ListOptions, matcher func(pod apiv1.Pod) bool) error {
-	return wait.PollUntilContextTimeout(context.Background(), pollInterval, timeout, true, func(ctx context.Context) (done bool, err error) {
+	return wait.PollUntilContextTimeout(context.Background(), utils.PollInterval, timeout, true, func(ctx context.Context) (done bool, err error) {
 		ns := f.Namespace.Name
 		c := f.ClientSet

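All the polling in this file goes through wait.PollUntilContextTimeout(ctx, interval, timeout, immediate, condition); the fourth argument true above means the condition is evaluated once immediately rather than only after the first interval elapses. A minimal usage sketch with a hypothetical condition:

// immediate=true: cond runs right away, then every utils.PollInterval until timeout.
err := wait.PollUntilContextTimeout(context.Background(), utils.PollInterval, timeout, true,
	func(ctx context.Context) (bool, error) {
		// hypothetical check; returning a non-nil error aborts the poll early
		return podsLookHealthy(), nil
	})
if err != nil {
	framework.Logf("condition never held: %v", err)
}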
|
|||
|
|
@ -26,6 +26,7 @@ import (
|
|||
apiv1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/autoscaler/vertical-pod-autoscaler/e2e/utils"
|
||||
vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1"
|
||||
vpa_clientset "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned"
|
||||
"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/test"
|
||||
|
|
@ -119,13 +120,13 @@ func getVpaObserver(vpaClientSet vpa_clientset.Interface, namespace string) *obs
|
|||
return &vpaObserver
|
||||
}
|
||||
|
||||
var _ = RecommenderE2eDescribe("Checkpoints", func() {
|
||||
var _ = utils.RecommenderE2eDescribe("Checkpoints", func() {
|
||||
f := framework.NewDefaultFramework("vertical-pod-autoscaling")
|
||||
f.NamespacePodSecurityEnforceLevel = podsecurity.LevelBaseline
|
||||
|
||||
ginkgo.It("with missing VPA objects are garbage collected", func() {
|
||||
ns := f.Namespace.Name
|
||||
vpaClientSet := getVpaClientSet(f)
|
||||
vpaClientSet := utils.GetVpaClientSet(f)
|
||||
|
||||
checkpoint := vpa_types.VerticalPodAutoscalerCheckpoint{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
|
|
@ -159,15 +160,15 @@ var _ = RecommenderE2eDescribe("Checkpoints", func() {
|
|||
})
|
||||
})
|
||||
|
||||
var _ = RecommenderE2eDescribe("VPA CRD object", func() {
|
||||
var _ = utils.RecommenderE2eDescribe("VPA CRD object", func() {
|
||||
f := framework.NewDefaultFramework("vertical-pod-autoscaling")
|
||||
f.NamespacePodSecurityEnforceLevel = podsecurity.LevelBaseline
|
||||
|
||||
ginkgo.It("serves recommendation for CronJob", func() {
|
||||
ginkgo.By("Setting up hamster CronJob")
|
||||
SetupHamsterCronJob(f, "*/5 * * * *", "100m", "100Mi", defaultHamsterReplicas)
|
||||
SetupHamsterCronJob(f, "*/5 * * * *", "100m", "100Mi", utils.DefaultHamsterReplicas)
|
||||
|
||||
vpaClientSet := getVpaClientSet(f)
|
||||
vpaClientSet := utils.GetVpaClientSet(f)
|
||||
|
||||
ginkgo.By("Setting up VPA")
|
||||
targetRef := &autoscaling.CrossVersionObjectReference{
|
||||
|
|
@ -176,7 +177,7 @@ var _ = RecommenderE2eDescribe("VPA CRD object", func() {
|
|||
Name: "hamster-cronjob",
|
||||
}
|
||||
|
||||
containerName := GetHamsterContainerNameByIndex(0)
|
||||
containerName := utils.GetHamsterContainerNameByIndex(0)
|
||||
vpaCRD := test.VerticalPodAutoscaler().
|
||||
WithName("hamster-vpa").
|
||||
WithNamespace(f.Namespace.Name).
|
||||
|
|
@ -184,15 +185,15 @@ var _ = RecommenderE2eDescribe("VPA CRD object", func() {
|
|||
WithContainer(containerName).
|
||||
Get()
|
||||
|
||||
InstallVPA(f, vpaCRD)
|
||||
utils.InstallVPA(f, vpaCRD)
|
||||
|
||||
ginkgo.By("Waiting for recommendation to be filled")
|
||||
_, err := WaitForRecommendationPresent(vpaClientSet, vpaCRD)
|
||||
_, err := utils.WaitForRecommendationPresent(vpaClientSet, vpaCRD)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
})
|
||||
})
|
||||
|
||||
var _ = RecommenderE2eDescribe("VPA CRD object", func() {
|
||||
var _ = utils.RecommenderE2eDescribe("VPA CRD object", func() {
|
||||
f := framework.NewDefaultFramework("vertical-pod-autoscaling")
|
||||
f.NamespacePodSecurityEnforceLevel = podsecurity.LevelBaseline
|
||||
|
||||
|
|
@ -211,22 +212,22 @@ var _ = RecommenderE2eDescribe("VPA CRD object", func() {
|
|||
)
|
||||
|
||||
ginkgo.By("Setting up a VPA CRD")
|
||||
containerName := GetHamsterContainerNameByIndex(0)
|
||||
containerName := utils.GetHamsterContainerNameByIndex(0)
|
||||
vpaCRD = test.VerticalPodAutoscaler().
|
||||
WithName("hamster-vpa").
|
||||
WithNamespace(f.Namespace.Name).
|
||||
WithTargetRef(hamsterTargetRef).
|
||||
WithTargetRef(utils.HamsterTargetRef).
|
||||
WithContainer(containerName).
|
||||
Get()
|
||||
|
||||
InstallVPA(f, vpaCRD)
|
||||
utils.InstallVPA(f, vpaCRD)
|
||||
|
||||
vpaClientSet = getVpaClientSet(f)
|
||||
vpaClientSet = utils.GetVpaClientSet(f)
|
||||
})
|
||||
|
||||
ginkgo.It("serves recommendation", func() {
|
||||
ginkgo.By("Waiting for recommendation to be filled")
|
||||
_, err := WaitForRecommendationPresent(vpaClientSet, vpaCRD)
|
||||
_, err := utils.WaitForRecommendationPresent(vpaClientSet, vpaCRD)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
})
|
||||
|
||||
|
|
@ -235,7 +236,7 @@ var _ = RecommenderE2eDescribe("VPA CRD object", func() {
|
|||
o := getVpaObserver(vpaClientSet, f.Namespace.Name)
|
||||
|
||||
ginkgo.By("Waiting for recommendation to be filled")
|
||||
_, err := WaitForRecommendationPresent(vpaClientSet, vpaCRD)
|
||||
_, err := utils.WaitForRecommendationPresent(vpaClientSet, vpaCRD)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
ginkgo.By("Drain diffs")
|
||||
out:
|
||||
|
|
@ -270,7 +271,7 @@ var _ = RecommenderE2eDescribe("VPA CRD object", func() {
|
|||
})
|
||||
})
|
||||
|
||||
var _ = RecommenderE2eDescribe("VPA CRD object", func() {
|
||||
var _ = utils.RecommenderE2eDescribe("VPA CRD object", func() {
|
||||
f := framework.NewDefaultFramework("vertical-pod-autoscaling")
|
||||
f.NamespacePodSecurityEnforceLevel = podsecurity.LevelBaseline
|
||||
|
||||
|
|
@ -287,26 +288,26 @@ var _ = RecommenderE2eDescribe("VPA CRD object", func() {
|
|||
1, /* number of replicas */
|
||||
)
|
||||
|
||||
vpaClientSet = getVpaClientSet(f)
|
||||
vpaClientSet = utils.GetVpaClientSet(f)
|
||||
})
|
||||
|
||||
ginkgo.It("respects min allowed recommendation", func() {
|
||||
const minMilliCpu = 10000
|
||||
ginkgo.By("Setting up a VPA CRD")
|
||||
containerName := GetHamsterContainerNameByIndex(0)
|
||||
containerName := utils.GetHamsterContainerNameByIndex(0)
|
||||
vpaCRD2 := test.VerticalPodAutoscaler().
|
||||
WithName("hamster-vpa").
|
||||
WithNamespace(f.Namespace.Name).
|
||||
WithTargetRef(hamsterTargetRef).
|
||||
WithTargetRef(utils.HamsterTargetRef).
|
||||
WithContainer(containerName).
|
||||
WithMinAllowed(containerName, "10000", "").
|
||||
Get()
|
||||
|
||||
InstallVPA(f, vpaCRD2)
|
||||
utils.InstallVPA(f, vpaCRD2)
|
||||
vpaCRD := vpaCRD2
|
||||
|
||||
ginkgo.By("Waiting for recommendation to be filled")
|
||||
vpa, err := WaitForRecommendationPresent(vpaClientSet, vpaCRD)
|
||||
vpa, err := utils.WaitForRecommendationPresent(vpaClientSet, vpaCRD)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
gomega.Expect(vpa.Status.Recommendation.ContainerRecommendations).Should(gomega.HaveLen(1))
|
||||
cpu := getMilliCpu(vpa.Status.Recommendation.ContainerRecommendations[0].Target)
|
||||
|
|
@ -320,16 +321,16 @@ var _ = RecommenderE2eDescribe("VPA CRD object", func() {
|
|||
ginkgo.It("respects max allowed recommendation", func() {
|
||||
const maxMilliCpu = 1
|
||||
ginkgo.By("Setting up a VPA CRD")
|
||||
containerName := GetHamsterContainerNameByIndex(0)
|
||||
containerName := utils.GetHamsterContainerNameByIndex(0)
|
||||
vpaCRD := test.VerticalPodAutoscaler().
|
||||
WithName("hamster-vpa").
|
||||
WithNamespace(f.Namespace.Name).
|
||||
WithTargetRef(hamsterTargetRef).
|
||||
WithTargetRef(utils.HamsterTargetRef).
|
||||
WithContainer(containerName).
|
||||
WithMaxAllowed(containerName, "1m", "").
|
||||
Get()
|
||||
|
||||
InstallVPA(f, vpaCRD)
|
||||
utils.InstallVPA(f, vpaCRD)
|
||||
|
||||
ginkgo.By("Waiting for recommendation to be filled")
|
||||
vpa, err := WaitForUncappedCPURecommendationAbove(vpaClientSet, vpaCRD, maxMilliCpu)
|
||||
|
|
@@ -347,67 +348,67 @@ func getMilliCpu(resources apiv1.ResourceList) int64 {
 	return cpu.MilliValue()
 }

-var _ = RecommenderE2eDescribe("VPA CRD object", func() {
+var _ = utils.RecommenderE2eDescribe("VPA CRD object", func() {
 	f := framework.NewDefaultFramework("vertical-pod-autoscaling")
 	f.NamespacePodSecurityEnforceLevel = podsecurity.LevelBaseline

 	var vpaClientSet vpa_clientset.Interface

 	ginkgo.BeforeEach(func() {
-		vpaClientSet = getVpaClientSet(f)
+		vpaClientSet = utils.GetVpaClientSet(f)
 	})

 	ginkgo.It("with no containers opted out all containers get recommendations", func() {
 		ginkgo.By("Setting up a hamster deployment")
-		d := NewNHamstersDeployment(f, 2 /*number of containers*/)
-		_ = startDeploymentPods(f, d)
+		d := utils.NewNHamstersDeployment(f, 2 /*number of containers*/)
+		_ = utils.StartDeploymentPods(f, d)

 		ginkgo.By("Setting up VPA CRD")
-		container1Name := GetHamsterContainerNameByIndex(0)
-		container2Name := GetHamsterContainerNameByIndex(1)
+		container1Name := utils.GetHamsterContainerNameByIndex(0)
+		container2Name := utils.GetHamsterContainerNameByIndex(1)
 		vpaCRD := test.VerticalPodAutoscaler().
 			WithName("hamster-vpa").
 			WithNamespace(f.Namespace.Name).
-			WithTargetRef(hamsterTargetRef).
+			WithTargetRef(utils.HamsterTargetRef).
 			WithContainer(container1Name).
 			WithContainer(container2Name).
 			Get()

-		InstallVPA(f, vpaCRD)
+		utils.InstallVPA(f, vpaCRD)

 		ginkgo.By("Waiting for recommendation to be filled for both containers")
-		vpa, err := WaitForRecommendationPresent(vpaClientSet, vpaCRD)
+		vpa, err := utils.WaitForRecommendationPresent(vpaClientSet, vpaCRD)
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 		gomega.Expect(vpa.Status.Recommendation.ContainerRecommendations).Should(gomega.HaveLen(2))
 	})

 	ginkgo.It("only containers not-opted-out get recommendations", func() {
 		ginkgo.By("Setting up a hamster deployment")
-		d := NewNHamstersDeployment(f, 2 /*number of containers*/)
-		_ = startDeploymentPods(f, d)
+		d := utils.NewNHamstersDeployment(f, 2 /*number of containers*/)
+		_ = utils.StartDeploymentPods(f, d)

 		ginkgo.By("Setting up VPA CRD")
-		container1Name := GetHamsterContainerNameByIndex(0)
-		container2Name := GetHamsterContainerNameByIndex(1)
+		container1Name := utils.GetHamsterContainerNameByIndex(0)
+		container2Name := utils.GetHamsterContainerNameByIndex(1)
 		vpaCRD := test.VerticalPodAutoscaler().
 			WithName("hamster-vpa").
 			WithNamespace(f.Namespace.Name).
-			WithTargetRef(hamsterTargetRef).
+			WithTargetRef(utils.HamsterTargetRef).
 			WithContainer(container1Name).
 			WithScalingMode(container1Name, vpa_types.ContainerScalingModeOff).
 			WithContainer(container2Name).
 			Get()

-		InstallVPA(f, vpaCRD)
+		utils.InstallVPA(f, vpaCRD)

 		ginkgo.By("Waiting for recommendation to be filled for just one container")
-		vpa, err := WaitForRecommendationPresent(vpaClientSet, vpaCRD)
+		vpa, err := utils.WaitForRecommendationPresent(vpaClientSet, vpaCRD)
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 		errMsg := fmt.Sprintf("%s container has recommendations turned off. We expect only recommendations for %s",
-			GetHamsterContainerNameByIndex(0),
-			GetHamsterContainerNameByIndex(1))
+			utils.GetHamsterContainerNameByIndex(0),
+			utils.GetHamsterContainerNameByIndex(1))
 		gomega.Expect(vpa.Status.Recommendation.ContainerRecommendations).Should(gomega.HaveLen(1), errMsg)
-		gomega.Expect(vpa.Status.Recommendation.ContainerRecommendations[0].ContainerName).To(gomega.Equal(GetHamsterContainerNameByIndex(1)), errMsg)
+		gomega.Expect(vpa.Status.Recommendation.ContainerRecommendations[0].ContainerName).To(gomega.Equal(utils.GetHamsterContainerNameByIndex(1)), errMsg)
 	})
 })
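For context on the opt-out mechanism tested above: WithScalingMode(container1Name, vpa_types.ContainerScalingModeOff) presumably translates into a containerPolicies entry in the VPA's resourcePolicy, roughly like the sketch below (the builder's internals are an assumption; the field and constant names are the real VPA API types):

	off := vpa_types.ContainerScalingModeOff
	resourcePolicy := vpa_types.PodResourcePolicy{
		ContainerPolicies: []vpa_types.ContainerResourcePolicy{
			{ContainerName: container1Name, Mode: &off}, // opted out: no recommendation produced
			{ContainerName: container2Name},             // default mode: recommendations on
		},
	}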
@@ -24,6 +24,7 @@ import (
 	autoscaling "k8s.io/api/autoscaling/v1"
 	apiv1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/autoscaler/vertical-pod-autoscaler/e2e/utils"
 	vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1"
 	"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/status"
 	"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/test"
@@ -217,12 +218,12 @@ func setupPodsForEviction(f *framework.Framework, hamsterCPU, hamsterMemory stri
 		Name: "hamster-deployment",
 	}
 	ginkgo.By(fmt.Sprintf("Setting up a hamster %v", controller.Kind))
-	setupHamsterController(f, controller.Kind, hamsterCPU, hamsterMemory, defaultHamsterReplicas)
+	setupHamsterController(f, controller.Kind, hamsterCPU, hamsterMemory, utils.DefaultHamsterReplicas)
 	podList, err := GetHamsterPods(f)
 	gomega.Expect(err).NotTo(gomega.HaveOccurred())

 	ginkgo.By("Setting up a VPA CRD")
-	containerName := GetHamsterContainerNameByIndex(0)
+	containerName := utils.GetHamsterContainerNameByIndex(0)
 	vpaCRD := test.VerticalPodAutoscaler().
 		WithName("hamster-vpa").
 		WithNamespace(f.Namespace.Name).
@@ -239,7 +240,7 @@ func setupPodsForEviction(f *framework.Framework, hamsterCPU, hamsterMemory stri
 			GetContainerResources()).
 		Get()

-	InstallVPA(f, vpaCRD)
+	utils.InstallVPA(f, vpaCRD)

 	return podList
 }
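utils.InstallVPA itself is not part of this diff; the call sites show it takes (f, vpaCRD). A plausible minimal shape, assuming it creates the object through the VPA clientset and fails the spec on error (the explicit clientset parameter here is an invention of this sketch):

	func installVPA(f *framework.Framework, c vpa_clientset.Interface, vpa *vpa_types.VerticalPodAutoscaler) {
		// Create the VPA object in the test namespace and fail fast on error.
		_, err := c.AutoscalingV1().VerticalPodAutoscalers(f.Namespace.Name).
			Create(context.TODO(), vpa, metav1.CreateOptions{})
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
	}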
@@ -259,12 +260,12 @@ func setupPodsForInPlace(f *framework.Framework, hamsterCPU, hamsterMemory strin
 		Name: "hamster-deployment",
 	}
 	ginkgo.By(fmt.Sprintf("Setting up a hamster %v", controller.Kind))
-	setupHamsterController(f, controller.Kind, hamsterCPU, hamsterMemory, defaultHamsterReplicas)
+	setupHamsterController(f, controller.Kind, hamsterCPU, hamsterMemory, utils.DefaultHamsterReplicas)
 	podList, err := GetHamsterPods(f)
 	gomega.Expect(err).NotTo(gomega.HaveOccurred())

 	ginkgo.By("Setting up a VPA CRD")
-	containerName := GetHamsterContainerNameByIndex(0)
+	containerName := utils.GetHamsterContainerNameByIndex(0)
 	vpaBuilder := test.VerticalPodAutoscaler().
 		WithName("hamster-vpa").
 		WithNamespace(f.Namespace.Name).
@@ -284,7 +285,7 @@ func setupPodsForInPlace(f *framework.Framework, hamsterCPU, hamsterMemory strin
 	}

 	vpaCRD := vpaBuilder.Get()
-	InstallVPA(f, vpaCRD)
+	utils.InstallVPA(f, vpaCRD)

 	return podList
 }
@@ -34,3 +34,7 @@ The local test cases support running the `recommender` with external metrics. T
 additional permissions we don't want to automatically enable for all customers via the
 configuration given in `deploy/vpa-rbac.yaml`. The scripts use a context diff `hack/e2e/vpa-rbac.diff`
 to enable those permissions when running locally.
+
+# Quick Integration Tests
+
+`run-integration-locally.sh` is a quicker alternative to `run-e2e-locally.sh` for integration testing; it is intended only for simple tests.
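Assuming the script keeps the argument interface introduced below, a local run is simply `hack/run-integration-locally.sh recommender` (the `hack/` location is inferred from the script's `SCRIPT_ROOT` handling, not stated in this diff). Note that the script deletes and recreates the local kind cluster named `kind`, so do not point it at a cluster you care about.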
@@ -0,0 +1,131 @@
#!/bin/bash

# Copyright 2025 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o nounset
set -o pipefail

BASE_NAME=$(basename $0)
SCRIPT_ROOT=$(dirname ${BASH_SOURCE})/..
source "${SCRIPT_ROOT}/hack/lib/util.sh"

ARCH=$(kube::util::host_arch)

function print_help {
  echo "ERROR! Usage: $BASE_NAME <suite>"
  echo "<suite> should be one of:"
  echo " - recommender"
}

if [ $# -eq 0 ]; then
  print_help
  exit 1
fi

if [ $# -gt 1 ]; then
  print_help
  exit 1
fi

SUITE=$1
REQUIRED_COMMANDS="
docker
go
kind
kubectl
make
"

for i in $REQUIRED_COMMANDS; do
  if ! command -v $i > /dev/null 2>&1
  then
    echo "$i could not be found, please ensure it is installed"
    echo
    echo "The following commands are required to run these tests:"
    echo $REQUIRED_COMMANDS
    exit 1
  fi
done

if ! docker ps >/dev/null 2>&1
then
  echo "docker isn't running"
  echo
  echo "Please ensure that docker is running"
  exit 1
fi

case ${SUITE} in
  recommender)
    COMPONENTS="${SUITE}"
    ;;
  *)
    print_help
    exit 1
    ;;
esac

echo "Deleting KIND cluster 'kind'."
kind delete cluster -n kind -q

echo "Creating KIND cluster 'kind'"
KIND_VERSION="kindest/node:v1.33.0@sha256:02f73d6ae3f11ad5d543f16736a2cb2a63a300ad60e81dac22099b0b04784a4e"
if ! kind create cluster --image=${KIND_VERSION}; then
  echo "Failed to create KIND cluster. Exiting. Make sure kind version is updated."
  echo "Available versions: https://github.com/kubernetes-sigs/kind/releases"
  exit 1
fi

# Local KIND images
export REGISTRY=${REGISTRY:-localhost:5001}
export TAG=${TAG:-latest}

rm -f ${SCRIPT_ROOT}/hack/e2e/vpa-rbac.yaml
patch -c ${SCRIPT_ROOT}/deploy/vpa-rbac.yaml -i ${SCRIPT_ROOT}/hack/e2e/vpa-rbac.diff -o ${SCRIPT_ROOT}/hack/e2e/vpa-rbac.yaml
kubectl apply -f ${SCRIPT_ROOT}/hack/e2e/vpa-rbac.yaml
# Other-versioned CRDs are irrelevant as we're running a modern-ish cluster.
kubectl apply -f ${SCRIPT_ROOT}/deploy/vpa-v1-crd-gen.yaml
kubectl apply -f ${SCRIPT_ROOT}/hack/e2e/k8s-metrics-server.yaml

for i in ${COMPONENTS}; do
  ALL_ARCHITECTURES=${ARCH} make --directory ${SCRIPT_ROOT}/pkg/${i} docker-build REGISTRY=${REGISTRY} TAG=${TAG}
  docker tag ${REGISTRY}/vpa-${i}-${ARCH}:${TAG} ${REGISTRY}/vpa-${i}:${TAG}
  kind load docker-image ${REGISTRY}/vpa-${i}:${TAG}
done

export GO111MODULE=on

case ${SUITE} in
  recommender)
    export KUBECONFIG=$HOME/.kube/config
    pushd ${SCRIPT_ROOT}/e2e
    go test ./integration/*go -v --test.timeout=10m --args --ginkgo.v=true --ginkgo.focus="\[VPA\] \[${SUITE}\]" --disable-log-dump --ginkgo.timeout=10m
    INTEGRATION_RESULT=$?
    popd
    echo integration test result: ${INTEGRATION_RESULT}
    if [ $INTEGRATION_RESULT -gt 0 ]; then
      echo "Please check integration \"go test\" logs!"
      echo "Tests failed"
      exit 1
    fi
    ;;
  *)
    print_help
    exit 1
    ;;
esac
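The --ginkgo.focus="\[VPA\] \[${SUITE}\]" filter above only selects anything if the specs carry matching labels. Presumably the e2e utils wrap ginkgo.Describe along these lines; the exact label string is an assumption, not confirmed by this diff:

	import (
		"fmt"

		ginkgo "github.com/onsi/ginkgo/v2"
	)

	// RecommenderE2eDescribe labels a spec so that --ginkgo.focus can select
	// the whole recommender suite via its "[VPA] [recommender]" prefix.
	func RecommenderE2eDescribe(name string, body func()) bool {
		return ginkgo.Describe(fmt.Sprintf("[VPA] [recommender] %s", name), body)
	}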