From e303d037880cee34d2741c03f0a9e007e9709ee0 Mon Sep 17 00:00:00 2001
From: Joachim Bartosik
Date: Wed, 5 Jun 2019 15:12:55 +0200
Subject: [PATCH 1/2] Clean up e2e vpa tests

---
 .../e2e/v1beta2/admission_controller.go       |  4 ++--
 vertical-pod-autoscaler/e2e/v1beta2/common.go | 18 +++++++++---------
 .../pkg/utils/vpa/capping.go                  |  4 ++--
 .../pkg/utils/vpa/capping_test.go             |  4 ++--
 4 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go b/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go
index 54c3891b9c..a50322ff6d 100644
--- a/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go
+++ b/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go
@@ -197,7 +197,7 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 		}
 	})
 
-	ginkgo.It("caps request according to pod max limit set in pod LimitRange", func() {
+	ginkgo.It("caps request according to pod max limit set in LimitRange", func() {
 		d := NewHamsterDeploymentWithResourcesAndLimits(f,
 			ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("100Mi"), /*memory request*/
 			ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("200Mi") /*memory limit*/)
@@ -247,7 +247,7 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 		}
 	})
 
-	ginkgo.It("raises request according to pod min limit set in pod LimitRange", func() {
+	ginkgo.It("raises request according to pod min limit set in LimitRange", func() {
 		d := NewHamsterDeploymentWithResourcesAndLimits(f,
 			ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/
 			ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)
diff --git a/vertical-pod-autoscaler/e2e/v1beta2/common.go b/vertical-pod-autoscaler/e2e/v1beta2/common.go
index 211d8f0b84..ddbdf9ec46 100644
--- a/vertical-pod-autoscaler/e2e/v1beta2/common.go
+++ b/vertical-pod-autoscaler/e2e/v1beta2/common.go
@@ -103,9 +103,9 @@ func SetupHamsterDeployment(f *framework.Framework, cpu, memory string, replicas
 	d := NewHamsterDeploymentWithResources(f, cpuQuantity, memoryQuantity)
 	d.Spec.Replicas = &replicas
 	d, err := f.ClientSet.AppsV1().Deployments(f.Namespace.Name).Create(d)
-	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error when starting deployment creation")
 	err = framework.WaitForDeploymentComplete(f.ClientSet, d)
-	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error waiting for deployment creation to finish")
 	return d
 }
 
@@ -234,18 +234,18 @@ func NewVPA(f *framework.Framework, name string, targetRef *autoscaling.CrossVer
 func InstallVPA(f *framework.Framework, vpa *vpa_types.VerticalPodAutoscaler) {
 	ns := f.Namespace.Name
 	config, err := framework.LoadConfig()
-	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error loading framework")
 	vpaClientSet := vpa_clientset.NewForConfigOrDie(config)
 	vpaClient := vpaClientSet.AutoscalingV1beta2()
 	_, err = vpaClient.VerticalPodAutoscalers(ns).Create(vpa)
-	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error creating VPA")
 }
 
 // ParseQuantityOrDie parses quantity from string and dies with an error if
 // unparsable.
 func ParseQuantityOrDie(text string) resource.Quantity {
 	quantity, err := resource.ParseQuantity(text)
-	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error parsing quantity: %s", text)
 	return quantity
 }
 
@@ -337,9 +337,9 @@ func GetEvictedPodsCount(currentPodSet PodSet, initialPodSet PodSet) int {
 func CheckNoPodsEvicted(f *framework.Framework, initialPodSet PodSet) {
 	time.Sleep(VpaEvictionTimeout)
 	currentPodList, err := GetHamsterPods(f)
-	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error when listing hamster pods to check number of pod evictions")
 	restarted := GetEvictedPodsCount(MakePodSet(currentPodList), initialPodSet)
-	gomega.Expect(restarted).To(gomega.Equal(0))
+	gomega.Expect(restarted).To(gomega.Equal(0), "there should be no pod evictions")
 }
 
 // WaitForVPAMatch pools VPA object until match function returns true. Returns
@@ -401,7 +401,7 @@ func installLimitRange(f *framework.Framework, minCpuLimit, minMemoryLimit, maxC
 
 	if minMemoryLimit != nil || minCpuLimit != nil {
 		lrItem := apiv1.LimitRangeItem{
-			Type: apiv1.LimitTypeContainer,
+			Type: lrType,
 			Min:  apiv1.ResourceList{},
 		}
 		if minCpuLimit != nil {
@@ -413,7 +413,7 @@ func installLimitRange(f *framework.Framework, minCpuLimit, minMemoryLimit, maxC
 		lr.Spec.Limits = append(lr.Spec.Limits, lrItem)
 	}
 	_, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Create(lr)
-	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error when creating limit range")
 }
 
 // InstallLimitRangeWithMax installs a LimitRange with a maximum limit for CPU and memory.
diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/capping.go b/vertical-pod-autoscaler/pkg/utils/vpa/capping.go
index cc4cd43d19..78772c1755 100644
--- a/vertical-pod-autoscaler/pkg/utils/vpa/capping.go
+++ b/vertical-pod-autoscaler/pkg/utils/vpa/capping.go
@@ -345,8 +345,8 @@ func applyPodLimitRange(resources []vpa_types.RecommendedContainerResources,
 
 	if minLimit.Cmp(sumRecommendation) > 0 {
 		for i := range pod.Spec.Containers {
-			limit := resources[i].Target[resourceName]
-			cappedContainerRequest, _ := scaleQuantityProportionally(&limit, &sumRecommendation, &minLimit)
+			request := resources[i].Target[resourceName]
+			cappedContainerRequest, _ := scaleQuantityProportionally(&request, &sumRecommendation, &minLimit)
 			resources[i].Target[resourceName] = *cappedContainerRequest
 		}
 		return resources
diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go b/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go
index 35c44e3e8c..6ccc3d32d1 100644
--- a/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go
+++ b/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go
@@ -489,7 +489,7 @@ func TestApplyPodLimitRange(t *testing.T) {
 			},
 		},
 		{
-			name: "cap mem request to min",
+			name: "cap mem request to pod min",
 			resources: []vpa_types.RecommendedContainerResources{
 				{
 					ContainerName: "container1",
@@ -533,7 +533,7 @@ func TestApplyPodLimitRange(t *testing.T) {
 			limitRange: apiv1.LimitRangeItem{
 				Type: apiv1.LimitTypePod,
 				Max: apiv1.ResourceList{
-					apiv1.ResourceCPU: resource.MustParse("10G"),
+					apiv1.ResourceMemory: resource.MustParse("10G"),
 				},
 				Min: apiv1.ResourceList{
 					apiv1.ResourceMemory: resource.MustParse("4G"),

From 8ea9f6a559b4969f80c72cb93bc5133c8e9b7578 Mon Sep 17 00:00:00 2001
From: Joachim Bartosik
Date: Thu, 6 Jun 2019 11:04:02 +0200
Subject: [PATCH 2/2] Fix pod limit range e2e tests

Looks like target not in [lower bound, upper bound] is what was breaking
e2e tests.
---
 .../pkg/utils/vpa/capping.go      | 23 ++++++++++++++-----
 .../pkg/utils/vpa/capping_test.go |  3 ++-
 2 files changed, 19 insertions(+), 7 deletions(-)

diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/capping.go b/vertical-pod-autoscaler/pkg/utils/vpa/capping.go
index 78772c1755..19a17a367d 100644
--- a/vertical-pod-autoscaler/pkg/utils/vpa/capping.go
+++ b/vertical-pod-autoscaler/pkg/utils/vpa/capping.go
@@ -320,7 +320,8 @@ func getBoundaryRecommendation(recommendation apiv1.ResourceList, container apiv
 }
 
 func applyPodLimitRange(resources []vpa_types.RecommendedContainerResources,
-	pod *apiv1.Pod, limitRange apiv1.LimitRangeItem, resourceName apiv1.ResourceName) []vpa_types.RecommendedContainerResources {
+	pod *apiv1.Pod, limitRange apiv1.LimitRangeItem, resourceName apiv1.ResourceName,
+	fieldGetter func(vpa_types.RecommendedContainerResources) *apiv1.ResourceList) []vpa_types.RecommendedContainerResources {
 	minLimit := limitRange.Min[resourceName]
 	maxLimit := limitRange.Max[resourceName]
 	defaultLimit := limitRange.Default[resourceName]
@@ -332,7 +333,7 @@ func applyPodLimitRange(resources []vpa_types.RecommendedContainerResources,
 		}
 		limit := container.Resources.Limits[resourceName]
 		request := container.Resources.Requests[resourceName]
-		recommendation := resources[i].Target[resourceName]
+		recommendation := (*fieldGetter(resources[i]))[resourceName]
 		containerLimit, _ := getProportionalResourceLimit(resourceName, &limit, &request, &recommendation, &defaultLimit)
 		if containerLimit != nil {
 			sumLimit.Add(*containerLimit)
@@ -345,9 +346,9 @@ func applyPodLimitRange(resources []vpa_types.RecommendedContainerResources,
 
 	if minLimit.Cmp(sumRecommendation) > 0 {
 		for i := range pod.Spec.Containers {
-			request := resources[i].Target[resourceName]
+			request := (*fieldGetter(resources[i]))[resourceName]
 			cappedContainerRequest, _ := scaleQuantityProportionally(&request, &sumRecommendation, &minLimit)
-			resources[i].Target[resourceName] = *cappedContainerRequest
+			(*fieldGetter(resources[i]))[resourceName] = *cappedContainerRequest
 		}
 		return resources
 	}
@@ -376,7 +377,17 @@ func (c *cappingRecommendationProcessor) capProportionallyToPodLimitRange(
 	if podLimitRange == nil {
 		return containerRecommendations, nil
 	}
-	containerRecommendations = applyPodLimitRange(containerRecommendations, pod, *podLimitRange, apiv1.ResourceCPU)
-	containerRecommendations = applyPodLimitRange(containerRecommendations, pod, *podLimitRange, apiv1.ResourceMemory)
+	getTarget := func(rl vpa_types.RecommendedContainerResources) *apiv1.ResourceList { return &rl.Target }
+	getUpper := func(rl vpa_types.RecommendedContainerResources) *apiv1.ResourceList { return &rl.UpperBound }
+	getLower := func(rl vpa_types.RecommendedContainerResources) *apiv1.ResourceList { return &rl.LowerBound }
+
+	containerRecommendations = applyPodLimitRange(containerRecommendations, pod, *podLimitRange, apiv1.ResourceCPU, getUpper)
+	containerRecommendations = applyPodLimitRange(containerRecommendations, pod, *podLimitRange, apiv1.ResourceMemory, getUpper)
+
+	containerRecommendations = applyPodLimitRange(containerRecommendations, pod, *podLimitRange, apiv1.ResourceCPU, getTarget)
+	containerRecommendations = applyPodLimitRange(containerRecommendations, pod, *podLimitRange, apiv1.ResourceMemory, getTarget)
+
+	containerRecommendations = applyPodLimitRange(containerRecommendations, pod, *podLimitRange, apiv1.ResourceCPU, getLower)
+	containerRecommendations = applyPodLimitRange(containerRecommendations, pod, *podLimitRange, apiv1.ResourceMemory, getLower)
 	return containerRecommendations, nil
 }
diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go b/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go
index 6ccc3d32d1..3ae3258f87 100644
--- a/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go
+++ b/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go
@@ -556,9 +556,10 @@ func TestApplyPodLimitRange(t *testing.T) {
 			},
 		},
 	}
+	getTarget := func(rl vpa_types.RecommendedContainerResources) *apiv1.ResourceList { return &rl.Target }
 	for _, tc := range tests {
 		t.Run(tc.name, func(t *testing.T) {
-			got := applyPodLimitRange(tc.resources, &tc.pod, tc.limitRange, tc.resourceName)
+			got := applyPodLimitRange(tc.resources, &tc.pod, tc.limitRange, tc.resourceName, getTarget)
 			assert.Equal(t, tc.expect, got)
 		})
 	}
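
Illustrative sketch (not part of either patch): the second patch threads a fieldGetter through applyPodLimitRange so pod LimitRange capping can be applied separately to UpperBound, Target and LowerBound, which keeps the target inside [lower bound, upper bound] after capping. The Go program below is a minimal, self-contained stand-in for that field-getter pattern only: the types and int64 quantities replace vpa_types.RecommendedContainerResources, apiv1.ResourceList and resource.Quantity; it shows only proportional scale-down against an assumed pod-level max, not the full min/max and limit-versus-request logic of capping.go; and it passes the struct by pointer instead of relying on ResourceList's map semantics as the real helper does.

// fieldgetter_sketch.go: simplified illustration of capping one recommendation
// field at a time via a getter, as introduced in PATCH 2/2.
package main

import "fmt"

// ResourceList is a stand-in for apiv1.ResourceList (values in arbitrary milli-units).
type ResourceList map[string]int64

// RecommendedContainerResources is a stand-in for vpa_types.RecommendedContainerResources.
type RecommendedContainerResources struct {
	ContainerName string
	Target        ResourceList
	LowerBound    ResourceList
	UpperBound    ResourceList
}

// capField scales the field selected by fieldGetter so that its per-pod sum does
// not exceed podMax, scaling every container proportionally (hypothetical helper,
// mirroring only the scale-down direction of applyPodLimitRange).
func capField(recs []RecommendedContainerResources, resourceName string, podMax int64,
	fieldGetter func(*RecommendedContainerResources) *ResourceList) {
	var sum int64
	for i := range recs {
		sum += (*fieldGetter(&recs[i]))[resourceName]
	}
	if sum == 0 || sum <= podMax {
		return // nothing to cap
	}
	for i := range recs {
		field := fieldGetter(&recs[i])
		(*field)[resourceName] = (*field)[resourceName] * podMax / sum // proportional scale-down
	}
}

func main() {
	recs := []RecommendedContainerResources{
		{ContainerName: "container1", Target: ResourceList{"memory": 2000}, LowerBound: ResourceList{"memory": 1500}, UpperBound: ResourceList{"memory": 3000}},
		{ContainerName: "container2", Target: ResourceList{"memory": 2000}, LowerBound: ResourceList{"memory": 1500}, UpperBound: ResourceList{"memory": 3000}},
	}
	getTarget := func(r *RecommendedContainerResources) *ResourceList { return &r.Target }
	getLower := func(r *RecommendedContainerResources) *ResourceList { return &r.LowerBound }
	getUpper := func(r *RecommendedContainerResources) *ResourceList { return &r.UpperBound }

	// Cap each field against an assumed pod-level max of 3000, one getter at a time,
	// the way capProportionallyToPodLimitRange now calls applyPodLimitRange per field.
	for _, get := range []func(*RecommendedContainerResources) *ResourceList{getUpper, getTarget, getLower} {
		capField(recs, "memory", 3000, get)
	}
	fmt.Printf("%+v\n", recs)
}

Because every field is capped with the same pod-level bound, the capped target cannot end up above a capped upper bound or below a capped lower bound, which is the consistency property the commit message points to as the cause of the e2e failures.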