Merge pull request #2095 from jbartosik/pod-limit-range-e2e

Clean up e2e vpa tests
Kubernetes Prow Robot, 2019-06-06 03:12:12 -07:00, committed by GitHub
commit 037355ea2e
4 changed files with 33 additions and 21 deletions

View File

@@ -197,7 +197,7 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
}
})
ginkgo.It("caps request according to pod max limit set in pod LimitRange", func() {
ginkgo.It("caps request according to pod max limit set in LimitRange", func() {
d := NewHamsterDeploymentWithResourcesAndLimits(f,
ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("100Mi"), /*memory request*/
ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("200Mi") /*memory limit*/)
@@ -247,7 +247,7 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
}
})
ginkgo.It("raises request according to pod min limit set in pod LimitRange", func() {
ginkgo.It("raises request according to pod min limit set in LimitRange", func() {
d := NewHamsterDeploymentWithResourcesAndLimits(f,
ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/
ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)

View File

@@ -103,9 +103,9 @@ func SetupHamsterDeployment(f *framework.Framework, cpu, memory string, replicas
d := NewHamsterDeploymentWithResources(f, cpuQuantity, memoryQuantity)
d.Spec.Replicas = &replicas
d, err := f.ClientSet.AppsV1().Deployments(f.Namespace.Name).Create(d)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error when starting deployment creation")
err = framework.WaitForDeploymentComplete(f.ClientSet, d)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error waiting for deployment creation to finish")
return d
}
@@ -234,18 +234,18 @@ func NewVPA(f *framework.Framework, name string, targetRef *autoscaling.CrossVer
func InstallVPA(f *framework.Framework, vpa *vpa_types.VerticalPodAutoscaler) {
ns := f.Namespace.Name
config, err := framework.LoadConfig()
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error loading framework")
vpaClientSet := vpa_clientset.NewForConfigOrDie(config)
vpaClient := vpaClientSet.AutoscalingV1beta2()
_, err = vpaClient.VerticalPodAutoscalers(ns).Create(vpa)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error creating VPA")
}
// ParseQuantityOrDie parses quantity from string and dies with an error if
// unparsable.
func ParseQuantityOrDie(text string) resource.Quantity {
quantity, err := resource.ParseQuantity(text)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error parsing quantity: %s", text)
return quantity
}
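
The change applied throughout these helpers is gomega's optional assertion description: any arguments after the matcher form a printf-style message that is printed only when the assertion fails, which makes e2e failures self-explanatory. A standalone sketch of the same call outside ginkgo, using gomega's NewGomegaWithT wrapper (the test name is illustrative):

package common_test

import (
	"testing"

	"github.com/onsi/gomega"
	"k8s.io/apimachinery/pkg/api/resource"
)

// TestParseQuantity shows the optional description arguments: everything
// after the matcher is a format string plus args, shown only on failure.
func TestParseQuantity(t *testing.T) {
	g := gomega.NewGomegaWithT(t)
	text := "100Mi"
	_, err := resource.ParseQuantity(text)
	g.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error parsing quantity: %s", text)
}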
@@ -337,9 +337,9 @@ func GetEvictedPodsCount(currentPodSet PodSet, initialPodSet PodSet) int {
func CheckNoPodsEvicted(f *framework.Framework, initialPodSet PodSet) {
time.Sleep(VpaEvictionTimeout)
currentPodList, err := GetHamsterPods(f)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error when listing hamster pods to check number of pod evictions")
restarted := GetEvictedPodsCount(MakePodSet(currentPodList), initialPodSet)
-gomega.Expect(restarted).To(gomega.Equal(0))
+gomega.Expect(restarted).To(gomega.Equal(0), "there should be no pod evictions")
}
// WaitForVPAMatch polls VPA object until match function returns true. Returns
@@ -401,7 +401,7 @@ func installLimitRange(f *framework.Framework, minCpuLimit, minMemoryLimit, maxC
if minMemoryLimit != nil || minCpuLimit != nil {
lrItem := apiv1.LimitRangeItem{
-Type: apiv1.LimitTypeContainer,
+Type: lrType,
Min: apiv1.ResourceList{},
}
if minCpuLimit != nil {
@@ -413,7 +413,7 @@ func installLimitRange(f *framework.Framework, minCpuLimit, minMemoryLimit, maxC
lr.Spec.Limits = append(lr.Spec.Limits, lrItem)
}
_, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Create(lr)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error when creating limit range")
}
// InstallLimitRangeWithMax installs a LimitRange with a maximum limit for CPU and memory.
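
Replacing the hard-coded apiv1.LimitTypeContainer with lrType implies installLimitRange now receives the LimitRange scope from its caller, so one helper can install either container- or pod-scoped ranges; the full new signature is not visible in this hunk. A self-contained sketch of the pattern under that assumption (the helper name is illustrative, not a function from this PR):

package main

import (
	apiv1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// newMinLimitRangeItem mirrors the change above: the caller picks the scope
// via lrType (apiv1.LimitTypeContainer or apiv1.LimitTypePod) instead of it
// being hard-coded inside the helper.
func newMinLimitRangeItem(lrType apiv1.LimitRangeType, minCpuLimit, minMemoryLimit *resource.Quantity) apiv1.LimitRangeItem {
	item := apiv1.LimitRangeItem{
		Type: lrType,
		Min:  apiv1.ResourceList{},
	}
	if minCpuLimit != nil {
		item.Min[apiv1.ResourceCPU] = *minCpuLimit
	}
	if minMemoryLimit != nil {
		item.Min[apiv1.ResourceMemory] = *minMemoryLimit
	}
	return item
}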

View File

@@ -320,7 +320,8 @@ func getBoundaryRecommendation(recommendation apiv1.ResourceList, container apiv
}
func applyPodLimitRange(resources []vpa_types.RecommendedContainerResources,
-pod *apiv1.Pod, limitRange apiv1.LimitRangeItem, resourceName apiv1.ResourceName) []vpa_types.RecommendedContainerResources {
+pod *apiv1.Pod, limitRange apiv1.LimitRangeItem, resourceName apiv1.ResourceName,
+fieldGetter func(vpa_types.RecommendedContainerResources) *apiv1.ResourceList) []vpa_types.RecommendedContainerResources {
minLimit := limitRange.Min[resourceName]
maxLimit := limitRange.Max[resourceName]
defaultLimit := limitRange.Default[resourceName]
@@ -332,7 +333,7 @@ func applyPodLimitRange(resources []vpa_types.RecommendedContainerResources,
}
limit := container.Resources.Limits[resourceName]
request := container.Resources.Requests[resourceName]
-recommendation := resources[i].Target[resourceName]
+recommendation := (*fieldGetter(resources[i]))[resourceName]
containerLimit, _ := getProportionalResourceLimit(resourceName, &limit, &request, &recommendation, &defaultLimit)
if containerLimit != nil {
sumLimit.Add(*containerLimit)
@@ -345,9 +346,9 @@ func applyPodLimitRange(resources []vpa_types.RecommendedContainerResources,
if minLimit.Cmp(sumRecommendation) > 0 {
for i := range pod.Spec.Containers {
-limit := resources[i].Target[resourceName]
-cappedContainerRequest, _ := scaleQuantityProportionally(&limit, &sumRecommendation, &minLimit)
-resources[i].Target[resourceName] = *cappedContainerRequest
+request := (*fieldGetter(resources[i]))[resourceName]
+cappedContainerRequest, _ := scaleQuantityProportionally(&request, &sumRecommendation, &minLimit)
+(*fieldGetter(resources[i]))[resourceName] = *cappedContainerRequest
}
return resources
}
@@ -376,7 +377,17 @@ func (c *cappingRecommendationProcessor) capProportionallyToPodLimitRange(
if podLimitRange == nil {
return containerRecommendations, nil
}
-containerRecommendations = applyPodLimitRange(containerRecommendations, pod, *podLimitRange, apiv1.ResourceCPU)
-containerRecommendations = applyPodLimitRange(containerRecommendations, pod, *podLimitRange, apiv1.ResourceMemory)
+getTarget := func(rl vpa_types.RecommendedContainerResources) *apiv1.ResourceList { return &rl.Target }
+getUpper := func(rl vpa_types.RecommendedContainerResources) *apiv1.ResourceList { return &rl.UpperBound }
+getLower := func(rl vpa_types.RecommendedContainerResources) *apiv1.ResourceList { return &rl.LowerBound }
+containerRecommendations = applyPodLimitRange(containerRecommendations, pod, *podLimitRange, apiv1.ResourceCPU, getUpper)
+containerRecommendations = applyPodLimitRange(containerRecommendations, pod, *podLimitRange, apiv1.ResourceMemory, getUpper)
+containerRecommendations = applyPodLimitRange(containerRecommendations, pod, *podLimitRange, apiv1.ResourceCPU, getTarget)
+containerRecommendations = applyPodLimitRange(containerRecommendations, pod, *podLimitRange, apiv1.ResourceMemory, getTarget)
+containerRecommendations = applyPodLimitRange(containerRecommendations, pod, *podLimitRange, apiv1.ResourceCPU, getLower)
+containerRecommendations = applyPodLimitRange(containerRecommendations, pod, *podLimitRange, apiv1.ResourceMemory, getLower)
return containerRecommendations, nil
}
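
The new fieldGetter parameter is an accessor-function pattern: Go has no first-class field references, so a function returning a pointer to the wanted ResourceList lets one capping routine read and write Target, LowerBound, or UpperBound without three near-identical copies; the hunk above then runs it once per bound for each resource. Note the getter takes the struct by value, yet writes still land in the original slice element, because ResourceList is a map type and the copy shares the same underlying map. A self-contained sketch of just that mechanism (local stand-in type and a toy transformation, not the PR's code):

package main

import (
	"fmt"

	apiv1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// rec stands in for vpa_types.RecommendedContainerResources.
type rec struct {
	Target, LowerBound, UpperBound apiv1.ResourceList
}

// capField halves the named resource in whichever field the getter selects;
// the real applyPodLimitRange scales proportionally instead of halving.
func capField(rs []rec, name apiv1.ResourceName, get func(rec) *apiv1.ResourceList) {
	for i := range rs {
		list := *get(rs[i]) // the struct is copied, but the map header aliases the original
		q := list[name]
		q.Set(q.Value() / 2)
		list[name] = q // visible in rs[i] because ResourceList is a map
	}
}

func main() {
	rs := []rec{{Target: apiv1.ResourceList{apiv1.ResourceMemory: resource.MustParse("4G")}}}
	getTarget := func(r rec) *apiv1.ResourceList { return &r.Target }
	capField(rs, apiv1.ResourceMemory, getTarget)
	capped := rs[0].Target[apiv1.ResourceMemory]
	fmt.Println(capped.String()) // 2G
}

One caveat of the by-value getter: indexing assignments propagate, but assigning a whole new map through the returned pointer would only change the copy.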

View File

@@ -489,7 +489,7 @@ func TestApplyPodLimitRange(t *testing.T) {
},
},
{
name: "cap mem request to min",
name: "cap mem request to pod min",
resources: []vpa_types.RecommendedContainerResources{
{
ContainerName: "container1",
@@ -533,7 +533,7 @@ func TestApplyPodLimitRange(t *testing.T) {
limitRange: apiv1.LimitRangeItem{
Type: apiv1.LimitTypePod,
Max: apiv1.ResourceList{
-apiv1.ResourceCPU: resource.MustParse("10G"),
+apiv1.ResourceMemory: resource.MustParse("10G"),
},
Min: apiv1.ResourceList{
apiv1.ResourceMemory: resource.MustParse("4G"),
@@ -556,9 +556,10 @@ func TestApplyPodLimitRange(t *testing.T) {
},
},
}
+getTarget := func(rl vpa_types.RecommendedContainerResources) *apiv1.ResourceList { return &rl.Target }
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
-got := applyPodLimitRange(tc.resources, &tc.pod, tc.limitRange, tc.resourceName)
+got := applyPodLimitRange(tc.resources, &tc.pod, tc.limitRange, tc.resourceName, getTarget)
assert.Equal(t, tc.expect, got)
})
}
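
The test table keeps passing getTarget, so it continues to exercise only the Target field through the new parameter. The "cap mem request to pod min" case rests on proportional scaling: when the pod's summed recommendation falls below the LimitRange minimum, each container's value is multiplied by min/sum (the diff shows scaleQuantityProportionally returning a second value, discarded here, alongside the scaled quantity). A runnable sketch of that arithmetic, assuming scaleQuantityProportionally behaves as its name suggests; the function and values below are illustrative:

package main

import (
	"fmt"
	"math/big"

	"k8s.io/apimachinery/pkg/api/resource"
)

// scaleProportionally computes base * target / sum with big.Int to avoid
// int64 overflow, mirroring what the capping code is expected to do.
func scaleProportionally(base, sum, target *resource.Quantity) *resource.Quantity {
	b := big.NewInt(base.Value())
	b.Mul(b, big.NewInt(target.Value()))
	b.Div(b, big.NewInt(sum.Value()))
	return resource.NewQuantity(b.Int64(), base.Format)
}

func main() {
	// Two containers recommended 1G each (sum 2G) against a pod-level
	// minimum of 4G: each request is raised to 2G so the pod total
	// meets the minimum.
	base := resource.MustParse("1G")
	sum := resource.MustParse("2G")
	minLimit := resource.MustParse("4G")
	fmt.Println(scaleProportionally(&base, &sum, &minLimit).String()) // 2G
}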