Merge pull request #2095 from jbartosik/pod-limit-range-e2e

Clean up e2e vpa tests

commit 037355ea2e
@@ -197,7 +197,7 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
         }
     })

-    ginkgo.It("caps request according to pod max limit set in pod LimitRange", func() {
+    ginkgo.It("caps request according to pod max limit set in LimitRange", func() {
         d := NewHamsterDeploymentWithResourcesAndLimits(f,
             ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("100Mi"), /*memory request*/
             ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("200Mi") /*memory limit*/)
@@ -247,7 +247,7 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
         }
     })

-    ginkgo.It("raises request according to pod min limit set in pod LimitRange", func() {
+    ginkgo.It("raises request according to pod min limit set in LimitRange", func() {
         d := NewHamsterDeploymentWithResourcesAndLimits(f,
             ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/
             ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)
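Both tests build on NewHamsterDeploymentWithResourcesAndLimits, whose body is outside this diff. As a rough sketch of what such a helper presumably assembles (the constructor name and fields below are illustrative, not the actual e2e utility): a single-container deployment carrying the given requests and limits.

package main

import (
	appsv1 "k8s.io/api/apps/v1"
	apiv1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// newHamsterLikeDeployment is a hypothetical stand-in for the e2e helper:
// one container with the supplied CPU/memory requests and limits.
func newHamsterLikeDeployment(cpuReq, memReq, cpuLim, memLim resource.Quantity) *appsv1.Deployment {
	return &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{Name: "hamster-deployment"},
		Spec: appsv1.DeploymentSpec{
			Template: apiv1.PodTemplateSpec{
				Spec: apiv1.PodSpec{
					Containers: []apiv1.Container{{
						Name: "hamster",
						Resources: apiv1.ResourceRequirements{
							Requests: apiv1.ResourceList{apiv1.ResourceCPU: cpuReq, apiv1.ResourceMemory: memReq},
							Limits:   apiv1.ResourceList{apiv1.ResourceCPU: cpuLim, apiv1.ResourceMemory: memLim},
						},
					}},
				},
			},
		},
	}
}

func main() {
	_ = newHamsterLikeDeployment(
		resource.MustParse("100m"), resource.MustParse("100Mi"),
		resource.MustParse("150m"), resource.MustParse("200Mi"))
}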
@@ -103,9 +103,9 @@ func SetupHamsterDeployment(f *framework.Framework, cpu, memory string, replicas
     d := NewHamsterDeploymentWithResources(f, cpuQuantity, memoryQuantity)
     d.Spec.Replicas = &replicas
     d, err := f.ClientSet.AppsV1().Deployments(f.Namespace.Name).Create(d)
-    gomega.Expect(err).NotTo(gomega.HaveOccurred())
+    gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error when starting deployment creation")
     err = framework.WaitForDeploymentComplete(f.ClientSet, d)
-    gomega.Expect(err).NotTo(gomega.HaveOccurred())
+    gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error waiting for deployment creation to finish")
     return d
 }
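The messages added to the assertions above use gomega's optional description arguments: everything after the matcher is treated as a fmt.Sprintf format plus operands and is shown only when the assertion fails. A minimal self-contained sketch (using NewWithT instead of the ginkgo-wired globals the e2e suite uses; doSomething is hypothetical):

package example

import (
	"testing"

	"github.com/onsi/gomega"
)

// doSomething is a hypothetical stand-in for the e2e calls above.
func doSomething() error { return nil }

func TestAnnotatedAssertion(t *testing.T) {
	g := gomega.NewWithT(t)
	err := doSomething()
	// The trailing arguments form the failure description; they are only
	// rendered into a message when the assertion actually fails.
	g.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error from doSomething: %v", err)
}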
@@ -234,18 +234,18 @@ func NewVPA(f *framework.Framework, name string, targetRef *autoscaling.CrossVer
 func InstallVPA(f *framework.Framework, vpa *vpa_types.VerticalPodAutoscaler) {
     ns := f.Namespace.Name
     config, err := framework.LoadConfig()
-    gomega.Expect(err).NotTo(gomega.HaveOccurred())
+    gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error loading framework")
     vpaClientSet := vpa_clientset.NewForConfigOrDie(config)
     vpaClient := vpaClientSet.AutoscalingV1beta2()
     _, err = vpaClient.VerticalPodAutoscalers(ns).Create(vpa)
-    gomega.Expect(err).NotTo(gomega.HaveOccurred())
+    gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error creating VPA")
 }

 // ParseQuantityOrDie parses quantity from string and dies with an error if
 // unparsable.
 func ParseQuantityOrDie(text string) resource.Quantity {
     quantity, err := resource.ParseQuantity(text)
-    gomega.Expect(err).NotTo(gomega.HaveOccurred())
+    gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error parsing quantity: %s", text)
     return quantity
 }
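ParseQuantityOrDie wraps resource.ParseQuantity from k8s.io/apimachinery (resource.MustParse is its panicking sibling). For reference, a tiny standalone example of what the suffixed strings used throughout these tests mean:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	cpu := resource.MustParse("100m")  // 100 millicores, i.e. 0.1 CPU
	mem := resource.MustParse("200Mi") // 200 * 2^20 bytes
	fmt.Println(cpu.MilliValue())      // 100
	fmt.Println(mem.Value())           // 209715200
}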
@@ -337,9 +337,9 @@ func GetEvictedPodsCount(currentPodSet PodSet, initialPodSet PodSet) int {
 func CheckNoPodsEvicted(f *framework.Framework, initialPodSet PodSet) {
     time.Sleep(VpaEvictionTimeout)
     currentPodList, err := GetHamsterPods(f)
-    gomega.Expect(err).NotTo(gomega.HaveOccurred())
+    gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error when listing hamster pods to check number of pod evictions")
     restarted := GetEvictedPodsCount(MakePodSet(currentPodList), initialPodSet)
-    gomega.Expect(restarted).To(gomega.Equal(0))
+    gomega.Expect(restarted).To(gomega.Equal(0), "there should be no pod evictions")
 }

 // WaitForVPAMatch pools VPA object until match function returns true. Returns
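PodSet and GetEvictedPodsCount are not part of this diff. A plausible reading, consistent with how they are used above (simplified assumed types; the real PodSet maps pod names to UIDs): a pod counts as evicted when its name is gone or reappears with a different UID.

package main

import "fmt"

// PodSet is assumed to map pod name to pod UID; illustrative only.
type PodSet map[string]string

// evictedPodsCount sketches the comparison the check above relies on.
func evictedPodsCount(current, initial PodSet) int {
	evicted := 0
	for name, uid := range initial {
		if current[name] != uid {
			// Pod disappeared, or was recreated under the same name.
			evicted++
		}
	}
	return evicted
}

func main() {
	initial := PodSet{"hamster-1": "uid-a", "hamster-2": "uid-b"}
	current := PodSet{"hamster-1": "uid-a", "hamster-2": "uid-c"}
	fmt.Println(evictedPodsCount(current, initial)) // 1
}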
@@ -401,7 +401,7 @@ func installLimitRange(f *framework.Framework, minCpuLimit, minMemoryLimit, maxC

     if minMemoryLimit != nil || minCpuLimit != nil {
         lrItem := apiv1.LimitRangeItem{
-            Type: apiv1.LimitTypeContainer,
+            Type: lrType,
             Min:  apiv1.ResourceList{},
         }
         if minCpuLimit != nil {
@@ -413,7 +413,7 @@ func installLimitRange(f *framework.Framework, minCpuLimit, minMemoryLimit, maxC
         lr.Spec.Limits = append(lr.Spec.Limits, lrItem)
     }
     _, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Create(lr)
-    gomega.Expect(err).NotTo(gomega.HaveOccurred())
+    gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error when creating limit range")
 }

 // InstallLimitRangeWithMax installs a LimitRange with a maximum limit for CPU and memory.
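The lrType parameter above replaces a hard-coded apiv1.LimitTypeContainer, so one helper can install either scope of LimitRange. The two item shapes differ only in what the bounds apply to; this sketch uses illustrative values and a hypothetical constructor name:

package main

import (
	apiv1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// newLimitRangeItem mirrors the shape the parametrized helper builds.
func newLimitRangeItem(lrType apiv1.LimitType) apiv1.LimitRangeItem {
	return apiv1.LimitRangeItem{
		// LimitTypeContainer bounds each container separately;
		// LimitTypePod bounds the sum over all containers in a pod.
		Type: lrType,
		Max:  apiv1.ResourceList{apiv1.ResourceMemory: resource.MustParse("1Gi")},
	}
}

func main() {
	_ = newLimitRangeItem(apiv1.LimitTypeContainer)
	_ = newLimitRangeItem(apiv1.LimitTypePod)
}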
@@ -320,7 +320,8 @@ func getBoundaryRecommendation(recommendation apiv1.ResourceList, container apiv
 }

 func applyPodLimitRange(resources []vpa_types.RecommendedContainerResources,
-    pod *apiv1.Pod, limitRange apiv1.LimitRangeItem, resourceName apiv1.ResourceName) []vpa_types.RecommendedContainerResources {
+    pod *apiv1.Pod, limitRange apiv1.LimitRangeItem, resourceName apiv1.ResourceName,
+    fieldGetter func(vpa_types.RecommendedContainerResources) *apiv1.ResourceList) []vpa_types.RecommendedContainerResources {
     minLimit := limitRange.Min[resourceName]
     maxLimit := limitRange.Max[resourceName]
     defaultLimit := limitRange.Default[resourceName]
@@ -332,7 +333,7 @@ func applyPodLimitRange(resources []vpa_types.RecommendedContainerResources,
         }
         limit := container.Resources.Limits[resourceName]
         request := container.Resources.Requests[resourceName]
-        recommendation := resources[i].Target[resourceName]
+        recommendation := (*fieldGetter(resources[i]))[resourceName]
         containerLimit, _ := getProportionalResourceLimit(resourceName, &limit, &request, &recommendation, &defaultLimit)
         if containerLimit != nil {
             sumLimit.Add(*containerLimit)
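getProportionalResourceLimit is outside this diff; judging from the call above, the assumed behavior is to keep each container's original limit-to-request ratio when the request is replaced by the recommendation. A toy sketch over plain integers:

package main

import "fmt"

// proportionalLimit scales the original limit by recommendation/request
// (assumed semantics; the real function works on resource.Quantity and
// handles zero requests and overflow separately).
func proportionalLimit(origLimit, origRequest, recommendation int64) int64 {
	if origRequest == 0 {
		return 0 // simplified; not how the real code treats this case
	}
	return origLimit * recommendation / origRequest
}

func main() {
	// limit 200Mi, request 100Mi, recommendation 300Mi → limit 600Mi
	fmt.Println(proportionalLimit(200, 100, 300)) // 600
}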
@@ -345,9 +346,9 @@ func applyPodLimitRange(resources []vpa_types.RecommendedContainerResources,

     if minLimit.Cmp(sumRecommendation) > 0 {
         for i := range pod.Spec.Containers {
-            limit := resources[i].Target[resourceName]
-            cappedContainerRequest, _ := scaleQuantityProportionally(&limit, &sumRecommendation, &minLimit)
-            resources[i].Target[resourceName] = *cappedContainerRequest
+            request := (*fieldGetter(resources[i]))[resourceName]
+            cappedContainerRequest, _ := scaleQuantityProportionally(&request, &sumRecommendation, &minLimit)
+            (*fieldGetter(resources[i]))[resourceName] = *cappedContainerRequest
         }
         return resources
     }
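The new fieldGetter parameter (threaded through the three hunks above) takes the recommendation struct by value yet still lets applyPodLimitRange write results back. That works because apiv1.ResourceList is a map type: the copy's field shares backing storage with the original. A self-contained sketch with simplified stand-in types:

package main

import "fmt"

// ResourceList stands in for apiv1.ResourceList, which is also a map type.
type ResourceList map[string]int64

type Recommended struct {
	Target, LowerBound, UpperBound ResourceList
}

// capField rewrites one field of each recommendation through an accessor,
// the same shape as the fieldGetter parameter added in this PR.
func capField(recs []Recommended, get func(Recommended) *ResourceList, key string, max int64) {
	for i := range recs {
		rl := *get(recs[i]) // copy of the map header, same backing storage
		if rl[key] > max {
			rl[key] = max
		}
	}
}

func main() {
	recs := []Recommended{{Target: ResourceList{"memory": 500}}}
	getTarget := func(r Recommended) *ResourceList { return &r.Target }
	capField(recs, getTarget, "memory", 400)
	fmt.Println(recs[0].Target["memory"]) // 400
}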
@@ -376,7 +377,17 @@ func (c *cappingRecommendationProcessor) capProportionallyToPodLimitRange(
     if podLimitRange == nil {
         return containerRecommendations, nil
     }
-    containerRecommendations = applyPodLimitRange(containerRecommendations, pod, *podLimitRange, apiv1.ResourceCPU)
-    containerRecommendations = applyPodLimitRange(containerRecommendations, pod, *podLimitRange, apiv1.ResourceMemory)
+    getTarget := func(rl vpa_types.RecommendedContainerResources) *apiv1.ResourceList { return &rl.Target }
+    getUpper := func(rl vpa_types.RecommendedContainerResources) *apiv1.ResourceList { return &rl.UpperBound }
+    getLower := func(rl vpa_types.RecommendedContainerResources) *apiv1.ResourceList { return &rl.LowerBound }
+
+    containerRecommendations = applyPodLimitRange(containerRecommendations, pod, *podLimitRange, apiv1.ResourceCPU, getUpper)
+    containerRecommendations = applyPodLimitRange(containerRecommendations, pod, *podLimitRange, apiv1.ResourceMemory, getUpper)
+
+    containerRecommendations = applyPodLimitRange(containerRecommendations, pod, *podLimitRange, apiv1.ResourceCPU, getTarget)
+    containerRecommendations = applyPodLimitRange(containerRecommendations, pod, *podLimitRange, apiv1.ResourceMemory, getTarget)
+
+    containerRecommendations = applyPodLimitRange(containerRecommendations, pod, *podLimitRange, apiv1.ResourceCPU, getLower)
+    containerRecommendations = applyPodLimitRange(containerRecommendations, pod, *podLimitRange, apiv1.ResourceMemory, getLower)
     return containerRecommendations, nil
 }
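Note the ordering above: the same capping pass now runs over UpperBound, then Target, then LowerBound, for both CPU and memory. When the summed recommendation falls below the LimitRange minimum, the earlier loop scales each container up by the same factor; scaleQuantityProportionally is not shown in this diff, so the arithmetic below is assumed from the call shape (value * minLimit / sumRecommendation, preserving ratios between containers):

package main

import "fmt"

// scaleToFloor is an assumed sketch of the proportional scale-up, over
// plain integers instead of resource.Quantity.
func scaleToFloor(value, sum, target int64) int64 {
	return value * target / sum
}

func main() {
	// Containers recommended 100Mi and 300Mi (sum 400Mi) against a pod
	// minimum of 800Mi become 200Mi and 600Mi; the 1:3 ratio is kept.
	fmt.Println(scaleToFloor(100, 400, 800)) // 200
	fmt.Println(scaleToFloor(300, 400, 800)) // 600
}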
@@ -489,7 +489,7 @@ func TestApplyPodLimitRange(t *testing.T) {
             },
         },
         {
-            name: "cap mem request to min",
+            name: "cap mem request to pod min",
             resources: []vpa_types.RecommendedContainerResources{
                 {
                     ContainerName: "container1",
@@ -533,7 +533,7 @@ func TestApplyPodLimitRange(t *testing.T) {
             limitRange: apiv1.LimitRangeItem{
                 Type: apiv1.LimitTypePod,
                 Max: apiv1.ResourceList{
-                    apiv1.ResourceCPU: resource.MustParse("10G"),
+                    apiv1.ResourceMemory: resource.MustParse("10G"),
                 },
                 Min: apiv1.ResourceList{
                     apiv1.ResourceMemory: resource.MustParse("4G"),
@@ -556,9 +556,10 @@ func TestApplyPodLimitRange(t *testing.T) {
             },
         },
     }
+    getTarget := func(rl vpa_types.RecommendedContainerResources) *apiv1.ResourceList { return &rl.Target }
     for _, tc := range tests {
         t.Run(tc.name, func(t *testing.T) {
-            got := applyPodLimitRange(tc.resources, &tc.pod, tc.limitRange, tc.resourceName)
+            got := applyPodLimitRange(tc.resources, &tc.pod, tc.limitRange, tc.resourceName, getTarget)
             assert.Equal(t, tc.expect, got)
         })
     }