Fix "raises request according to pod min limit set in LimitRange" test
The admission controller was crashing because it was trying to divide by 0, so it didn't make any changes to the pod. After I fixed that, only 1 pod would fit in the cluster, so I lowered the recommendation.
parent f507cd1e6a
commit 9cb45438c1
@@ -260,14 +260,14 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 			{
 				ContainerName: "hamster",
 				Target: apiv1.ResourceList{
-					apiv1.ResourceCPU:    ParseQuantityOrDie("250m"),
+					apiv1.ResourceCPU:    ParseQuantityOrDie("120m"),
 					apiv1.ResourceMemory: ParseQuantityOrDie("100Mi"), // memory is downscaled
 				},
 			},
 			{
 				ContainerName: "hamster2",
 				Target: apiv1.ResourceList{
-					apiv1.ResourceCPU:    ParseQuantityOrDie("250m"),
+					apiv1.ResourceCPU:    ParseQuantityOrDie("120m"),
 					apiv1.ResourceMemory: ParseQuantityOrDie("100Mi"), // memory is downscaled
 				},
 			},
@@ -289,7 +289,7 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 		// request that limitrange allows.
 		// Limit to request ratio should stay unchanged.
 		for _, pod := range podList.Items {
-			gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("250m")))
+			gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("120m")))
 			gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("250Mi")))
 			gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()).To(gomega.BeNumerically(">=", 75))
 			gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Memory().Value()).To(gomega.BeNumerically(">=", 250*1024*1024))
@@ -344,7 +344,7 @@ func applyPodLimitRange(resources []vpa_types.RecommendedContainerResources,
 		return resources
 	}
 
-	if minLimit.Cmp(sumRecommendation) > 0 {
+	if minLimit.Cmp(sumRecommendation) > 0 && !sumLimit.IsZero() {
 		for i := range pod.Spec.Containers {
 			request := (*fieldGetter(resources[i]))[resourceName]
 			cappedContainerRequest, _ := scaleQuantityProportionally(&request, &sumRecommendation, &minLimit)
@@ -353,6 +353,10 @@ func applyPodLimitRange(resources []vpa_types.RecommendedContainerResources,
 		return resources
 	}
 
+	if sumLimit.IsZero() {
+		return resources
+	}
+
 	var targetTotalLimit resource.Quantity
 	if minLimit.Cmp(sumLimit) > 0 {
 		targetTotalLimit = minLimit
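For context on the divide-by-zero described in the commit message: when a pod defines no limits, the summed limit used as the scaling base is zero, so scaling limits proportionally is undefined and has to be skipped, which is what the !sumLimit.IsZero() guard and the early return above do. The sketch below is a minimal illustration only; scaleProportionally is a hypothetical stand-in assumed to scale a quantity by the ratio target/base, not the actual scaleQuantityProportionally from the VPA code.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// scaleProportionally is a hypothetical stand-in for a helper like
// scaleQuantityProportionally: it scales value by the ratio target/base.
// A zero base makes that ratio undefined, so it reports failure instead
// of dividing by zero.
func scaleProportionally(value, base, target *resource.Quantity) (*resource.Quantity, bool) {
	if base.IsZero() {
		return nil, false
	}
	scaled := value.MilliValue() * target.MilliValue() / base.MilliValue()
	return resource.NewMilliQuantity(scaled, value.Format), true
}

func main() {
	request := resource.MustParse("120m")
	sumLimit := resource.MustParse("0")    // pod has no limits set
	minLimit := resource.MustParse("500m") // LimitRange minimum

	if _, ok := scaleProportionally(&request, &sumLimit, &minLimit); !ok {
		// This is the case the commit guards against: leave the requests unchanged.
		fmt.Println("sum of limits is zero, skipping proportional scaling")
	}
}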