Merge pull request #1998 from jbartosik/limit-support
Add e2e test for keeping limit equal to request
commit 06664ee4da
@@ -195,6 +195,40 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
		}
	})

	ginkgo.It("keeps limits equal to request", func() {
		d := NewHamsterDeploymentWithGuaranteedResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)

		ginkgo.By("Setting up a VPA CRD")
		vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef)
		vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
			ContainerRecommendations: []vpa_types.RecommendedContainerResources{{
				ContainerName: "hamster",
				Target: apiv1.ResourceList{
					apiv1.ResourceCPU:    ParseQuantityOrDie("250m"),
					apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"),
				},
			}},
		}
		vpaCRD.Spec.ResourcePolicy = &vpa_types.PodResourcePolicy{
			ContainerPolicies: []vpa_types.ContainerResourcePolicy{{
				ContainerName: "hamster",
			}},
		}
		InstallVPA(f, vpaCRD)

		ginkgo.By("Setting up a hamster deployment")
		podList := startDeploymentPods(f, d)

		// Originally the pods had 100m CPU and 100Mi of memory, but the admission
		// controller should change that to 250m CPU and 200Mi of memory. Limits and
		// requests should stay equal.
		for _, pod := range podList.Items {
			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("250m")))
			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
			gomega.Expect(pod.Spec.Containers[0].Resources.Limits[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("250m")))
			gomega.Expect(pod.Spec.Containers[0].Resources.Limits[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
		}
	})
})

func startDeploymentPods(f *framework.Framework, deployment *appsv1.Deployment) *apiv1.PodList {
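
The test above asserts an invariant without implementing it: when a container's original limit equals its original request, the updated limit is expected to track the recommended request so that limit == request is preserved. A minimal standalone sketch of that invariant, using a hypothetical helper name scaledLimit (this is an illustration, not VPA code):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// scaledLimit is a hypothetical helper, not the VPA implementation: when the
// original limit equals the original request, the new limit tracks the
// recommended request, so limit == request (and Guaranteed QoS) is preserved.
func scaledLimit(origRequest, origLimit, newRequest resource.Quantity) resource.Quantity {
	if origRequest.Cmp(origLimit) == 0 {
		return newRequest
	}
	return origLimit
}

func main() {
	// Mirrors the test: pods start with request == limit == 100m CPU; the
	// recommendation is 250m, so both request and limit become 250m.
	fmt.Println(scaledLimit(resource.MustParse("100m"), resource.MustParse("100m"), resource.MustParse("250m")).String())
}
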
@@ -130,6 +130,19 @@ func NewHamsterDeploymentWithResources(f *framework.Framework, cpuQuantity, memo
	return d
}

// NewHamsterDeploymentWithGuaranteedResources creates a simple hamster deployment with specific
// resource limits for e2e test purposes. Since the container in the pod specifies resource limits
// but not resource requests, K8s will set requests equal to limits and the pod will have the
// Guaranteed QoS class.
func NewHamsterDeploymentWithGuaranteedResources(f *framework.Framework, cpuQuantity, memoryQuantity resource.Quantity) *appsv1.Deployment {
	d := NewHamsterDeployment(f)
	d.Spec.Template.Spec.Containers[0].Resources.Limits = apiv1.ResourceList{
		apiv1.ResourceCPU:    cpuQuantity,
		apiv1.ResourceMemory: memoryQuantity,
	}
	return d
}

// GetHamsterPods returns running hamster pods (matched by hamsterLabels)
func GetHamsterPods(f *framework.Framework) (*apiv1.PodList, error) {
	label := labels.SelectorFromSet(labels.Set(hamsterLabels))
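
The doc comment on NewHamsterDeploymentWithGuaranteedResources relies on standard Kubernetes defaulting. A minimal standalone sketch of that behavior (plain Go, not part of this change; the ResourceRequirements value below is illustrative, and the actual defaulting happens server-side in the API server):

package main

import (
	"fmt"

	apiv1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Only Limits are set, as NewHamsterDeploymentWithGuaranteedResources does.
	res := apiv1.ResourceRequirements{
		Limits: apiv1.ResourceList{
			apiv1.ResourceCPU:    resource.MustParse("100m"),
			apiv1.ResourceMemory: resource.MustParse("100Mi"),
		},
	}
	// When a container specifies Limits but no Requests, the API server
	// defaults Requests to Limits, and a pod whose requests equal its limits
	// across all containers gets the Guaranteed QoS class.
	fmt.Println(res.Limits.Cpu().String(), res.Limits.Memory().String())
}
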