e2e test for VPA updater observing limit range
parent da76008702
commit c1ceb0fe43
@@ -17,8 +17,6 @@ limitations under the License.
 package autoscaling
 
 import (
-	"fmt"
-
 	appsv1 "k8s.io/api/apps/v1"
 	apiv1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -145,13 +143,10 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 	}
 	InstallVPA(f, vpaCRD)
 
-	cpuLimit := "300m"
-	memLimit := "1T"
-	ginkgo.By(fmt.Sprintf("Setting up LimitRange with max limits - CPU: %v, memory: %v", cpuLimit, memLimit))
 	// Max CPU limit is 300m and ratio is 1.5, so max request is 200m, while
 	// recommendation is 250m
 	// Max memory limit is 1T and ratio is 2., so max request is 0.5T
-	InstallLimitRangeWithMax(f, cpuLimit, memLimit)
+	InstallLimitRangeWithMax(f, "300m", "1T")
 
 	ginkgo.By("Setting up a hamster deployment")
 	podList := startDeploymentPods(f, d)
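The arithmetic in the comments above is worth making explicit: the LimitRange max limit is divided by the container's original limit-to-request ratio to get the effective request cap, and the recommendation is clamped to it. A minimal illustrative sketch (not code from this commit; all names invented):

package main

import "fmt"

func main() {
	maxCPULimitMilli := 300.0 // LimitRange max CPU limit (300m)
	ratio := 1.5              // hamster container's limit/request ratio
	recommendedMilli := 250.0 // recommender's CPU target

	// The request cap is max limit / ratio; the 250m recommendation
	// exceeds it, so the applied request is 200m.
	maxRequestMilli := maxCPULimitMilli / ratio
	applied := recommendedMilli
	if applied > maxRequestMilli {
		applied = maxRequestMilli
	}
	fmt.Printf("applied CPU request: %vm\n", applied) // 200m
}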
@@ -163,6 +158,8 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 	for _, pod := range podList.Items {
 		gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("200m")))
 		gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
+		gomega.Expect(*pod.Spec.Containers[0].Resources.Limits.Cpu()).To(gomega.BeNumerically("<=", ParseQuantityOrDie("300m")))
+		gomega.Expect(*pod.Spec.Containers[0].Resources.Limits.Memory()).To(gomega.BeNumerically("<=", ParseQuantityOrDie("1T")))
 		gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5))
 		gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.))
 	}
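As an aside on the new limit assertions: resource.Quantity also supports direct comparison via Cmp, which avoids extracting MilliValue()/Value() first. A small self-contained sketch (illustrative only, not from the commit):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	limit := resource.MustParse("300m")
	ceiling := resource.MustParse("1") // one full CPU

	// Cmp returns -1, 0, or 1, so "limit <= ceiling" is Cmp <= 0.
	fmt.Println(limit.Cmp(ceiling) <= 0) // true
}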
@@ -188,13 +185,10 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 	}
 	InstallVPA(f, vpaCRD)
 
-	cpuLimit := "75m"
-	memLimit := "250Mi"
-	ginkgo.By(fmt.Sprintf("Setting up LimitRange with min limits - CPU: %v, memory: %v", cpuLimit, memLimit))
 	// Min CPU limit is 75m and ratio is 1.5, so min request is 50m
 	// Min memory limit is 250Mi and ratio is 2., so min request is 125Mi, while
 	// recommendation is 100Mi.
-	InstallLimitRangeWithMin(f, cpuLimit, memLimit)
+	InstallLimitRangeWithMin(f, "75m", "250Mi")
 
 	ginkgo.By("Setting up a hamster deployment")
 	podList := startDeploymentPods(f, d)
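The min case caps from the other direction, and it also shows why the ratio assertions hold: when the 100Mi recommendation falls below the floor implied by the LimitRange min, the request is raised to that floor and the limit scales with it. An illustrative sketch of the arithmetic in the comments above (invented names, not commit code):

package main

import "fmt"

func main() {
	minMemLimitMi := 250.0 // LimitRange min memory limit (250Mi)
	ratio := 2.0           // hamster container's limit/request ratio
	recommendedMi := 100.0 // recommender's memory target

	// The request floor is min limit / ratio; the 100Mi recommendation
	// is below it, so the applied request becomes 125Mi and the limit,
	// scaled by the preserved ratio, becomes 250Mi.
	minRequestMi := minMemLimitMi / ratio
	applied := recommendedMi
	if applied < minRequestMi {
		applied = minRequestMi
	}
	fmt.Printf("request: %vMi, limit: %vMi\n", applied, applied*ratio) // 125Mi, 250Mi
}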
@@ -206,6 +200,8 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 	for _, pod := range podList.Items {
 		gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("250m")))
 		gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("125Mi")))
+		gomega.Expect(*pod.Spec.Containers[0].Resources.Limits.Cpu()).To(gomega.BeNumerically(">=", ParseQuantityOrDie("75m")))
+		gomega.Expect(*pod.Spec.Containers[0].Resources.Limits.Memory()).To(gomega.BeNumerically(">=", ParseQuantityOrDie("250Mi")))
 		gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5))
 		gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.))
 	}
@@ -396,13 +396,17 @@ func installLimitRange(f *framework.Framework, minCpuLimit, minMemoryLimit, maxC
 	gomega.Expect(err).NotTo(gomega.HaveOccurred())
 }
 
+// InstallLimitRangeWithMax installs a LimitRange with a maximum limit for CPU and memory.
 func InstallLimitRangeWithMax(f *framework.Framework, maxCpuLimit, maxMemoryLimit string) {
+	ginkgo.By(fmt.Sprintf("Setting up LimitRange with max limits - CPU: %v, memory: %v", maxCpuLimit, maxMemoryLimit))
 	maxCpuLimitQuantity := ParseQuantityOrDie(maxCpuLimit)
 	maxMemoryLimitQuantity := ParseQuantityOrDie(maxMemoryLimit)
 	installLimitRange(f, nil, nil, &maxCpuLimitQuantity, &maxMemoryLimitQuantity)
 }
 
+// InstallLimitRangeWithMin installs a LimitRange with a minimum limit for CPU and memory.
 func InstallLimitRangeWithMin(f *framework.Framework, minCpuLimit, minMemoryLimit string) {
+	ginkgo.By(fmt.Sprintf("Setting up LimitRange with min limits - CPU: %v, memory: %v", minCpuLimit, minMemoryLimit))
 	minCpuLimitQuantity := ParseQuantityOrDie(minCpuLimit)
 	minMemoryLimitQuantity := ParseQuantityOrDie(minMemoryLimit)
 	installLimitRange(f, &minCpuLimitQuantity, &minMemoryLimitQuantity, nil, nil)
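The unexported installLimitRange helper these wrappers call is not shown in this diff. A hedged sketch of what such a helper plausibly creates (a container-scoped LimitRange); the function name, object name, and the modern client-go Create signature are all assumptions, not taken from the commit:

package e2esketch

import (
	"context"

	apiv1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// installMaxLimitRange is a sketch, not the commit's helper: it creates a
// LimitRange whose container-type Max mirrors InstallLimitRangeWithMax.
func installMaxLimitRange(c kubernetes.Interface, namespace string) error {
	lr := &apiv1.LimitRange{
		ObjectMeta: metav1.ObjectMeta{Name: "e2e-limit-range", Namespace: namespace}, // assumed name
		Spec: apiv1.LimitRangeSpec{
			Limits: []apiv1.LimitRangeItem{{
				Type: apiv1.LimitTypeContainer,
				Max: apiv1.ResourceList{
					apiv1.ResourceCPU:    resource.MustParse("300m"),
					apiv1.ResourceMemory: resource.MustParse("1T"),
				},
			}},
		},
	}
	// Create signature shown for client-go >= v0.18; older releases omit ctx/options.
	_, err := c.CoreV1().LimitRanges(namespace).Create(context.TODO(), lr, metav1.CreateOptions{})
	return err
}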
@@ -98,6 +98,44 @@ var _ = UpdaterE2eDescribe("Updater", func() {
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 		gomega.Expect(evictedCount >= permissiveMaxUnavailable).To(gomega.BeTrue())
 	})
+
+	ginkgo.It("observes max in LimitRange", func() {
+		ginkgo.By("Setting up a hamster deployment")
+		d := NewHamsterDeploymentWithResourcesAndLimits(f,
+			ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/
+			ParseQuantityOrDie("300m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)
+		podList := startDeploymentPods(f, d)
+
+		ginkgo.By("Setting up a VPA CRD")
+		SetupVPA(f, "200m", vpa_types.UpdateModeAuto)
+
+		// Max CPU limit is 300m and ratio is 3., so max request is 100m, while
+		// recommendation is 200m
+		// Max memory limit is 1T and ratio is 2., so max request is 0.5T
+		InstallLimitRangeWithMax(f, "300m", "1T")
+
+		ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String()))
+		CheckNoPodsEvicted(f, MakePodSet(podList))
+	})
+
+	ginkgo.It("observes min in LimitRange", func() {
+		ginkgo.By("Setting up a hamster deployment")
+		d := NewHamsterDeploymentWithResourcesAndLimits(f,
+			ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/
+			ParseQuantityOrDie("300m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)
+		podList := startDeploymentPods(f, d)
+
+		ginkgo.By("Setting up a VPA CRD")
+		SetupVPA(f, "50m", vpa_types.UpdateModeAuto)
+
+		// Min CPU limit is 300m and ratio is 3., so min request is 100m, while
+		// recommendation is 200m
+		// Min memory limit is 0 and ratio is 2., so min request is 0
+		InstallLimitRangeWithMin(f, "300m", "0")
+
+		ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String()))
+		CheckNoPodsEvicted(f, MakePodSet(podList))
+	})
 })
 
 func testEvictsPods(f *framework.Framework, controllerKind string) {
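These updater tests expect no evictions because of the same capping logic: once the LimitRange is applied, the capped recommendation matches what the pods already request, so the updater has nothing to change. An illustrative sketch of that reasoning (not updater code; all names invented):

package main

import "fmt"

func main() {
	currentRequestMilli := 100.0 // deployment requests 100m CPU
	recommendedMilli := 200.0    // VPA recommendation (SetupVPA target)
	maxLimitMilli := 300.0       // LimitRange max CPU limit
	ratio := 3.0                 // pod's limit/request ratio (300m / 100m)

	// Cap the recommendation at max limit / ratio, as in the test comments.
	capped := recommendedMilli
	if maxRequest := maxLimitMilli / ratio; capped > maxRequest {
		capped = maxRequest
	}
	// Capped recommendation equals the current request: no update, no eviction.
	fmt.Println(capped == currentRequestMilli) // true
}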
@@ -17,8 +17,6 @@ limitations under the License.
 package autoscaling
 
 import (
-	"fmt"
-
 	appsv1 "k8s.io/api/apps/v1"
 	apiv1 "k8s.io/api/core/v1"
 	vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2"
@@ -136,13 +134,10 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 	}
 	InstallVPA(f, vpaCRD)
 
-	cpuLimit := "300m"
-	memLimit := "1T"
-	ginkgo.By(fmt.Sprintf("Setting up LimitRange with max limits - CPU: %v, memory: %v", cpuLimit, memLimit))
 	// Max CPU limit is 300m and ratio is 1.5, so max request is 200m, while
 	// recommendation is 250m
 	// Max memory limit is 1T and ratio is 2., so max request is 0.5T
-	InstallLimitRangeWithMax(f, cpuLimit, memLimit)
+	InstallLimitRangeWithMax(f, "300m", "1T")
 
 	ginkgo.By("Setting up a hamster deployment")
 	podList := startDeploymentPods(f, d)
@@ -154,6 +149,8 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 	for _, pod := range podList.Items {
 		gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("200m")))
 		gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
+		gomega.Expect(*pod.Spec.Containers[0].Resources.Limits.Cpu()).To(gomega.BeNumerically("<=", ParseQuantityOrDie("300m")))
+		gomega.Expect(*pod.Spec.Containers[0].Resources.Limits.Memory()).To(gomega.BeNumerically("<=", ParseQuantityOrDie("1T")))
 		gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5))
 		gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.))
 	}
@@ -177,13 +174,10 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 	}
 	InstallVPA(f, vpaCRD)
 
-	cpuLimit := "75m"
-	memLimit := "250Mi"
-	ginkgo.By(fmt.Sprintf("Setting up LimitRange with min limits - CPU: %v, memory: %v", cpuLimit, memLimit))
 	// Min CPU limit is 75m and ratio is 1.5, so min request is 50m
 	// Min memory limit is 250Mi and ratio is 2., so min request is 125Mi, while
 	// recommendation is 100Mi.
-	InstallLimitRangeWithMin(f, cpuLimit, memLimit)
+	InstallLimitRangeWithMin(f, "75m", "250Mi")
 
 	ginkgo.By("Setting up a hamster deployment")
 	podList := startDeploymentPods(f, d)
@@ -195,6 +189,8 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 	for _, pod := range podList.Items {
 		gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("250m")))
 		gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("125Mi")))
+		gomega.Expect(*pod.Spec.Containers[0].Resources.Limits.Cpu()).To(gomega.BeNumerically(">=", ParseQuantityOrDie("75m")))
+		gomega.Expect(*pod.Spec.Containers[0].Resources.Limits.Memory()).To(gomega.BeNumerically(">=", ParseQuantityOrDie("250Mi")))
 		gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5))
 		gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.))
 	}
@@ -389,13 +389,17 @@ func installLimitRange(f *framework.Framework, minCpuLimit, minMemoryLimit, maxC
 	gomega.Expect(err).NotTo(gomega.HaveOccurred())
 }
 
+// InstallLimitRangeWithMax installs a LimitRange with a maximum limit for CPU and memory.
 func InstallLimitRangeWithMax(f *framework.Framework, maxCpuLimit, maxMemoryLimit string) {
+	ginkgo.By(fmt.Sprintf("Setting up LimitRange with max limits - CPU: %v, memory: %v", maxCpuLimit, maxMemoryLimit))
 	maxCpuLimitQuantity := ParseQuantityOrDie(maxCpuLimit)
 	maxMemoryLimitQuantity := ParseQuantityOrDie(maxMemoryLimit)
 	installLimitRange(f, nil, nil, &maxCpuLimitQuantity, &maxMemoryLimitQuantity)
 }
 
+// InstallLimitRangeWithMin installs a LimitRange with a minimum limit for CPU and memory.
 func InstallLimitRangeWithMin(f *framework.Framework, minCpuLimit, minMemoryLimit string) {
+	ginkgo.By(fmt.Sprintf("Setting up LimitRange with min limits - CPU: %v, memory: %v", minCpuLimit, minMemoryLimit))
 	minCpuLimitQuantity := ParseQuantityOrDie(minCpuLimit)
 	minMemoryLimitQuantity := ParseQuantityOrDie(minMemoryLimit)
 	installLimitRange(f, &minCpuLimitQuantity, &minMemoryLimitQuantity, nil, nil)
@@ -119,6 +119,44 @@ var _ = UpdaterE2eDescribe("Updater", func() {
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 		gomega.Expect(evictedCount >= permissiveMaxUnavailable).To(gomega.BeTrue())
 	})
+
+	ginkgo.It("observes max in LimitRange", func() {
+		ginkgo.By("Setting up a hamster deployment")
+		d := NewHamsterDeploymentWithResourcesAndLimits(f,
+			ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/
+			ParseQuantityOrDie("300m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)
+		podList := startDeploymentPods(f, d)
+
+		ginkgo.By("Setting up a VPA CRD")
+		SetupVPA(f, "200m", vpa_types.UpdateModeAuto, hamsterTargetRef)
+
+		// Max CPU limit is 300m and ratio is 3., so max request is 100m, while
+		// recommendation is 200m
+		// Max memory limit is 1T and ratio is 2., so max request is 0.5T
+		InstallLimitRangeWithMax(f, "300m", "1T")
+
+		ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String()))
+		CheckNoPodsEvicted(f, MakePodSet(podList))
+	})
+
+	ginkgo.It("observes min in LimitRange", func() {
+		ginkgo.By("Setting up a hamster deployment")
+		d := NewHamsterDeploymentWithResourcesAndLimits(f,
+			ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/
+			ParseQuantityOrDie("300m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)
+		podList := startDeploymentPods(f, d)
+
+		ginkgo.By("Setting up a VPA CRD")
+		SetupVPA(f, "50m", vpa_types.UpdateModeAuto, hamsterTargetRef)
+
+		// Min CPU limit is 300m and ratio is 3., so min request is 100m, while
+		// recommendation is 200m
+		// Min memory limit is 0 and ratio is 2., so min request is 0
+		InstallLimitRangeWithMin(f, "300m", "0")
+
+		ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String()))
+		CheckNoPodsEvicted(f, MakePodSet(podList))
+	})
 })
 
 func testEvictsPods(f *framework.Framework, controller *autoscaling.CrossVersionObjectReference) {
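This second variant of the updater test passes a hamsterTargetRef through SetupVPA, and testEvictsPods now takes a CrossVersionObjectReference rather than a kind string. A hedged sketch of what such a target ref looks like; the field values are assumptions for illustration, not taken from the commit:

package main

import (
	"fmt"

	autoscaling "k8s.io/api/autoscaling/v1"
)

func main() {
	// Points the VPA at the hamster Deployment; the name is hypothetical.
	hamsterTargetRef := &autoscaling.CrossVersionObjectReference{
		APIVersion: "apps/v1",
		Kind:       "Deployment",
		Name:       "hamster-deployment",
	}
	fmt.Printf("%+v\n", *hamsterTargetRef)
}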