Remove e2e tests for v1beta1

Beata Skiba 2019-10-09 17:24:41 +02:00
parent a258103f8e
commit 9420e790a0
9 changed files with 0 additions and 2728 deletions


@@ -1,173 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package autoscaling
import (
"fmt"
"time"
appsv1 "k8s.io/api/apps/v1"
apiv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
var _ = ActuationSuiteE2eDescribe("Actuation", func() {
f := framework.NewDefaultFramework("vertical-pod-autoscaling")
ginkgo.It("stops when pods get pending", func() {
ginkgo.By("Setting up a hamster deployment")
d := SetupHamsterDeployment(f, "100m", "100Mi", defaultHamsterReplicas)
ginkgo.By("Setting up a VPA CRD with ridiculous request")
SetupVPA(f, "9999", vpa_types.UpdateModeAuto) // Request 9999 CPUs to make POD pending
ginkgo.By("Waiting for pods to be restarted and stuck pending")
err := assertPodsPendingForDuration(f.ClientSet, d, 1, 2*time.Minute)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
})
ginkgo.It("never applies recommendations when update mode is Off", func() {
ginkgo.By("Setting up a hamster deployment")
d := SetupHamsterDeployment(f, "100m", "100Mi", defaultHamsterReplicas)
cpuRequest := getCPURequest(d.Spec.Template.Spec)
podList, err := GetHamsterPods(f)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
podSet := MakePodSet(podList)
ginkgo.By("Setting up a VPA CRD in mode Off")
SetupVPA(f, "200m", vpa_types.UpdateModeOff)
ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String()))
CheckNoPodsEvicted(f, podSet)
ginkgo.By("Forcefully killing one pod")
killPod(f, podList)
ginkgo.By("Checking the requests were not modified")
updatedPodList, err := GetHamsterPods(f)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
for _, pod := range updatedPodList.Items {
gomega.Expect(getCPURequest(pod.Spec)).To(gomega.Equal(cpuRequest))
}
})
ginkgo.It("applies recommendations only on restart when update mode is Initial", func() {
ginkgo.By("Setting up a hamster deployment")
SetupHamsterDeployment(f, "100m", "100Mi", defaultHamsterReplicas)
podList, err := GetHamsterPods(f)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
podSet := MakePodSet(podList)
ginkgo.By("Setting up a VPA CRD in mode Initial")
SetupVPA(f, "200m", vpa_types.UpdateModeInitial)
updatedCPURequest := ParseQuantityOrDie("200m")
ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String()))
CheckNoPodsEvicted(f, podSet)
ginkgo.By("Forcefully killing one pod")
killPod(f, podList)
ginkgo.By("Checking that request was modified after forceful restart")
updatedPodList, err := GetHamsterPods(f)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
foundUpdated := 0
for _, pod := range updatedPodList.Items {
podRequest := getCPURequest(pod.Spec)
framework.Logf("podReq: %v", podRequest)
if podRequest.Cmp(updatedCPURequest) == 0 {
foundUpdated += 1
}
}
gomega.Expect(foundUpdated).To(gomega.Equal(1))
})
})
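// getCPURequest returns the CPU request of the first container in the given pod spec.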
func getCPURequest(podSpec apiv1.PodSpec) resource.Quantity {
return podSpec.Containers[0].Resources.Requests[apiv1.ResourceCPU]
}
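// killPod deletes the first pod in podList and waits until it has been replaced.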
func killPod(f *framework.Framework, podList *apiv1.PodList) {
err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(podList.Items[0].Name, &metav1.DeleteOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
err = WaitForPodsRestarted(f, podList)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
// assertPodsPendingForDuration checks that exactly pendingPodsNum pods remain pending for at least pendingDuration
func assertPodsPendingForDuration(c clientset.Interface, deployment *appsv1.Deployment, pendingPodsNum int, pendingDuration time.Duration) error {
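// pendingPods maps the name of each pod currently observed as Pending to the time it was first seen Pending.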
pendingPods := make(map[string]time.Time)
err := wait.PollImmediate(pollInterval, pollTimeout+pendingDuration, func() (bool, error) {
var err error
currentPodList, err := framework.GetPodsForDeployment(c, deployment)
if err != nil {
return false, err
}
missingPods := make(map[string]bool)
for podName := range pendingPods {
missingPods[podName] = true
}
now := time.Now()
for _, pod := range currentPodList.Items {
delete(missingPods, pod.Name)
switch pod.Status.Phase {
case apiv1.PodPending:
_, ok := pendingPods[pod.Name]
if !ok {
pendingPods[pod.Name] = now
}
default:
delete(pendingPods, pod.Name)
}
}
for missingPod := range missingPods {
delete(pendingPods, missingPod)
}
if len(pendingPods) < pendingPodsNum {
return false, nil
}
if len(pendingPods) > pendingPodsNum {
return false, fmt.Errorf("%v pending pods seen - expecting %v", len(pendingPods), pendingPodsNum)
}
for p, t := range pendingPods {
fmt.Println("task", now, p, t, now.Sub(t), pendingDuration)
if now.Sub(t) < pendingDuration {
return false, nil
}
}
return true, nil
})
if err != nil {
return fmt.Errorf("assertion failed for pending pods in %v: %v", deployment.Name, err)
}
return nil
}


@@ -1,531 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package autoscaling
import (
"fmt"
appsv1 "k8s.io/api/apps/v1"
apiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1"
"k8s.io/kubernetes/test/e2e/framework"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
f := framework.NewDefaultFramework("vertical-pod-autoscaling")
ginkgo.It("starts pods with new recommended request", func() {
d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)
ginkgo.By("Setting up a VPA CRD")
vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{
MatchLabels: d.Spec.Template.Labels,
})
vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
ContainerRecommendations: []vpa_types.RecommendedContainerResources{{
ContainerName: "hamster",
Target: apiv1.ResourceList{
apiv1.ResourceCPU: ParseQuantityOrDie("250m"),
apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"),
},
}},
}
InstallVPA(f, vpaCRD)
ginkgo.By("Setting up a hamster deployment")
podList := startDeploymentPods(f, d)
// Originally Pods had 100m CPU, 100Mi of memory, but admission controller
// should change it to recommended 250m CPU and 200Mi of memory.
for _, pod := range podList.Items {
gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("250m")))
gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
}
})
ginkgo.It("doesn't block patches", func() {
d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)
ginkgo.By("Setting up a VPA CRD")
vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{
MatchLabels: d.Spec.Template.Labels,
})
vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
ContainerRecommendations: []vpa_types.RecommendedContainerResources{{
ContainerName: "hamster",
Target: apiv1.ResourceList{
apiv1.ResourceCPU: ParseQuantityOrDie("250m"),
apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"),
},
}},
}
InstallVPA(f, vpaCRD)
ginkgo.By("Setting up a hamster deployment")
podList := startDeploymentPods(f, d)
// Originally Pods had 100m CPU, 100Mi of memory, but admission controller
// should change it to recommended 250m CPU and 200Mi of memory.
for _, pod := range podList.Items {
gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("250m")))
gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
}
ginkgo.By("Modifying recommendation.")
PatchVpaRecommendation(f, vpaCRD, &vpa_types.RecommendedPodResources{
ContainerRecommendations: []vpa_types.RecommendedContainerResources{{
ContainerName: "hamster",
Target: apiv1.ResourceList{
apiv1.ResourceCPU: ParseQuantityOrDie("100m"),
apiv1.ResourceMemory: ParseQuantityOrDie("100Mi"),
},
}},
})
podName := podList.Items[0].Name
ginkgo.By(fmt.Sprintf("Modifying pod %v.", podName))
AnnotatePod(f, podName, "someAnnotation", "someValue")
})
ginkgo.It("keeps limits equal to request", func() {
d := NewHamsterDeploymentWithGuaranteedResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)
ginkgo.By("Setting up a VPA CRD")
vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{
MatchLabels: d.Spec.Template.Labels,
})
vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
ContainerRecommendations: []vpa_types.RecommendedContainerResources{{
ContainerName: "hamster",
Target: apiv1.ResourceList{
apiv1.ResourceCPU: ParseQuantityOrDie("250m"),
apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"),
},
}},
}
InstallVPA(f, vpaCRD)
ginkgo.By("Setting up a hamster deployment")
podList := startDeploymentPods(f, d)
// Originally Pods had 100m CPU, 100Mi of memory, but admission controller
// should change it to 250m CPU and 200Mi of memory. Limits and requests should stay equal.
for _, pod := range podList.Items {
gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("250m")))
gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
gomega.Expect(pod.Spec.Containers[0].Resources.Limits[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("250m")))
gomega.Expect(pod.Spec.Containers[0].Resources.Limits[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
}
})
ginkgo.It("keeps limits to request ratio constant", func() {
d := NewHamsterDeploymentWithResourcesAndLimits(f,
ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("100Mi"), /*memory request*/
ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("200Mi") /*memory limit*/)
ginkgo.By("Setting up a VPA CRD")
vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{
MatchLabels: d.Spec.Template.Labels,
})
vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
ContainerRecommendations: []vpa_types.RecommendedContainerResources{{
ContainerName: "hamster",
Target: apiv1.ResourceList{
apiv1.ResourceCPU: ParseQuantityOrDie("250m"),
apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"),
},
}},
}
InstallVPA(f, vpaCRD)
ginkgo.By("Setting up a hamster deployment")
podList := startDeploymentPods(f, d)
// Originally Pods had 100m CPU, 100Mi of memory, but admission controller
// should change it to 250m CPU and 200Mi of memory. Limits to request ratio should stay unchanged.
for _, pod := range podList.Items {
gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("250m")))
gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5))
gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.))
}
})
ginkgo.It("caps request according to container max limit set in LimitRange", func() {
d := NewHamsterDeploymentWithResourcesAndLimits(f,
ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("100Mi"), /*memory request*/
ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("200Mi") /*memory limit*/)
ginkgo.By("Setting up a VPA CRD")
vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{
MatchLabels: d.Spec.Template.Labels,
})
vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
ContainerRecommendations: []vpa_types.RecommendedContainerResources{{
ContainerName: "hamster",
Target: apiv1.ResourceList{
apiv1.ResourceCPU: ParseQuantityOrDie("250m"),
apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"),
},
}},
}
InstallVPA(f, vpaCRD)
// Max CPU limit is 300m and ratio is 1.5, so max request is 200m, while
// recommendation is 250m
// Max memory limit is 1Gi and ratio is 2., so max request is 0.5Gi
InstallLimitRangeWithMax(f, "300m", "1Gi", apiv1.LimitTypeContainer)
ginkgo.By("Setting up a hamster deployment")
podList := startDeploymentPods(f, d)
// Originally Pods had 100m CPU, 100Mi of memory, but admission controller
// should change it to 200m CPU (as this is the recommendation
// capped according to max limit in LimitRange) and 200Mi of memory,
// which is uncapped. Limit to request ratio should stay unchanged.
for _, pod := range podList.Items {
gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("200m")))
gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()).To(gomega.BeNumerically("<=", 300))
gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Memory().Value()).To(gomega.BeNumerically("<=", 1024*1024*1024))
gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5))
gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.))
}
})
ginkgo.It("raises request according to container min limit set in LimitRange", func() {
d := NewHamsterDeploymentWithResourcesAndLimits(f,
ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/
ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)
ginkgo.By("Setting up a VPA CRD")
vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{
MatchLabels: d.Spec.Template.Labels,
})
vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
ContainerRecommendations: []vpa_types.RecommendedContainerResources{{
ContainerName: "hamster",
Target: apiv1.ResourceList{
apiv1.ResourceCPU: ParseQuantityOrDie("250m"),
apiv1.ResourceMemory: ParseQuantityOrDie("100Mi"), // memory is downscaled
},
}},
}
InstallVPA(f, vpaCRD)
// Min CPU from the LimitRange is 50m and the ratio is 1.5. The min applies to both limit and
// request, so the min request is 50m and the min limit is 75m.
// Min memory is 250Mi and it applies to both limit and request. The recommendation is 100Mi,
// so it should be scaled up to 250Mi.
InstallLimitRangeWithMin(f, "50m", "250Mi", apiv1.LimitTypeContainer)
ginkgo.By("Setting up a hamster deployment")
podList := startDeploymentPods(f, d)
// Originally Pods had 100m CPU, 200Mi of memory, but the admission controller
// should change it to 250m CPU (the recommendation) and 250Mi of memory, since
// 250Mi is the lowest memory request that the LimitRange allows.
// Limit to request ratio should stay unchanged.
for _, pod := range podList.Items {
gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("250m")))
gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("250Mi")))
gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()).To(gomega.BeNumerically(">=", 75))
gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Memory().Value()).To(gomega.BeNumerically(">=", 250*1024*1024))
gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5))
gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.))
}
})
ginkgo.It("caps request according to pod max limit set in LimitRange", func() {
d := NewHamsterDeploymentWithResourcesAndLimits(f,
ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("100Mi"), /*memory request*/
ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("200Mi") /*memory limit*/)
d.Spec.Template.Spec.Containers = append(d.Spec.Template.Spec.Containers, d.Spec.Template.Spec.Containers[0])
d.Spec.Template.Spec.Containers[1].Name = "hamster2"
ginkgo.By("Setting up a VPA CRD")
vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{
MatchLabels: d.Spec.Template.Labels,
})
vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
ContainerRecommendations: []vpa_types.RecommendedContainerResources{
{
ContainerName: "hamster",
Target: apiv1.ResourceList{
apiv1.ResourceCPU: ParseQuantityOrDie("250m"),
apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"),
},
},
{
ContainerName: "hamster2",
Target: apiv1.ResourceList{
apiv1.ResourceCPU: ParseQuantityOrDie("250m"),
apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"),
},
},
},
}
InstallVPA(f, vpaCRD)
// Max CPU limit is 600m per pod, i.e. 300m per container, and the ratio is 1.5, so the max
// request is 200m, while the recommendation is 250m.
// Max memory limit is 1Gi and the ratio is 2, so the max request is 0.5Gi.
InstallLimitRangeWithMax(f, "600m", "1Gi", apiv1.LimitTypePod)
ginkgo.By("Setting up a hamster deployment")
podList := startDeploymentPods(f, d)
// Originally Pods had 100m CPU, 100Mi of memory, but admission controller
// should change it to 200m CPU (as this is the recommendation
// capped according to max limit in LimitRange) and 200Mi of memory,
// which is uncapped. Limit to request ratio should stay unchanged.
for _, pod := range podList.Items {
gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("200m")))
gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()).To(gomega.BeNumerically("<=", 300))
gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Memory().Value()).To(gomega.BeNumerically("<=", 1024*1024*1024))
gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5))
gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.))
}
})
ginkgo.It("raises request according to pod min limit set in LimitRange", func() {
d := NewHamsterDeploymentWithResourcesAndLimits(f,
ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/
ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)
d.Spec.Template.Spec.Containers = append(d.Spec.Template.Spec.Containers, d.Spec.Template.Spec.Containers[0])
d.Spec.Template.Spec.Containers[1].Name = "hamster2"
ginkgo.By("Setting up a VPA CRD")
vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{
MatchLabels: d.Spec.Template.Labels,
})
vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
ContainerRecommendations: []vpa_types.RecommendedContainerResources{
{
ContainerName: "hamster",
Target: apiv1.ResourceList{
apiv1.ResourceCPU: ParseQuantityOrDie("120m"),
apiv1.ResourceMemory: ParseQuantityOrDie("100Mi"), // memory is downscaled
},
},
{
ContainerName: "hamster2",
Target: apiv1.ResourceList{
apiv1.ResourceCPU: ParseQuantityOrDie("120m"),
apiv1.ResourceMemory: ParseQuantityOrDie("100Mi"), // memory is downscaled
},
},
},
}
InstallVPA(f, vpaCRD)
// Min CPU from the LimitRange is 100m per pod, i.e. 50m per container, and the ratio is 1.5.
// The min applies to both limit and request, so the min request is 50m and the min limit is 75m.
// Min memory is 500Mi per pod, i.e. 250Mi per container, and it applies to both limit and request.
// The recommendation is 100Mi, so it should be scaled up to 250Mi.
InstallLimitRangeWithMin(f, "100m", "500Mi", apiv1.LimitTypePod)
ginkgo.By("Setting up a hamster deployment")
podList := startDeploymentPods(f, d)
// Originally Pods had 100m CPU, 200Mi of memory, but the admission controller
// should change it to 120m CPU (the recommendation) and 250Mi of memory, since
// 250Mi is the lowest per-container memory request that the LimitRange allows.
// Limit to request ratio should stay unchanged.
for _, pod := range podList.Items {
gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("120m")))
gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("250Mi")))
gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()).To(gomega.BeNumerically(">=", 75))
gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Memory().Value()).To(gomega.BeNumerically(">=", 250*1024*1024))
gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5))
gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.))
}
})
ginkgo.It("caps request to max set in VPA", func() {
d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)
ginkgo.By("Setting up a VPA CRD")
vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{
MatchLabels: d.Spec.Template.Labels,
})
vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
ContainerRecommendations: []vpa_types.RecommendedContainerResources{{
ContainerName: "hamster",
Target: apiv1.ResourceList{
apiv1.ResourceCPU: ParseQuantityOrDie("250m"),
apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"),
},
}},
}
vpaCRD.Spec.ResourcePolicy = &vpa_types.PodResourcePolicy{
ContainerPolicies: []vpa_types.ContainerResourcePolicy{{
ContainerName: "hamster",
MaxAllowed: apiv1.ResourceList{
apiv1.ResourceCPU: ParseQuantityOrDie("233m"),
apiv1.ResourceMemory: ParseQuantityOrDie("150Mi"),
},
}},
}
InstallVPA(f, vpaCRD)
ginkgo.By("Setting up a hamster deployment")
podList := startDeploymentPods(f, d)
// Originally Pods had 100m CPU, 100Mi of memory, but admission controller
// should change it to 233m CPU and 150Mi of memory (as this is the recommendation
// capped to max specified in VPA)
for _, pod := range podList.Items {
gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("233m")))
gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("150Mi")))
}
})
ginkgo.It("raises request to min set in VPA", func() {
d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)
ginkgo.By("Setting up a VPA CRD")
vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{
MatchLabels: d.Spec.Template.Labels,
})
vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
ContainerRecommendations: []vpa_types.RecommendedContainerResources{{
ContainerName: "hamster",
Target: apiv1.ResourceList{
apiv1.ResourceCPU: ParseQuantityOrDie("50m"),
apiv1.ResourceMemory: ParseQuantityOrDie("60Mi"),
},
}},
}
vpaCRD.Spec.ResourcePolicy = &vpa_types.PodResourcePolicy{
ContainerPolicies: []vpa_types.ContainerResourcePolicy{{
ContainerName: "hamster",
MinAllowed: apiv1.ResourceList{
apiv1.ResourceCPU: ParseQuantityOrDie("90m"),
apiv1.ResourceMemory: ParseQuantityOrDie("80Mi"),
},
}},
}
InstallVPA(f, vpaCRD)
ginkgo.By("Setting up a hamster deployment")
podList := startDeploymentPods(f, d)
// Originally Pods had 100m CPU, 100Mi of memory, but the admission controller
// should change it to 90m CPU and 80Mi of memory (as this is the
// recommendation raised to the min specified in the VPA)
for _, pod := range podList.Items {
gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("90m")))
gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("80Mi")))
}
})
ginkgo.It("leaves users request when no recommendation", func() {
d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)
ginkgo.By("Setting up a VPA CRD")
vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{
MatchLabels: d.Spec.Template.Labels,
})
InstallVPA(f, vpaCRD)
ginkgo.By("Setting up a hamster deployment")
podList := startDeploymentPods(f, d)
// VPA has no recommendation, so user's request is passed through
for _, pod := range podList.Items {
gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("100m")))
gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("100Mi")))
}
})
ginkgo.It("passes empty request when no recommendation and no user-specified request", func() {
d := NewHamsterDeployment(f)
ginkgo.By("Setting up a VPA CRD")
vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{
MatchLabels: d.Spec.Template.Labels,
})
InstallVPA(f, vpaCRD)
ginkgo.By("Setting up a hamster deployment")
podList := startDeploymentPods(f, d)
// VPA has no recommendation, deployment has no request specified
for _, pod := range podList.Items {
gomega.Expect(pod.Spec.Containers[0].Resources.Requests).To(gomega.BeEmpty())
}
})
ginkgo.It("accepts valid and rejects invalid VPA object", func() {
ginkgo.By("Setting up valid VPA object")
validVPA := []byte(`{
"kind": "VerticalPodAutoscaler",
"apiVersion": "autoscaling.k8s.io/v1beta1",
"metadata": {"name": "hamster-vpa-valid"},
"spec": {
"targetRef": {
"apiVersion": "apps/v1",
"kind": "Deployment",
"name":"hamster"
},
"resourcePolicy": {
"containerPolicies": [{"containerName": "*", "minAllowed":{"cpu":"50m"}}]
}
}
}`)
err := InstallRawVPA(f, validVPA)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Valid VPA object rejected")
ginkgo.By("Setting up invalid VPA object")
// The invalid object differs by name and minAllowed - there is an invalid "requests" field.
invalidVPA := []byte(`{
"kind": "VerticalPodAutoscaler",
"apiVersion": "autoscaling.k8s.io/v1beta1",
"metadata": {"name": "hamster-vpa-invalid"},
"spec": {
"targetRef": {
"apiVersion": "apps/v1",
"kind": "Deployment",
"name":"hamster"
},
"resourcePolicy": {
"containerPolicies": [{"containerName": "*", "minAllowed":{"requests":{"cpu":"50m"}}}]
}
}
}`)
err2 := InstallRawVPA(f, invalidVPA)
gomega.Expect(err2).To(gomega.HaveOccurred(), "Invalid VPA object accepted")
gomega.Expect(err2.Error()).To(gomega.MatchRegexp(`.*admission webhook .*vpa.* denied the request: .*`))
})
})
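// startDeploymentPods creates the deployment, waits for it to become complete, and returns its pods.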
func startDeploymentPods(f *framework.Framework, deployment *appsv1.Deployment) *apiv1.PodList {
c, ns := f.ClientSet, f.Namespace.Name
deployment, err := c.AppsV1().Deployments(ns).Create(deployment)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
err = framework.WaitForDeploymentComplete(c, deployment)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
podList, err := framework.GetPodsForDeployment(c, deployment)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
return podList
}


@@ -1,536 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This is a fork of k8s.io/kubernetes/test/e2e/common/autoscaling_utils.go
package autoscaling
import (
"context"
"fmt"
"strconv"
"sync"
"time"
autoscalingv1 "k8s.io/api/autoscaling/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
ginkgo "github.com/onsi/ginkgo"
imageutils "k8s.io/kubernetes/test/utils/image"
)
const (
dynamicConsumptionTimeInSeconds = 30
staticConsumptionTimeInSeconds = 3600
dynamicRequestSizeInMillicores = 20
dynamicRequestSizeInMegabytes = 100
dynamicRequestSizeCustomMetric = 10
port = 80
targetPort = 8080
timeoutRC = 120 * time.Second
startServiceTimeout = time.Minute
startServiceInterval = 5 * time.Second
rcIsNil = "ERROR: replicationController = nil"
deploymentIsNil = "ERROR: deployment = nil"
rsIsNil = "ERROR: replicaset = nil"
invalidKind = "ERROR: invalid workload kind for resource consumer"
customMetricName = "QPS"
serviceInitializationTimeout = 2 * time.Minute
serviceInitializationInterval = 15 * time.Second
)
var (
resourceConsumerImage = imageutils.GetE2EImage(imageutils.ResourceConsumer)
resourceConsumerControllerImage = imageutils.GetE2EImage(imageutils.ResourceController)
)
var (
// KindRC is the GroupVersionKind of ReplicationController.
KindRC = schema.GroupVersionKind{Version: "v1", Kind: "ReplicationController"}
// KindDeployment is the GroupVersionKind of Deployment.
KindDeployment = schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "Deployment"}
// KindReplicaSet is the GroupVersionKind of ReplicaSet.
KindReplicaSet = schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "ReplicaSet"}
subresource = "scale"
)
/*
ResourceConsumer is a tool for testing. It helps create specified usage of CPU or memory
typical use case:
rc.ConsumeCPU(600)
// ... check your assumption here
rc.ConsumeCPU(300)
// ... check your assumption here
*/
type ResourceConsumer struct {
name string
controllerName string
kind schema.GroupVersionKind
nsName string
clientSet clientset.Interface
internalClientset *internalclientset.Clientset
cpu chan int
mem chan int
customMetric chan int
stopCPU chan int
stopMem chan int
stopCustomMetric chan int
stopWaitGroup sync.WaitGroup
consumptionTimeInSeconds int
sleepTime time.Duration
requestSizeInMillicores int
requestSizeInMegabytes int
requestSizeCustomMetric int
}
// GetResourceConsumerImage returns the image used by the resource consumer pods.
func GetResourceConsumerImage() string {
return resourceConsumerImage
}
// NewDynamicResourceConsumer creates a ResourceConsumer that consumes resources in dynamically sized requests.
func NewDynamicResourceConsumer(name, nsName string, kind schema.GroupVersionKind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuRequest, memRequest resource.Quantity, clientset clientset.Interface, internalClientset *internalclientset.Clientset) *ResourceConsumer {
return newResourceConsumer(name, nsName, kind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, dynamicConsumptionTimeInSeconds,
dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, cpuRequest, memRequest, clientset, internalClientset)
}
// NewStaticResourceConsumer creates a ResourceConsumer with a static consumption pattern. TODO: this still defaults to a replication controller.
func NewStaticResourceConsumer(name, nsName string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuRequest, memRequest resource.Quantity, clientset clientset.Interface, internalClientset *internalclientset.Clientset) *ResourceConsumer {
return newResourceConsumer(name, nsName, KindRC, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, staticConsumptionTimeInSeconds,
initCPUTotal/replicas, initMemoryTotal/replicas, initCustomMetric/replicas, cpuRequest, memRequest, clientset, internalClientset)
}
/*
NewResourceConsumer creates new ResourceConsumer
initCPUTotal argument is in millicores
initMemoryTotal argument is in megabytes
memRequest argument is in megabytes, it specifies the original Pod resource request
cpuRequest argument is in millicores, it specifies the original Pod resource request
*/
func newResourceConsumer(name, nsName string, kind schema.GroupVersionKind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, consumptionTimeInSeconds, requestSizeInMillicores,
requestSizeInMegabytes int, requestSizeCustomMetric int, cpuRequest, memRequest resource.Quantity, clientset clientset.Interface, internalClientset *internalclientset.Clientset) *ResourceConsumer {
runServiceAndWorkloadForResourceConsumer(clientset, internalClientset, nsName, name, kind, replicas, cpuRequest, memRequest)
rc := &ResourceConsumer{
name: name,
controllerName: name + "-ctrl",
kind: kind,
nsName: nsName,
clientSet: clientset,
internalClientset: internalClientset,
cpu: make(chan int),
mem: make(chan int),
customMetric: make(chan int),
stopCPU: make(chan int),
stopMem: make(chan int),
stopCustomMetric: make(chan int),
consumptionTimeInSeconds: consumptionTimeInSeconds,
sleepTime: time.Duration(consumptionTimeInSeconds) * time.Second,
requestSizeInMillicores: requestSizeInMillicores,
requestSizeInMegabytes: requestSizeInMegabytes,
requestSizeCustomMetric: requestSizeCustomMetric,
}
go rc.makeConsumeCPURequests()
rc.ConsumeCPU(initCPUTotal)
go rc.makeConsumeMemRequests()
rc.ConsumeMem(initMemoryTotal)
go rc.makeConsumeCustomMetric()
rc.ConsumeCustomMetric(initCustomMetric)
return rc
}
// ConsumeCPU consumes the given number of millicores of CPU.
func (rc *ResourceConsumer) ConsumeCPU(millicores int) {
framework.Logf("RC %s: consume %v millicores in total", rc.name, millicores)
rc.cpu <- millicores
}
// ConsumeMem consumes the given number of megabytes of memory.
func (rc *ResourceConsumer) ConsumeMem(megabytes int) {
framework.Logf("RC %s: consume %v MB in total", rc.name, megabytes)
rc.mem <- megabytes
}
// ConsumeCustomMetric bumps the custom metric by the given amount.
func (rc *ResourceConsumer) ConsumeCustomMetric(amount int) {
framework.Logf("RC %s: consume custom metric %v in total", rc.name, amount)
rc.customMetric <- amount
}
func (rc *ResourceConsumer) makeConsumeCPURequests() {
defer ginkgo.GinkgoRecover()
rc.stopWaitGroup.Add(1)
defer rc.stopWaitGroup.Done()
sleepTime := time.Duration(0)
millicores := 0
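// Each loop iteration either picks up a new consumption target from rc.cpu, re-sends the current target after sleepTime, or exits on rc.stopCPU.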
for {
select {
case millicores = <-rc.cpu:
framework.Logf("RC %s: setting consumption to %v millicores in total", rc.name, millicores)
case <-time.After(sleepTime):
framework.Logf("RC %s: sending request to consume %d millicores", rc.name, millicores)
rc.sendConsumeCPURequest(millicores)
sleepTime = rc.sleepTime
case <-rc.stopCPU:
framework.Logf("RC %s: stopping CPU consumer", rc.name)
return
}
}
}
func (rc *ResourceConsumer) makeConsumeMemRequests() {
defer ginkgo.GinkgoRecover()
rc.stopWaitGroup.Add(1)
defer rc.stopWaitGroup.Done()
sleepTime := time.Duration(0)
megabytes := 0
for {
select {
case megabytes = <-rc.mem:
framework.Logf("RC %s: setting consumption to %v MB in total", rc.name, megabytes)
case <-time.After(sleepTime):
framework.Logf("RC %s: sending request to consume %d MB", rc.name, megabytes)
rc.sendConsumeMemRequest(megabytes)
sleepTime = rc.sleepTime
case <-rc.stopMem:
framework.Logf("RC %s: stopping mem consumer", rc.name)
return
}
}
}
func (rc *ResourceConsumer) makeConsumeCustomMetric() {
defer ginkgo.GinkgoRecover()
rc.stopWaitGroup.Add(1)
defer rc.stopWaitGroup.Done()
sleepTime := time.Duration(0)
delta := 0
for {
select {
case delta = <-rc.customMetric:
framework.Logf("RC %s: setting bump of metric %s to %d in total", rc.name, customMetricName, delta)
case <-time.After(sleepTime):
framework.Logf("RC %s: sending request to consume %d of custom metric %s", rc.name, delta, customMetricName)
rc.sendConsumeCustomMetric(delta)
sleepTime = rc.sleepTime
case <-rc.stopCustomMetric:
framework.Logf("RC %s: stopping metric consumer", rc.name)
return
}
}
}
func (rc *ResourceConsumer) sendConsumeCPURequest(millicores int) {
ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
defer cancel()
err := wait.PollImmediate(serviceInitializationInterval, serviceInitializationTimeout, func() (bool, error) {
proxyRequest, err := framework.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post())
framework.ExpectNoError(err)
req := proxyRequest.Namespace(rc.nsName).
Context(ctx).
Name(rc.controllerName).
Suffix("ConsumeCPU").
Param("millicores", strconv.Itoa(millicores)).
Param("durationSec", strconv.Itoa(rc.consumptionTimeInSeconds)).
Param("requestSizeMillicores", strconv.Itoa(rc.requestSizeInMillicores))
framework.Logf("ConsumeCPU URL: %v", *req.URL())
_, err = req.DoRaw()
if err != nil {
framework.Logf("ConsumeCPU failure: %v", err)
return false, nil
}
return true, nil
})
framework.ExpectNoError(err)
}
// sendConsumeMemRequest sends POST request for memory consumption
func (rc *ResourceConsumer) sendConsumeMemRequest(megabytes int) {
ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
defer cancel()
err := wait.PollImmediate(serviceInitializationInterval, serviceInitializationTimeout, func() (bool, error) {
proxyRequest, err := framework.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post())
framework.ExpectNoError(err)
req := proxyRequest.Namespace(rc.nsName).
Context(ctx).
Name(rc.controllerName).
Suffix("ConsumeMem").
Param("megabytes", strconv.Itoa(megabytes)).
Param("durationSec", strconv.Itoa(rc.consumptionTimeInSeconds)).
Param("requestSizeMegabytes", strconv.Itoa(rc.requestSizeInMegabytes))
framework.Logf("ConsumeMem URL: %v", *req.URL())
_, err = req.DoRaw()
if err != nil {
framework.Logf("ConsumeMem failure: %v", err)
return false, nil
}
return true, nil
})
framework.ExpectNoError(err)
}
// sendConsumeCustomMetric sends POST request for custom metric consumption
func (rc *ResourceConsumer) sendConsumeCustomMetric(delta int) {
ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
defer cancel()
err := wait.PollImmediate(serviceInitializationInterval, serviceInitializationTimeout, func() (bool, error) {
proxyRequest, err := framework.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post())
framework.ExpectNoError(err)
req := proxyRequest.Namespace(rc.nsName).
Context(ctx).
Name(rc.controllerName).
Suffix("BumpMetric").
Param("metric", customMetricName).
Param("delta", strconv.Itoa(delta)).
Param("durationSec", strconv.Itoa(rc.consumptionTimeInSeconds)).
Param("requestSizeMetrics", strconv.Itoa(rc.requestSizeCustomMetric))
framework.Logf("ConsumeCustomMetric URL: %v", *req.URL())
_, err = req.DoRaw()
if err != nil {
framework.Logf("ConsumeCustomMetric failure: %v", err)
return false, nil
}
return true, nil
})
framework.ExpectNoError(err)
}
// GetReplicas returns the current number of ready replicas of the consumer's workload.
func (rc *ResourceConsumer) GetReplicas() int {
switch rc.kind {
case KindRC:
replicationController, err := rc.clientSet.CoreV1().ReplicationControllers(rc.nsName).Get(rc.name, metav1.GetOptions{})
framework.ExpectNoError(err)
if replicationController == nil {
framework.Failf(rcIsNil)
}
return int(replicationController.Status.ReadyReplicas)
case KindDeployment:
deployment, err := rc.clientSet.ExtensionsV1beta1().Deployments(rc.nsName).Get(rc.name, metav1.GetOptions{})
framework.ExpectNoError(err)
if deployment == nil {
framework.Failf(deploymentIsNil)
}
return int(deployment.Status.ReadyReplicas)
case KindReplicaSet:
rs, err := rc.clientSet.ExtensionsV1beta1().ReplicaSets(rc.nsName).Get(rc.name, metav1.GetOptions{})
framework.ExpectNoError(err)
if rs == nil {
framework.Failf(rsIsNil)
}
return int(rs.Status.ReadyReplicas)
default:
framework.Failf(invalidKind)
}
return 0
}
// WaitForReplicas waits until the workload reaches desiredReplicas ready replicas, failing the test after duration.
func (rc *ResourceConsumer) WaitForReplicas(desiredReplicas int, duration time.Duration) {
interval := 20 * time.Second
err := wait.PollImmediate(interval, duration, func() (bool, error) {
replicas := rc.GetReplicas()
framework.Logf("waiting for %d replicas (current: %d)", desiredReplicas, replicas)
return replicas == desiredReplicas, nil // Expected number of replicas found. Exit.
})
framework.ExpectNoErrorWithOffset(1, err, "timeout waiting %v for %d replicas", duration, desiredReplicas)
}
// EnsureDesiredReplicas checks that the number of ready replicas stays at desiredReplicas for the whole duration.
func (rc *ResourceConsumer) EnsureDesiredReplicas(desiredReplicas int, duration time.Duration) {
interval := 10 * time.Second
err := wait.PollImmediate(interval, duration, func() (bool, error) {
replicas := rc.GetReplicas()
framework.Logf("expecting there to be %d replicas (are: %d)", desiredReplicas, replicas)
if replicas != desiredReplicas {
return false, fmt.Errorf("number of replicas changed unexpectedly")
}
return false, nil // Expected number of replicas found. Continue polling until timeout.
})
// The call above always returns an error, but if it is timeout, it's OK (condition satisfied all the time).
if err == wait.ErrWaitTimeout {
framework.Logf("Number of replicas was stable over %v", duration)
return
}
framework.ExpectNoErrorWithOffset(1, err)
}
// Pause stops background goroutines responsible for consuming resources.
func (rc *ResourceConsumer) Pause() {
ginkgo.By(fmt.Sprintf("HPA pausing RC %s", rc.name))
rc.stopCPU <- 0
rc.stopMem <- 0
rc.stopCustomMetric <- 0
rc.stopWaitGroup.Wait()
}
// Resume starts background goroutines responsible for consuming resources.
func (rc *ResourceConsumer) Resume() {
ginkgo.By(fmt.Sprintf("HPA resuming RC %s", rc.name))
go rc.makeConsumeCPURequests()
go rc.makeConsumeMemRequests()
go rc.makeConsumeCustomMetric()
}
// CleanUp stops the consumer goroutines and deletes the workload, the controller RC and their services.
func (rc *ResourceConsumer) CleanUp() {
ginkgo.By(fmt.Sprintf("Removing consuming RC %s", rc.name))
close(rc.stopCPU)
close(rc.stopMem)
close(rc.stopCustomMetric)
rc.stopWaitGroup.Wait()
// Wait some time to ensure all child goroutines are finished.
time.Sleep(10 * time.Second)
kind := rc.kind.GroupKind()
framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(rc.clientSet, kind, rc.nsName, rc.name))
framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(rc.name, nil))
framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(rc.clientSet, api.Kind("ReplicationController"), rc.nsName, rc.controllerName))
framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(rc.controllerName, nil))
}
func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, internalClient internalclientset.Interface, ns, name string, kind schema.GroupVersionKind, replicas int, cpuRequest, memRequest resource.Quantity) {
ginkgo.By(fmt.Sprintf("Running consuming RC %s via %s with %v replicas", name, kind, replicas))
_, err := c.CoreV1().Services(ns).Create(&v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{{
Port: port,
TargetPort: intstr.FromInt(targetPort),
}},
Selector: map[string]string{
"name": name,
},
},
})
framework.ExpectNoError(err)
rcConfig := testutils.RCConfig{
Client: c,
InternalClient: internalClient,
Image: resourceConsumerImage,
Name: name,
Namespace: ns,
Timeout: timeoutRC,
Replicas: replicas,
CpuRequest: cpuRequest.MilliValue(),
MemRequest: memRequest.Value(),
}
switch kind {
case KindRC:
framework.ExpectNoError(framework.RunRC(rcConfig))
break
case KindDeployment:
dpConfig := testutils.DeploymentConfig{
RCConfig: rcConfig,
}
framework.ExpectNoError(framework.RunDeployment(dpConfig))
break
case KindReplicaSet:
rsConfig := testutils.ReplicaSetConfig{
RCConfig: rcConfig,
}
ginkgo.By(fmt.Sprintf("creating replicaset %s in namespace %s", rsConfig.Name, rsConfig.Namespace))
framework.ExpectNoError(framework.RunReplicaSet(rsConfig))
break
default:
framework.Failf(invalidKind)
}
ginkgo.By(fmt.Sprintf("Running controller"))
controllerName := name + "-ctrl"
_, err = c.CoreV1().Services(ns).Create(&v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: controllerName,
},
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{{
Port: port,
TargetPort: intstr.FromInt(targetPort),
}},
Selector: map[string]string{
"name": controllerName,
},
},
})
framework.ExpectNoError(err)
dnsClusterFirst := v1.DNSClusterFirst
controllerRcConfig := testutils.RCConfig{
Client: c,
Image: resourceConsumerControllerImage,
Name: controllerName,
Namespace: ns,
Timeout: timeoutRC,
Replicas: 1,
Command: []string{"/controller", "--consumer-service-name=" + name, "--consumer-service-namespace=" + ns, "--consumer-port=80"},
DNSPolicy: &dnsClusterFirst,
}
framework.ExpectNoError(framework.RunRC(controllerRcConfig))
// Wait for endpoints to propagate for the controller service.
framework.ExpectNoError(framework.WaitForServiceEndpointsNum(
c, ns, controllerName, 1, startServiceInterval, startServiceTimeout))
}
// CreateCPUHorizontalPodAutoscaler creates an HPA scaling the consumer's workload on CPU utilization.
func CreateCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, maxRepl int32) *autoscalingv1.HorizontalPodAutoscaler {
hpa := &autoscalingv1.HorizontalPodAutoscaler{
ObjectMeta: metav1.ObjectMeta{
Name: rc.name,
Namespace: rc.nsName,
},
Spec: autoscalingv1.HorizontalPodAutoscalerSpec{
ScaleTargetRef: autoscalingv1.CrossVersionObjectReference{
APIVersion: rc.kind.GroupVersion().String(),
Kind: rc.kind.Kind,
Name: rc.name,
},
MinReplicas: &minReplicas,
MaxReplicas: maxRepl,
TargetCPUUtilizationPercentage: &cpu,
},
}
hpa, errHPA := rc.clientSet.AutoscalingV1().HorizontalPodAutoscalers(rc.nsName).Create(hpa)
framework.ExpectNoError(errHPA)
return hpa
}
// DeleteHorizontalPodAutoscaler deletes the HPA with the given name from the consumer's namespace.
func DeleteHorizontalPodAutoscaler(rc *ResourceConsumer, autoscalerName string) {
rc.clientSet.AutoscalingV1().HorizontalPodAutoscalers(rc.nsName).Delete(autoscalerName, nil)
}


@@ -1,489 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package autoscaling
import (
"encoding/json"
"fmt"
"time"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
appsv1 "k8s.io/api/apps/v1"
apiv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1"
vpa_clientset "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned"
"k8s.io/kubernetes/test/e2e/framework"
)
const (
recommenderComponent = "recommender"
updateComponent = "updater"
admissionControllerComponent = "admission-controller"
fullVpaSuite = "full-vpa"
actuationSuite = "actuation"
pollInterval = 10 * time.Second
pollTimeout = 15 * time.Minute
// VpaEvictionTimeout is a timeout for VPA to restart a pod if there are no
// mechanisms blocking it (for example PDB).
VpaEvictionTimeout = 3 * time.Minute
defaultHamsterReplicas = int32(3)
)
var hamsterLabels = map[string]string{"app": "hamster"}
// SIGDescribe adds sig-autoscaling tag to test description.
func SIGDescribe(text string, body func()) bool {
return ginkgo.Describe(fmt.Sprintf("[sig-autoscaling] %v", text), body)
}
// E2eDescribe describes a VPA e2e test.
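// For example, ActuationSuiteE2eDescribe("Actuation", ...) produces a spec named
// "[sig-autoscaling] [VPA] [actuation] [v1beta1] Actuation".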
func E2eDescribe(scenario, name string, body func()) bool {
return SIGDescribe(fmt.Sprintf("[VPA] [%s] [v1beta1] %s", scenario, name), body)
}
// RecommenderE2eDescribe describes a VPA recommender e2e test.
func RecommenderE2eDescribe(name string, body func()) bool {
return E2eDescribe(recommenderComponent, name, body)
}
// UpdaterE2eDescribe describes a VPA updater e2e test.
func UpdaterE2eDescribe(name string, body func()) bool {
return E2eDescribe(updateComponent, name, body)
}
// AdmissionControllerE2eDescribe describes a VPA admission controller e2e test.
func AdmissionControllerE2eDescribe(name string, body func()) bool {
return E2eDescribe(admissionControllerComponent, name, body)
}
// FullVpaE2eDescribe describes a VPA full stack e2e test.
func FullVpaE2eDescribe(name string, body func()) bool {
return E2eDescribe(fullVpaSuite, name, body)
}
// ActuationSuiteE2eDescribe describes a VPA actuation e2e test.
func ActuationSuiteE2eDescribe(name string, body func()) bool {
return E2eDescribe(actuationSuite, name, body)
}
// SetupHamsterDeployment creates and installs a simple hamster deployment
// for e2e test purposes, then makes sure the deployment is running.
func SetupHamsterDeployment(f *framework.Framework, cpu, memory string, replicas int32) *appsv1.Deployment {
cpuQuantity := ParseQuantityOrDie(cpu)
memoryQuantity := ParseQuantityOrDie(memory)
d := NewHamsterDeploymentWithResources(f, cpuQuantity, memoryQuantity)
d.Spec.Replicas = &replicas
d, err := f.ClientSet.AppsV1().Deployments(f.Namespace.Name).Create(d)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
err = framework.WaitForDeploymentComplete(f.ClientSet, d)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
return d
}
// NewHamsterDeployment creates a simple hamster deployment for e2e test
// purposes.
func NewHamsterDeployment(f *framework.Framework) *appsv1.Deployment {
d := framework.NewDeployment("hamster-deployment", defaultHamsterReplicas, hamsterLabels, "hamster", "k8s.gcr.io/ubuntu-slim:0.1", appsv1.RollingUpdateDeploymentStrategyType)
d.ObjectMeta.Namespace = f.Namespace.Name
d.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh"}
d.Spec.Template.Spec.Containers[0].Args = []string{"-c", "/usr/bin/yes >/dev/null"}
return d
}
// NewHamsterDeploymentWithResources creates a simple hamster deployment with specific
// resource requests for e2e test purposes.
func NewHamsterDeploymentWithResources(f *framework.Framework, cpuQuantity, memoryQuantity resource.Quantity) *appsv1.Deployment {
d := NewHamsterDeployment(f)
d.Spec.Template.Spec.Containers[0].Resources.Requests = apiv1.ResourceList{
apiv1.ResourceCPU: cpuQuantity,
apiv1.ResourceMemory: memoryQuantity,
}
return d
}
// NewHamsterDeploymentWithGuaranteedResources creates a simple hamster deployment with specific
// resource requests for e2e test purposes. Since the container in the pod specifies resource limits
// but not resource requests, K8s will set requests equal to limits and the pod will have the
// Guaranteed QoS class.
func NewHamsterDeploymentWithGuaranteedResources(f *framework.Framework, cpuQuantity, memoryQuantity resource.Quantity) *appsv1.Deployment {
d := NewHamsterDeployment(f)
d.Spec.Template.Spec.Containers[0].Resources.Limits = apiv1.ResourceList{
apiv1.ResourceCPU: cpuQuantity,
apiv1.ResourceMemory: memoryQuantity,
}
return d
}
// NewHamsterDeploymentWithResourcesAndLimits creates a simple hamster deployment with specific
// resource requests and limits for e2e test purposes.
func NewHamsterDeploymentWithResourcesAndLimits(f *framework.Framework, cpuQuantityRequest, memoryQuantityRequest, cpuQuantityLimit, memoryQuantityLimit resource.Quantity) *appsv1.Deployment {
d := NewHamsterDeploymentWithResources(f, cpuQuantityRequest, memoryQuantityRequest)
d.Spec.Template.Spec.Containers[0].Resources.Limits = apiv1.ResourceList{
apiv1.ResourceCPU: cpuQuantityLimit,
apiv1.ResourceMemory: memoryQuantityLimit,
}
return d
}
// GetHamsterPods returns running hamster pods (matched by hamsterLabels)
func GetHamsterPods(f *framework.Framework) (*apiv1.PodList, error) {
label := labels.SelectorFromSet(labels.Set(hamsterLabels))
selector := fields.ParseSelectorOrDie("status.phase!=" + string(apiv1.PodSucceeded) +
",status.phase!=" + string(apiv1.PodFailed))
options := metav1.ListOptions{LabelSelector: label.String(), FieldSelector: selector.String()}
return f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(options)
}
// SetupVPA creates and installs a simple hamster VPA for e2e test purposes.
func SetupVPA(f *framework.Framework, cpu string, mode vpa_types.UpdateMode) {
vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{
MatchLabels: hamsterLabels,
})
vpaCRD.Spec.UpdatePolicy.UpdateMode = &mode
cpuQuantity := ParseQuantityOrDie(cpu)
resourceList := apiv1.ResourceList{apiv1.ResourceCPU: cpuQuantity}
vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
ContainerRecommendations: []vpa_types.RecommendedContainerResources{{
ContainerName: "hamster",
Target: resourceList,
LowerBound: resourceList,
UpperBound: resourceList,
}},
}
InstallVPA(f, vpaCRD)
}
// NewVPA creates a VPA object for e2e test purposes.
func NewVPA(f *framework.Framework, name string, selector *metav1.LabelSelector) *vpa_types.VerticalPodAutoscaler {
updateMode := vpa_types.UpdateModeAuto
vpa := vpa_types.VerticalPodAutoscaler{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: f.Namespace.Name,
},
Spec: vpa_types.VerticalPodAutoscalerSpec{
Selector: selector,
UpdatePolicy: &vpa_types.PodUpdatePolicy{
UpdateMode: &updateMode,
},
ResourcePolicy: &vpa_types.PodResourcePolicy{
ContainerPolicies: []vpa_types.ContainerResourcePolicy{},
},
},
}
return &vpa
}
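// patchRecord describes a single JSON patch operation used to modify a Kubernetes object.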
type patchRecord struct {
Op string `json:"op"`
Path string `json:"path"`
Value interface{} `json:"value"`
}
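// getVpaClientSet builds a VPA clientset from the e2e framework's cluster config.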
func getVpaClientSet(f *framework.Framework) vpa_clientset.Interface {
config, err := framework.LoadConfig()
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error loading framework")
return vpa_clientset.NewForConfigOrDie(config)
}
// InstallVPA installs a VPA object in the test cluster.
func InstallVPA(f *framework.Framework, vpa *vpa_types.VerticalPodAutoscaler) {
vpaClientSet := getVpaClientSet(f)
_, err := vpaClientSet.AutoscalingV1beta1().VerticalPodAutoscalers(f.Namespace.Name).Create(vpa)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error creating VPA")
}
// InstallRawVPA installs a VPA object passed in as raw json in the test cluster.
func InstallRawVPA(f *framework.Framework, obj interface{}) error {
vpaClientSet := getVpaClientSet(f)
result := vpaClientSet.AutoscalingV1beta1().RESTClient().Post().
Namespace(f.Namespace.Name).
Resource("verticalpodautoscalers").
Body(obj).
Do()
return result.Error()
}
// PatchVpaRecommendation installs a new recommendation for the VPA object.
func PatchVpaRecommendation(f *framework.Framework, vpa *vpa_types.VerticalPodAutoscaler,
recommendation *vpa_types.RecommendedPodResources) {
newStatus := vpa.Status.DeepCopy()
newStatus.Recommendation = recommendation
bytes, err := json.Marshal([]patchRecord{{
Op: "replace",
Path: "/status",
Value: *newStatus,
}})
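// bytes now holds a JSON patch document of the form: [{"op":"replace","path":"/status","value":{...}}]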
gomega.Expect(err).NotTo(gomega.HaveOccurred())
_, err = getVpaClientSet(f).AutoscalingV1beta1().VerticalPodAutoscalers(f.Namespace.Name).Patch(vpa.Name, types.JSONPatchType, bytes)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to patch VPA.")
}
// AnnotatePod adds annotation for an existing pod.
func AnnotatePod(f *framework.Framework, podName, annotationName, annotationValue string) {
bytes, err := json.Marshal([]patchRecord{{
Op: "add",
Path: fmt.Sprintf("/metadata/annotations/%v", annotationName),
Value: annotationValue,
}})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Patch(podName, types.JSONPatchType, bytes)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to patch pod.")
gomega.Expect(pod.Annotations[annotationName]).To(gomega.Equal(annotationValue))
}
// ParseQuantityOrDie parses quantity from string and dies with an error if
// unparsable.
func ParseQuantityOrDie(text string) resource.Quantity {
quantity, err := resource.ParseQuantity(text)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
return quantity
}
// PodSet is a simplified representation of PodList mapping names to UIDs.
type PodSet map[string]types.UID
// MakePodSet converts PodList to podset for easier comparison of pod collections.
func MakePodSet(pods *apiv1.PodList) PodSet {
result := make(PodSet)
if pods == nil {
return result
}
for _, p := range pods.Items {
result[p.Name] = p.UID
}
return result
}
// WaitForPodsRestarted waits until some pods from the list are restarted.
func WaitForPodsRestarted(f *framework.Framework, podList *apiv1.PodList) error {
initialPodSet := MakePodSet(podList)
err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
currentPodList, err := GetHamsterPods(f)
if err != nil {
return false, err
}
currentPodSet := MakePodSet(currentPodList)
return WerePodsSuccessfullyRestarted(currentPodSet, initialPodSet), nil
})
if err != nil {
return fmt.Errorf("waiting for set of pods changed: %v", err)
}
return nil
}
// WaitForPodsEvicted waits until some pods from the list are evicted.
func WaitForPodsEvicted(f *framework.Framework, podList *apiv1.PodList) error {
initialPodSet := MakePodSet(podList)
err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
currentPodList, err := GetHamsterPods(f)
if err != nil {
return false, err
}
currentPodSet := MakePodSet(currentPodList)
return GetEvictedPodsCount(currentPodSet, initialPodSet) > 0, nil
})
if err != nil {
return fmt.Errorf("waiting for set of pods changed: %v", err)
}
return nil
}
// WerePodsSuccessfullyRestarted returns true if some pods from initialPodSet have been
// successfully restarted compared to currentPodSet (pods were evicted and
// are running again).
func WerePodsSuccessfullyRestarted(currentPodSet PodSet, initialPodSet PodSet) bool {
if len(currentPodSet) < len(initialPodSet) {
// If we have fewer pods running than in the beginning, there is a restart
// in progress - a pod was evicted but not yet recreated.
framework.Logf("Restart in progress")
return false
}
evictedCount := GetEvictedPodsCount(currentPodSet, initialPodSet)
framework.Logf("%v of initial pods were already evicted", evictedCount)
return evictedCount > 0
}
// GetEvictedPodsCount returns the count of pods from initialPodSet that have
// been evicted compared to currentPodSet.
func GetEvictedPodsCount(currentPodSet PodSet, initialPodSet PodSet) int {
diffs := 0
for name, initialUID := range initialPodSet {
currentUID, inCurrent := currentPodSet[name]
if !inCurrent || initialUID != currentUID {
diffs++
}
}
return diffs
}
// CheckNoPodsEvicted waits long enough for the VPA to start evicting
// pods and checks that no pods were evicted.
func CheckNoPodsEvicted(f *framework.Framework, initialPodSet PodSet) {
time.Sleep(VpaEvictionTimeout)
currentPodList, err := GetHamsterPods(f)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
restarted := GetEvictedPodsCount(MakePodSet(currentPodList), initialPodSet)
gomega.Expect(restarted).To(gomega.Equal(0))
}
// WaitForVPAMatch polls the VPA object until the match function returns true. Returns
// the polled VPA object. On timeout returns an error.
func WaitForVPAMatch(c *vpa_clientset.Clientset, vpa *vpa_types.VerticalPodAutoscaler, match func(vpa *vpa_types.VerticalPodAutoscaler) bool) (*vpa_types.VerticalPodAutoscaler, error) {
var polledVpa *vpa_types.VerticalPodAutoscaler
err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
var err error
polledVpa, err = c.AutoscalingV1beta1().VerticalPodAutoscalers(vpa.Namespace).Get(vpa.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
if match(polledVpa) {
return true, nil
}
return false, nil
})
if err != nil {
return nil, fmt.Errorf("error waiting for VPA %v to reach the expected state: %v", vpa.Name, err)
}
return polledVpa, nil
}
// WaitForRecommendationPresent polls the VPA object until recommendations are not empty. Returns
// the polled VPA object. On timeout returns an error.
func WaitForRecommendationPresent(c *vpa_clientset.Clientset, vpa *vpa_types.VerticalPodAutoscaler) (*vpa_types.VerticalPodAutoscaler, error) {
return WaitForVPAMatch(c, vpa, func(vpa *vpa_types.VerticalPodAutoscaler) bool {
return vpa.Status.Recommendation != nil && len(vpa.Status.Recommendation.ContainerRecommendations) != 0
})
}
// WaitForConditionPresent polls the VPA object until it contains a condition with the given type. On timeout returns an error.
func WaitForConditionPresent(c *vpa_clientset.Clientset, vpa *vpa_types.VerticalPodAutoscaler, expectedConditionType string) (*vpa_types.VerticalPodAutoscaler, error) {
return WaitForVPAMatch(c, vpa, func(vpa *vpa_types.VerticalPodAutoscaler) bool {
for _, condition := range vpa.Status.Conditions {
if string(condition.Type) == expectedConditionType {
return true
}
}
return false
})
}
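// installLimitRange creates a LimitRange named "hamster-lr" in the test namespace
// with the given limit type; max and min items are added only for non-nil quantities.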
func installLimitRange(f *framework.Framework, minCpuLimit, minMemoryLimit, maxCpuLimit, maxMemoryLimit *resource.Quantity, lrType apiv1.LimitType) {
lr := &apiv1.LimitRange{
ObjectMeta: metav1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: "hamster-lr",
},
Spec: apiv1.LimitRangeSpec{
Limits: []apiv1.LimitRangeItem{},
},
}
if maxMemoryLimit != nil || maxCpuLimit != nil {
lrItem := apiv1.LimitRangeItem{
Type: lrType,
Max: apiv1.ResourceList{},
}
if maxCpuLimit != nil {
lrItem.Max[apiv1.ResourceCPU] = *maxCpuLimit
}
if maxMemoryLimit != nil {
lrItem.Max[apiv1.ResourceMemory] = *maxMemoryLimit
}
lr.Spec.Limits = append(lr.Spec.Limits, lrItem)
}
if minMemoryLimit != nil || minCpuLimit != nil {
lrItem := apiv1.LimitRangeItem{
Type: lrType,
Min: apiv1.ResourceList{},
}
if minCpuLimit != nil {
lrItem.Min[apiv1.ResourceCPU] = *minCpuLimit
}
if minMemoryLimit != nil {
lrItem.Min[apiv1.ResourceMemory] = *minMemoryLimit
}
lr.Spec.Limits = append(lr.Spec.Limits, lrItem)
}
_, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Create(lr)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error when creating limit range")
}
// InstallLimitRangeWithMax installs a LimitRange with a maximum limit for CPU and memory.
func InstallLimitRangeWithMax(f *framework.Framework, maxCpuLimit, maxMemoryLimit string, lrType apiv1.LimitType) {
ginkgo.By(fmt.Sprintf("Setting up LimitRange with max limits - CPU: %v, memory: %v", maxCpuLimit, maxMemoryLimit))
maxCpuLimitQuantity := ParseQuantityOrDie(maxCpuLimit)
maxMemoryLimitQuantity := ParseQuantityOrDie(maxMemoryLimit)
installLimitRange(f, nil, nil, &maxCpuLimitQuantity, &maxMemoryLimitQuantity, lrType)
}
// InstallLimitRangeWithMin installs a LimitRange with a minimum limit for CPU and memory.
func InstallLimitRangeWithMin(f *framework.Framework, minCpuLimit, minMemoryLimit string, lrType apiv1.LimitType) {
ginkgo.By(fmt.Sprintf("Setting up LimitRange with min limits - CPU: %v, memory: %v", minCpuLimit, minMemoryLimit))
minCpuLimitQuantity := ParseQuantityOrDie(minCpuLimit)
minMemoryLimitQuantity := ParseQuantityOrDie(minMemoryLimit)
installLimitRange(f, &minCpuLimitQuantity, &minMemoryLimitQuantity, nil, nil, lrType)
}
// SetupVPAForTwoHamsters creates and installs a VPA CRD with a recommendation for two hamster containers for e2e test purposes.
func SetupVPAForTwoHamsters(f *framework.Framework, cpu string, mode vpa_types.UpdateMode) {
vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{
MatchLabels: hamsterLabels,
})
vpaCRD.Spec.UpdatePolicy.UpdateMode = &mode
cpuQuantity := ParseQuantityOrDie(cpu)
resourceList := apiv1.ResourceList{apiv1.ResourceCPU: cpuQuantity}
vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
ContainerRecommendations: []vpa_types.RecommendedContainerResources{
{
ContainerName: "hamster",
Target: resourceList,
LowerBound: resourceList,
UpperBound: resourceList,
},
{
ContainerName: "hamster2",
Target: resourceList,
LowerBound: resourceList,
UpperBound: resourceList,
},
},
}
InstallVPA(f, vpaCRD)
}

View File

@ -1,210 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package autoscaling
// This file is a cut down fork of k8s/test/e2e/e2e.go
import (
"fmt"
"io/ioutil"
"os"
"path"
"testing"
"time"
"github.com/onsi/ginkgo"
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/reporters"
"github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtimeutils "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/component-base/logs"
// needed to authorize to GKE cluster
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/version"
commontest "k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
"k8s.io/kubernetes/test/e2e/framework/metrics"
)
// There are certain operations we only want to run once per overall test invocation
// (such as deleting old namespaces, or verifying that all system pods are running).
// Because of the way Ginkgo runs tests in parallel, we must use SynchronizedBeforeSuite
// to ensure that these operations only run on the first parallel Ginkgo node.
//
// This function takes two parameters: one function which runs on only the first Ginkgo node,
// returning an opaque byte array, and then a second function which runs on all Ginkgo nodes,
// accepting the byte array.
var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
c, err := framework.LoadClientset()
if err != nil {
klog.Fatal("Error loading client: ", err)
}
// Delete any namespaces except those created by the system. This ensures no
// lingering resources are left over from a previous test run.
if framework.TestContext.CleanStart {
deleted, err := framework.DeleteNamespaces(c, nil, /* deleteFilter */
[]string{
metav1.NamespaceSystem,
metav1.NamespaceDefault,
metav1.NamespacePublic,
})
if err != nil {
framework.Failf("Error deleting orphaned namespaces: %v", err)
}
klog.Infof("Waiting for deletion of the following namespaces: %v", deleted)
if err := framework.WaitForNamespacesDeleted(c, deleted, framework.NamespaceCleanupTimeout); err != nil {
framework.Failf("Failed to delete orphaned namespaces %v: %v", deleted, err)
}
}
// In large clusters we may get to this point but still have a bunch
// of nodes without Routes created. Since this would make a node
// unschedulable, we need to wait until all of them are schedulable.
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
// Ensure all pods are running and ready before starting tests (otherwise,
// cluster infrastructure pods that are being pulled or started can block
// test pods from running, and tests that ensure all pods are running and
// ready will fail).
podStartupTimeout := framework.TestContext.SystemPodsStartupTimeout
// TODO: In large clusters, we often observe non-starting pods due to
// #41007. To avoid those pods blocking the whole test run (and just
// wasting it), we allow for some not-ready pods (with the
// number equal to the number of allowed not-ready nodes).
if err := framework.WaitForPodsRunningReady(c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), podStartupTimeout, map[string]string{}); err != nil {
framework.DumpAllNamespaceInfo(c, metav1.NamespaceSystem)
framework.LogFailedContainers(c, metav1.NamespaceSystem, framework.Logf)
// runKubernetesServiceTestContainer(c, metav1.NamespaceDefault)
framework.Failf("Error waiting for all pods to be running and ready: %v", err)
}
if err := framework.WaitForDaemonSets(c, metav1.NamespaceSystem, int32(framework.TestContext.AllowedNotReadyNodes), framework.TestContext.SystemDaemonsetStartupTimeout); err != nil {
framework.Logf("WARNING: Waiting for all daemonsets to be ready failed: %v", err)
}
// Log the version of the server and this client.
framework.Logf("e2e test version: %s", version.Get().GitVersion)
dc := c.DiscoveryClient
serverVersion, serverErr := dc.ServerVersion()
if serverErr != nil {
framework.Logf("Unexpected server error retrieving version: %v", serverErr)
}
if serverVersion != nil {
framework.Logf("kube-apiserver version: %s", serverVersion.GitVersion)
}
// Reference common test to make the import valid.
commontest.CurrentSuite = commontest.E2E
return nil
}, func(data []byte) {
framework.Logf("No cloud config support.")
})
// Similar to SynchronizedBeforeSuite, we want to run some operations only once (such as collecting cluster logs).
// Here, the order of functions is reversed; first, the function which runs everywhere,
// and then the function that only runs on the first Ginkgo node.
var _ = ginkgo.SynchronizedAfterSuite(func() {
// Run on all Ginkgo nodes
framework.Logf("Running AfterSuite actions on all node")
framework.RunCleanupActions()
}, func() {
// Run only on Ginkgo node 1
framework.Logf("Running AfterSuite actions on node 1")
if framework.TestContext.ReportDir != "" {
framework.CoreDump(framework.TestContext.ReportDir)
}
if framework.TestContext.GatherSuiteMetricsAfterTest {
if err := gatherTestSuiteMetrics(); err != nil {
framework.Logf("Error gathering metrics: %v", err)
}
}
})
func gatherTestSuiteMetrics() error {
framework.Logf("Gathering metrics")
c, err := framework.LoadClientset()
if err != nil {
return fmt.Errorf("error loading client: %v", err)
}
// Grab metrics for apiserver, scheduler, controller-manager, kubelet (for non-kubemark case) and cluster autoscaler (optionally).
grabber, err := metrics.NewMetricsGrabber(c, nil, !framework.ProviderIs("kubemark"), true, true, true, framework.TestContext.IncludeClusterAutoscalerMetrics)
if err != nil {
return fmt.Errorf("failed to create MetricsGrabber: %v", err)
}
received, err := grabber.Grab()
if err != nil {
return fmt.Errorf("failed to grab metrics: %v", err)
}
metricsForE2E := (*framework.MetricsForE2E)(&received)
metricsJSON := metricsForE2E.PrintJSON()
if framework.TestContext.ReportDir != "" {
filePath := path.Join(framework.TestContext.ReportDir, "MetricsForE2ESuite_"+time.Now().Format(time.RFC3339)+".json")
if err := ioutil.WriteFile(filePath, []byte(metricsJSON), 0644); err != nil {
return fmt.Errorf("error writing to %q: %v", filePath, err)
}
} else {
framework.Logf("\n\nTest Suite Metrics:\n%s\n\n", metricsJSON)
}
return nil
}
// RunE2ETests checks configuration parameters (specified through flags) and then runs
// E2E tests using the Ginkgo runner.
// If a "report directory" is specified, one or more JUnit test reports will be
// generated in this directory, and cluster logs will also be saved.
// This function is called on each Ginkgo node in parallel mode.
func RunE2ETests(t *testing.T) {
runtimeutils.ReallyCrash = true
logs.InitLogs()
defer logs.FlushLogs()
gomega.RegisterFailHandler(ginkgowrapper.Fail)
// Disable skipped tests unless they are explicitly requested.
if config.GinkgoConfig.FocusString == "" && config.GinkgoConfig.SkipString == "" {
config.GinkgoConfig.SkipString = `\[Flaky\]|\[Feature:.+\]`
}
// Run tests through the Ginkgo runner with output to console + JUnit for Jenkins
var r []ginkgo.Reporter
if framework.TestContext.ReportDir != "" {
// TODO: we should probably only be trying to create this directory once
// rather than once-per-Ginkgo-node.
if err := os.MkdirAll(framework.TestContext.ReportDir, 0755); err != nil {
klog.Errorf("Failed creating report directory: %v", err)
} else {
r = append(r, reporters.NewJUnitReporter(path.Join(framework.TestContext.ReportDir, "junit_02.xml")))
}
}
klog.Infof("Starting e2e run %q on Ginkgo node %d", framework.RunId, config.GinkgoConfig.ParallelNode)
ginkgo.RunSpecsWithDefaultAndCustomReporters(t, "Kubernetes e2e suite", r)
}

View File

@ -1,39 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package autoscaling
import (
"fmt"
"os"
"testing"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/viperconfig"
)
func init() {
framework.HandleFlags()
if err := viperconfig.ViperizeFlags("", "e2e"); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
framework.AfterReadingAllFlags(&framework.TestContext)
}
func TestVpaE2E(t *testing.T) {
RunE2ETests(t)
}

View File

@ -1,160 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package autoscaling
import (
"fmt"
apiv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1"
vpa_clientset "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned"
e2e_common "k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
const (
minimalCPULowerBound = "20m"
minimalCPUUpperBound = "100m"
minimalMemoryLowerBound = "20Mi"
minimalMemoryUpperBound = "300Mi"
// the initial values should be outside minimal bounds
initialCPU = "10m"
initialMemory = "10Mi"
)
var _ = FullVpaE2eDescribe("Pods under VPA", func() {
var (
rc *ResourceConsumer
vpaClientSet *vpa_clientset.Clientset
vpaCRD *vpa_types.VerticalPodAutoscaler
)
replicas := 3
ginkgo.AfterEach(func() {
rc.CleanUp()
})
// This schedules an AfterEach block that needs to run after the AfterEach above and
// a BeforeEach block that needs to run before the BeforeEach below - thus the order of these matters.
f := framework.NewDefaultFramework("vertical-pod-autoscaling")
ginkgo.BeforeEach(func() {
ns := f.Namespace.Name
ginkgo.By("Setting up a hamster deployment")
rc = NewDynamicResourceConsumer("hamster", ns, e2e_common.KindDeployment,
replicas,
1, /*initCPUTotal*/
10, /*initMemoryTotal*/
1, /*initCustomMetric*/
ParseQuantityOrDie(initialCPU), /*cpuRequest*/
ParseQuantityOrDie(initialMemory), /*memRequest*/
f.ClientSet,
f.InternalClientset)
ginkgo.By("Setting up a VPA CRD")
config, err := framework.LoadConfig()
gomega.Expect(err).NotTo(gomega.HaveOccurred())
vpaCRD = NewVPA(f, "hamster-vpa", &metav1.LabelSelector{
MatchLabels: map[string]string{
"name": "hamster",
},
})
vpaClientSet = vpa_clientset.NewForConfigOrDie(config)
vpaClient := vpaClientSet.AutoscalingV1beta1()
_, err = vpaClient.VerticalPodAutoscalers(ns).Create(vpaCRD)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
})
ginkgo.It("have cpu requests growing with usage", func() {
// initial CPU usage is low so a minimal recommendation is expected
err := waitForResourceRequestInRangeInPods(
f, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceCPU,
ParseQuantityOrDie(minimalCPULowerBound), ParseQuantityOrDie(minimalCPUUpperBound))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// consume more CPU to get a higher recommendation
rc.ConsumeCPU(600 * replicas)
err = waitForResourceRequestInRangeInPods(
f, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceCPU,
ParseQuantityOrDie("500m"), ParseQuantityOrDie("900m"))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
})
ginkgo.It("have memory requests growing with usage", func() {
// initial memory usage is low so a minimal recommendation is expected
err := waitForResourceRequestInRangeInPods(
f, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceMemory,
ParseQuantityOrDie(minimalMemoryLowerBound), ParseQuantityOrDie(minimalMemoryUpperBound))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// consume more memory to get a higher recommendation
// NOTE: large range given due to unpredictability of actual memory usage
rc.ConsumeMem(1024 * replicas)
err = waitForResourceRequestInRangeInPods(
f, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceMemory,
ParseQuantityOrDie("900Mi"), ParseQuantityOrDie("4000Mi"))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
})
})
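// waitForPodsMatch polls the pods selected by listOptions in the test namespace
// until the list is non-empty and every pod satisfies the matcher, or the poll times out.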
func waitForPodsMatch(f *framework.Framework, listOptions metav1.ListOptions, matcher func(pod apiv1.Pod) bool) error {
return wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
ns := f.Namespace.Name
c := f.ClientSet
podList, err := c.CoreV1().Pods(ns).List(listOptions)
if err != nil {
return false, err
}
if len(podList.Items) == 0 {
return false, nil
}
for _, pod := range podList.Items {
if !matcher(pod) {
return false, nil
}
}
return true, nil
})
}
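// waitForResourceRequestInRangeInPods waits until the first container of every
// matching pod has a request for resourceName strictly between lowerBound and upperBound.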
func waitForResourceRequestInRangeInPods(f *framework.Framework, listOptions metav1.ListOptions, resourceName apiv1.ResourceName, lowerBound, upperBound resource.Quantity) error {
err := waitForPodsMatch(f, listOptions,
func(pod apiv1.Pod) bool {
resourceRequest, found := pod.Spec.Containers[0].Resources.Requests[resourceName]
framework.Logf("Comparing %v request %v against range of (%v, %v)", resourceName, resourceRequest, lowerBound, upperBound)
return found && resourceRequest.MilliValue() > lowerBound.MilliValue() && resourceRequest.MilliValue() < upperBound.MilliValue()
})
if err != nil {
return fmt.Errorf("error waiting for %s request in range of (%v,%v) for pods: %+v", resourceName, lowerBound, upperBound, listOptions)
}
return nil
}

View File

@ -1,254 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package autoscaling
import (
"fmt"
"strings"
"time"
apiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1"
vpa_clientset "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/klog"
"k8s.io/kubernetes/test/e2e/framework"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
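// resourceRecommendation holds the target, lower bound and upper bound of a
// recommendation for a single resource.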
type resourceRecommendation struct {
target, lower, upper int64
}
func (r *resourceRecommendation) sub(other *resourceRecommendation) resourceRecommendation {
return resourceRecommendation{
target: r.target - other.target,
lower: r.lower - other.lower,
upper: r.upper - other.upper,
}
}
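// getResourceRecommendation extracts the target, lower and upper bound for
// resource r from a container recommendation, using zero for missing entries.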
func getResourceRecommendation(containerRecommendation *vpa_types.RecommendedContainerResources, r apiv1.ResourceName) resourceRecommendation {
getOrZero := func(resourceList apiv1.ResourceList) int64 {
value, found := resourceList[r]
if found {
return value.Value()
}
return 0
}
return resourceRecommendation{
target: getOrZero(containerRecommendation.Target),
lower: getOrZero(containerRecommendation.LowerBound),
upper: getOrZero(containerRecommendation.UpperBound),
}
}
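// recommendationChange captures whether a recommendation was missing before or
// after a VPA update, and the difference between the new and old recommendations.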
type recommendationChange struct {
oldMissing, newMissing bool
diff resourceRecommendation
}
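// observer implements cache.ResourceEventHandler and publishes a
// recommendationChange on its channel for every VPA update it sees.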
type observer struct {
channel chan recommendationChange
}
func (*observer) OnAdd(obj interface{}) {}
func (*observer) OnDelete(obj interface{}) {}
func (o *observer) OnUpdate(oldObj, newObj interface{}) {
get := func(vpa *vpa_types.VerticalPodAutoscaler) (result resourceRecommendation, found bool) {
if vpa.Status.Recommendation == nil || len(vpa.Status.Recommendation.ContainerRecommendations) == 0 {
found = false
result = resourceRecommendation{}
} else {
found = true
result = getResourceRecommendation(&vpa.Status.Recommendation.ContainerRecommendations[0], apiv1.ResourceCPU)
}
return
}
oldVPA, _ := oldObj.(*vpa_types.VerticalPodAutoscaler)
newVPA, _ := newObj.(*vpa_types.VerticalPodAutoscaler)
oldRecommendation, oldFound := get(oldVPA)
newRecommendation, newFound := get(newVPA)
result := recommendationChange{
oldMissing: !oldFound,
newMissing: !newFound,
diff: newRecommendation.sub(&oldRecommendation),
}
go func() { o.channel <- result }()
}
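// getVpaObserver starts an informer watching VPA objects in all namespaces and
// returns an observer reporting CPU recommendation changes of the first container.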
func getVpaObserver(vpaClientSet *vpa_clientset.Clientset) *observer {
vpaListWatch := cache.NewListWatchFromClient(vpaClientSet.AutoscalingV1beta1().RESTClient(), "verticalpodautoscalers", apiv1.NamespaceAll, fields.Everything())
vpaObserver := observer{channel: make(chan recommendationChange)}
_, controller := cache.NewIndexerInformer(vpaListWatch,
&vpa_types.VerticalPodAutoscaler{},
1*time.Hour,
&vpaObserver,
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
go controller.Run(make(chan struct{}))
if !cache.WaitForCacheSync(make(chan struct{}), controller.HasSynced) {
klog.Fatalf("Failed to sync VPA cache during initialization")
} else {
klog.Info("Initial VPA synced successfully")
}
return &vpaObserver
}
var _ = RecommenderE2eDescribe("Checkpoints", func() {
f := framework.NewDefaultFramework("vertical-pod-autoscaling")
ginkgo.It("with missing VPA objects are garbage collected", func() {
ns := f.Namespace.Name
config, err := framework.LoadConfig()
gomega.Expect(err).NotTo(gomega.HaveOccurred())
checkpoint := vpa_types.VerticalPodAutoscalerCheckpoint{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: ns,
},
Spec: vpa_types.VerticalPodAutoscalerCheckpointSpec{
VPAObjectName: "some-vpa",
},
}
vpaClientSet := vpa_clientset.NewForConfigOrDie(config)
_, err = vpaClientSet.AutoscalingV1beta1().VerticalPodAutoscalerCheckpoints(ns).Create(&checkpoint)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
time.Sleep(15 * time.Minute)
list, err := vpaClientSet.AutoscalingV1beta1().VerticalPodAutoscalerCheckpoints(ns).List(metav1.ListOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(list.Items).To(gomega.BeEmpty())
})
})
var _ = RecommenderE2eDescribe("VPA CRD object", func() {
f := framework.NewDefaultFramework("vertical-pod-autoscaling")
var (
vpaCRD *vpa_types.VerticalPodAutoscaler
vpaClientSet *vpa_clientset.Clientset
)
ginkgo.BeforeEach(func() {
ginkgo.By("Setting up a hamster deployment")
c := f.ClientSet
ns := f.Namespace.Name
cpuQuantity := ParseQuantityOrDie("100m")
memoryQuantity := ParseQuantityOrDie("100Mi")
d := NewHamsterDeploymentWithResources(f, cpuQuantity, memoryQuantity)
_, err := c.AppsV1().Deployments(ns).Create(d)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
err = framework.WaitForDeploymentComplete(c, d)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Setting up a VPA CRD")
config, err := framework.LoadConfig()
gomega.Expect(err).NotTo(gomega.HaveOccurred())
vpaCRD = NewVPA(f, "hamster-vpa", &metav1.LabelSelector{
MatchLabels: map[string]string{
"app": "hamster",
},
})
vpaClientSet = vpa_clientset.NewForConfigOrDie(config)
vpaClient := vpaClientSet.AutoscalingV1beta1()
_, err = vpaClient.VerticalPodAutoscalers(ns).Create(vpaCRD)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
})
ginkgo.It("serves recommendation and is marked deprecated", func() {
ginkgo.By("Waiting for recommendation to be filled")
_, err := WaitForRecommendationPresent(vpaClientSet, vpaCRD)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
_, err = WaitForConditionPresent(vpaClientSet, vpaCRD, "ConfigDeprecated")
gomega.Expect(err).NotTo(gomega.HaveOccurred())
})
ginkgo.It("doesn't drop lower/upper after recommender's restart", func() {
o := getVpaObserver(vpaClientSet)
ginkgo.By("Waiting for recommendation to be filled")
_, err := WaitForRecommendationPresent(vpaClientSet, vpaCRD)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Drain diffs")
out:
for {
select {
case recommendationDiff := <-o.channel:
fmt.Println("Dropping recommendation diff", recommendationDiff)
default:
break out
}
}
ginkgo.By("Deleting recommender")
gomega.Expect(deleteRecommender(f.ClientSet)).To(gomega.BeNil())
ginkgo.By("Accumulating diffs after restart")
time.Sleep(5 * time.Minute)
changeDetected := false
finish:
for {
select {
case recommendationDiff := <-o.channel:
fmt.Println("checking recommendation diff", recommendationDiff)
changeDetected = true
gomega.Expect(recommendationDiff.oldMissing).To(gomega.Equal(false))
gomega.Expect(recommendationDiff.newMissing).To(gomega.Equal(false))
gomega.Expect(recommendationDiff.diff.lower).Should(gomega.BeNumerically(">=", 0))
gomega.Expect(recommendationDiff.diff.upper).Should(gomega.BeNumerically("<=", 0))
default:
break finish
}
}
gomega.Expect(changeDetected).To(gomega.Equal(true))
})
})
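// deleteRecommender deletes the vpa-recommender pod in the kube-system
// namespace so that a fresh recommender instance is started.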
func deleteRecommender(c clientset.Interface) error {
namespace := "kube-system"
listOptions := metav1.ListOptions{}
podList, err := c.CoreV1().Pods(namespace).List(listOptions)
if err != nil {
fmt.Println("Could not list pods.", err)
return err
}
fmt.Print("Pods list items:", len(podList.Items))
for _, pod := range podList.Items {
if strings.HasPrefix(pod.Name, "vpa-recommender") {
fmt.Print("Deleting pod.", namespace, pod.Name)
err := c.CoreV1().Pods(namespace).Delete(pod.Name, &metav1.DeleteOptions{})
if err != nil {
return err
}
return nil
}
}
return fmt.Errorf("vpa recommender not found")
}

View File

@ -1,336 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package autoscaling
import (
"fmt"
"time"
appsv1 "k8s.io/api/apps/v1"
apiv1 "k8s.io/api/core/v1"
policyv1beta1 "k8s.io/api/policy/v1beta1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
var _ = UpdaterE2eDescribe("Updater", func() {
f := framework.NewDefaultFramework("vertical-pod-autoscaling")
ginkgo.It("evicts pods in a Deployment", func() {
testEvictsPods(f, "Deployment")
})
ginkgo.It("evicts pods in a Replication Controller", func() {
testEvictsPods(f, "ReplicationController")
})
ginkgo.It("evicts pods in a Job", func() {
testEvictsPods(f, "Job")
})
ginkgo.It("evicts pods in a ReplicaSet", func() {
testEvictsPods(f, "ReplicaSet")
})
ginkgo.It("evicts pods in a StatefulSet", func() {
testEvictsPods(f, "StatefulSet")
})
ginkgo.It("observes pod disruption budget", func() {
ginkgo.By("Setting up a hamster deployment")
c := f.ClientSet
ns := f.Namespace.Name
SetupHamsterDeployment(f, "10m", "10Mi", 10)
podList, err := GetHamsterPods(f)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
podSet := MakePodSet(podList)
ginkgo.By("Setting up prohibitive PDB for hamster deployment")
pdb := setupPDB(f, "hamster-pdb", 0 /* maxUnavailable */)
ginkgo.By("Setting up a VPA CRD")
SetupVPA(f, "25m", vpa_types.UpdateModeAuto)
ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String()))
CheckNoPodsEvicted(f, podSet)
ginkgo.By("Updating the PDB to allow for multiple pods to be evicted")
// We will check that 7 replicas are evicted in 3 minutes, which translates
// to 3 updater loops. This gives us relatively good confidence that updater
// evicts more than one pod in a loop if PDB allows it.
permissiveMaxUnavailable := 7
// Creating new PDB and removing old one, since PDBs are immutable at the moment
setupPDB(f, "hamster-pdb-2", permissiveMaxUnavailable)
err = c.PolicyV1beta1().PodDisruptionBudgets(ns).Delete(pdb.Name, &metav1.DeleteOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, sleep for %s", VpaEvictionTimeout.String()))
time.Sleep(VpaEvictionTimeout)
ginkgo.By("Checking enough pods were evicted.")
currentPodList, err := GetHamsterPods(f)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
evictedCount := GetEvictedPodsCount(MakePodSet(currentPodList), podSet)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(evictedCount >= permissiveMaxUnavailable).To(gomega.BeTrue())
})
ginkgo.It("observes container min in LimitRange", func() {
ginkgo.By("Setting up a hamster deployment")
d := NewHamsterDeploymentWithResourcesAndLimits(f,
ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/
ParseQuantityOrDie("300m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)
podList := startDeploymentPods(f, d)
ginkgo.By("Setting up a VPA CRD")
SetupVPA(f, "50m", vpa_types.UpdateModeAuto)
// Min CPU from the limit range is 100m and the limit-to-request ratio is 3. The minimum applies to both
// limit and request, so the resulting minimum is a 100m request with a 300m limit.
// Min memory limit is 0 and the ratio is 2, so the minimum memory request is 0.
InstallLimitRangeWithMin(f, "100m", "0", apiv1.LimitTypeContainer)
ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String()))
CheckNoPodsEvicted(f, MakePodSet(podList))
})
ginkgo.It("observes pod max in LimitRange", func() {
ginkgo.By("Setting up a hamster deployment")
d := NewHamsterDeploymentWithResourcesAndLimits(f,
ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/
ParseQuantityOrDie("300m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)
d.Spec.Template.Spec.Containers = append(d.Spec.Template.Spec.Containers, d.Spec.Template.Spec.Containers[0])
d.Spec.Template.Spec.Containers[1].Name = "hamster2"
podList := startDeploymentPods(f, d)
ginkgo.By("Setting up a VPA CRD")
SetupVPAForTwoHamsters(f, "200m", vpa_types.UpdateModeAuto)
// Max CPU limit is 600m per pod, 300m per container, and the limit-to-request ratio is 3, so the max request is 100m,
// while the recommendation is 200m.
// Max memory limit is 2T per pod, 1T per container, and the ratio is 2, so the max request is 0.5T.
InstallLimitRangeWithMax(f, "600m", "2T", apiv1.LimitTypePod)
ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String()))
CheckNoPodsEvicted(f, MakePodSet(podList))
})
ginkgo.It("observes pod min in LimitRange", func() {
ginkgo.By("Setting up a hamster deployment")
d := NewHamsterDeploymentWithResourcesAndLimits(f,
ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/
ParseQuantityOrDie("300m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)
d.Spec.Template.Spec.Containers = append(d.Spec.Template.Spec.Containers, d.Spec.Template.Spec.Containers[0])
d.Spec.Template.Spec.Containers[1].Name = "hamster2"
podList := startDeploymentPods(f, d)
ginkgo.By("Setting up a VPA CRD")
SetupVPAForTwoHamsters(f, "50m", vpa_types.UpdateModeAuto)
// Min CPU from the limit range is 200m per pod, 100m per container, and the limit-to-request ratio is 3. The minimum applies
// to both limit and request, so the resulting minimum is a 100m request with a 300m limit.
// Min memory limit is 0 and the ratio is 2, so the minimum memory request is 0.
InstallLimitRangeWithMin(f, "200m", "0", apiv1.LimitTypePod)
ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String()))
CheckNoPodsEvicted(f, MakePodSet(podList))
})
})
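// testEvictsPods sets up a hamster workload of the given controller kind
// together with a VPA in Auto mode and waits for the pods to be evicted.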
func testEvictsPods(f *framework.Framework, controllerKind string) {
ginkgo.By(fmt.Sprintf("Setting up a hamster %v", controllerKind))
setupHamsterController(f, controllerKind, "100m", "100Mi", defaultHamsterReplicas)
podList, err := GetHamsterPods(f)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Setting up a VPA CRD")
SetupVPA(f, "200m", vpa_types.UpdateModeAuto)
ginkgo.By("Waiting for pods to be evicted")
err = WaitForPodsEvicted(f, podList)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
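// setupHamsterController creates a hamster workload of the given kind with the
// requested resources and replica count, and returns its pods.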
func setupHamsterController(f *framework.Framework, controllerKind, cpu, memory string, replicas int32) *apiv1.PodList {
switch controllerKind {
case "Deployment":
SetupHamsterDeployment(f, cpu, memory, replicas)
case "ReplicationController":
setupHamsterReplicationController(f, cpu, memory, replicas)
case "Job":
setupHamsterJob(f, cpu, memory, replicas)
case "ReplicaSet":
setupHamsterRS(f, cpu, memory, replicas)
case "StatefulSet":
setupHamsterStateful(f, cpu, memory, replicas)
default:
framework.Failf("Unknown controller kind: %v", controllerKind)
return nil
}
pods, err := GetHamsterPods(f)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
return pods
}
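// setupHamsterReplicationController creates a hamster ReplicationController and
// waits until all of its pods are running.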
func setupHamsterReplicationController(f *framework.Framework, cpu, memory string, replicas int32) {
hamsterContainer := setupHamsterContainer(cpu, memory)
rc := framework.RcByNameContainer("hamster-rc", replicas, "k8s.gcr.io/ubuntu-slim:0.1",
hamsterLabels, hamsterContainer, nil)
rc.Namespace = f.Namespace.Name
err := testutils.CreateRCWithRetries(f.ClientSet, f.Namespace.Name, rc)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
err = waitForRCPodsRunning(f, rc)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
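// waitForRCPodsRunning polls until the number of running hamster pods equals
// the ReplicationController's desired replica count.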
func waitForRCPodsRunning(f *framework.Framework, rc *apiv1.ReplicationController) error {
return wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
podList, err := GetHamsterPods(f)
if err != nil {
framework.Logf("Error listing pods, retrying: %v", err)
return false, nil
}
podsRunning := int32(0)
for _, pod := range podList.Items {
if pod.Status.Phase == apiv1.PodRunning {
podsRunning += 1
}
}
return podsRunning == *rc.Spec.Replicas, nil
})
}
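// setupHamsterJob creates a non-terminating hamster Job with the given
// parallelism and waits for all of its pods to be running.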
func setupHamsterJob(f *framework.Framework, cpu, memory string, replicas int32) {
job := framework.NewTestJob("notTerminate", "hamster-job", apiv1.RestartPolicyOnFailure,
replicas, replicas, nil, 10)
job.Spec.Template.Spec.Containers[0] = setupHamsterContainer(cpu, memory)
for label, value := range hamsterLabels {
job.Spec.Template.Labels[label] = value
}
err := testutils.CreateJobWithRetries(f.ClientSet, f.Namespace.Name, job)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
err = framework.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, replicas)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
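// setupHamsterRS creates a hamster ReplicaSet and waits for it to become ready.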
func setupHamsterRS(f *framework.Framework, cpu, memory string, replicas int32) {
rs := framework.NewReplicaSet("hamster-rs", f.Namespace.Name, replicas,
hamsterLabels, "", "")
rs.Spec.Template.Spec.Containers[0] = setupHamsterContainer(cpu, memory)
err := createReplicaSetWithRetries(f.ClientSet, f.Namespace.Name, rs)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
err = framework.WaitForReadyReplicaSet(f.ClientSet, f.Namespace.Name, rs.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
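// setupHamsterStateful creates a hamster StatefulSet and waits for its pods to
// be running and ready.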
func setupHamsterStateful(f *framework.Framework, cpu, memory string, replicas int32) {
stateful := framework.NewStatefulSet("hamster-stateful", f.Namespace.Name,
"hamster-service", replicas, nil, nil, hamsterLabels)
stateful.Spec.Template.Spec.Containers[0] = setupHamsterContainer(cpu, memory)
err := createStatefulSetSetWithRetries(f.ClientSet, f.Namespace.Name, stateful)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
tester := framework.NewStatefulSetTester(f.ClientSet)
tester.WaitForRunningAndReady(*stateful.Spec.Replicas, stateful)
}
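// setupHamsterContainer returns a hamster container spec with the given CPU and
// memory requests that sleeps in an endless loop.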
func setupHamsterContainer(cpu, memory string) apiv1.Container {
cpuQuantity := ParseQuantityOrDie(cpu)
memoryQuantity := ParseQuantityOrDie(memory)
return apiv1.Container{
Name: "hamster",
Image: "k8s.gcr.io/ubuntu-slim:0.1",
Resources: apiv1.ResourceRequirements{
Requests: apiv1.ResourceList{
apiv1.ResourceCPU: cpuQuantity,
apiv1.ResourceMemory: memoryQuantity,
},
},
Command: []string{"/bin/sh"},
Args: []string{"-c", "while true; do sleep 10 ; done"},
}
}
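// setupPDB creates a PodDisruptionBudget selecting hamster pods with the given
// maxUnavailable value.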
func setupPDB(f *framework.Framework, name string, maxUnavailable int) *policyv1beta1.PodDisruptionBudget {
maxUnavailableIntstr := intstr.FromInt(maxUnavailable)
pdb := &policyv1beta1.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: policyv1beta1.PodDisruptionBudgetSpec{
MaxUnavailable: &maxUnavailableIntstr,
Selector: &metav1.LabelSelector{
MatchLabels: hamsterLabels,
},
},
}
_, err := f.ClientSet.PolicyV1beta1().PodDisruptionBudgets(f.Namespace.Name).Create(pdb)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
return pdb
}
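// getCurrentPodSetForDeployment returns a PodSet with the pods currently
// belonging to the given deployment.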
func getCurrentPodSetForDeployment(c clientset.Interface, d *appsv1.Deployment) PodSet {
podList, err := framework.GetPodsForDeployment(c, d)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
return MakePodSet(podList)
}
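// createReplicaSetWithRetries creates the ReplicaSet, retrying with exponential
// backoff on retryable API errors; an AlreadyExists error is treated as success.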
func createReplicaSetWithRetries(c clientset.Interface, namespace string, obj *appsv1.ReplicaSet) error {
if obj == nil {
return fmt.Errorf("object provided to create is empty")
}
createFunc := func() (bool, error) {
_, err := c.AppsV1().ReplicaSets(namespace).Create(obj)
if err == nil || apierrs.IsAlreadyExists(err) {
return true, nil
}
if testutils.IsRetryableAPIError(err) {
return false, nil
}
return false, fmt.Errorf("failed to create object with non-retriable error: %v", err)
}
return testutils.RetryWithExponentialBackOff(createFunc)
}
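// createStatefulSetSetWithRetries creates the StatefulSet, retrying with
// exponential backoff on retryable API errors; an AlreadyExists error is treated as success.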
func createStatefulSetSetWithRetries(c clientset.Interface, namespace string, obj *appsv1.StatefulSet) error {
if obj == nil {
return fmt.Errorf("object provided to create is empty")
}
createFunc := func() (bool, error) {
_, err := c.AppsV1().StatefulSets(namespace).Create(obj)
if err == nil || apierrs.IsAlreadyExists(err) {
return true, nil
}
if testutils.IsRetryableAPIError(err) {
return false, nil
}
return false, fmt.Errorf("failed to create object with non-retriable error: %v", err)
}
return testutils.RetryWithExponentialBackOff(createFunc)
}