APImock replaced with a real API in the Updater.

Slawomir Chylek 2018-01-19 14:33:26 +01:00
parent f136e1af8b
commit c9f5b0a41c
6 changed files with 124 additions and 114 deletions
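The Updater now builds its view of the world from the real VPA API objects: the pod selector is a structured metav1.LabelSelector carried in Spec, and the recommendation is read from Status rather than fetched from the mocked recommender. A minimal, hypothetical sketch of the object shape involved, using only types that appear in this diff (all field values are made up):

package main

import (
	"fmt"

	apiv1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1"
)

func main() {
	// A VPA object in the real API: the selector is a structured label selector
	// and the recommendation lives in Status.
	vpa := &vpa_types.VerticalPodAutoscaler{
		Spec: vpa_types.VerticalPodAutoscalerSpec{
			Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "test"}},
		},
		Status: vpa_types.VerticalPodAutoscalerStatus{
			Recommendation: vpa_types.RecommendedPodResources{
				ContainerRecommendations: []vpa_types.RecommendedContainerResources{{
					Name:   "container-1",
					Target: apiv1.ResourceList{apiv1.ResourceCPU: resource.MustParse("2")},
				}},
			},
		},
	}
	// The Updater reads the recommendation straight off the object.
	fmt.Println(vpa.Status.Recommendation.ContainerRecommendations[0].Target)
}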

View File

@ -18,13 +18,15 @@ package test
import (
"fmt"
"log"
"github.com/stretchr/testify/mock"
apiv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/autoscaler/vertical-pod-autoscaler/apimock"
vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1"
vpa_lister "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/listers/poc.autoscaling.k8s.io/v1alpha1"
v1 "k8s.io/client-go/listers/core/v1"
)
@ -88,52 +90,72 @@ func BuildTestContainer(containerName, cpu, mem string) apiv1.Container {
}
// BuildTestPolicy creates ResourcesPolicy with specified constraints
func BuildTestPolicy(containerName, minCpu, maxCpu, minMemory, maxMemory string) *apimock.ResourcesPolicy {
func BuildTestPolicy(containerName, minCpu, maxCpu, minMemory, maxMemory string) *vpa_types.PodResourcePolicy {
minCpuVal, _ := resource.ParseQuantity(minCpu)
maxCpuVal, _ := resource.ParseQuantity(maxCpu)
minMemVal, _ := resource.ParseQuantity(minMemory)
maxMemVal, _ := resource.ParseQuantity(maxMemory)
return &apimock.ResourcesPolicy{Containers: []apimock.ContainerPolicy{{
return &vpa_types.PodResourcePolicy{ContainerPolicies: []vpa_types.ContainerResourcePolicy{{
Name: containerName,
ResourcePolicy: map[apiv1.ResourceName]apimock.Policy{
apiv1.ResourceMemory: {
Min: minMemVal,
Max: maxMemVal},
apiv1.ResourceCPU: {
Min: minCpuVal,
Max: maxCpuVal}},
MinAllowed: apiv1.ResourceList{
apiv1.ResourceMemory: minMemVal,
apiv1.ResourceCPU: minCpuVal,
},
MaxAllowed: apiv1.ResourceList{
apiv1.ResourceMemory: maxMemVal,
apiv1.ResourceCPU: maxCpuVal,
},
},
}}
}
// BuildTestVerticalPodAutoscaler creates a VerticalPodAutoscaler with the specified policy constraints
func BuildTestVerticalPodAutoscaler(containerName, minCpu, maxCpu, minMemory, maxMemory string, selector string) *apimock.VerticalPodAutoscaler {
func BuildTestVerticalPodAutoscaler(containerName, targetCpu, minCpu, maxCpu, targetMemory, minMemory, maxMemory string, selector string) *vpa_types.VerticalPodAutoscaler {
resourcesPolicy := BuildTestPolicy(containerName, minCpu, maxCpu, minMemory, maxMemory)
return &apimock.VerticalPodAutoscaler{
Spec: apimock.Spec{
Target: apimock.Target{Selector: selector},
UpdatePolicy: apimock.UpdatePolicy{Mode: apimock.Mode{}},
ResourcesPolicy: *resourcesPolicy,
labelSelector, err := metav1.ParseToLabelSelector(selector)
if err != nil {
log.Fatal(err)
}
targetCpuVal, _ := resource.ParseQuantity(targetCpu)
targetMemoryVal, _ := resource.ParseQuantity(targetMemory)
return &vpa_types.VerticalPodAutoscaler{
Spec: vpa_types.VerticalPodAutoscalerSpec{
Selector: labelSelector,
UpdatePolicy: vpa_types.PodUpdatePolicy{},
ResourcePolicy: *resourcesPolicy,
},
Status: vpa_types.VerticalPodAutoscalerStatus{
Recommendation: vpa_types.RecommendedPodResources{
ContainerRecommendations: []vpa_types.RecommendedContainerResources{
{
Name: containerName,
Target: apiv1.ResourceList{
apiv1.ResourceMemory: targetMemoryVal,
apiv1.ResourceCPU: targetCpuVal,
},
},
},
},
},
}
}
// Recommendation creates a Recommendation with the specified container name and resources
func Recommendation(containerName, cpu, mem string) *apimock.Recommendation {
result := &apimock.Recommendation{Containers: []apimock.ContainerRecommendation{
func Recommendation(containerName, cpu, mem string) *vpa_types.RecommendedPodResources {
result := &vpa_types.RecommendedPodResources{ContainerRecommendations: []vpa_types.RecommendedContainerResources{
{Name: containerName,
Resources: make(map[apiv1.ResourceName]resource.Quantity, 0)}},
Target: make(map[apiv1.ResourceName]resource.Quantity, 0)}},
}
if len(cpu) > 0 {
cpuVal, _ := resource.ParseQuantity(cpu)
result.Containers[0].Resources[apiv1.ResourceCPU] = cpuVal
result.ContainerRecommendations[0].Target[apiv1.ResourceCPU] = cpuVal
}
if len(mem) > 0 {
memVal, _ := resource.ParseQuantity(mem)
result.Containers[0].Resources[apiv1.ResourceMemory] = memVal
result.ContainerRecommendations[0].Target[apiv1.ResourceMemory] = memVal
}
return result
@ -145,11 +167,11 @@ type RecommenderAPIMock struct {
}
// GetRecommendation is a mock implementation of RecommenderAPI.GetRecommendation
func (m *RecommenderAPIMock) GetRecommendation(spec *apiv1.PodSpec) (*apimock.Recommendation, error) {
func (m *RecommenderAPIMock) GetRecommendation(spec *apiv1.PodSpec) (*vpa_types.RecommendedPodResources, error) {
args := m.Called(spec)
var returnArg *apimock.Recommendation
var returnArg *vpa_types.RecommendedPodResources
if args.Get(0) != nil {
returnArg = args.Get(0).(*apimock.Recommendation)
returnArg = args.Get(0).(*vpa_types.RecommendedPodResources)
}
return returnArg, args.Error(1)
}
@ -160,11 +182,11 @@ type RecommenderMock struct {
}
// Get is a mock implementation of Recommender.Get
func (m *RecommenderMock) Get(spec *apiv1.PodSpec) (*apimock.Recommendation, error) {
func (m *RecommenderMock) Get(spec *apiv1.PodSpec) (*vpa_types.RecommendedPodResources, error) {
args := m.Called(spec)
var returnArg *apimock.Recommendation
var returnArg *vpa_types.RecommendedPodResources
if args.Get(0) != nil {
returnArg = args.Get(0).(*apimock.Recommendation)
returnArg = args.Get(0).(*vpa_types.RecommendedPodResources)
}
return returnArg, args.Error(1)
}
@ -217,11 +239,16 @@ type VerticalPodAutoscalerListerMock struct {
}
// List is a mock implementation of VerticalPodAutoscalerLister.List
func (m *VerticalPodAutoscalerListerMock) List() (ret []*apimock.VerticalPodAutoscaler, err error) {
func (m *VerticalPodAutoscalerListerMock) List(selector labels.Selector) (ret []*vpa_types.VerticalPodAutoscaler, err error) {
args := m.Called()
var returnArg []*apimock.VerticalPodAutoscaler
var returnArg []*vpa_types.VerticalPodAutoscaler
if args.Get(0) != nil {
returnArg = args.Get(0).([]*apimock.VerticalPodAutoscaler)
returnArg = args.Get(0).([]*vpa_types.VerticalPodAutoscaler)
}
return returnArg, args.Error(1)
}
// VerticalPodAutoscalers is not implemented for this mock
func (m *VerticalPodAutoscalerListerMock) VerticalPodAutoscalers(namespace string) vpa_lister.VerticalPodAutoscalerNamespaceLister {
return nil
}
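Taken together, the helpers in this file are now driven entirely by vpa_types. A hypothetical consumer-side sketch of how a test would use them (argument values are invented; the signatures are the ones defined above):

package main

import (
	"fmt"

	vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1"
	"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/test"
)

func main() {
	// Target CPU 2 / memory 200M, allowed CPU 1..3 and memory 100M..1G,
	// for pods matching "app=test" (all values hypothetical).
	vpaObj := test.BuildTestVerticalPodAutoscaler("container-1", "2", "1", "3", "200M", "100M", "1G", "app=test")

	// A standalone recommendation for the same container.
	rec := test.Recommendation("container-1", "2", "200M")
	fmt.Println(vpaObj.Spec.Selector, rec.ContainerRecommendations[0].Target)

	// The lister mock is wired the same way the updater test below wires it.
	vpaLister := &test.VerticalPodAutoscalerListerMock{}
	vpaLister.On("List").Return([]*vpa_types.VerticalPodAutoscaler{vpaObj}, nil)
}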

View File

@ -20,6 +20,7 @@ import (
"flag"
"github.com/golang/glog"
kube_flag "k8s.io/apiserver/pkg/util/flag"
vpa_clientset "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned"
kube_client "k8s.io/client-go/kubernetes"
kube_restclient "k8s.io/client-go/rest"
"time"
@ -45,8 +46,8 @@ func main() {
// TODO monitoring
kubeClient := createKubeClient()
updater := NewUpdater(kubeClient, *recommendationsCacheTtl, *minReplicas, *evictionToleranceFraction)
kubeClient, vpaClient := createKubeClients()
updater := NewUpdater(kubeClient, vpaClient, *recommendationsCacheTtl, *minReplicas, *evictionToleranceFraction)
for {
select {
case <-time.After(*updaterInterval):
@ -57,10 +58,10 @@ func main() {
}
}
func createKubeClient() kube_client.Interface {
func createKubeClients() (kube_client.Interface, *vpa_clientset.Clientset) {
config, err := kube_restclient.InClusterConfig()
if err != nil {
glog.Fatalf("Failed to build Kubernetes client : fail to create config: %v", err)
}
return kube_client.NewForConfigOrDie(config)
return kube_client.NewForConfigOrDie(config), vpa_clientset.NewForConfigOrDie(config)
}

View File

@ -20,7 +20,7 @@ import (
"math"
"sort"
"k8s.io/autoscaler/vertical-pod-autoscaler/apimock"
vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1"
apiv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
@ -39,8 +39,7 @@ const (
// i.e. pod with 10M current memory and recommendation 20M will have higher update priority
// than pod with 100M current memory and 150M recommendation (100% increase vs 50% increase)
type UpdatePriorityCalculator struct {
resourcesPolicy *apimock.ResourcesPolicy
cpuPolicy *apimock.Policy
resourcesPolicy *vpa_types.PodResourcePolicy
pods []podPriority
config *UpdateConfig
}
@ -55,7 +54,7 @@ type UpdateConfig struct {
// NewUpdatePriorityCalculator creates a new UpdatePriorityCalculator for the given resource policy and configuration.
// If the given policy is nil, there will be no policy restriction on update.
// If the given config is nil, default values are used.
func NewUpdatePriorityCalculator(policy *apimock.ResourcesPolicy, config *UpdateConfig) UpdatePriorityCalculator {
func NewUpdatePriorityCalculator(policy *vpa_types.PodResourcePolicy, config *UpdateConfig) UpdatePriorityCalculator {
if config == nil {
config = &UpdateConfig{MinChangePriority: defaultUpdateThreshod}
}
@ -63,7 +62,7 @@ func NewUpdatePriorityCalculator(policy *apimock.ResourcesPolicy, config *Update
}
// AddPod adds pod to the UpdatePriorityCalculator.
func (calc *UpdatePriorityCalculator) AddPod(pod *apiv1.Pod, recommendation *apimock.Recommendation) {
func (calc *UpdatePriorityCalculator) AddPod(pod *apiv1.Pod, recommendation *vpa_types.RecommendedPodResources) {
updatePriority := calc.getUpdatePriority(pod, recommendation)
if updatePriority < calc.config.MinChangePriority {
@ -87,7 +86,7 @@ func (calc *UpdatePriorityCalculator) GetSortedPods() []*apiv1.Pod {
return result
}
func (calc *UpdatePriorityCalculator) getUpdatePriority(pod *apiv1.Pod, recommendation *apimock.Recommendation) float64 {
func (calc *UpdatePriorityCalculator) getUpdatePriority(pod *apiv1.Pod, recommendation *vpa_types.RecommendedPodResources) float64 {
var priority float64
for _, podContainer := range pod.Spec.Containers {
@ -99,28 +98,28 @@ func (calc *UpdatePriorityCalculator) getUpdatePriority(pod *apiv1.Pod, recommen
containerPolicy := getContainerPolicy(podContainer.Name, calc.resourcesPolicy)
for resourceName, recommended := range cr.Resources {
var (
resourceRequested *resource.Quantity
resourcePolicy *apimock.Policy
)
for resourceName, recommended := range cr.Target {
var requested, min, max *resource.Quantity
if request, ok := podContainer.Resources.Requests[resourceName]; ok {
resourceRequested = &request
requested = &request
}
if containerPolicy != nil {
if policy, ok := (*containerPolicy)[resourceName]; ok {
resourcePolicy = &policy
if minAllowed, ok := containerPolicy.MinAllowed[resourceName]; ok {
min = &minAllowed
}
if maxAllowed, ok := containerPolicy.MaxAllowed[resourceName]; ok {
max = &maxAllowed
}
}
resourceDiff := getPercentageDiff(resourceRequested, resourcePolicy, &recommended)
resourceDiff := getPercentageDiff(requested, min, max, &recommended)
priority += math.Abs(resourceDiff)
}
}
return priority
}
func getPercentageDiff(request *resource.Quantity, policy *apimock.Policy, recommendation *resource.Quantity) float64 {
func getPercentageDiff(request, min, max, recommendation *resource.Quantity) float64 {
if request == nil {
// the resource requirement is not currently specified;
// any recommendation for this resource is treated as a 100% change
@ -130,35 +129,33 @@ func getPercentageDiff(request *resource.Quantity, policy *apimock.Policy, recom
return 0
}
recommended := recommendation.Value()
if policy != nil {
if !policy.Min.IsZero() && recommendation.Value() < policy.Min.Value() {
if min != nil && !min.IsZero() && recommendation.Value() < min.Value() {
glog.Warningf("recommendation outside of policy bounds : min value : %v recommended : %v",
policy.Min.Value(), recommended)
recommended = policy.Min.Value()
min.Value(), recommended)
recommended = min.Value()
}
if !policy.Max.IsZero() && recommendation.Value() > policy.Max.Value() {
if max != nil && !max.IsZero() && recommendation.Value() > max.Value() {
glog.Warningf("recommendation outside of policy bounds : max value : %v recommended : %v",
policy.Max.Value(), recommended)
recommended = policy.Max.Value()
}
max.Value(), recommended)
recommended = max.Value()
}
diff := recommended - request.Value()
return float64(diff) / float64(request.Value())
}
func getContainerPolicy(containerName string, policy *apimock.ResourcesPolicy) *map[apiv1.ResourceName]apimock.Policy {
func getContainerPolicy(containerName string, policy *vpa_types.PodResourcePolicy) *vpa_types.ContainerResourcePolicy {
if policy != nil {
for _, container := range policy.Containers {
for _, container := range policy.ContainerPolicies {
if containerName == container.Name {
return &container.ResourcePolicy
return &container
}
}
}
return nil
}
func getContainerRecommendation(containerName string, recommendation *apimock.Recommendation) *apimock.ContainerRecommendation {
for _, container := range recommendation.Containers {
func getContainerRecommendation(containerName string, recommendation *vpa_types.RecommendedPodResources) *vpa_types.RecommendedContainerResources {
for _, container := range recommendation.ContainerRecommendations {
if containerName == container.Name {
return &container
}
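The clamping in getPercentageDiff is easiest to follow with concrete numbers. A hypothetical in-package test sketch (the test name and values are invented; it assumes it lives in the priority package, where getPercentageDiff is visible):

package priority

import (
	"testing"

	"k8s.io/apimachinery/pkg/api/resource"
)

// A 200M recommendation against a 100M request, capped by a 150M MaxAllowed
// bound, comes out as a 50% change.
func TestGetPercentageDiffCapsAtMax(t *testing.T) {
	request := resource.MustParse("100M")
	recommended := resource.MustParse("200M")
	maxAllowed := resource.MustParse("150M")

	// recommendation > max, so it is clamped to 150M before comparing:
	// diff = (150e6 - 100e6) / 100e6 = 0.5
	if diff := getPercentageDiff(&request, nil, &maxAllowed, &recommended); diff != 0.5 {
		t.Errorf("expected 0.5, got %v", diff)
	}
}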

View File

@ -19,7 +19,7 @@ package priority
import (
"testing"
"k8s.io/autoscaler/vertical-pod-autoscaler/apimock"
vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1"
"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/test"
apiv1 "k8s.io/api/core/v1"
@ -78,10 +78,10 @@ func TestSortPriorityMultiContainers(t *testing.T) {
recommendation := test.Recommendation(containerName, "6", "20M")
cpuRec, _ := resource.ParseQuantity("4")
memRec, _ := resource.ParseQuantity("20M")
container2rec := apimock.ContainerRecommendation{
container2rec := vpa_types.RecommendedContainerResources{
Name: containerName2,
Resources: map[apiv1.ResourceName]resource.Quantity{apiv1.ResourceCPU: cpuRec, apiv1.ResourceMemory: memRec}}
recommendation.Containers = append(recommendation.Containers, container2rec)
Target: map[apiv1.ResourceName]resource.Quantity{apiv1.ResourceCPU: cpuRec, apiv1.ResourceMemory: memRec}}
recommendation.ContainerRecommendations = append(recommendation.ContainerRecommendations, container2rec)
calculator := NewUpdatePriorityCalculator(nil, nil)
calculator.AddPod(pod1, recommendation)

View File

@ -19,14 +19,16 @@ package main
import (
"time"
"k8s.io/autoscaler/vertical-pod-autoscaler/apimock"
recommender "k8s.io/autoscaler/vertical-pod-autoscaler/recommender_mock"
"k8s.io/autoscaler/vertical-pod-autoscaler/updater/eviction"
"k8s.io/autoscaler/vertical-pod-autoscaler/updater/priority"
apiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1"
vpa_clientset "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned"
vpa_lister "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/listers/poc.autoscaling.k8s.io/v1alpha1"
kube_client "k8s.io/client-go/kubernetes"
v1lister "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
@ -41,25 +43,23 @@ type Updater interface {
}
type updater struct {
vpaLister apimock.VerticalPodAutoscalerLister // wait for VPA api
vpaLister vpa_lister.VerticalPodAutoscalerLister
podLister v1lister.PodLister
recommender recommender.CachingRecommender
evictionFactrory eviction.PodsEvictionRestrictionFactory
}
// NewUpdater creates an Updater with the given configuration
func NewUpdater(kubeClient kube_client.Interface, cacheTTl time.Duration, minReplicasForEvicition int, evictionToleranceFraction float64) Updater {
func NewUpdater(kubeClient kube_client.Interface, vpaClient *vpa_clientset.Clientset, cacheTTl time.Duration, minReplicasForEvicition int, evictionToleranceFraction float64) Updater {
return &updater{
vpaLister: newVpaLister(kubeClient),
vpaLister: newVpaLister(vpaClient),
podLister: newPodLister(kubeClient),
recommender: recommender.NewCachingRecommender(cacheTTl, apimock.NewRecommenderAPI()),
evictionFactrory: eviction.NewPodsEvictionRestrictionFactory(kubeClient, minReplicasForEvicition, evictionToleranceFraction),
}
}
// RunOnce represents a single iteration in the main loop of the Updater
func (u *updater) RunOnce() {
vpaList, err := u.vpaLister.List()
vpaList, err := u.vpaLister.List(labels.Everything())
if err != nil {
glog.Fatalf("failed get VPA list: %v", err)
}
@ -70,8 +70,8 @@ func (u *updater) RunOnce() {
}
for _, vpa := range vpaList {
glog.V(2).Infof("processing VPA object targeting %v", vpa.Spec.Target.Selector)
selector, err := labels.Parse(vpa.Spec.Target.Selector)
glog.V(2).Infof("processing VPA object targeting %v", vpa.Spec.Selector)
selector, err := metav1.LabelSelectorAsSelector(vpa.Spec.Selector)
if err != nil {
glog.Errorf("error processing VPA object: failed to create pod selector: %v", err)
continue
@ -106,27 +106,12 @@ func (u *updater) RunOnce() {
}
// getPodsForUpdate returns the list of pods that should be updated, ordered by update priority
func (u *updater) getPodsForUpdate(pods []*apiv1.Pod, vpa *apimock.VerticalPodAutoscaler) []*apiv1.Pod {
priorityCalculator := priority.NewUpdatePriorityCalculator(&vpa.Spec.ResourcesPolicy, nil)
func (u *updater) getPodsForUpdate(pods []*apiv1.Pod, vpa *vpa_types.VerticalPodAutoscaler) []*apiv1.Pod {
priorityCalculator := priority.NewUpdatePriorityCalculator(&vpa.Spec.ResourcePolicy, nil)
recommendation := vpa.Status.Recommendation
for _, pod := range pods {
recommendation, err := u.recommender.Get(&pod.Spec)
if err != nil {
glog.Errorf("error while getting recommendation for pod %v: %v", pod.Name, err)
continue
}
if recommendation == nil {
if len(vpa.Status.Recommendation.Containers) == 0 {
glog.Warningf("no recommendation for pod: %v", pod.Name)
continue
}
glog.Warningf("fallback to default VPA recommendation for pod: %v", pod.Name)
recommendation = vpa.Status.Recommendation
}
priorityCalculator.AddPod(pod, recommendation)
priorityCalculator.AddPod(pod, &recommendation)
}
return priorityCalculator.GetSortedPods()
@ -152,8 +137,14 @@ func filterDeletedPods(pods []*apiv1.Pod) []*apiv1.Pod {
return result
}
func newVpaLister(kubeClient kube_client.Interface) apimock.VerticalPodAutoscalerLister {
return apimock.NewVpaLister(kubeClient)
func newVpaLister(vpaClient *vpa_clientset.Clientset) vpa_lister.VerticalPodAutoscalerLister {
vpaListWatch := cache.NewListWatchFromClient(vpaClient.Poc().RESTClient(), "vpa", apiv1.NamespaceAll, fields.Everything())
store := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
vpaLister := vpa_lister.NewVerticalPodAutoscalerLister(store)
vpaReflector := cache.NewReflector(vpaListWatch, &vpa_types.VerticalPodAutoscaler{}, store, time.Hour)
stopCh := make(chan struct{})
go vpaReflector.Run(stopCh)
return vpaLister
}
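For reference, the structured selector carried by the VPA object is matched against pod labels roughly as below; the matching step itself sits outside the hunks shown above, so this helper is a hypothetical sketch (it assumes the imports already present in this file):

// podMatchesVPA illustrates how the metav1.LabelSelector from vpa.Spec.Selector
// is converted to a labels.Selector and checked against a pod's labels.
func podMatchesVPA(pod *apiv1.Pod, vpa *vpa_types.VerticalPodAutoscaler) bool {
	selector, err := metav1.LabelSelectorAsSelector(vpa.Spec.Selector)
	if err != nil {
		glog.Errorf("error processing VPA object: failed to create pod selector: %v", err)
		return false
	}
	return selector.Matches(labels.Set(pod.Labels))
}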
func newPodLister(kubeClient kube_client.Interface) v1lister.PodLister {

View File

@ -17,11 +17,12 @@ limitations under the License.
package main
import (
"strconv"
"testing"
apiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/autoscaler/vertical-pod-autoscaler/apimock"
vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1"
"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/test"
"k8s.io/autoscaler/vertical-pod-autoscaler/updater/eviction"
"k8s.io/kubernetes/pkg/api/testapi"
@ -46,15 +47,11 @@ func TestRunOnce(t *testing.T) {
pods := make([]*apiv1.Pod, livePods)
eviction := &test.PodsEvictionRestrictionMock{}
recommender := &test.RecommenderMock{}
rec := test.Recommendation(containerName, "2", "200M")
for i := range pods {
pods[i] = test.BuildTestPod("test"+string(i), containerName, "1", "100M", &rc.ObjectMeta, &rc.TypeMeta)
pods[i] = test.BuildTestPod("test_"+strconv.Itoa(i), containerName, "1", "100M", &rc.ObjectMeta, &rc.TypeMeta)
pods[i].Spec.NodeSelector = labels
eviction.On("CanEvict", pods[i]).Return(true)
eviction.On("Evict", pods[i]).Return(nil)
recommender.On("Get", &pods[i].Spec).Return(rec, nil)
}
factory := &fakeEvictFactory{eviction}
@ -62,13 +59,12 @@ func TestRunOnce(t *testing.T) {
podLister := &test.PodListerMock{}
podLister.On("List").Return(pods, nil)
vpaObj := test.BuildTestVerticalPodAutoscaler(containerName, "1", "3", "100M", "1G", selector)
vpaLister.On("List").Return([]*apimock.VerticalPodAutoscaler{vpaObj}, nil).Once()
vpaObj := test.BuildTestVerticalPodAutoscaler(containerName, "2", "1", "3", "200M", "100M", "1G", selector)
vpaLister.On("List").Return([]*vpa_types.VerticalPodAutoscaler{vpaObj}, nil).Once()
updater := &updater{
vpaLister: vpaLister,
podLister: podLister,
recommender: recommender,
evictionFactrory: factory,
}
@ -77,7 +73,6 @@ func TestRunOnce(t *testing.T) {
}
func TestRunOnceNotingToProcess(t *testing.T) {
recommender := &test.RecommenderMock{}
eviction := &test.PodsEvictionRestrictionMock{}
factory := &fakeEvictFactory{eviction}
vpaLister := &test.VerticalPodAutoscalerListerMock{}
@ -88,7 +83,6 @@ func TestRunOnceNotingToProcess(t *testing.T) {
updater := &updater{
vpaLister: vpaLister,
podLister: podLister,
recommender: recommender,
evictionFactrory: factory,
}
updater.RunOnce()