Switched PodDisruptionBudget from policy/v1beta1 to policy/v1 in time for Kubernetes 1.25

mikelo 2022-06-24 19:13:03 +02:00
parent 6ce299ed36
commit c127763a45
13 changed files with 23 additions and 22 deletions
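
Background for the change: the policy/v1beta1 version of PodDisruptionBudget is removed in Kubernetes 1.25, so clients have to read PDBs through policy/v1. Below is a minimal sketch (not part of this commit; the kubeconfig loading and the printed fields are illustrative assumptions) of what that looks like with client-go:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Load a kubeconfig from the default location (illustrative assumption;
	// in-cluster config would work just as well).
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// PodDisruptionBudgets are read through the policy/v1 group; the
	// v1beta1 version is removed in Kubernetes 1.25.
	pdbs, err := client.PolicyV1().PodDisruptionBudgets(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, pdb := range pdbs.Items {
		fmt.Printf("%s/%s disruptionsAllowed=%d\n", pdb.Namespace, pdb.Name, pdb.Status.DisruptionsAllowed)
	}
}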

View File

@@ -23,7 +23,8 @@ import (
 "time"
 apiv1 "k8s.io/api/core/v1"
-policyv1 "k8s.io/api/policy/v1beta1"
+policyv1 "k8s.io/api/policy/v1"
+policyv1beta1 "k8s.io/api/policy/v1beta1"
 kube_errors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/klog/v2"
@@ -223,7 +224,7 @@ func evictPod(ctx *acontext.AutoscalingContext, podToEvict *apiv1.Pod, isDaemonS
 var lastError error
 for first := true; first || time.Now().Before(retryUntil); time.Sleep(waitBetweenRetries) {
 first = false
-eviction := &policyv1.Eviction{
+eviction := &policyv1beta1.Eviction{
 ObjectMeta: metav1.ObjectMeta{
 Namespace: podToEvict.Namespace,
 Name: podToEvict.Name,
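
In this file only the import aliases change: the policyv1 alias is repointed from policy/v1beta1 to policy/v1 for the PDB types, while evictions keep using the policy/v1beta1 Eviction object under the new policyv1beta1 alias. A rough sketch of that eviction pattern outside the autoscaler, assuming client-go 1.22+ (the evictOnce helper is illustrative, not the autoscaler's code):

package example

import (
	"context"

	apiv1 "k8s.io/api/core/v1"
	policyv1beta1 "k8s.io/api/policy/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// evictOnce issues a single eviction through the policy/v1beta1 Eviction
// subresource, mirroring the alias split in the hunk above.
func evictOnce(ctx context.Context, client kubernetes.Interface, pod *apiv1.Pod) error {
	eviction := &policyv1beta1.Eviction{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: pod.Namespace,
			Name:      pod.Name,
		},
	}
	// EvictV1 with a policyv1.Eviction would be the policy/v1 equivalent.
	return client.CoreV1().Pods(pod.Namespace).EvictV1beta1(ctx, eviction)
}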

View File

@@ -27,7 +27,7 @@ import (
 "github.com/stretchr/testify/assert"
 apiv1 "k8s.io/api/core/v1"
-policyv1 "k8s.io/api/policy/v1beta1"
+policyv1beta1 "k8s.io/api/policy/v1beta1"
 "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime"
@@ -151,7 +151,7 @@ func TestDaemonSetEvictionForEmptyNodes(t *testing.T) {
 if createAction == nil {
 return false, nil, nil
 }
-eviction := createAction.GetObject().(*policyv1.Eviction)
+eviction := createAction.GetObject().(*policyv1beta1.Eviction)
 if eviction == nil {
 return false, nil, nil
 }
@@ -224,7 +224,7 @@ func TestDrainNodeWithPods(t *testing.T) {
 if createAction == nil {
 return false, nil, nil
 }
-eviction := createAction.GetObject().(*policyv1.Eviction)
+eviction := createAction.GetObject().(*policyv1beta1.Eviction)
 if eviction == nil {
 return false, nil, nil
 }
@@ -279,7 +279,7 @@ func TestDrainNodeWithPodsWithRescheduled(t *testing.T) {
 if createAction == nil {
 return false, nil, nil
 }
-eviction := createAction.GetObject().(*policyv1.Eviction)
+eviction := createAction.GetObject().(*policyv1beta1.Eviction)
 if eviction == nil {
 return false, nil, nil
 }
@@ -329,7 +329,7 @@ func TestDrainNodeWithPodsWithRetries(t *testing.T) {
 if createAction == nil {
 return false, nil, nil
 }
-eviction := createAction.GetObject().(*policyv1.Eviction)
+eviction := createAction.GetObject().(*policyv1beta1.Eviction)
 if eviction == nil {
 return false, nil, nil
 }
@@ -387,7 +387,7 @@ func TestDrainNodeWithPodsDaemonSetEvictionFailure(t *testing.T) {
 if createAction == nil {
 return false, nil, nil
 }
-eviction := createAction.GetObject().(*policyv1.Eviction)
+eviction := createAction.GetObject().(*policyv1beta1.Eviction)
 if eviction == nil {
 return false, nil, nil
 }
@@ -438,7 +438,7 @@ func TestDrainNodeWithPodsEvictionFailure(t *testing.T) {
 if createAction == nil {
 return false, nil, nil
 }
-eviction := createAction.GetObject().(*policyv1.Eviction)
+eviction := createAction.GetObject().(*policyv1beta1.Eviction)
 if eviction == nil {
 return false, nil, nil
 }

View File

@@ -40,7 +40,7 @@ import (
 kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
 apiv1 "k8s.io/api/core/v1"
-policyv1 "k8s.io/api/policy/v1beta1"
+policyv1 "k8s.io/api/policy/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/sets"
 klog "k8s.io/klog/v2"

View File

@@ -27,7 +27,7 @@ import (
 "k8s.io/autoscaler/cluster-autoscaler/utils/errors"
 apiv1 "k8s.io/api/core/v1"
-policyv1 "k8s.io/api/policy/v1beta1"
+policyv1 "k8s.io/api/policy/v1"
 )
 // ScaleDownWrapper wraps legacy scaledown logic to satisfy scaledown.Planner &

View File

@@ -25,7 +25,7 @@ import (
 "k8s.io/autoscaler/cluster-autoscaler/utils/errors"
 apiv1 "k8s.io/api/core/v1"
-policyv1 "k8s.io/api/policy/v1beta1"
+policyv1 "k8s.io/api/policy/v1"
 )
 // Planner is responsible for selecting nodes that should be removed.

View File

@@ -45,7 +45,7 @@ import (
 appsv1 "k8s.io/api/apps/v1"
 apiv1 "k8s.io/api/core/v1"
-policyv1 "k8s.io/api/policy/v1beta1"
+policyv1 "k8s.io/api/policy/v1"
 "k8s.io/apimachinery/pkg/labels"
 "k8s.io/client-go/kubernetes/fake"
 v1appslister "k8s.io/client-go/listers/apps/v1"

View File

@@ -27,7 +27,7 @@ import (
 "k8s.io/autoscaler/cluster-autoscaler/utils/tpu"
 apiv1 "k8s.io/api/core/v1"
-policyv1 "k8s.io/api/policy/v1beta1"
+policyv1 "k8s.io/api/policy/v1"
 schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 klog "k8s.io/klog/v2"

View File

@@ -23,7 +23,7 @@ import (
 appsv1 "k8s.io/api/apps/v1"
 apiv1 "k8s.io/api/core/v1"
-policyv1 "k8s.io/api/policy/v1beta1"
+policyv1 "k8s.io/api/policy/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/autoscaler/cluster-autoscaler/utils/drain"
 kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"

View File

@@ -21,7 +21,7 @@ import (
 "time"
 apiv1 "k8s.io/api/core/v1"
-policyv1 "k8s.io/api/policy/v1beta1"
+policyv1 "k8s.io/api/policy/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/labels"
 "k8s.io/autoscaler/cluster-autoscaler/utils/drain"

View File

@@ -21,7 +21,7 @@ import (
 "time"
 apiv1 "k8s.io/api/core/v1"
-policyv1 "k8s.io/api/policy/v1beta1"
+policyv1 "k8s.io/api/policy/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/intstr"
 "k8s.io/autoscaler/cluster-autoscaler/utils/drain"

View File

@@ -21,7 +21,7 @@ import (
 "time"
 apiv1 "k8s.io/api/core/v1"
-policyv1 "k8s.io/api/policy/v1beta1"
+policyv1 "k8s.io/api/policy/v1"
 apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/labels"

View File

@@ -23,7 +23,7 @@ import (
 appsv1 "k8s.io/api/apps/v1"
 batchv1 "k8s.io/api/batch/v1"
 apiv1 "k8s.io/api/core/v1"
-policyv1 "k8s.io/api/policy/v1beta1"
+policyv1 "k8s.io/api/policy/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
 . "k8s.io/autoscaler/cluster-autoscaler/utils/test"

View File

@@ -22,14 +22,14 @@ import (
 appsv1 "k8s.io/api/apps/v1"
 batchv1 "k8s.io/api/batch/v1"
 apiv1 "k8s.io/api/core/v1"
-policyv1 "k8s.io/api/policy/v1beta1"
+policyv1 "k8s.io/api/policy/v1"
 "k8s.io/apimachinery/pkg/fields"
 "k8s.io/apimachinery/pkg/labels"
 client "k8s.io/client-go/kubernetes"
 v1appslister "k8s.io/client-go/listers/apps/v1"
 v1batchlister "k8s.io/client-go/listers/batch/v1"
 v1lister "k8s.io/client-go/listers/core/v1"
-v1policylister "k8s.io/client-go/listers/policy/v1beta1"
+v1policylister "k8s.io/client-go/listers/policy/v1"
 "k8s.io/client-go/tools/cache"
 podv1 "k8s.io/kubernetes/pkg/api/v1/pod"
 )
@@ -305,7 +305,7 @@ func (lister *PodDisruptionBudgetListerImpl) List() ([]*policyv1.PodDisruptionBu
 // NewPodDisruptionBudgetLister builds a pod disruption budget lister.
 func NewPodDisruptionBudgetLister(kubeClient client.Interface, stopchannel <-chan struct{}) PodDisruptionBudgetLister {
-listWatcher := cache.NewListWatchFromClient(kubeClient.PolicyV1beta1().RESTClient(), "poddisruptionbudgets", apiv1.NamespaceAll, fields.Everything())
+listWatcher := cache.NewListWatchFromClient(kubeClient.PolicyV1().RESTClient(), "poddisruptionbudgets", apiv1.NamespaceAll, fields.Everything())
 store, reflector := cache.NewNamespaceKeyedIndexerAndReflector(listWatcher, &policyv1.PodDisruptionBudget{}, time.Hour)
 pdbLister := v1policylister.NewPodDisruptionBudgetLister(store)
 go reflector.Run(stopchannel)
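
The hunk above wires the policy/v1 lister by hand with a list-watch and a reflector. For comparison, here is a hedged sketch of the same lister built through client-go's shared informer factory (an alternative wiring, not what this commit does; the newPDBLister helper name is illustrative):

package example

import (
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	v1policylister "k8s.io/client-go/listers/policy/v1"
)

// newPDBLister builds a policy/v1 PodDisruptionBudget lister via the shared
// informer factory; functionally similar to the reflector wiring above.
func newPDBLister(client kubernetes.Interface, stopCh <-chan struct{}) v1policylister.PodDisruptionBudgetLister {
	factory := informers.NewSharedInformerFactory(client, time.Hour)
	lister := factory.Policy().V1().PodDisruptionBudgets().Lister()
	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)
	return lister
}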