diff --git a/cluster-autoscaler/processors/provreq/injector.go b/cluster-autoscaler/processors/provreq/injector.go index 9ac7855a58..c628fedf12 100644 --- a/cluster-autoscaler/processors/provreq/injector.go +++ b/cluster-autoscaler/processors/provreq/injector.go @@ -22,7 +22,7 @@ import ( apiv1 "k8s.io/api/core/v1" apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1beta1" + "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1" "k8s.io/autoscaler/cluster-autoscaler/context" "k8s.io/autoscaler/cluster-autoscaler/processors/pods" "k8s.io/autoscaler/cluster-autoscaler/provisioningrequest" @@ -59,11 +59,11 @@ func (p *ProvisioningRequestPodsInjector) Process( continue } conditions := pr.Status.Conditions - if apimeta.IsStatusConditionTrue(conditions, v1beta1.Failed) || apimeta.IsStatusConditionTrue(conditions, v1beta1.Provisioned) { + if apimeta.IsStatusConditionTrue(conditions, v1.Failed) || apimeta.IsStatusConditionTrue(conditions, v1.Provisioned) { continue } - provisioned := apimeta.FindStatusCondition(conditions, v1beta1.Provisioned) + provisioned := apimeta.FindStatusCondition(conditions, v1.Provisioned) //TODO(yaroslava): support exponential backoff // Inject pods if ProvReq wasn't scaled up before or it has Provisioned == False condition more than defaultRetryTime @@ -79,13 +79,13 @@ func (p *ProvisioningRequestPodsInjector) Process( provreqpods, err := provreqpods.PodsForProvisioningRequest(pr) if err != nil { klog.Errorf("Failed to get pods for ProvisioningRequest %v", pr.Name) - provreqconditions.AddOrUpdateCondition(pr, v1beta1.Failed, metav1.ConditionTrue, provreqconditions.FailedToCreatePodsReason, err.Error(), metav1.NewTime(p.clock.Now())) + provreqconditions.AddOrUpdateCondition(pr, v1.Failed, metav1.ConditionTrue, provreqconditions.FailedToCreatePodsReason, err.Error(), metav1.NewTime(p.clock.Now())) if _, err := p.client.UpdateProvisioningRequest(pr.ProvisioningRequest); err != nil { klog.Errorf("failed add Failed condition to ProvReq %s/%s, err: %v", pr.Namespace, pr.Name, err) } continue } - provreqconditions.AddOrUpdateCondition(pr, v1beta1.Accepted, metav1.ConditionTrue, provreqconditions.AcceptedReason, provreqconditions.AcceptedMsg, metav1.NewTime(p.clock.Now())) + provreqconditions.AddOrUpdateCondition(pr, v1.Accepted, metav1.ConditionTrue, provreqconditions.AcceptedReason, provreqconditions.AcceptedMsg, metav1.NewTime(p.clock.Now())) if _, err := p.client.UpdateProvisioningRequest(pr.ProvisioningRequest); err != nil { klog.Errorf("failed add Accepted condition to ProvReq %s/%s, err: %v", pr.Namespace, pr.Name, err) continue diff --git a/cluster-autoscaler/processors/provreq/injector_test.go b/cluster-autoscaler/processors/provreq/injector_test.go index 625b0a7be5..804aaf782c 100644 --- a/cluster-autoscaler/processors/provreq/injector_test.go +++ b/cluster-autoscaler/processors/provreq/injector_test.go @@ -21,10 +21,10 @@ import ( "testing" "time" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1beta1" + "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1" "k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/provreqclient" 
"k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/provreqwrapper" clock "k8s.io/utils/clock/testing" @@ -36,46 +36,46 @@ func TestProvisioningRequestPodsInjector(t *testing.T) { hourAgo := now.Add(-1 * time.Hour) accepted := metav1.Condition{ - Type: v1beta1.Accepted, + Type: v1.Accepted, Status: metav1.ConditionTrue, LastTransitionTime: metav1.NewTime(minAgo), } failed := metav1.Condition{ - Type: v1beta1.Failed, + Type: v1.Failed, Status: metav1.ConditionTrue, LastTransitionTime: metav1.NewTime(hourAgo), } provisioned := metav1.Condition{ - Type: v1beta1.Provisioned, + Type: v1.Provisioned, Status: metav1.ConditionTrue, LastTransitionTime: metav1.NewTime(hourAgo), } notProvisioned := metav1.Condition{ - Type: v1beta1.Provisioned, + Type: v1.Provisioned, Status: metav1.ConditionFalse, LastTransitionTime: metav1.NewTime(hourAgo), } unknownProvisioned := metav1.Condition{ - Type: v1beta1.Provisioned, + Type: v1.Provisioned, Status: metav1.ConditionUnknown, LastTransitionTime: metav1.NewTime(hourAgo), } notProvisionedRecently := metav1.Condition{ - Type: v1beta1.Provisioned, + Type: v1.Provisioned, Status: metav1.ConditionFalse, LastTransitionTime: metav1.NewTime(minAgo), } podsA := 10 - newProvReqA := testProvisioningRequestWithCondition("new", podsA, v1beta1.ProvisioningClassCheckCapacity) - newAcceptedProvReqA := testProvisioningRequestWithCondition("new-accepted", podsA, v1beta1.ProvisioningClassCheckCapacity, accepted) + newProvReqA := testProvisioningRequestWithCondition("new", podsA, v1.ProvisioningClassCheckCapacity) + newAcceptedProvReqA := testProvisioningRequestWithCondition("new-accepted", podsA, v1.ProvisioningClassCheckCapacity, accepted) podsB := 20 - notProvisionedAcceptedProvReqB := testProvisioningRequestWithCondition("provisioned-false-B", podsB, v1beta1.ProvisioningClassBestEffortAtomicScaleUp, notProvisioned, accepted) - provisionedAcceptedProvReqB := testProvisioningRequestWithCondition("provisioned-and-accepted", podsB, v1beta1.ProvisioningClassBestEffortAtomicScaleUp, provisioned, accepted) - failedProvReq := testProvisioningRequestWithCondition("failed", podsA, v1beta1.ProvisioningClassBestEffortAtomicScaleUp, failed) - notProvisionedRecentlyProvReqB := testProvisioningRequestWithCondition("provisioned-false-recently-B", podsB, v1beta1.ProvisioningClassBestEffortAtomicScaleUp, notProvisionedRecently) - unknownProvisionedProvReqB := testProvisioningRequestWithCondition("provisioned-unknown-B", podsB, v1beta1.ProvisioningClassBestEffortAtomicScaleUp, unknownProvisioned) + notProvisionedAcceptedProvReqB := testProvisioningRequestWithCondition("provisioned-false-B", podsB, v1.ProvisioningClassBestEffortAtomicScaleUp, notProvisioned, accepted) + provisionedAcceptedProvReqB := testProvisioningRequestWithCondition("provisioned-and-accepted", podsB, v1.ProvisioningClassBestEffortAtomicScaleUp, provisioned, accepted) + failedProvReq := testProvisioningRequestWithCondition("failed", podsA, v1.ProvisioningClassBestEffortAtomicScaleUp, failed) + notProvisionedRecentlyProvReqB := testProvisioningRequestWithCondition("provisioned-false-recently-B", podsB, v1.ProvisioningClassBestEffortAtomicScaleUp, notProvisionedRecently) + unknownProvisionedProvReqB := testProvisioningRequestWithCondition("provisioned-unknown-B", podsB, v1.ProvisioningClassBestEffortAtomicScaleUp, unknownProvisioned) unknownClass := testProvisioningRequestWithCondition("new-accepted", podsA, "unknown-class", accepted) testCases := []struct { @@ -118,7 +118,7 @@ func TestProvisioningRequestPodsInjector(t 
*testing.T) { for _, tc := range testCases { client := provreqclient.NewFakeProvisioningRequestClient(context.Background(), t, tc.provReqs...) injector := ProvisioningRequestPodsInjector{client, clock.NewFakePassiveClock(now)} - getUnscheduledPods, err := injector.Process(nil, []*v1.Pod{}) + getUnscheduledPods, err := injector.Process(nil, []*corev1.Pod{}) if err != nil { t.Errorf("%s failed: injector.Process return error %v", tc.name, err) } @@ -129,7 +129,7 @@ func TestProvisioningRequestPodsInjector(t *testing.T) { continue } pr, _ := client.ProvisioningRequestNoCache("ns", tc.wantUpdatedConditionName) - accepted := apimeta.FindStatusCondition(pr.Status.Conditions, v1beta1.Accepted) + accepted := apimeta.FindStatusCondition(pr.Status.Conditions, v1.Accepted) if accepted == nil || accepted.LastTransitionTime != metav1.NewTime(now) { t.Errorf("%s: injector.Process hasn't update accepted condition for ProvisioningRequest %s", tc.name, tc.wantUpdatedConditionName) } diff --git a/cluster-autoscaler/processors/provreq/pods_filter.go b/cluster-autoscaler/processors/provreq/pods_filter.go index fb61ce25f3..a2d0d4a3d6 100644 --- a/cluster-autoscaler/processors/provreq/pods_filter.go +++ b/cluster-autoscaler/processors/provreq/pods_filter.go @@ -21,8 +21,8 @@ import ( "time" apiv1 "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" - "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1beta1" + corev1 "k8s.io/api/core/v1" + "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1" "k8s.io/autoscaler/cluster-autoscaler/context" "k8s.io/autoscaler/cluster-autoscaler/processors/pods" provreqpods "k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/pods" @@ -96,11 +96,11 @@ func NewProvisioningRequestPodsFilter(e EventManager) pods.PodListProcessor { return &ProvisioningRequestPodsFilter{e} } -func provisioningRequestName(pod *v1.Pod) (string, bool) { +func provisioningRequestName(pod *corev1.Pod) (string, bool) { if pod == nil || pod.Annotations == nil { return "", false } - provReqName, found := pod.Annotations[v1beta1.ProvisioningRequestPodAnnotationKey] + provReqName, found := pod.Annotations[v1.ProvisioningRequestPodAnnotationKey] if !found { provReqName, found = pod.Annotations[provreqpods.DeprecatedProvisioningRequestPodAnnotationKey] } diff --git a/cluster-autoscaler/processors/provreq/pods_filter_test.go b/cluster-autoscaler/processors/provreq/pods_filter_test.go index 80977fcdd3..da041fb5ed 100644 --- a/cluster-autoscaler/processors/provreq/pods_filter_test.go +++ b/cluster-autoscaler/processors/provreq/pods_filter_test.go @@ -23,8 +23,8 @@ import ( "github.com/stretchr/testify/assert" apiv1 "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" - "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1beta1" + corev1 "k8s.io/api/core/v1" + "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1" "k8s.io/autoscaler/cluster-autoscaler/context" "k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/pods" . 
"k8s.io/autoscaler/cluster-autoscaler/utils/test" @@ -33,7 +33,7 @@ import ( func TestProvisioningRequestPodsFilter(t *testing.T) { prPod1 := BuildTestPod("pr-pod-1", 500, 10) - prPod1.Annotations[v1beta1.ProvisioningRequestPodAnnotationKey] = "pr-class" + prPod1.Annotations[v1.ProvisioningRequestPodAnnotationKey] = "pr-class" prPod2 := BuildTestPod("pr-pod-2", 500, 10) prPod2.Annotations[pods.DeprecatedProvisioningRequestPodAnnotationKey] = "pr-class-2" @@ -46,24 +46,24 @@ func TestProvisioningRequestPodsFilter(t *testing.T) { expectedUnscheduledPods []*apiv1.Pod }{ "ProvisioningRequest consumer is filtered out": { - unschedulableCandidates: []*v1.Pod{prPod1, pod1}, - expectedUnscheduledPods: []*v1.Pod{pod1}, + unschedulableCandidates: []*corev1.Pod{prPod1, pod1}, + expectedUnscheduledPods: []*corev1.Pod{pod1}, }, "Different ProvisioningRequest consumers are filtered out": { - unschedulableCandidates: []*v1.Pod{prPod1, prPod2, pod1}, - expectedUnscheduledPods: []*v1.Pod{pod1}, + unschedulableCandidates: []*corev1.Pod{prPod1, prPod2, pod1}, + expectedUnscheduledPods: []*corev1.Pod{pod1}, }, "No pod is filtered": { - unschedulableCandidates: []*v1.Pod{pod1, pod2}, - expectedUnscheduledPods: []*v1.Pod{pod1, pod2}, + unschedulableCandidates: []*corev1.Pod{pod1, pod2}, + expectedUnscheduledPods: []*corev1.Pod{pod1, pod2}, }, "Empty unschedulable pods list": { - unschedulableCandidates: []*v1.Pod{}, - expectedUnscheduledPods: []*v1.Pod{}, + unschedulableCandidates: []*corev1.Pod{}, + expectedUnscheduledPods: []*corev1.Pod{}, }, "All ProvisioningRequest consumers are filtered out": { - unschedulableCandidates: []*v1.Pod{prPod1, prPod2}, - expectedUnscheduledPods: []*v1.Pod{}, + unschedulableCandidates: []*corev1.Pod{prPod1, prPod2}, + expectedUnscheduledPods: []*corev1.Pod{}, }, } for _, test := range testCases { @@ -89,11 +89,11 @@ func TestEventManager(t *testing.T) { prFilter := NewProvisioningRequestPodsFilter(eventManager) eventRecorder := record.NewFakeRecorder(10) ctx := &context.AutoscalingContext{AutoscalingKubeClients: context.AutoscalingKubeClients{Recorder: eventRecorder}} - unscheduledPods := []*v1.Pod{BuildTestPod("pod", 500, 10)} + unscheduledPods := []*corev1.Pod{BuildTestPod("pod", 500, 10)} for i := 0; i < 10; i++ { prPod := BuildTestPod(fmt.Sprintf("pr-pod-%d", i), 10, 10) - prPod.Annotations[v1beta1.ProvisioningRequestPodAnnotationKey] = "pr-class" + prPod.Annotations[v1.ProvisioningRequestPodAnnotationKey] = "pr-class" unscheduledPods = append(unscheduledPods, prPod) } got, err := prFilter.Process(ctx, unscheduledPods) diff --git a/cluster-autoscaler/processors/provreq/processor.go b/cluster-autoscaler/processors/provreq/processor.go index 3708366d2d..a08af27f8a 100644 --- a/cluster-autoscaler/processors/provreq/processor.go +++ b/cluster-autoscaler/processors/provreq/processor.go @@ -23,7 +23,7 @@ import ( apiv1 "k8s.io/api/core/v1" apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1beta1" + "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1" "k8s.io/autoscaler/cluster-autoscaler/context" "k8s.io/autoscaler/cluster-autoscaler/provisioningrequest" "k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/conditions" @@ -87,10 +87,10 @@ func (p *provReqProcessor) refresh(provReqs []*provreqwrapper.ProvisioningReques continue } conditions := provReq.Status.Conditions - if apimeta.IsStatusConditionTrue(conditions, 
v1beta1.BookingExpired) || apimeta.IsStatusConditionTrue(conditions, v1beta1.Failed) { + if apimeta.IsStatusConditionTrue(conditions, v1.BookingExpired) || apimeta.IsStatusConditionTrue(conditions, v1.Failed) { continue } - provisioned := apimeta.FindStatusCondition(conditions, v1beta1.Provisioned) + provisioned := apimeta.FindStatusCondition(conditions, v1.Provisioned) if provisioned != nil && provisioned.Status == metav1.ConditionTrue { if provisioned.LastTransitionTime.Add(defaultReservationTime).Before(p.now()) { expiredProvReq = append(expiredProvReq, provReq) @@ -103,7 +103,7 @@ func (p *provReqProcessor) refresh(provReqs []*provreqwrapper.ProvisioningReques } } for _, provReq := range expiredProvReq { - conditions.AddOrUpdateCondition(provReq, v1beta1.BookingExpired, metav1.ConditionTrue, conditions.CapacityReservationTimeExpiredReason, conditions.CapacityReservationTimeExpiredMsg, metav1.NewTime(p.now())) + conditions.AddOrUpdateCondition(provReq, v1.BookingExpired, metav1.ConditionTrue, conditions.CapacityReservationTimeExpiredReason, conditions.CapacityReservationTimeExpiredMsg, metav1.NewTime(p.now())) _, updErr := p.client.UpdateProvisioningRequest(provReq.ProvisioningRequest) if updErr != nil { klog.Errorf("failed to add BookingExpired condition to ProvReq %s/%s, err: %v", provReq.Namespace, provReq.Name, updErr) @@ -111,7 +111,7 @@ func (p *provReqProcessor) refresh(provReqs []*provreqwrapper.ProvisioningReques } } for _, provReq := range failedProvReq { - conditions.AddOrUpdateCondition(provReq, v1beta1.Failed, metav1.ConditionTrue, conditions.ExpiredReason, conditions.ExpiredMsg, metav1.NewTime(p.now())) + conditions.AddOrUpdateCondition(provReq, v1.Failed, metav1.ConditionTrue, conditions.ExpiredReason, conditions.ExpiredMsg, metav1.NewTime(p.now())) _, updErr := p.client.UpdateProvisioningRequest(provReq.ProvisioningRequest) if updErr != nil { klog.Errorf("failed to add Failed condition to ProvReq %s/%s, err: %v", provReq.Namespace, provReq.Name, updErr) @@ -150,7 +150,7 @@ func (p *provReqProcessor) bookCapacity(ctx *context.AutoscalingContext) error { // ClusterAutoscaler was able to create pods before, so we shouldn't have error here. // If there is an error, mark PR as invalid, because we won't be able to book capacity // for it anyway. - conditions.AddOrUpdateCondition(provReq, v1beta1.Failed, metav1.ConditionTrue, conditions.FailedToBookCapacityReason, fmt.Sprintf("Couldn't create pods, err: %v", err), metav1.Now()) + conditions.AddOrUpdateCondition(provReq, v1.Failed, metav1.ConditionTrue, conditions.FailedToBookCapacityReason, fmt.Sprintf("Couldn't create pods, err: %v", err), metav1.Now()) if _, err := p.client.UpdateProvisioningRequest(provReq.ProvisioningRequest); err != nil { klog.Errorf("failed to add Accepted condition to ProvReq %s/%s, err: %v", provReq.Namespace, provReq.Name, err) } diff --git a/cluster-autoscaler/processors/provreq/processor_test.go b/cluster-autoscaler/processors/provreq/processor_test.go index 2a592fd6ee..e2eb8cae9e 100644 --- a/cluster-autoscaler/processors/provreq/processor_test.go +++ b/cluster-autoscaler/processors/provreq/processor_test.go @@ -26,7 +26,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/scheduler/framework" - "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1beta1" + "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1" "k8s.io/autoscaler/cluster-autoscaler/config" . 
"k8s.io/autoscaler/cluster-autoscaler/core/test" "k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/conditions" @@ -56,7 +56,7 @@ func TestRefresh(t *testing.T) { creationTime: weekAgo, wantConditions: []metav1.Condition{ { - Type: v1beta1.Failed, + Type: v1.Failed, Status: metav1.ConditionTrue, LastTransitionTime: metav1.NewTime(now), Reason: conditions.ExpiredReason, @@ -69,7 +69,7 @@ func TestRefresh(t *testing.T) { creationTime: weekAgo, conditions: []metav1.Condition{ { - Type: v1beta1.Provisioned, + Type: v1.Provisioned, Status: metav1.ConditionFalse, LastTransitionTime: metav1.NewTime(dayAgo), Reason: conditions.ExpiredReason, @@ -78,14 +78,14 @@ func TestRefresh(t *testing.T) { }, wantConditions: []metav1.Condition{ { - Type: v1beta1.Provisioned, + Type: v1.Provisioned, Status: metav1.ConditionFalse, LastTransitionTime: metav1.NewTime(dayAgo), Reason: conditions.ExpiredReason, Message: conditions.ExpiredMsg, }, { - Type: v1beta1.Failed, + Type: v1.Failed, Status: metav1.ConditionTrue, LastTransitionTime: metav1.NewTime(now), Reason: conditions.ExpiredReason, @@ -98,7 +98,7 @@ func TestRefresh(t *testing.T) { creationTime: dayAgo, conditions: []metav1.Condition{ { - Type: v1beta1.Provisioned, + Type: v1.Provisioned, Status: metav1.ConditionTrue, LastTransitionTime: metav1.NewTime(dayAgo), Reason: conditions.ExpiredReason, @@ -107,14 +107,14 @@ func TestRefresh(t *testing.T) { }, wantConditions: []metav1.Condition{ { - Type: v1beta1.Provisioned, + Type: v1.Provisioned, Status: metav1.ConditionTrue, LastTransitionTime: metav1.NewTime(dayAgo), Reason: conditions.ExpiredReason, Message: conditions.ExpiredMsg, }, { - Type: v1beta1.BookingExpired, + Type: v1.BookingExpired, Status: metav1.ConditionTrue, LastTransitionTime: metav1.NewTime(now), Reason: conditions.CapacityReservationTimeExpiredReason, @@ -127,7 +127,7 @@ func TestRefresh(t *testing.T) { creationTime: dayAgo, conditions: []metav1.Condition{ { - Type: v1beta1.Failed, + Type: v1.Failed, Status: metav1.ConditionTrue, LastTransitionTime: metav1.NewTime(dayAgo), Reason: "Failed", @@ -136,7 +136,7 @@ func TestRefresh(t *testing.T) { }, wantConditions: []metav1.Condition{ { - Type: v1beta1.Failed, + Type: v1.Failed, Status: metav1.ConditionTrue, LastTransitionTime: metav1.NewTime(dayAgo), Reason: "Failed", @@ -149,17 +149,17 @@ func TestRefresh(t *testing.T) { pr := provreqclient.ProvisioningRequestWrapperForTesting("namespace", "name-1") pr.Status.Conditions = test.conditions pr.CreationTimestamp = metav1.NewTime(test.creationTime) - pr.Spec.ProvisioningClassName = v1beta1.ProvisioningClassCheckCapacity + pr.Spec.ProvisioningClassName = v1.ProvisioningClassCheckCapacity additionalPr := provreqclient.ProvisioningRequestWrapperForTesting("namespace", "additional") additionalPr.CreationTimestamp = metav1.NewTime(weekAgo) - additionalPr.Spec.ProvisioningClassName = v1beta1.ProvisioningClassCheckCapacity + additionalPr.Spec.ProvisioningClassName = v1.ProvisioningClassCheckCapacity processor := provReqProcessor{func() time.Time { return now }, 1, provreqclient.NewFakeProvisioningRequestClient(nil, t, pr, additionalPr), nil} processor.refresh([]*provreqwrapper.ProvisioningRequest{pr, additionalPr}) assert.ElementsMatch(t, test.wantConditions, pr.Status.Conditions) if len(test.conditions) == len(test.wantConditions) { assert.ElementsMatch(t, []metav1.Condition{ { - Type: v1beta1.Failed, + Type: v1.Failed, Status: metav1.ConditionTrue, LastTransitionTime: metav1.NewTime(now), Reason: conditions.ExpiredReason, @@ -190,37 +190,37 @@ 
func TestBookCapacity(t *testing.T) { }{ { name: "ProvReq is new, check-capacity class", - provReq: provreqwrapper.BuildTestProvisioningRequest("ns", "pr", "2", "100m", "", 10, false, time.Now(), v1beta1.ProvisioningClassCheckCapacity), + provReq: provreqwrapper.BuildTestProvisioningRequest("ns", "pr", "2", "100m", "", 10, false, time.Now(), v1.ProvisioningClassCheckCapacity), capacityIsBooked: false, }, { name: "ProvReq is Failed, best-effort-atomic class", - conditions: []string{v1beta1.Failed}, - provReq: provreqwrapper.BuildTestProvisioningRequest("ns", "pr", "2", "100m", "", 10, false, time.Now(), v1beta1.ProvisioningClassBestEffortAtomicScaleUp), + conditions: []string{v1.Failed}, + provReq: provreqwrapper.BuildTestProvisioningRequest("ns", "pr", "2", "100m", "", 10, false, time.Now(), v1.ProvisioningClassBestEffortAtomicScaleUp), capacityIsBooked: false, }, { name: "ProvReq is Provisioned, unknown class", - conditions: []string{v1beta1.Provisioned}, + conditions: []string{v1.Provisioned}, provReq: provreqwrapper.BuildTestProvisioningRequest("ns", "pr", "2", "100m", "", 10, false, time.Now(), "unknown"), capacityIsBooked: false, }, { name: "ProvReq is Provisioned, capacity should be booked, check-capacity class", - conditions: []string{v1beta1.Provisioned}, - provReq: provreqwrapper.BuildTestProvisioningRequest("ns", "pr", "2", "100m", "", 10, false, time.Now(), v1beta1.ProvisioningClassCheckCapacity), + conditions: []string{v1.Provisioned}, + provReq: provreqwrapper.BuildTestProvisioningRequest("ns", "pr", "2", "100m", "", 10, false, time.Now(), v1.ProvisioningClassCheckCapacity), capacityIsBooked: true, }, { name: "ProvReq is Provisioned, capacity should be booked, best-effort-atomic class", - conditions: []string{v1beta1.Provisioned}, - provReq: provreqwrapper.BuildTestProvisioningRequest("ns", "pr", "2", "100m", "", 10, false, time.Now(), v1beta1.ProvisioningClassBestEffortAtomicScaleUp), + conditions: []string{v1.Provisioned}, + provReq: provreqwrapper.BuildTestProvisioningRequest("ns", "pr", "2", "100m", "", 10, false, time.Now(), v1.ProvisioningClassBestEffortAtomicScaleUp), capacityIsBooked: true, }, { name: "ProvReq has BookingExpired, capacity should not be booked, best-effort-atomic class", - conditions: []string{v1beta1.Provisioned, v1beta1.BookingExpired}, - provReq: provreqwrapper.BuildTestProvisioningRequest("ns", "pr", "2", "100m", "", 10, false, time.Now(), v1beta1.ProvisioningClassBestEffortAtomicScaleUp), + conditions: []string{v1.Provisioned, v1.BookingExpired}, + provReq: provreqwrapper.BuildTestProvisioningRequest("ns", "pr", "2", "100m", "", 10, false, time.Now(), v1.ProvisioningClassBestEffortAtomicScaleUp), capacityIsBooked: false, }, } diff --git a/cluster-autoscaler/provisioningrequest/besteffortatomic/provisioning_class.go b/cluster-autoscaler/provisioningrequest/besteffortatomic/provisioning_class.go index 4adf8471d4..122312b622 100644 --- a/cluster-autoscaler/provisioningrequest/besteffortatomic/provisioning_class.go +++ b/cluster-autoscaler/provisioningrequest/besteffortatomic/provisioning_class.go @@ -23,7 +23,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/klog/v2" - "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1beta1" + "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1" "k8s.io/autoscaler/cluster-autoscaler/clusterstate" "k8s.io/autoscaler/cluster-autoscaler/context" "k8s.io/autoscaler/cluster-autoscaler/core/scaleup" @@ -82,7 +82,7 @@ func (o 
*bestEffortAtomicProvClass) Provision( return &status.ScaleUpStatus{Result: status.ScaleUpNotTried}, nil } prs := provreqclient.ProvisioningRequestsForPods(o.client, unschedulablePods) - prs = provreqclient.FilterOutProvisioningClass(prs, v1beta1.ProvisioningClassBestEffortAtomicScaleUp) + prs = provreqclient.FilterOutProvisioningClass(prs, v1.ProvisioningClassBestEffortAtomicScaleUp) if len(prs) == 0 { return &status.ScaleUpStatus{Result: status.ScaleUpNotTried}, nil } @@ -95,7 +95,7 @@ func (o *bestEffortAtomicProvClass) Provision( // For provisioning requests, unschedulablePods are actually all injected pods. Some may even be schedulable! actuallyUnschedulablePods, err := o.filterOutSchedulable(unschedulablePods) if err != nil { - conditions.AddOrUpdateCondition(pr, v1beta1.Provisioned, metav1.ConditionFalse, conditions.FailedToCheckCapacityReason, conditions.FailedToCheckCapacityMsg, metav1.Now()) + conditions.AddOrUpdateCondition(pr, v1.Provisioned, metav1.ConditionFalse, conditions.FailedToCheckCapacityReason, conditions.FailedToCheckCapacityMsg, metav1.Now()) if _, updateErr := o.client.UpdateProvisioningRequest(pr.ProvisioningRequest); updateErr != nil { klog.Errorf("failed to add Provisioned=false condition to ProvReq %s/%s, err: %v", pr.Namespace, pr.Name, updateErr) } @@ -104,7 +104,7 @@ func (o *bestEffortAtomicProvClass) Provision( if len(actuallyUnschedulablePods) == 0 { // Nothing to do here - everything fits without scale-up. - conditions.AddOrUpdateCondition(pr, v1beta1.Provisioned, metav1.ConditionTrue, conditions.CapacityIsFoundReason, conditions.CapacityIsFoundMsg, metav1.Now()) + conditions.AddOrUpdateCondition(pr, v1.Provisioned, metav1.ConditionTrue, conditions.CapacityIsFoundReason, conditions.CapacityIsFoundMsg, metav1.Now()) if _, updateErr := o.client.UpdateProvisioningRequest(pr.ProvisioningRequest); updateErr != nil { klog.Errorf("failed to add Provisioned=true condition to ProvReq %s/%s, err: %v", pr.Namespace, pr.Name, updateErr) return status.UpdateScaleUpError(&status.ScaleUpStatus{}, errors.NewAutoscalerError(errors.InternalError, "capacity available, but failed to admit workload: %s", updateErr.Error())) @@ -115,7 +115,7 @@ func (o *bestEffortAtomicProvClass) Provision( st, err := o.scaleUpOrchestrator.ScaleUp(actuallyUnschedulablePods, nodes, daemonSets, nodeInfos, true) if err == nil && st.Result == status.ScaleUpSuccessful { // Happy path - all is well. - conditions.AddOrUpdateCondition(pr, v1beta1.Provisioned, metav1.ConditionTrue, conditions.CapacityIsProvisionedReason, conditions.CapacityIsProvisionedMsg, metav1.Now()) + conditions.AddOrUpdateCondition(pr, v1.Provisioned, metav1.ConditionTrue, conditions.CapacityIsProvisionedReason, conditions.CapacityIsProvisionedMsg, metav1.Now()) if _, updateErr := o.client.UpdateProvisioningRequest(pr.ProvisioningRequest); updateErr != nil { klog.Errorf("failed to add Provisioned=true condition to ProvReq %s/%s, err: %v", pr.Namespace, pr.Name, updateErr) return st, errors.NewAutoscalerError(errors.InternalError, "scale up requested, but failed to admit workload: %s", updateErr.Error()) @@ -124,7 +124,7 @@ func (o *bestEffortAtomicProvClass) Provision( } // We are not happy with the results. 
- conditions.AddOrUpdateCondition(pr, v1beta1.Provisioned, metav1.ConditionFalse, conditions.CapacityIsNotFoundReason, "Capacity is not found, CA will try to find it later.", metav1.Now()) + conditions.AddOrUpdateCondition(pr, v1.Provisioned, metav1.ConditionFalse, conditions.CapacityIsNotFoundReason, "Capacity is not found, CA will try to find it later.", metav1.Now()) if _, updateErr := o.client.UpdateProvisioningRequest(pr.ProvisioningRequest); updateErr != nil { klog.Errorf("failed to add Provisioned=false condition to ProvReq %s/%s, err: %v", pr.Namespace, pr.Name, updateErr) } diff --git a/cluster-autoscaler/provisioningrequest/checkcapacity/provisioningclass.go b/cluster-autoscaler/provisioningrequest/checkcapacity/provisioningclass.go index c87f73bb73..bd86c0190f 100644 --- a/cluster-autoscaler/provisioningrequest/checkcapacity/provisioningclass.go +++ b/cluster-autoscaler/provisioningrequest/checkcapacity/provisioningclass.go @@ -22,7 +22,7 @@ import ( appsv1 "k8s.io/api/apps/v1" apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1beta1" + "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1" "k8s.io/autoscaler/cluster-autoscaler/clusterstate" "k8s.io/autoscaler/cluster-autoscaler/context" "k8s.io/autoscaler/cluster-autoscaler/estimator" @@ -75,7 +75,7 @@ func (o *checkCapacityProvClass) Provision( } prs := provreqclient.ProvisioningRequestsForPods(o.client, unschedulablePods) - prs = provreqclient.FilterOutProvisioningClass(prs, v1beta1.ProvisioningClassCheckCapacity) + prs = provreqclient.FilterOutProvisioningClass(prs, v1.ProvisioningClassCheckCapacity) if len(prs) == 0 { return &status.ScaleUpStatus{Result: status.ScaleUpNotTried}, nil } @@ -99,10 +99,10 @@ func (o *checkCapacityProvClass) checkcapacity(unschedulablePods []*apiv1.Pod, p capacityAvailable = true st, _, err := o.injector.TrySchedulePods(o.context.ClusterSnapshot, unschedulablePods, scheduling.ScheduleAnywhere, true) if len(st) < len(unschedulablePods) || err != nil { - conditions.AddOrUpdateCondition(provReq, v1beta1.Provisioned, metav1.ConditionFalse, conditions.CapacityIsNotFoundReason, "Capacity is not found, CA will try to find it later.", metav1.Now()) + conditions.AddOrUpdateCondition(provReq, v1.Provisioned, metav1.ConditionFalse, conditions.CapacityIsNotFoundReason, "Capacity is not found, CA will try to find it later.", metav1.Now()) capacityAvailable = false } else { - conditions.AddOrUpdateCondition(provReq, v1beta1.Provisioned, metav1.ConditionTrue, conditions.CapacityIsFoundReason, conditions.CapacityIsFoundMsg, metav1.Now()) + conditions.AddOrUpdateCondition(provReq, v1.Provisioned, metav1.ConditionTrue, conditions.CapacityIsFoundReason, conditions.CapacityIsFoundMsg, metav1.Now()) } _, updErr := o.client.UpdateProvisioningRequest(provReq.ProvisioningRequest) if updErr != nil { diff --git a/cluster-autoscaler/provisioningrequest/conditions/condition_test.go b/cluster-autoscaler/provisioningrequest/conditions/condition_test.go index 86e1c5e4a6..081b6b78d8 100644 --- a/cluster-autoscaler/provisioningrequest/conditions/condition_test.go +++ b/cluster-autoscaler/provisioningrequest/conditions/condition_test.go @@ -19,8 +19,8 @@ package conditions import ( "testing" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + 
"k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1" "k8s.io/autoscaler/cluster-autoscaler/provisioningrequest" "k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/provreqwrapper" ) @@ -28,33 +28,33 @@ import ( func TestBookCapacity(t *testing.T) { tests := []struct { name string - prConditions []v1.Condition + prConditions []metav1.Condition want bool }{ { name: "BookingExpired", - prConditions: []v1.Condition{ + prConditions: []metav1.Condition{ { - Type: v1beta1.Provisioned, - Status: v1.ConditionTrue, + Type: v1.Provisioned, + Status: metav1.ConditionTrue, }, { - Type: v1beta1.BookingExpired, - Status: v1.ConditionTrue, + Type: v1.BookingExpired, + Status: metav1.ConditionTrue, }, }, want: false, }, { name: "Failed", - prConditions: []v1.Condition{ + prConditions: []metav1.Condition{ { - Type: v1beta1.Provisioned, - Status: v1.ConditionTrue, + Type: v1.Provisioned, + Status: metav1.ConditionTrue, }, { - Type: v1beta1.Failed, - Status: v1.ConditionTrue, + Type: v1.Failed, + Status: metav1.ConditionTrue, }, }, want: false, @@ -65,24 +65,24 @@ func TestBookCapacity(t *testing.T) { }, { name: "Capacity found and provisioned", - prConditions: []v1.Condition{ + prConditions: []metav1.Condition{ { - Type: v1beta1.Provisioned, - Status: v1.ConditionTrue, + Type: v1.Provisioned, + Status: metav1.ConditionTrue, }, { - Type: v1beta1.Provisioned, - Status: v1.ConditionTrue, + Type: v1.Provisioned, + Status: metav1.ConditionTrue, }, }, want: true, }, { name: "Capacity is not found", - prConditions: []v1.Condition{ + prConditions: []metav1.Condition{ { - Type: v1beta1.Provisioned, - Status: v1.ConditionFalse, + Type: v1.Provisioned, + Status: metav1.ConditionFalse, }, }, want: false, @@ -92,11 +92,11 @@ func TestBookCapacity(t *testing.T) { t.Run(test.name, func(t *testing.T) { for class := range provisioningrequest.SupportedProvisioningClasses { pr := provreqwrapper.NewProvisioningRequest( - &v1beta1.ProvisioningRequest{ - Spec: v1beta1.ProvisioningRequestSpec{ + &v1.ProvisioningRequest{ + Spec: v1.ProvisioningRequestSpec{ ProvisioningClassName: class, }, - Status: v1beta1.ProvisioningRequestStatus{ + Status: v1.ProvisioningRequestStatus{ Conditions: test.prConditions, }, }, nil) @@ -112,138 +112,138 @@ func TestBookCapacity(t *testing.T) { func TestSetCondition(t *testing.T) { tests := []struct { name string - oldConditions []v1.Condition + oldConditions []metav1.Condition newType string - newStatus v1.ConditionStatus - want []v1.Condition + newStatus metav1.ConditionStatus + want []metav1.Condition }{ { name: "Accepted added, empty conditions before", - newType: v1beta1.Accepted, - newStatus: v1.ConditionTrue, - want: []v1.Condition{ + newType: v1.Accepted, + newStatus: metav1.ConditionTrue, + want: []metav1.Condition{ { - Type: v1beta1.Accepted, - Status: v1.ConditionTrue, + Type: v1.Accepted, + Status: metav1.ConditionTrue, }, }, }, { name: "Provisioned added, empty conditions before", - newType: v1beta1.Provisioned, - newStatus: v1.ConditionTrue, - want: []v1.Condition{ + newType: v1.Provisioned, + newStatus: metav1.ConditionTrue, + want: []metav1.Condition{ { - Type: v1beta1.Provisioned, - Status: v1.ConditionTrue, + Type: v1.Provisioned, + Status: metav1.ConditionTrue, }, }, }, { name: "Provisioned updated", - oldConditions: []v1.Condition{ + oldConditions: []metav1.Condition{ { - Type: v1beta1.Provisioned, - Status: v1.ConditionFalse, + Type: v1.Provisioned, + Status: metav1.ConditionFalse, }, }, - newType: v1beta1.Provisioned, - newStatus: 
v1.ConditionTrue, - want: []v1.Condition{ + newType: v1.Provisioned, + newStatus: metav1.ConditionTrue, + want: []metav1.Condition{ { - Type: v1beta1.Provisioned, - Status: v1.ConditionTrue, + Type: v1.Provisioned, + Status: metav1.ConditionTrue, }, }, }, { name: "Failed added, non-empty conditions before", - oldConditions: []v1.Condition{ + oldConditions: []metav1.Condition{ { - Type: v1beta1.Provisioned, - Status: v1.ConditionTrue, + Type: v1.Provisioned, + Status: metav1.ConditionTrue, }, }, - newType: v1beta1.Failed, - newStatus: v1.ConditionTrue, - want: []v1.Condition{ + newType: v1.Failed, + newStatus: metav1.ConditionTrue, + want: []metav1.Condition{ { - Type: v1beta1.Provisioned, - Status: v1.ConditionTrue, + Type: v1.Provisioned, + Status: metav1.ConditionTrue, }, { - Type: v1beta1.Failed, - Status: v1.ConditionTrue, + Type: v1.Failed, + Status: metav1.ConditionTrue, }, }, }, { name: "Unknown condition status, conditions are updated", - oldConditions: []v1.Condition{ + oldConditions: []metav1.Condition{ { - Type: v1beta1.Provisioned, - Status: v1.ConditionTrue, + Type: v1.Provisioned, + Status: metav1.ConditionTrue, }, }, - newType: v1beta1.Failed, - newStatus: v1.ConditionUnknown, - want: []v1.Condition{ + newType: v1.Failed, + newStatus: metav1.ConditionUnknown, + want: []metav1.Condition{ { - Type: v1beta1.Provisioned, - Status: v1.ConditionTrue, + Type: v1.Provisioned, + Status: metav1.ConditionTrue, }, { - Type: v1beta1.Failed, - Status: v1.ConditionUnknown, + Type: v1.Failed, + Status: metav1.ConditionUnknown, }, }, }, { name: "Unknown condition type, conditions are not updated", - oldConditions: []v1.Condition{ + oldConditions: []metav1.Condition{ { - Type: v1beta1.Provisioned, - Status: v1.ConditionTrue, + Type: v1.Provisioned, + Status: metav1.ConditionTrue, }, }, newType: "Unknown", - newStatus: v1.ConditionTrue, - want: []v1.Condition{ + newStatus: metav1.ConditionTrue, + want: []metav1.Condition{ { - Type: v1beta1.Provisioned, - Status: v1.ConditionTrue, + Type: v1.Provisioned, + Status: metav1.ConditionTrue, }, }, }, { name: "BookingExpired, empty conditions before", - newType: v1beta1.BookingExpired, - newStatus: v1.ConditionFalse, - want: []v1.Condition{ + newType: v1.BookingExpired, + newStatus: metav1.ConditionFalse, + want: []metav1.Condition{ { - Type: v1beta1.BookingExpired, - Status: v1.ConditionFalse, + Type: v1.BookingExpired, + Status: metav1.ConditionFalse, }, }, }, { name: "Capacity found with unknown condition before", - oldConditions: []v1.Condition{ + oldConditions: []metav1.Condition{ { Type: "unknown", - Status: v1.ConditionTrue, + Status: metav1.ConditionTrue, }, }, - newType: v1beta1.Provisioned, - newStatus: v1.ConditionTrue, - want: []v1.Condition{ + newType: v1.Provisioned, + newStatus: metav1.ConditionTrue, + want: []metav1.Condition{ { Type: "unknown", - Status: v1.ConditionTrue, + Status: metav1.ConditionTrue, }, { - Type: v1beta1.Provisioned, - Status: v1.ConditionTrue, + Type: v1.Provisioned, + Status: metav1.ConditionTrue, }, }, }, @@ -251,12 +251,12 @@ func TestSetCondition(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { pr := provreqwrapper.NewProvisioningRequest( - &v1beta1.ProvisioningRequest{ - Status: v1beta1.ProvisioningRequestStatus{ + &v1.ProvisioningRequest{ + Status: v1.ProvisioningRequestStatus{ Conditions: test.oldConditions, }, }, nil) - AddOrUpdateCondition(pr, test.newType, test.newStatus, "", "", v1.Now()) + AddOrUpdateCondition(pr, test.newType, test.newStatus, "", "", metav1.Now()) got 
:= pr.Status.Conditions if len(got) > 2 || len(got) != len(test.want) || got[0].Type != test.want[0].Type || got[0].Status != test.want[0].Status { t.Errorf("want %v, got: %v", test.want, got) diff --git a/cluster-autoscaler/provisioningrequest/conditions/conditions.go b/cluster-autoscaler/provisioningrequest/conditions/conditions.go index 2688c2d0b4..33a66d1478 100644 --- a/cluster-autoscaler/provisioningrequest/conditions/conditions.go +++ b/cluster-autoscaler/provisioningrequest/conditions/conditions.go @@ -19,7 +19,7 @@ package conditions import ( apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1beta1" + "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1" "k8s.io/autoscaler/cluster-autoscaler/provisioningrequest" "k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/provreqwrapper" "k8s.io/klog/v2" @@ -64,9 +64,9 @@ func ShouldCapacityBeBooked(pr *provreqwrapper.ProvisioningRequest) bool { return false } conditions := pr.Status.Conditions - if apimeta.IsStatusConditionTrue(conditions, v1beta1.Failed) || apimeta.IsStatusConditionTrue(conditions, v1beta1.BookingExpired) { + if apimeta.IsStatusConditionTrue(conditions, v1.Failed) || apimeta.IsStatusConditionTrue(conditions, v1.BookingExpired) { return false - } else if apimeta.IsStatusConditionTrue(conditions, v1beta1.Provisioned) { + } else if apimeta.IsStatusConditionTrue(conditions, v1.Provisioned) { return true } return false @@ -85,7 +85,7 @@ func AddOrUpdateCondition(pr *provreqwrapper.ProvisioningRequest, conditionType } prevConditions := pr.Status.Conditions switch conditionType { - case v1beta1.Provisioned, v1beta1.BookingExpired, v1beta1.Failed, v1beta1.Accepted: + case v1.Provisioned, v1.BookingExpired, v1.Failed, v1.Accepted: conditionFound := false for _, condition := range prevConditions { if condition.Type == conditionType { diff --git a/cluster-autoscaler/provisioningrequest/orchestrator/orchestrator_test.go b/cluster-autoscaler/provisioningrequest/orchestrator/orchestrator_test.go index 658c4f7341..bd3568aa4f 100644 --- a/cluster-autoscaler/provisioningrequest/orchestrator/orchestrator_test.go +++ b/cluster-autoscaler/provisioningrequest/orchestrator/orchestrator_test.go @@ -23,10 +23,10 @@ import ( "time" "github.com/stretchr/testify/assert" - v1 "k8s.io/api/apps/v1" + appsv1 "k8s.io/api/apps/v1" apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1beta1" + "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1" testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test" "k8s.io/autoscaler/cluster-autoscaler/clusterstate" "k8s.io/autoscaler/cluster-autoscaler/config" @@ -75,7 +75,7 @@ func TestScaleUp(t *testing.T) { CPU: "5m", Memory: "5", PodCount: int32(100), - Class: v1beta1.ProvisioningClassCheckCapacity, + Class: v1.ProvisioningClassCheckCapacity, }) newCheckCapacityMemProvReq := provreqwrapper.BuildValidTestProvisioningRequestFromOptions( @@ -84,7 +84,7 @@ func TestScaleUp(t *testing.T) { CPU: "1m", Memory: "100", PodCount: int32(100), - Class: v1beta1.ProvisioningClassCheckCapacity, + Class: v1.ProvisioningClassCheckCapacity, }) // Active atomic scale up requests. 
@@ -94,7 +94,7 @@ func TestScaleUp(t *testing.T) { CPU: "5m", Memory: "5", PodCount: int32(5), - Class: v1beta1.ProvisioningClassBestEffortAtomicScaleUp, + Class: v1.ProvisioningClassBestEffortAtomicScaleUp, }) largeAtomicScaleUpProvReq := provreqwrapper.BuildValidTestProvisioningRequestFromOptions( provreqwrapper.TestProvReqOptions{ @@ -102,7 +102,7 @@ func TestScaleUp(t *testing.T) { CPU: "1m", Memory: "100", PodCount: int32(100), - Class: v1beta1.ProvisioningClassBestEffortAtomicScaleUp, + Class: v1.ProvisioningClassBestEffortAtomicScaleUp, }) impossibleAtomicScaleUpReq := provreqwrapper.BuildValidTestProvisioningRequestFromOptions( provreqwrapper.TestProvReqOptions{ @@ -110,7 +110,7 @@ func TestScaleUp(t *testing.T) { CPU: "1m", Memory: "1", PodCount: int32(5001), - Class: v1beta1.ProvisioningClassBestEffortAtomicScaleUp, + Class: v1.ProvisioningClassBestEffortAtomicScaleUp, }) possibleAtomicScaleUpReq := provreqwrapper.BuildValidTestProvisioningRequestFromOptions( provreqwrapper.TestProvReqOptions{ @@ -118,7 +118,7 @@ func TestScaleUp(t *testing.T) { CPU: "100m", Memory: "1", PodCount: int32(120), - Class: v1beta1.ProvisioningClassBestEffortAtomicScaleUp, + Class: v1.ProvisioningClassBestEffortAtomicScaleUp, }) autoprovisioningAtomicScaleUpReq := provreqwrapper.BuildValidTestProvisioningRequestFromOptions( provreqwrapper.TestProvReqOptions{ @@ -126,7 +126,7 @@ func TestScaleUp(t *testing.T) { CPU: "100m", Memory: "100", PodCount: int32(5), - Class: v1beta1.ProvisioningClassBestEffortAtomicScaleUp, + Class: v1.ProvisioningClassBestEffortAtomicScaleUp, }) // Already provisioned provisioning request - capacity should be booked before processing a new request. @@ -137,9 +137,9 @@ func TestScaleUp(t *testing.T) { CPU: "1m", Memory: "200", PodCount: int32(100), - Class: v1beta1.ProvisioningClassCheckCapacity, + Class: v1.ProvisioningClassCheckCapacity, }) - bookedCapacityProvReq.SetConditions([]metav1.Condition{{Type: v1beta1.Provisioned, Status: metav1.ConditionTrue, LastTransitionTime: metav1.Now()}}) + bookedCapacityProvReq.SetConditions([]metav1.Condition{{Type: v1.Provisioned, Status: metav1.ConditionTrue, LastTransitionTime: metav1.Now()}}) // Expired provisioning request - should be ignored. expiredProvReq := provreqwrapper.BuildValidTestProvisioningRequestFromOptions( @@ -148,9 +148,9 @@ func TestScaleUp(t *testing.T) { CPU: "1m", Memory: "200", PodCount: int32(100), - Class: v1beta1.ProvisioningClassCheckCapacity, + Class: v1.ProvisioningClassCheckCapacity, }) - expiredProvReq.SetConditions([]metav1.Condition{{Type: v1beta1.BookingExpired, Status: metav1.ConditionTrue, LastTransitionTime: metav1.Now()}}) + expiredProvReq.SetConditions([]metav1.Condition{{Type: v1.BookingExpired, Status: metav1.ConditionTrue, LastTransitionTime: metav1.Now()}}) // Unsupported provisioning request - should be ignored. 
unsupportedProvReq := provreqwrapper.BuildValidTestProvisioningRequestFromOptions( @@ -254,7 +254,7 @@ func TestScaleUp(t *testing.T) { } orchestrator, nodeInfos := setupTest(t, allNodes, tc.provReqs, onScaleUpFunc, tc.autoprovisioning) - st, err := orchestrator.ScaleUp(prPods, []*apiv1.Node{}, []*v1.DaemonSet{}, nodeInfos, false) + st, err := orchestrator.ScaleUp(prPods, []*apiv1.Node{}, []*appsv1.DaemonSet{}, nodeInfos, false) if !tc.err { assert.NoError(t, err) if tc.scaleUpResult != st.Result && len(st.PodsRemainUnschedulable) > 0 { @@ -304,7 +304,7 @@ func setupTest(t *testing.T, nodes []*apiv1.Node, prs []*provreqwrapper.Provisio } now := time.Now() - nodeInfos, err := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&autoscalingContext, nodes, []*v1.DaemonSet{}, taints.TaintConfig{}, now) + nodeInfos, err := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&autoscalingContext, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now) assert.NoError(t, err) options := config.AutoscalingOptions{ diff --git a/cluster-autoscaler/provisioningrequest/orchestrator/wrapper_orchestrator.go b/cluster-autoscaler/provisioningrequest/orchestrator/wrapper_orchestrator.go index c74e4a1148..07a902b2e5 100644 --- a/cluster-autoscaler/provisioningrequest/orchestrator/wrapper_orchestrator.go +++ b/cluster-autoscaler/provisioningrequest/orchestrator/wrapper_orchestrator.go @@ -19,7 +19,7 @@ package orchestrator import ( appsv1 "k8s.io/api/apps/v1" apiv1 "k8s.io/api/core/v1" - "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1beta1" + "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1" "k8s.io/autoscaler/cluster-autoscaler/clusterstate" "k8s.io/autoscaler/cluster-autoscaler/context" "k8s.io/autoscaler/cluster-autoscaler/core/scaleup" @@ -87,7 +87,7 @@ func (o *WrapperOrchestrator) ScaleUp( func splitOut(unschedulablePods []*apiv1.Pod) (provReqPods, regularPods []*apiv1.Pod) { for _, pod := range unschedulablePods { - if _, ok := pod.Annotations[v1beta1.ProvisioningRequestPodAnnotationKey]; ok { + if _, ok := pod.Annotations[v1.ProvisioningRequestPodAnnotationKey]; ok { provReqPods = append(provReqPods, pod) } else { regularPods = append(regularPods, pod) diff --git a/cluster-autoscaler/provisioningrequest/orchestrator/wrapper_orchestrator_test.go b/cluster-autoscaler/provisioningrequest/orchestrator/wrapper_orchestrator_test.go index 814a213b34..64644ee8d3 100644 --- a/cluster-autoscaler/provisioningrequest/orchestrator/wrapper_orchestrator_test.go +++ b/cluster-autoscaler/provisioningrequest/orchestrator/wrapper_orchestrator_test.go @@ -22,7 +22,7 @@ import ( "github.com/stretchr/testify/assert" appsv1 "k8s.io/api/apps/v1" apiv1 "k8s.io/api/core/v1" - "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1beta1" + "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1" "k8s.io/autoscaler/cluster-autoscaler/clusterstate" "k8s.io/autoscaler/cluster-autoscaler/context" "k8s.io/autoscaler/cluster-autoscaler/estimator" @@ -53,7 +53,7 @@ func TestWrapperScaleUp(t *testing.T) { BuildTestPod("pr-pod-2", 1, 100), } for _, pod := range provReqPods { - pod.Annotations[v1beta1.ProvisioningRequestPodAnnotationKey] = "true" + pod.Annotations[v1.ProvisioningRequestPodAnnotationKey] = "true" } unschedulablePods := append(regularPods, provReqPods...) 
_, err := o.ScaleUp(unschedulablePods, nil, nil, nil, false) diff --git a/cluster-autoscaler/provisioningrequest/pods/pods.go b/cluster-autoscaler/provisioningrequest/pods/pods.go index 4d8ef3e378..bf401a3451 100644 --- a/cluster-autoscaler/provisioningrequest/pods/pods.go +++ b/cluster-autoscaler/provisioningrequest/pods/pods.go @@ -20,12 +20,12 @@ import ( "fmt" "google.golang.org/protobuf/proto" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1beta1" + "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1" "k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/provreqwrapper" - corev1 "k8s.io/kubernetes/pkg/apis/core/v1" + apiscorev1 "k8s.io/kubernetes/pkg/apis/core/v1" "k8s.io/kubernetes/pkg/controller" ) @@ -38,7 +38,7 @@ const ( // PodsForProvisioningRequest returns a list of pods for which Provisioning // Request needs to provision resources. -func PodsForProvisioningRequest(pr *provreqwrapper.ProvisioningRequest) ([]*v1.Pod, error) { +func PodsForProvisioningRequest(pr *provreqwrapper.ProvisioningRequest) ([]*corev1.Pod, error) { if pr == nil { return nil, nil } @@ -46,7 +46,7 @@ func PodsForProvisioningRequest(pr *provreqwrapper.ProvisioningRequest) ([]*v1.P if err != nil { return nil, err } - pods := make([]*v1.Pod, 0) + pods := make([]*corev1.Pod, 0) for i, podSet := range podSets { for j := 0; j < int(podSet.Count); j++ { pod, err := controller.GetPodFromTemplate(&podSet.PodTemplate, pr.ProvisioningRequest, ownerReference(pr)) @@ -54,7 +54,7 @@ func PodsForProvisioningRequest(pr *provreqwrapper.ProvisioningRequest) ([]*v1.P return nil, fmt.Errorf("while creating pod for pr: %s/%s podSet: %d, got error: %w", pr.Namespace, pr.Name, i, err) } populatePodFields(pr, pod, i, j) - corev1.SetDefaults_Pod(pod) + apiscorev1.SetDefaults_Pod(pod) pods = append(pods, pod) } } @@ -74,14 +74,14 @@ func ownerReference(pr *provreqwrapper.ProvisioningRequest) *metav1.OwnerReferen } } -func populatePodFields(pr *provreqwrapper.ProvisioningRequest, pod *v1.Pod, i, j int) { +func populatePodFields(pr *provreqwrapper.ProvisioningRequest, pod *corev1.Pod, i, j int) { pod.Name = fmt.Sprintf("%s%d-%d", pod.GenerateName, i, j) pod.Namespace = pr.Namespace if pod.Annotations == nil { pod.Annotations = make(map[string]string) } - pod.Annotations[v1beta1.ProvisioningRequestPodAnnotationKey] = pr.Name - pod.Annotations[v1beta1.ProvisioningClassPodAnnotationKey] = pr.Spec.ProvisioningClassName + pod.Annotations[v1.ProvisioningRequestPodAnnotationKey] = pr.Name + pod.Annotations[v1.ProvisioningClassPodAnnotationKey] = pr.Spec.ProvisioningClassName pod.UID = types.UID(fmt.Sprintf("%s/%s", pod.Namespace, pod.Name)) pod.CreationTimestamp = pr.CreationTimestamp } diff --git a/cluster-autoscaler/provisioningrequest/pods/pods_test.go b/cluster-autoscaler/provisioningrequest/pods/pods_test.go index 82cd3c4987..48bdf0c78c 100644 --- a/cluster-autoscaler/provisioningrequest/pods/pods_test.go +++ b/cluster-autoscaler/provisioningrequest/pods/pods_test.go @@ -23,11 +23,11 @@ import ( "github.com/google/go-cmp/cmp" "google.golang.org/protobuf/proto" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1beta1" + 
"k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1" "k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/provreqwrapper" "k8s.io/utils/ptr" ) @@ -35,16 +35,16 @@ import ( const testProvisioningClassName = "TestProvisioningClass" func TestPodsForProvisioningRequest(t *testing.T) { - testPod := func(name, genName, containerName, containerImage, prName string) *v1.Pod { - return &v1.Pod{ + testPod := func(name, genName, containerName, containerImage, prName string) *corev1.Pod { + return &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: name, GenerateName: genName, Namespace: "test-namespace", UID: types.UID(fmt.Sprintf("test-namespace/%s", name)), Annotations: map[string]string{ - v1beta1.ProvisioningRequestPodAnnotationKey: prName, - v1beta1.ProvisioningClassPodAnnotationKey: testProvisioningClassName, + v1.ProvisioningRequestPodAnnotationKey: prName, + v1.ProvisioningClassPodAnnotationKey: testProvisioningClassName, }, Labels: map[string]string{}, Finalizers: []string{}, @@ -55,8 +55,8 @@ func TestPodsForProvisioningRequest(t *testing.T) { }, }, }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ { Name: containerName, Image: containerImage, @@ -69,37 +69,37 @@ func TestPodsForProvisioningRequest(t *testing.T) { tests := []struct { desc string - pr *v1beta1.ProvisioningRequest - podTemplates []*v1.PodTemplate - want []*v1.Pod + pr *v1.ProvisioningRequest + podTemplates []*corev1.PodTemplate + want []*corev1.Pod wantErr bool }{ { desc: "simple ProvReq", - pr: &v1beta1.ProvisioningRequest{ + pr: &v1.ProvisioningRequest{ ObjectMeta: metav1.ObjectMeta{ Name: "test-pr-name", Namespace: "test-namespace", }, - Spec: v1beta1.ProvisioningRequestSpec{ - PodSets: []v1beta1.PodSet{ + Spec: v1.ProvisioningRequestSpec{ + PodSets: []v1.PodSet{ { Count: 1, - PodTemplateRef: v1beta1.Reference{Name: "template-1"}, + PodTemplateRef: v1.Reference{Name: "template-1"}, }, }, ProvisioningClassName: testProvisioningClassName, }, }, - podTemplates: []*v1.PodTemplate{ + podTemplates: []*corev1.PodTemplate{ { ObjectMeta: metav1.ObjectMeta{ Name: "template-1", Namespace: "test-namespace", }, - Template: v1.PodTemplateSpec{ - Spec: v1.PodSpec{ - Containers: []v1.Container{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ { Name: "test-container", Image: "test-image", @@ -109,36 +109,36 @@ func TestPodsForProvisioningRequest(t *testing.T) { }, }, }, - want: []*v1.Pod{ + want: []*corev1.Pod{ testPod("test-pr-name-0-0", "test-pr-name-", "test-container", "test-image", "test-pr-name"), }, }, { desc: "ProvReq already having taint", - pr: &v1beta1.ProvisioningRequest{ + pr: &v1.ProvisioningRequest{ ObjectMeta: metav1.ObjectMeta{ Name: "test-pr-name", Namespace: "test-namespace", }, - Spec: v1beta1.ProvisioningRequestSpec{ - PodSets: []v1beta1.PodSet{ + Spec: v1.ProvisioningRequestSpec{ + PodSets: []v1.PodSet{ { Count: 1, - PodTemplateRef: v1beta1.Reference{Name: "template-1"}, + PodTemplateRef: v1.Reference{Name: "template-1"}, }, }, ProvisioningClassName: testProvisioningClassName, }, }, - podTemplates: []*v1.PodTemplate{ + podTemplates: []*corev1.PodTemplate{ { ObjectMeta: metav1.ObjectMeta{ Name: "template-1", Namespace: "test-namespace", }, - Template: v1.PodTemplateSpec{ - Spec: v1.PodSpec{ - Containers: []v1.Container{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ { Name: "test-container", Image: "test-image", @@ -148,36 +148,36 
@@ func TestPodsForProvisioningRequest(t *testing.T) { }, }, }, - want: []*v1.Pod{ + want: []*corev1.Pod{ testPod("test-pr-name-0-0", "test-pr-name-", "test-container", "test-image", "test-pr-name"), }, }, { desc: "ProvReq already having nodeSelector", - pr: &v1beta1.ProvisioningRequest{ + pr: &v1.ProvisioningRequest{ ObjectMeta: metav1.ObjectMeta{ Name: "test-pr-name", Namespace: "test-namespace", }, - Spec: v1beta1.ProvisioningRequestSpec{ - PodSets: []v1beta1.PodSet{ + Spec: v1.ProvisioningRequestSpec{ + PodSets: []v1.PodSet{ { Count: 1, - PodTemplateRef: v1beta1.Reference{Name: "template-1"}, + PodTemplateRef: v1.Reference{Name: "template-1"}, }, }, ProvisioningClassName: testProvisioningClassName, }, }, - podTemplates: []*v1.PodTemplate{ + podTemplates: []*corev1.PodTemplate{ { ObjectMeta: metav1.ObjectMeta{ Name: "template-1", Namespace: "test-namespace", }, - Template: v1.PodTemplateSpec{ - Spec: v1.PodSpec{ - Containers: []v1.Container{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ { Name: "test-container", Image: "test-image", @@ -187,40 +187,40 @@ func TestPodsForProvisioningRequest(t *testing.T) { }, }, }, - want: []*v1.Pod{ + want: []*corev1.Pod{ testPod("test-pr-name-0-0", "test-pr-name-", "test-container", "test-image", "test-pr-name"), }, }, { desc: "ProvReq with multiple pod sets", - pr: &v1beta1.ProvisioningRequest{ + pr: &v1.ProvisioningRequest{ ObjectMeta: metav1.ObjectMeta{ Name: "test-pr-name", Namespace: "test-namespace", }, - Spec: v1beta1.ProvisioningRequestSpec{ - PodSets: []v1beta1.PodSet{ + Spec: v1.ProvisioningRequestSpec{ + PodSets: []v1.PodSet{ { Count: 2, - PodTemplateRef: v1beta1.Reference{Name: "template-1"}, + PodTemplateRef: v1.Reference{Name: "template-1"}, }, { Count: 3, - PodTemplateRef: v1beta1.Reference{Name: "template-2"}, + PodTemplateRef: v1.Reference{Name: "template-2"}, }, }, ProvisioningClassName: testProvisioningClassName, }, }, - podTemplates: []*v1.PodTemplate{ + podTemplates: []*corev1.PodTemplate{ { ObjectMeta: metav1.ObjectMeta{ Name: "template-1", Namespace: "test-namespace", }, - Template: v1.PodTemplateSpec{ - Spec: v1.PodSpec{ - Containers: []v1.Container{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ { Name: "test-container", Image: "test-image", @@ -234,9 +234,9 @@ func TestPodsForProvisioningRequest(t *testing.T) { Name: "template-2", Namespace: "test-namespace", }, - Template: v1.PodTemplateSpec{ - Spec: v1.PodSpec{ - Containers: []v1.Container{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ { Name: "test-container-2", Image: "test-image-2", @@ -246,7 +246,7 @@ func TestPodsForProvisioningRequest(t *testing.T) { }, }, }, - want: []*v1.Pod{ + want: []*corev1.Pod{ testPod("test-pr-name-0-0", "test-pr-name-", "test-container", "test-image", "test-pr-name"), testPod("test-pr-name-0-1", "test-pr-name-", "test-container", "test-image", "test-pr-name"), testPod("test-pr-name-1-0", "test-pr-name-", "test-container-2", "test-image-2", "test-pr-name"), @@ -256,37 +256,37 @@ func TestPodsForProvisioningRequest(t *testing.T) { }, { desc: "PodTemplate doesn't specify container resources requests, Pods container default resources requests to limits", - pr: &v1beta1.ProvisioningRequest{ + pr: &v1.ProvisioningRequest{ ObjectMeta: metav1.ObjectMeta{ Name: "test-pr-name", Namespace: "test-namespace", }, - Spec: v1beta1.ProvisioningRequestSpec{ - PodSets: []v1beta1.PodSet{ + Spec: v1.ProvisioningRequestSpec{ 
diff --git a/cluster-autoscaler/provisioningrequest/provreqclient/client.go b/cluster-autoscaler/provisioningrequest/provreqclient/client.go
index d8963a4d99..941c3f40c9 100644
--- a/cluster-autoscaler/provisioningrequest/provreqclient/client.go
+++ b/cluster-autoscaler/provisioningrequest/provreqclient/client.go
@@ -26,14 +26,14 @@ import (
   metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
   "k8s.io/apimachinery/pkg/labels"

-  "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1beta1"
+  "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1"
   "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/client/clientset/versioned"
   "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/client/informers/externalversions"
-  listers "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/client/listers/autoscaling.x-k8s.io/v1beta1"
+  listers "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/client/listers/autoscaling.x-k8s.io/v1"
   "k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/provreqwrapper"
"k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/core/v1" + corev1 "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/rest" klog "k8s.io/klog/v2" @@ -43,11 +43,11 @@ const ( provisioningRequestClientCallTimeout = 4 * time.Second ) -// ProvisioningRequestClient represents client for v1beta1 ProvReq CRD. +// ProvisioningRequestClient represents client for v1 ProvReq CRD. type ProvisioningRequestClient struct { client versioned.Interface provReqLister listers.ProvisioningRequestLister - podTemplLister v1.PodTemplateLister + podTemplLister corev1.PodTemplateLister } // NewProvisioningRequestClient configures and returns a provisioningRequestClient. @@ -81,36 +81,36 @@ func NewProvisioningRequestClient(kubeConfig *rest.Config) (*ProvisioningRequest // ProvisioningRequest gets a specific ProvisioningRequest CR. func (c *ProvisioningRequestClient) ProvisioningRequest(namespace, name string) (*provreqwrapper.ProvisioningRequest, error) { - v1Beta1PR, err := c.provReqLister.ProvisioningRequests(namespace).Get(name) + v1PR, err := c.provReqLister.ProvisioningRequests(namespace).Get(name) if err != nil { return nil, err } - podTemplates, err := c.FetchPodTemplates(v1Beta1PR) + podTemplates, err := c.FetchPodTemplates(v1PR) if err != nil { return nil, fmt.Errorf("while fetching pod templates for Get Provisioning Request %s/%s got error: %v", namespace, name, err) } - return provreqwrapper.NewProvisioningRequest(v1Beta1PR, podTemplates), nil + return provreqwrapper.NewProvisioningRequest(v1PR, podTemplates), nil } // ProvisioningRequests gets all ProvisioningRequest CRs. func (c *ProvisioningRequestClient) ProvisioningRequests() ([]*provreqwrapper.ProvisioningRequest, error) { - v1Beta1PRs, err := c.provReqLister.List(labels.Everything()) + v1PRs, err := c.provReqLister.List(labels.Everything()) if err != nil { return nil, fmt.Errorf("error fetching provisioningRequests: %w", err) } - prs := make([]*provreqwrapper.ProvisioningRequest, 0, len(v1Beta1PRs)) - for _, v1Beta1PR := range v1Beta1PRs { - podTemplates, errPodTemplates := c.FetchPodTemplates(v1Beta1PR) + prs := make([]*provreqwrapper.ProvisioningRequest, 0, len(v1PRs)) + for _, v1PR := range v1PRs { + podTemplates, errPodTemplates := c.FetchPodTemplates(v1PR) if errPodTemplates != nil { - return nil, fmt.Errorf("while fetching pod templates for List Provisioning Request %s/%s got error: %v", v1Beta1PR.Namespace, v1Beta1PR.Name, errPodTemplates) + return nil, fmt.Errorf("while fetching pod templates for List Provisioning Request %s/%s got error: %v", v1PR.Namespace, v1PR.Name, errPodTemplates) } - prs = append(prs, provreqwrapper.NewProvisioningRequest(v1Beta1PR, podTemplates)) + prs = append(prs, provreqwrapper.NewProvisioningRequest(v1PR, podTemplates)) } return prs, nil } // FetchPodTemplates fetches PodTemplates referenced by the Provisioning Request. 
-func (c *ProvisioningRequestClient) FetchPodTemplates(pr *v1beta1.ProvisioningRequest) ([]*apiv1.PodTemplate, error) {
+func (c *ProvisioningRequestClient) FetchPodTemplates(pr *v1.ProvisioningRequest) ([]*apiv1.PodTemplate, error) {
   podTemplates := make([]*apiv1.PodTemplate, 0, len(pr.Spec.PodSets))
   for _, podSpec := range pr.Spec.PodSets {
     podTemplate, err := c.podTemplLister.PodTemplates(pr.Namespace).Get(podSpec.PodTemplateRef.Name)
@@ -126,7 +126,7 @@ func (c *ProvisioningRequestClient) FetchPodTemplates(pr *v1beta1.ProvisioningRe
 }

 // UpdateProvisioningRequest updates the given ProvisioningRequest CR by propagating the changes using the ProvisioningRequestInterface and returns the updated instance or the original one in case of an error.
-func (c *ProvisioningRequestClient) UpdateProvisioningRequest(pr *v1beta1.ProvisioningRequest) (*v1beta1.ProvisioningRequest, error) {
+func (c *ProvisioningRequestClient) UpdateProvisioningRequest(pr *v1.ProvisioningRequest) (*v1.ProvisioningRequest, error) {
   ctx, cancel := context.WithTimeout(context.Background(), provisioningRequestClientCallTimeout)
   defer cancel()

@@ -134,8 +134,8 @@ func (c *ProvisioningRequestClient) UpdateProvisioningRequest(pr *v1beta1.Provis
   // the default null template.metadata.creationTimestamp field of PodTemplateSpec
   // will not generate false error logs as a side effect.
   prCopy := pr.DeepCopy()
-  prCopy.Spec = v1beta1.ProvisioningRequestSpec{}
-  updatedPr, err := c.client.AutoscalingV1beta1().ProvisioningRequests(prCopy.Namespace).UpdateStatus(ctx, prCopy, metav1.UpdateOptions{})
+  prCopy.Spec = v1.ProvisioningRequestSpec{}
+  updatedPr, err := c.client.AutoscalingV1().ProvisioningRequests(prCopy.Namespace).UpdateStatus(ctx, prCopy, metav1.UpdateOptions{})
   if err != nil {
     return pr, err
   }
@@ -151,7 +151,7 @@ func newPRClient(kubeConfig *rest.Config) (*versioned.Clientset, error) {

 // newPRsLister creates a lister for the Provisioning Requests in the cluster.
 func newPRsLister(prClient versioned.Interface, stopChannel <-chan struct{}) (listers.ProvisioningRequestLister, error) {
   factory := externalversions.NewSharedInformerFactory(prClient, 1*time.Hour)
-  provReqLister := factory.Autoscaling().V1beta1().ProvisioningRequests().Lister()
+  provReqLister := factory.Autoscaling().V1().ProvisioningRequests().Lister()
   factory.Start(stopChannel)
   informersSynced := factory.WaitForCacheSync(stopChannel)
   for _, synced := range informersSynced {
@@ -164,7 +164,7 @@ func newPRsLister(prClient versioned.Interface, stopChannel <-chan struct{}) (li
 }

 // newPodTemplatesLister creates a lister for the Pod Templates in the cluster.
-func newPodTemplatesLister(client *kubernetes.Clientset, stopChannel <-chan struct{}) (v1.PodTemplateLister, error) {
+func newPodTemplatesLister(client *kubernetes.Clientset, stopChannel <-chan struct{}) (corev1.PodTemplateLister, error) {
   factory := informers.NewSharedInformerFactory(client, 1*time.Hour)
   podTemplLister := factory.Core().V1().PodTemplates().Lister()
   factory.Start(stopChannel)
@@ -206,11 +206,11 @@ func ProvisioningRequestsForPods(client *ProvisioningRequestClient, unschedulabl
 }

 // DeleteProvisioningRequest deletes the given ProvisioningRequest CR using the ProvisioningRequestInterface and returns an error in case of failure.
-func (c *ProvisioningRequestClient) DeleteProvisioningRequest(pr *v1beta1.ProvisioningRequest) error {
+func (c *ProvisioningRequestClient) DeleteProvisioningRequest(pr *v1.ProvisioningRequest) error {
   ctx, cancel := context.WithTimeout(context.Background(), provisioningRequestClientCallTimeout)
   defer cancel()

-  err := c.client.AutoscalingV1beta1().ProvisioningRequests(pr.Namespace).Delete(ctx, pr.Name, metav1.DeleteOptions{})
+  err := c.client.AutoscalingV1().ProvisioningRequests(pr.Namespace).Delete(ctx, pr.Name, metav1.DeleteOptions{})
   if err != nil {
     return fmt.Errorf("error deleting ProvisioningRequest %s/%s: %w", pr.Namespace, pr.Name, err)
   }
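Note: callers of this client now go through the AutoscalingV1() group. A hedged sketch of the status-update pattern that UpdateProvisioningRequest above relies on — markStatus is a hypothetical helper, not upstream API; only versioned.Interface, the AutoscalingV1() call chain, and the Spec-clearing step come from the code above:

    package example

    import (
        "context"
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        v1 "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1"
        "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/client/clientset/versioned"
    )

    // markStatus updates only the status subresource. Like
    // UpdateProvisioningRequest above, it empties Spec on a DeepCopy so the
    // null template.metadata.creationTimestamp of PodTemplateSpec cannot
    // surface as false error logs during serialization.
    func markStatus(ctx context.Context, cs versioned.Interface, pr *v1.ProvisioningRequest) error {
        prCopy := pr.DeepCopy()
        prCopy.Spec = v1.ProvisioningRequestSpec{}
        if _, err := cs.AutoscalingV1().ProvisioningRequests(prCopy.Namespace).UpdateStatus(ctx, prCopy, metav1.UpdateOptions{}); err != nil {
            return fmt.Errorf("updating status of ProvisioningRequest %s/%s: %w", pr.Namespace, pr.Name, err)
        }
        return nil
    }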
diff --git a/cluster-autoscaler/provisioningrequest/provreqclient/client_test.go b/cluster-autoscaler/provisioningrequest/provreqclient/client_test.go
index 0ec9a610c6..90abfde4d6 100644
--- a/cluster-autoscaler/provisioningrequest/provreqclient/client_test.go
+++ b/cluster-autoscaler/provisioningrequest/provreqclient/client_test.go
@@ -24,7 +24,7 @@ import (
   "github.com/google/go-cmp/cmp"
   "github.com/stretchr/testify/assert"
   apiv1 "k8s.io/api/core/v1"
-  "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1beta1"
+  "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1"
   "k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/pods"
   "k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/provreqwrapper"
   . "k8s.io/autoscaler/cluster-autoscaler/utils/test"
@@ -50,7 +50,7 @@ func TestFetchPodTemplates(t *testing.T) {
 }

 func TestProvisioningRequestsForPods(t *testing.T) {
-  checkCapacityProvReq := provreqwrapper.BuildTestProvisioningRequest("ns", "check-capacity", "1m", "100", "", int32(100), false, time.Now(), v1beta1.ProvisioningClassCheckCapacity)
+  checkCapacityProvReq := provreqwrapper.BuildTestProvisioningRequest("ns", "check-capacity", "1m", "100", "", int32(100), false, time.Now(), v1.ProvisioningClassCheckCapacity)
   customProvReq := provreqwrapper.BuildTestProvisioningRequest("ns", "custom", "1m", "100", "", int32(100), false, time.Now(), "custom")
   checkCapacityPods, _ := pods.PodsForProvisioningRequest(checkCapacityProvReq)
   customProvReqPods, _ := pods.PodsForProvisioningRequest(customProvReq)
diff --git a/cluster-autoscaler/provisioningrequest/provreqclient/testutils.go b/cluster-autoscaler/provisioningrequest/provreqclient/testutils.go
index 36230bf757..5d10cc09b9 100644
--- a/cluster-autoscaler/provisioningrequest/provreqclient/testutils.go
+++ b/cluster-autoscaler/provisioningrequest/provreqclient/testutils.go
@@ -24,13 +24,13 @@ import (
   apiv1 "k8s.io/api/core/v1"
   metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-  "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1beta1"
+  "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1"
   "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/client/clientset/versioned/fake"
   "k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/provreqwrapper"
   "k8s.io/client-go/informers"
   "k8s.io/client-go/kubernetes"
   fake_kubernetes "k8s.io/client-go/kubernetes/fake"
-  v1 "k8s.io/client-go/listers/core/v1"
+  corev1 "k8s.io/client-go/listers/core/v1"
   klog "k8s.io/klog/v2"
 )

@@ -43,7 +43,7 @@ func NewFakeProvisioningRequestClient(ctx context.Context, t *testing.T, prs ...
     if pr == nil {
       continue
     }
-    if _, err := provReqClient.AutoscalingV1beta1().ProvisioningRequests(pr.Namespace).Create(ctx, pr.ProvisioningRequest, metav1.CreateOptions{}); err != nil {
+    if _, err := provReqClient.AutoscalingV1().ProvisioningRequests(pr.Namespace).Create(ctx, pr.ProvisioningRequest, metav1.CreateOptions{}); err != nil {
       t.Errorf("While adding a ProvisioningRequest: %s/%s to fake client, got error: %v", pr.Namespace, pr.Name, err)
     }
     for _, pd := range pr.PodTemplates {
@@ -68,7 +68,7 @@ func NewFakeProvisioningRequestClient(ctx context.Context, t *testing.T, prs ...
 }

 // newFakePodTemplatesLister creates a fake lister for the Pod Templates in the cluster.
-func newFakePodTemplatesLister(t *testing.T, client kubernetes.Interface, channel <-chan struct{}) (v1.PodTemplateLister, error) {
+func newFakePodTemplatesLister(t *testing.T, client kubernetes.Interface, channel <-chan struct{}) (corev1.PodTemplateLister, error) {
   t.Helper()
   factory := informers.NewSharedInformerFactory(client, 1*time.Hour)
   podTemplLister := factory.Core().V1().PodTemplates().Lister()
@@ -106,28 +106,28 @@ func ProvisioningRequestWrapperForTesting(namespace, name string) *provreqwrappe
       },
     },
   }
-  v1Beta1PR := &v1beta1.ProvisioningRequest{
+  v1PR := &v1.ProvisioningRequest{
     ObjectMeta: metav1.ObjectMeta{
       Name: name,
       Namespace: namespace,
     },
-    Spec: v1beta1.ProvisioningRequestSpec{
+    Spec: v1.ProvisioningRequestSpec{
       ProvisioningClassName: "test-class",
-      PodSets: []v1beta1.PodSet{
+      PodSets: []v1.PodSet{
         {
           Count: 1,
-          PodTemplateRef: v1beta1.Reference{
+          PodTemplateRef: v1.Reference{
             Name: podTemplates[0].Name,
           },
         },
       },
     },
-    Status: v1beta1.ProvisioningRequestStatus{
-      ProvisioningClassDetails: map[string]v1beta1.Detail{},
+    Status: v1.ProvisioningRequestStatus{
+      ProvisioningClassDetails: map[string]v1.Detail{},
     },
   }
-  pr := provreqwrapper.NewProvisioningRequest(v1Beta1PR, podTemplates)
+  pr := provreqwrapper.NewProvisioningRequest(v1PR, podTemplates)
   return pr
 }

@@ -139,13 +139,13 @@ func podTemplateNameFromName(name string) string {
 func (c *ProvisioningRequestClient) ProvisioningRequestNoCache(namespace, name string) (*provreqwrapper.ProvisioningRequest, error) {
   ctx, cancel := context.WithTimeout(context.Background(), provisioningRequestClientCallTimeout)
   defer cancel()
-  v1beta1, err := c.client.AutoscalingV1beta1().ProvisioningRequests(namespace).Get(ctx, name, metav1.GetOptions{})
+  v1PR, err := c.client.AutoscalingV1().ProvisioningRequests(namespace).Get(ctx, name, metav1.GetOptions{})
   if err != nil {
     return nil, err
   }
-  podTemplates, err := c.FetchPodTemplates(v1beta1)
+  podTemplates, err := c.FetchPodTemplates(v1PR)
   if err != nil {
     return nil, err
   }
-  return provreqwrapper.NewProvisioningRequest(v1beta1, podTemplates), nil
+  return provreqwrapper.NewProvisioningRequest(v1PR, podTemplates), nil
 }
diff --git a/cluster-autoscaler/provisioningrequest/provreqwrapper/testutils.go b/cluster-autoscaler/provisioningrequest/provreqwrapper/testutils.go
index 1954978e3f..570525c3ec 100644
--- a/cluster-autoscaler/provisioningrequest/provreqwrapper/testutils.go
+++ b/cluster-autoscaler/provisioningrequest/provreqwrapper/testutils.go
@@ -23,8 +23,7 @@ import (
   apiv1 "k8s.io/api/core/v1"
   "k8s.io/apimachinery/pkg/api/resource"
   metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-  v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-  "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1beta1"
+  "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1"
 )

 // TestProvReqOptions is a helper struct to make constructing test ProvisioningRequest object easier.
@@ -87,22 +86,22 @@ func BuildTestProvisioningRequest(namespace, name, cpu, memory, gpu string, podC
     }
   }
   return NewProvisioningRequest(
-    &v1beta1.ProvisioningRequest{
+    &v1.ProvisioningRequest{
       ObjectMeta: metav1.ObjectMeta{
         Name: name,
         Namespace: namespace,
-        CreationTimestamp: v1.NewTime(creationTimestamp),
+        CreationTimestamp: metav1.NewTime(creationTimestamp),
       },
-      Spec: v1beta1.ProvisioningRequestSpec{
+      Spec: v1.ProvisioningRequestSpec{
         ProvisioningClassName: class,
-        PodSets: []v1beta1.PodSet{
+        PodSets: []v1.PodSet{
           {
-            PodTemplateRef: v1beta1.Reference{Name: fmt.Sprintf("%s-template-name", name)},
+            PodTemplateRef: v1.Reference{Name: fmt.Sprintf("%s-template-name", name)},
             Count: podCount,
           },
         },
       },
-      Status: v1beta1.ProvisioningRequestStatus{
+      Status: v1.ProvisioningRequestStatus{
         Conditions: []metav1.Condition{},
       },
     },
@@ -111,7 +110,7 @@ func BuildTestProvisioningRequest(namespace, name, cpu, memory, gpu string, podC
       ObjectMeta: metav1.ObjectMeta{
         Name: fmt.Sprintf("%s-template-name", name),
         Namespace: namespace,
-        CreationTimestamp: v1.NewTime(creationTimestamp),
+        CreationTimestamp: metav1.NewTime(creationTimestamp),
       },
       Template: apiv1.PodTemplateSpec{
         ObjectMeta: metav1.ObjectMeta{
diff --git a/cluster-autoscaler/provisioningrequest/provreqwrapper/wrapper.go b/cluster-autoscaler/provisioningrequest/provreqwrapper/wrapper.go
index eb8141e88a..7ec6edd089 100644
--- a/cluster-autoscaler/provisioningrequest/provreqwrapper/wrapper.go
+++ b/cluster-autoscaler/provisioningrequest/provreqwrapper/wrapper.go
@@ -22,12 +22,12 @@ import (
   apiv1 "k8s.io/api/core/v1"
   metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-  "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1beta1"
+  "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1"
 )

 // ProvisioningRequest wrapper representation of the ProvisioningRequest
 type ProvisioningRequest struct {
-  *v1beta1.ProvisioningRequest
+  *v1.ProvisioningRequest
   PodTemplates []*apiv1.PodTemplate
 }

@@ -39,8 +39,8 @@ type PodSet struct {
   PodTemplate apiv1.PodTemplateSpec
 }

-// NewProvisioningRequest creates new ProvisioningRequest based on v1beta1 CR.
-func NewProvisioningRequest(pr *v1beta1.ProvisioningRequest, podTemplates []*apiv1.PodTemplate) *ProvisioningRequest {
+// NewProvisioningRequest creates new ProvisioningRequest based on v1 CR.
+func NewProvisioningRequest(pr *v1.ProvisioningRequest, podTemplates []*apiv1.PodTemplate) *ProvisioningRequest {
   return &ProvisioningRequest{
     ProvisioningRequest: pr,
     PodTemplates: podTemplates,
@@ -69,7 +69,7 @@ func (pr *ProvisioningRequest) PodSets() ([]PodSet, error) {
 }

 // errMissingPodTemplates creates error that is passed when there are missing pod templates.
-func errMissingPodTemplates(podSets []v1beta1.PodSet, podTemplates []*apiv1.PodTemplate) error {
+func errMissingPodTemplates(podSets []v1.PodSet, podTemplates []*apiv1.PodTemplate) error {
   foundPodTemplates := map[string]struct{}{}
   for _, pt := range podTemplates {
     foundPodTemplates[pt.Name] = struct{}{}
diff --git a/cluster-autoscaler/provisioningrequest/provreqwrapper/wrapper_test.go b/cluster-autoscaler/provisioningrequest/provreqwrapper/wrapper_test.go
index e1ae3e0ff5..7d57af5d92 100644
--- a/cluster-autoscaler/provisioningrequest/provreqwrapper/wrapper_test.go
+++ b/cluster-autoscaler/provisioningrequest/provreqwrapper/wrapper_test.go
@@ -24,7 +24,7 @@ import (
   apiv1 "k8s.io/api/core/v1"
   metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
   "k8s.io/apimachinery/pkg/types"
-  "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1beta1"
+  "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1"
 )

 func TestProvisioningRequestWrapper(t *testing.T) {
@@ -74,7 +74,7 @@ func TestProvisioningRequestWrapper(t *testing.T) {
       },
     },
   }
-  v1Beta1PR := &v1beta1.ProvisioningRequest{
+  v1PR := &v1.ProvisioningRequest{
     TypeMeta: metav1.TypeMeta{
       APIVersion: "beta-api",
       Kind: "beta-kind",
@@ -85,24 +85,24 @@ func TestProvisioningRequestWrapper(t *testing.T) {
     },
     ObjectMeta: metav1.ObjectMeta{
       Name: "name-beta",
       Namespace: "namespace-beta",
       CreationTimestamp: creationTimestamp,
       UID: types.UID("beta-uid"),
     },
-    Spec: v1beta1.ProvisioningRequestSpec{
+    Spec: v1.ProvisioningRequestSpec{
       ProvisioningClassName: "queued-provisioning.gke.io",
-      PodSets: []v1beta1.PodSet{
+      PodSets: []v1.PodSet{
         {
           Count: 1,
-          PodTemplateRef: v1beta1.Reference{
+          PodTemplateRef: v1.Reference{
             Name: podTemplates[0].Name,
           },
         },
       },
     },
-    Status: v1beta1.ProvisioningRequestStatus{
+    Status: v1.ProvisioningRequestStatus{
       Conditions: conditions,
-      ProvisioningClassDetails: map[string]v1beta1.Detail{},
+      ProvisioningClassDetails: map[string]v1.Detail{},
     },
   }
-  wrappedBetaPR := NewProvisioningRequest(v1Beta1PR, podTemplates)
+  wrappedBetaPR := NewProvisioningRequest(v1PR, podTemplates)

   // Check Name, Namespace and Creation accessors
   assert.Equal(t, "name-beta", wrappedBetaPR.Name)
@@ -131,11 +131,11 @@ func TestProvisioningRequestWrapper(t *testing.T) {
   assert.Equal(t, podSets, betaPodSets)

   // Check the type accessors.
-  assert.Equal(t, v1Beta1PR, wrappedBetaPR.ProvisioningRequest)
+  assert.Equal(t, v1PR, wrappedBetaPR.ProvisioningRequest)
   assert.Equal(t, podTemplates, wrappedBetaPR.PodTemplates)

   // Check case where the Provisioning Request is missing Pod Templates.
-  wrappedBetaPRMissingPodTemplates := NewProvisioningRequest(v1Beta1PR, nil)
+  wrappedBetaPRMissingPodTemplates := NewProvisioningRequest(v1PR, nil)
   podSets, err := wrappedBetaPRMissingPodTemplates.PodSets()
   assert.Nil(t, podSets)
   assert.EqualError(t, err, "missing pod templates, 1 pod templates were referenced, 1 templates were missing: name-pod-template-beta")
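Note: a brief sketch of how the SupportedProvisioningClasses map defined in the next file is typically consumed when filtering requests; supported is a hypothetical helper, shown only to illustrate the lookup:

    package example

    import (
        v1 "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1"
        "k8s.io/autoscaler/cluster-autoscaler/provisioningrequest"
    )

    // supported reports whether Cluster Autoscaler recognizes the request's
    // provisioning class; requests with unknown classes are left for other
    // controllers to handle.
    func supported(pr *v1.ProvisioningRequest) bool {
        return provisioningrequest.SupportedProvisioningClasses[pr.Spec.ProvisioningClassName]
    }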
diff --git a/cluster-autoscaler/provisioningrequest/supported_classes.go b/cluster-autoscaler/provisioningrequest/supported_classes.go
index cff7a7faa6..807e15bd71 100644
--- a/cluster-autoscaler/provisioningrequest/supported_classes.go
+++ b/cluster-autoscaler/provisioningrequest/supported_classes.go
@@ -17,12 +17,12 @@ limitations under the License.

 package provisioningrequest

 import (
-  "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1beta1"
+  "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1"
 )

 // SupportedProvisioningClasses is a set of ProvisioningRequest classes
 // supported by Cluster Autoscaler.
 var SupportedProvisioningClasses = map[string]bool{
-  v1beta1.ProvisioningClassCheckCapacity: true,
-  v1beta1.ProvisioningClassBestEffortAtomicScaleUp: true,
+  v1.ProvisioningClassCheckCapacity: true,
+  v1.ProvisioningClassBestEffortAtomicScaleUp: true,
 }
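Note: with the CRD group moved to v1, a request is assembled directly against the v1 types. An illustrative constructor under assumed names (newCheckCapacityRequest, templateName, and count are made up; the types and the ProvisioningClassCheckCapacity constant are the ones used throughout this diff):

    package example

    import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        v1 "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1"
    )

    // newCheckCapacityRequest builds a minimal check-capacity
    // ProvisioningRequest that references an existing PodTemplate in the
    // same namespace.
    func newCheckCapacityRequest(namespace, name, templateName string, count int32) *v1.ProvisioningRequest {
        return &v1.ProvisioningRequest{
            ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
            Spec: v1.ProvisioningRequestSpec{
                ProvisioningClassName: v1.ProvisioningClassCheckCapacity,
                PodSets: []v1.PodSet{
                    {Count: count, PodTemplateRef: v1.Reference{Name: templateName}},
                },
            },
        }
    }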