Merge branch 'master' into feat/partitionable-devices-support

MenD32 2025-06-12 12:58:11 +03:00 committed by GitHub
commit 6f594c5c47
22 changed files with 983 additions and 200 deletions

View File

@@ -5,9 +5,9 @@ go 1.24.0
require (
github.com/onsi/ginkgo/v2 v2.21.0
github.com/onsi/gomega v1.35.1
k8s.io/apimachinery v0.33.0-beta.0
k8s.io/client-go v0.33.0-beta.0
k8s.io/code-generator v0.33.0-beta.0
k8s.io/apimachinery v0.34.0-alpha.0
k8s.io/client-go v0.34.0-alpha.0
k8s.io/code-generator v0.34.0-alpha.0
sigs.k8s.io/structured-merge-diff/v4 v4.6.0
)
@@ -35,22 +35,22 @@ require (
github.com/spf13/pflag v1.0.5 // indirect
github.com/x448/float16 v0.8.4 // indirect
golang.org/x/mod v0.21.0 // indirect
golang.org/x/net v0.33.0 // indirect
golang.org/x/net v0.38.0 // indirect
golang.org/x/oauth2 v0.27.0 // indirect
golang.org/x/sync v0.11.0 // indirect
golang.org/x/sys v0.30.0 // indirect
golang.org/x/term v0.29.0 // indirect
golang.org/x/text v0.22.0 // indirect
golang.org/x/sync v0.12.0 // indirect
golang.org/x/sys v0.31.0 // indirect
golang.org/x/term v0.30.0 // indirect
golang.org/x/text v0.23.0 // indirect
golang.org/x/time v0.9.0 // indirect
golang.org/x/tools v0.26.0 // indirect
google.golang.org/protobuf v1.36.5 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/api v0.33.0-beta.0 // indirect
k8s.io/api v0.34.0-alpha.0 // indirect
k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/kube-openapi v0.0.0-20250304201544-e5f78fe3ede9 // indirect
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
sigs.k8s.io/randfill v1.0.0 // indirect

View File

@@ -93,26 +93,26 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU=
golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s=
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -137,20 +137,20 @@ gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.33.0-beta.0 h1:/sAUrfXsjKPST2mZjpWhjRdzSR6SD5KlJpiOgCQQhAQ=
k8s.io/api v0.33.0-beta.0/go.mod h1:TYyCgedkG4OVS4+4D2n25BdbMcexMSLx6Y7OkAzkxLQ=
k8s.io/apimachinery v0.33.0-beta.0 h1:vLDBChfQwyimk6AbuT7OZOIqxSg/44JlXuxqBk85j68=
k8s.io/apimachinery v0.33.0-beta.0/go.mod h1:S2OIkExGqJOXYSYcAJwQ9zWcc6BkBUdTJUu4M7z0cvo=
k8s.io/client-go v0.33.0-beta.0 h1:xRGKK5hU39pb6CFDCDOOlG+LEenB93/RK9hoP4eyAsU=
k8s.io/client-go v0.33.0-beta.0/go.mod h1:RF6hSu+FncpgHQs1zA1UfGbMq8gxay89r37bCQe+Mj4=
k8s.io/code-generator v0.33.0-beta.0 h1:QYiWYFUT9G7lnF1ucDYr/sZUaG/kptrooX2PJxEL+Go=
k8s.io/code-generator v0.33.0-beta.0/go.mod h1:RBvFpvqtyQygCBjMayNyYqdzy+89LdzqAx0Th+dgmzQ=
k8s.io/api v0.34.0-alpha.0 h1:plVaaO0yCTOGvWjEiEvvecQOPpf/IYdLnVMsfGfGMQo=
k8s.io/api v0.34.0-alpha.0/go.mod h1:brriDRpq4yMP4PN4P48NfXVLwWSwaIFSe0+pOajiwjQ=
k8s.io/apimachinery v0.34.0-alpha.0 h1:arymqm+uCpPEAVWBCvNF+yq01AJzsoUeUd2DYpoHuzc=
k8s.io/apimachinery v0.34.0-alpha.0/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
k8s.io/client-go v0.34.0-alpha.0 h1:+hfihZ7vffuzoS4BoYg2nWs+9Bc1hXpZ7+iev2ISCo0=
k8s.io/client-go v0.34.0-alpha.0/go.mod h1:0sClwbFRpXuYhqaJEqLiy+e9dlC7FOhFHc9ZdvLDAbU=
k8s.io/code-generator v0.34.0-alpha.0 h1:aM4APBz/eAR8Qw4RWiCpfocZ2O2UUTi0UqTfvalouHc=
k8s.io/code-generator v0.34.0-alpha.0/go.mod h1:lwzb0eIHnmHnkhcHbxXf87XR512Xm7mF2RHtDKEW71c=
k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7 h1:2OX19X59HxDprNCVrWi6jb7LW1PoqTlYqEq5H2oetog=
k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20250304201544-e5f78fe3ede9 h1:t0huyHnz6HsokckRxAF1bY0cqPFwzINKCL7yltEjZQc=
k8s.io/kube-openapi v0.0.0-20250304201544-e5f78fe3ede9/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=

View File

@@ -276,8 +276,17 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) caerrors.AutoscalerErr
stateUpdateStart := time.Now()
var draSnapshot *drasnapshot.Snapshot
if a.AutoscalingContext.DynamicResourceAllocationEnabled && a.AutoscalingContext.DraProvider != nil {
var err error
draSnapshot, err = a.AutoscalingContext.DraProvider.Snapshot()
if err != nil {
return caerrors.ToAutoscalerError(caerrors.ApiCallError, err)
}
}
// Get nodes and pods currently living on cluster
allNodes, readyNodes, typedErr := a.obtainNodeLists()
allNodes, readyNodes, typedErr := a.obtainNodeLists(draSnapshot)
if typedErr != nil {
klog.Errorf("Failed to get node list: %v", typedErr)
return typedErr
@@ -302,6 +311,7 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) caerrors.AutoscalerErr
klog.Errorf("Failed to get daemonset list: %v", err)
return caerrors.ToAutoscalerError(caerrors.ApiCallError, err)
}
// Snapshot scale-down actuation status before cache refresh.
scaleDownActuationStatus := a.scaleDownActuator.CheckStatus()
// Call CloudProvider.Refresh before any other calls to cloud provider.
@@ -335,14 +345,6 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) caerrors.AutoscalerErr
}
nonExpendableScheduledPods := core_utils.FilterOutExpendablePods(originalScheduledPods, a.ExpendablePodsPriorityCutoff)
var draSnapshot *drasnapshot.Snapshot
if a.AutoscalingContext.DynamicResourceAllocationEnabled && a.AutoscalingContext.DraProvider != nil {
draSnapshot, err = a.AutoscalingContext.DraProvider.Snapshot()
if err != nil {
return caerrors.ToAutoscalerError(caerrors.ApiCallError, err)
}
}
if err := a.ClusterSnapshot.SetClusterState(allNodes, nonExpendableScheduledPods, draSnapshot); err != nil {
return caerrors.ToAutoscalerError(caerrors.InternalError, err).AddPrefix("failed to initialize ClusterSnapshot: ")
}
@@ -980,7 +982,7 @@ func (a *StaticAutoscaler) ExitCleanUp() {
a.clusterStateRegistry.Stop()
}
func (a *StaticAutoscaler) obtainNodeLists() ([]*apiv1.Node, []*apiv1.Node, caerrors.AutoscalerError) {
func (a *StaticAutoscaler) obtainNodeLists(draSnapshot *drasnapshot.Snapshot) ([]*apiv1.Node, []*apiv1.Node, caerrors.AutoscalerError) {
allNodes, err := a.AllNodeLister().List()
if err != nil {
klog.Errorf("Failed to list all nodes: %v", err)
@@ -998,7 +1000,7 @@ func (a *StaticAutoscaler) obtainNodeLists() ([]*apiv1.Node, []*apiv1.Node, caer
// Treat those nodes as unready until GPU actually becomes available and let
// our normal handling for booting up nodes deal with this.
// TODO: Remove this call when we handle dynamically provisioned resources.
allNodes, readyNodes = a.processors.CustomResourcesProcessor.FilterOutNodesWithUnreadyResources(a.AutoscalingContext, allNodes, readyNodes)
allNodes, readyNodes = a.processors.CustomResourcesProcessor.FilterOutNodesWithUnreadyResources(a.AutoscalingContext, allNodes, readyNodes, draSnapshot)
allNodes, readyNodes = taints.FilterOutNodesWithStartupTaints(a.taintConfig, allNodes, readyNodes)
return allNodes, readyNodes, nil
}
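
The hunks above amount to an ordering change: the DRA snapshot is now taken at the top of RunOnce, before obtainNodeLists, so that FilterOutNodesWithUnreadyResources can consult DRA state; a nil snapshot (DRA disabled, or no provider) leaves filtering untouched. A minimal standalone sketch of that contract, using hypothetical stand-in types rather than the autoscaler's own:

package main

import "fmt"

// draSnapshot is a stand-in for drasnapshot.Snapshot (hypothetical type).
type draSnapshot struct{ slicesByNode map[string]int }

// obtainNodeLists mirrors the new signature: the snapshot is an input, and a
// nil snapshot means "skip DRA readiness checks".
func obtainNodeLists(nodes []string, dra *draSnapshot) (all, ready []string) {
	for _, n := range nodes {
		all = append(all, n)
		if dra == nil || dra.slicesByNode[n] > 0 {
			ready = append(ready, n) // slices published, or checks skipped
		}
	}
	return all, ready
}

func main() {
	nodes := []string{"node-1", "node-2"}
	snap := &draSnapshot{slicesByNode: map[string]int{"node-1": 2}} // node-2 hasn't published slices yet
	all, ready := obtainNodeLists(nodes, snap)
	fmt.Println(all, ready) // [node-1 node-2] [node-1]
	_, readyNoDRA := obtainNodeLists(nodes, nil)
	fmt.Println(readyNoDRA) // [node-1 node-2]
}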

View File

@@ -181,8 +181,8 @@ func TestStaticAutoscalerDynamicResources(t *testing.T) {
req1Nic := testDeviceRequest{name: "req1Nic", count: 1, selectors: singleAttrSelector(exampleDriver, nicAttribute, nicTypeA)}
req1Global := testDeviceRequest{name: "req1Global", count: 1, selectors: singleAttrSelector(exampleDriver, globalDevAttribute, globalDevTypeA)}
sharedGpuBClaim := testResourceClaim("sharedGpuBClaim", nil, "", []testDeviceRequest{req1GpuB}, nil, nil)
sharedAllocatedGlobalClaim := testResourceClaim("sharedGlobalClaim", nil, "", []testDeviceRequest{req1Global}, []testAllocation{{request: req1Global.name, driver: exampleDriver, pool: "global-pool", device: globalDevice + "-0"}}, nil)
sharedGpuBClaim := testResourceClaim("sharedGpuBClaim", nil, "", []testDeviceRequest{req1GpuB}, nil)
sharedAllocatedGlobalClaim := testResourceClaim("sharedGlobalClaim", nil, "", []testDeviceRequest{req1Global}, []testAllocation{{request: req1Global.name, driver: exampleDriver, pool: "global-pool", device: globalDevice + "-0"}})
testCases := map[string]struct {
nodeGroups map[*testNodeGroupDef]int
@@ -250,10 +250,8 @@ func TestStaticAutoscalerDynamicResources(t *testing.T) {
expectedScaleUps: map[string]int{node1Gpu1Nic1slice.name: 3},
},
"scale-up: scale from 0 nodes in a node group": {
nodeGroups: map[*testNodeGroupDef]int{node1Gpu1Nic1slice: 0},
pods: append(
unscheduledPods(baseSmallPod, "unschedulable", 3, []testDeviceRequest{req1GpuA, req1Nic}),
),
nodeGroups: map[*testNodeGroupDef]int{node1Gpu1Nic1slice: 0},
pods: unscheduledPods(baseSmallPod, "unschedulable", 3, []testDeviceRequest{req1GpuA, req1Nic}),
expectedScaleUps: map[string]int{node1Gpu1Nic1slice.name: 3},
},
"scale-up: scale from 0 nodes in a node group, with pods on the template nodes consuming DRA resources": {
@@ -264,9 +262,7 @@ func TestStaticAutoscalerDynamicResources(t *testing.T) {
scheduledPod(baseSmallPod, "template-1", node3GpuA1slice.name+"-template", map[*testDeviceRequest][]string{&req1GpuA: {gpuDevice + "-1"}}),
},
},
pods: append(
unscheduledPods(baseSmallPod, "unschedulable", 3, []testDeviceRequest{req1GpuA}),
),
pods: unscheduledPods(baseSmallPod, "unschedulable", 3, []testDeviceRequest{req1GpuA}),
expectedScaleUps: map[string]int{node3GpuA1slice.name: 3},
},
"scale-up: scale from 0 nodes in a node group, with pods on the template nodes consuming DRA resources, including shared claims": {
@@ -278,16 +274,12 @@ func TestStaticAutoscalerDynamicResources(t *testing.T) {
scheduledPod(baseSmallPod, "template-1", node3GpuA1slice.name+"-template", map[*testDeviceRequest][]string{&req1GpuA: {gpuDevice + "-1"}}, sharedAllocatedGlobalClaim),
},
},
pods: append(
unscheduledPods(baseSmallPod, "unschedulable", 3, []testDeviceRequest{req1GpuA}, sharedAllocatedGlobalClaim),
),
pods: unscheduledPods(baseSmallPod, "unschedulable", 3, []testDeviceRequest{req1GpuA}, sharedAllocatedGlobalClaim),
expectedScaleUps: map[string]int{node3GpuA1slice.name: 3},
},
"no scale-up: pods requesting multiple different devices, but they're on different nodes": {
nodeGroups: map[*testNodeGroupDef]int{node1GpuA1slice: 1, node1Nic1slice: 1},
pods: append(
unscheduledPods(baseSmallPod, "unschedulable", 3, []testDeviceRequest{req1GpuA, req1Nic}),
),
pods: unscheduledPods(baseSmallPod, "unschedulable", 3, []testDeviceRequest{req1GpuA, req1Nic}),
},
"scale-up: pods requesting a shared, unallocated claim": {
extraResourceClaims: []*resourceapi.ResourceClaim{sharedGpuBClaim},
@@ -597,13 +589,13 @@ func resourceClaimsForPod(pod *apiv1.Pod, nodeName string, claimCount int, reque
}
}
claims = append(claims, testResourceClaim(name, pod, nodeName, claimRequests, claimAllocations, nil))
claims = append(claims, testResourceClaim(name, pod, nodeName, claimRequests, claimAllocations))
}
return claims
}
func testResourceClaim(claimName string, owningPod *apiv1.Pod, nodeName string, requests []testDeviceRequest, allocations []testAllocation, reservedFor []*apiv1.Pod) *resourceapi.ResourceClaim {
func testResourceClaim(claimName string, owningPod *apiv1.Pod, nodeName string, requests []testDeviceRequest, allocations []testAllocation) *resourceapi.ResourceClaim {
var deviceRequests []resourceapi.DeviceRequest
for _, request := range requests {
var selectors []resourceapi.DeviceSelector
@@ -673,15 +665,6 @@ func testResourceClaim(claimName string, owningPod *apiv1.Pod, nodeName string,
UID: owningPod.UID,
},
}
} else {
for _, pod := range podReservations {
podReservations = append(podReservations, resourceapi.ResourceClaimConsumerReference{
APIGroup: "",
Resource: "pods",
Name: pod.Name,
UID: pod.UID,
})
}
}
claim.Status = resourceapi.ResourceClaimStatus{
Allocation: &resourceapi.AllocationResult{

View File

@@ -41,20 +41,20 @@ require (
google.golang.org/protobuf v1.36.5
gopkg.in/gcfg.v1 v1.2.3
gopkg.in/yaml.v2 v2.4.0
k8s.io/api v0.33.1
k8s.io/apimachinery v0.33.1
k8s.io/apiserver v0.33.0-beta.0
k8s.io/api v0.34.0-alpha.0
k8s.io/apimachinery v0.34.0-alpha.0
k8s.io/apiserver v0.34.0-alpha.0
k8s.io/autoscaler/cluster-autoscaler/apis v0.0.0-20240627115740-d52e4b9665d7
k8s.io/client-go v0.33.0-beta.0
k8s.io/client-go v0.34.0-alpha.0
k8s.io/cloud-provider v0.30.1
k8s.io/cloud-provider-aws v1.27.0
k8s.io/cloud-provider-gcp/providers v0.28.2
k8s.io/component-base v0.33.0-beta.0
k8s.io/component-helpers v0.33.0-beta.0
k8s.io/component-base v0.34.0-alpha.0
k8s.io/component-helpers v0.34.0-alpha.0
k8s.io/dynamic-resource-allocation v0.0.0
k8s.io/klog/v2 v2.130.1
k8s.io/kubelet v0.33.0-beta.0
k8s.io/kubernetes v1.33.0-beta.0
k8s.io/kubelet v0.34.0-alpha.0
k8s.io/kubernetes v1.34.0-alpha.0
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
sigs.k8s.io/cloud-provider-azure v1.29.4
sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.13
@@ -122,7 +122,7 @@ require (
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/godbus/dbus/v5 v5.1.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.5.1 // indirect
github.com/golang-jwt/jwt/v4 v4.5.2 // indirect
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
@@ -134,7 +134,7 @@ require (
github.com/google/s2a-go v0.1.7 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
github.com/googleapis/gax-go/v2 v2.12.0 // indirect
github.com/gorilla/websocket v1.5.3 // indirect
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
@@ -171,9 +171,9 @@ require (
github.com/stoewer/go-strcase v1.3.0 // indirect
github.com/stretchr/objx v0.5.2 // indirect
github.com/x448/float16 v0.8.4 // indirect
go.etcd.io/etcd/api/v3 v3.5.16 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.16 // indirect
go.etcd.io/etcd/client/v3 v3.5.16 // indirect
go.etcd.io/etcd/api/v3 v3.5.21 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.21 // indirect
go.etcd.io/etcd/client/v3 v3.5.21 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful v0.42.0 // indirect
@@ -203,14 +203,14 @@ require (
gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apiextensions-apiserver v0.0.0 // indirect
k8s.io/code-generator v0.33.0-beta.0 // indirect
k8s.io/controller-manager v0.33.0-beta.0 // indirect
k8s.io/cri-api v0.33.0-beta.0 // indirect
k8s.io/code-generator v0.34.0-alpha.0 // indirect
k8s.io/controller-manager v0.34.0-alpha.0 // indirect
k8s.io/cri-api v0.34.0-alpha.0 // indirect
k8s.io/cri-client v0.0.0 // indirect
k8s.io/csi-translation-lib v0.27.0 // indirect
k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7 // indirect
k8s.io/kms v0.33.0-beta.0 // indirect
k8s.io/kube-openapi v0.0.0-20250304201544-e5f78fe3ede9 // indirect
k8s.io/kms v0.34.0-alpha.0 // indirect
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
k8s.io/kube-scheduler v0.0.0 // indirect
k8s.io/kubectl v0.28.0 // indirect
k8s.io/mount-utils v0.26.0-alpha.0 // indirect
@@ -227,66 +227,66 @@ replace github.com/digitalocean/godo => github.com/digitalocean/godo v1.27.0
replace github.com/rancher/go-rancher => github.com/rancher/go-rancher v0.1.0
replace k8s.io/api => k8s.io/api v0.33.1
replace k8s.io/api => k8s.io/api v0.34.0-alpha.0
replace k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.33.0-beta.0
replace k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.34.0-alpha.0
replace k8s.io/apimachinery => k8s.io/apimachinery v0.33.0-beta.0
replace k8s.io/apimachinery => k8s.io/apimachinery v0.34.0-alpha.0
replace k8s.io/apiserver => k8s.io/apiserver v0.33.0-beta.0
replace k8s.io/apiserver => k8s.io/apiserver v0.34.0-alpha.0
replace k8s.io/cli-runtime => k8s.io/cli-runtime v0.33.0-beta.0
replace k8s.io/cli-runtime => k8s.io/cli-runtime v0.34.0-alpha.0
replace k8s.io/client-go => k8s.io/client-go v0.33.0-beta.0
replace k8s.io/client-go => k8s.io/client-go v0.34.0-alpha.0
replace k8s.io/cloud-provider => k8s.io/cloud-provider v0.33.0-beta.0
replace k8s.io/cloud-provider => k8s.io/cloud-provider v0.34.0-alpha.0
replace k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.33.0-beta.0
replace k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.34.0-alpha.0
replace k8s.io/code-generator => k8s.io/code-generator v0.33.0-beta.0
replace k8s.io/code-generator => k8s.io/code-generator v0.34.0-alpha.0
replace k8s.io/component-base => k8s.io/component-base v0.33.0-beta.0
replace k8s.io/component-base => k8s.io/component-base v0.34.0-alpha.0
replace k8s.io/component-helpers => k8s.io/component-helpers v0.33.0-beta.0
replace k8s.io/component-helpers => k8s.io/component-helpers v0.34.0-alpha.0
replace k8s.io/controller-manager => k8s.io/controller-manager v0.33.0-beta.0
replace k8s.io/controller-manager => k8s.io/controller-manager v0.34.0-alpha.0
replace k8s.io/cri-api => k8s.io/cri-api v0.33.0-beta.0
replace k8s.io/cri-api => k8s.io/cri-api v0.34.0-alpha.0
replace k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.33.0-beta.0
replace k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.34.0-alpha.0
replace k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.33.0-beta.0
replace k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.34.0-alpha.0
replace k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.33.0-beta.0
replace k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.34.0-alpha.0
replace k8s.io/kube-proxy => k8s.io/kube-proxy v0.33.0-beta.0
replace k8s.io/kube-proxy => k8s.io/kube-proxy v0.34.0-alpha.0
replace k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.33.0-beta.0
replace k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.34.0-alpha.0
replace k8s.io/kubectl => k8s.io/kubectl v0.33.0-beta.0
replace k8s.io/kubectl => k8s.io/kubectl v0.34.0-alpha.0
replace k8s.io/kubelet => k8s.io/kubelet v0.33.0-beta.0
replace k8s.io/kubelet => k8s.io/kubelet v0.34.0-alpha.0
replace k8s.io/metrics => k8s.io/metrics v0.33.0-beta.0
replace k8s.io/metrics => k8s.io/metrics v0.34.0-alpha.0
replace k8s.io/mount-utils => k8s.io/mount-utils v0.33.0-beta.0
replace k8s.io/mount-utils => k8s.io/mount-utils v0.34.0-alpha.0
replace k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.33.0-beta.0
replace k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.34.0-alpha.0
replace k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.33.0-beta.0
replace k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.34.0-alpha.0
replace k8s.io/sample-controller => k8s.io/sample-controller v0.33.0-beta.0
replace k8s.io/sample-controller => k8s.io/sample-controller v0.34.0-alpha.0
replace k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.33.0-beta.0
replace k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.34.0-alpha.0
replace k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.33.0-beta.0
replace k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.34.0-alpha.0
replace k8s.io/kms => k8s.io/kms v0.33.0-beta.0
replace k8s.io/kms => k8s.io/kms v0.34.0-alpha.0
replace k8s.io/endpointslice => k8s.io/endpointslice v0.33.0-beta.0
replace k8s.io/endpointslice => k8s.io/endpointslice v0.34.0-alpha.0
replace k8s.io/autoscaler/cluster-autoscaler/apis => ./apis
replace k8s.io/cri-client => k8s.io/cri-client v0.33.0-beta.0
replace k8s.io/cri-client => k8s.io/cri-client v0.34.0-alpha.0
replace k8s.io/externaljwt => k8s.io/externaljwt v0.33.0-beta.0
replace k8s.io/externaljwt => k8s.io/externaljwt v0.34.0-alpha.0

View File

@@ -191,8 +191,8 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69
github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo=
github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI=
github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
@@ -244,8 +244,8 @@ github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfF
github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas=
github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
@@ -384,20 +384,20 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0=
go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I=
go.etcd.io/etcd/api/v3 v3.5.16 h1:WvmyJVbjWqK4R1E+B12RRHz3bRGy9XVfh++MgbN+6n0=
go.etcd.io/etcd/api/v3 v3.5.16/go.mod h1:1P4SlIP/VwkDmGo3OlOD7faPeP8KDIFhqvciH5EfN28=
go.etcd.io/etcd/client/pkg/v3 v3.5.16 h1:ZgY48uH6UvB+/7R9Yf4x574uCO3jIx0TRDyetSfId3Q=
go.etcd.io/etcd/client/pkg/v3 v3.5.16/go.mod h1:V8acl8pcEK0Y2g19YlOV9m9ssUe6MgiDSobSoaBAM0E=
go.etcd.io/etcd/client/v2 v2.305.16 h1:kQrn9o5czVNaukf2A2At43cE9ZtWauOtf9vRZuiKXow=
go.etcd.io/etcd/client/v2 v2.305.16/go.mod h1:h9YxWCzcdvZENbfzBTFCnoNumr2ax3F19sKMqHFmXHE=
go.etcd.io/etcd/client/v3 v3.5.16 h1:sSmVYOAHeC9doqi0gv7v86oY/BTld0SEFGaxsU9eRhE=
go.etcd.io/etcd/client/v3 v3.5.16/go.mod h1:X+rExSGkyqxvu276cr2OwPLBaeqFu1cIl4vmRjAD/50=
go.etcd.io/etcd/pkg/v3 v3.5.16 h1:cnavs5WSPWeK4TYwPYfmcr3Joz9BH+TZ6qoUtz6/+mc=
go.etcd.io/etcd/pkg/v3 v3.5.16/go.mod h1:+lutCZHG5MBBFI/U4eYT5yL7sJfnexsoM20Y0t2uNuY=
go.etcd.io/etcd/raft/v3 v3.5.16 h1:zBXA3ZUpYs1AwiLGPafYAKKl/CORn/uaxYDwlNwndAk=
go.etcd.io/etcd/raft/v3 v3.5.16/go.mod h1:P4UP14AxofMJ/54boWilabqqWoW9eLodl6I5GdGzazI=
go.etcd.io/etcd/server/v3 v3.5.16 h1:d0/SAdJ3vVsZvF8IFVb1k8zqMZ+heGcNfft71ul9GWE=
go.etcd.io/etcd/server/v3 v3.5.16/go.mod h1:ynhyZZpdDp1Gq49jkUg5mfkDWZwXnn3eIqCqtJnrD/s=
go.etcd.io/etcd/api/v3 v3.5.21 h1:A6O2/JDb3tvHhiIz3xf9nJ7REHvtEFJJ3veW3FbCnS8=
go.etcd.io/etcd/api/v3 v3.5.21/go.mod h1:c3aH5wcvXv/9dqIw2Y810LDXJfhSYdHQ0vxmP3CCHVY=
go.etcd.io/etcd/client/pkg/v3 v3.5.21 h1:lPBu71Y7osQmzlflM9OfeIV2JlmpBjqBNlLtcoBqUTc=
go.etcd.io/etcd/client/pkg/v3 v3.5.21/go.mod h1:BgqT/IXPjK9NkeSDjbzwsHySX3yIle2+ndz28nVsjUs=
go.etcd.io/etcd/client/v2 v2.305.21 h1:eLiFfexc2mE+pTLz9WwnoEsX5JTTpLCYVivKkmVXIRA=
go.etcd.io/etcd/client/v2 v2.305.21/go.mod h1:OKkn4hlYNf43hpjEM3Ke3aRdUkhSl8xjKjSf8eCq2J8=
go.etcd.io/etcd/client/v3 v3.5.21 h1:T6b1Ow6fNjOLOtM0xSoKNQt1ASPCLWrF9XMHcH9pEyY=
go.etcd.io/etcd/client/v3 v3.5.21/go.mod h1:mFYy67IOqmbRf/kRUvsHixzo3iG+1OF2W2+jVIQRAnU=
go.etcd.io/etcd/pkg/v3 v3.5.21 h1:jUItxeKyrDuVuWhdh0HtjUANwyuzcb7/FAeUfABmQsk=
go.etcd.io/etcd/pkg/v3 v3.5.21/go.mod h1:wpZx8Egv1g4y+N7JAsqi2zoUiBIUWznLjqJbylDjWgU=
go.etcd.io/etcd/raft/v3 v3.5.21 h1:dOmE0mT55dIUsX77TKBLq+RgyumsQuYeiRQnW/ylugk=
go.etcd.io/etcd/raft/v3 v3.5.21/go.mod h1:fmcuY5R2SNkklU4+fKVBQi2biVp5vafMrWUEj4TJ4Cs=
go.etcd.io/etcd/server/v3 v3.5.21 h1:9w0/k12majtgarGmlMVuhwXRI2ob3/d1Ik3X5TKo0yU=
go.etcd.io/etcd/server/v3 v3.5.21/go.mod h1:G1mOzdwuzKT1VRL7SqRchli/qcFrtLBTAQ4lV20sXXo=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
@@ -597,56 +597,56 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
k8s.io/api v0.33.1 h1:tA6Cf3bHnLIrUK4IqEgb2v++/GYUtqiu9sRVk3iBXyw=
k8s.io/api v0.33.1/go.mod h1:87esjTn9DRSRTD4fWMXamiXxJhpOIREjWOSjsW1kEHw=
k8s.io/apiextensions-apiserver v0.33.0-beta.0 h1:3oqBvfd26IOekt96KEfE8A0wA/k1wDSBfTPirkRun1Q=
k8s.io/apiextensions-apiserver v0.33.0-beta.0/go.mod h1:TKTeoFcmGvtiDNV+wj8wJfZhamZNOhvi9yOIE2d1iWs=
k8s.io/apimachinery v0.33.0-beta.0 h1:vLDBChfQwyimk6AbuT7OZOIqxSg/44JlXuxqBk85j68=
k8s.io/apimachinery v0.33.0-beta.0/go.mod h1:S2OIkExGqJOXYSYcAJwQ9zWcc6BkBUdTJUu4M7z0cvo=
k8s.io/apiserver v0.33.0-beta.0 h1:EGjNQ4ocOGEq/KaYFuBS6MiUxZL9WmySu+QpMz+sBrk=
k8s.io/apiserver v0.33.0-beta.0/go.mod h1:6gxw8BX1YZxi2NtOsFIoURP9bVRkP3sNqle0KVXz1cA=
k8s.io/client-go v0.33.0-beta.0 h1:xRGKK5hU39pb6CFDCDOOlG+LEenB93/RK9hoP4eyAsU=
k8s.io/client-go v0.33.0-beta.0/go.mod h1:RF6hSu+FncpgHQs1zA1UfGbMq8gxay89r37bCQe+Mj4=
k8s.io/cloud-provider v0.33.0-beta.0 h1:SIeUrmbGz8dZZ0B0zIFPpEpV+5oGqVAFDhJ7YiFxZTU=
k8s.io/cloud-provider v0.33.0-beta.0/go.mod h1:n5dF1uxbcax4W1WSMMUoP/S7Hjs6W1R5BdCjW0skZM8=
k8s.io/api v0.34.0-alpha.0 h1:plVaaO0yCTOGvWjEiEvvecQOPpf/IYdLnVMsfGfGMQo=
k8s.io/api v0.34.0-alpha.0/go.mod h1:brriDRpq4yMP4PN4P48NfXVLwWSwaIFSe0+pOajiwjQ=
k8s.io/apiextensions-apiserver v0.34.0-alpha.0 h1:2rk3mhFhb3X7LqNThP2V82UAp2SvLv7UHAn4LcPDPD0=
k8s.io/apiextensions-apiserver v0.34.0-alpha.0/go.mod h1:aRq9qS8T7BKsEZKuwNWitDeO+iKpe04vYK2xHU4NNQ0=
k8s.io/apimachinery v0.34.0-alpha.0 h1:arymqm+uCpPEAVWBCvNF+yq01AJzsoUeUd2DYpoHuzc=
k8s.io/apimachinery v0.34.0-alpha.0/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
k8s.io/apiserver v0.34.0-alpha.0 h1:/di3tEpcgVmRQ+8eBHogwdBR5vehH+hRnMH32ndEtOY=
k8s.io/apiserver v0.34.0-alpha.0/go.mod h1:Cc7sDNMFnR5BghLjQpJmUsq4aUNp7iWCZ55RrKW04Y8=
k8s.io/client-go v0.34.0-alpha.0 h1:+hfihZ7vffuzoS4BoYg2nWs+9Bc1hXpZ7+iev2ISCo0=
k8s.io/client-go v0.34.0-alpha.0/go.mod h1:0sClwbFRpXuYhqaJEqLiy+e9dlC7FOhFHc9ZdvLDAbU=
k8s.io/cloud-provider v0.34.0-alpha.0 h1:+iUGvbyWM4CHiKbmdkt/iyVFtDHjF1rOsyQft5UlFac=
k8s.io/cloud-provider v0.34.0-alpha.0/go.mod h1:VJFyCyldpNmF2xzBJE+821KExRBcuNBRiiHD8WqOYLI=
k8s.io/cloud-provider-aws v1.27.0 h1:PF8YrH8QcN6JoXB3Xxlaz84SBDYMPunJuCc0cPuCWXA=
k8s.io/cloud-provider-aws v1.27.0/go.mod h1:9vUb5mnVnReSRDBWcBxB1b0HOeEc472iOPmrnwpN9SA=
k8s.io/cloud-provider-gcp/providers v0.28.2 h1:I65pFTLNMQSj7YuW3Mg3pZIXmw0naCmF6TGAuz4/sZE=
k8s.io/cloud-provider-gcp/providers v0.28.2/go.mod h1:P8dxRvvLtX7xUwVUzA/QOqv8taCzBaVsVMnjnpjmYXE=
k8s.io/code-generator v0.33.0-beta.0 h1:QYiWYFUT9G7lnF1ucDYr/sZUaG/kptrooX2PJxEL+Go=
k8s.io/code-generator v0.33.0-beta.0/go.mod h1:RBvFpvqtyQygCBjMayNyYqdzy+89LdzqAx0Th+dgmzQ=
k8s.io/component-base v0.33.0-beta.0 h1:EEEzTLuzO1Li+YNHcDLQJgxX6AhfxAZqusYRGbIHfhg=
k8s.io/component-base v0.33.0-beta.0/go.mod h1:J9MYu3hIiNSNAhjiax9ktqplTpXPLP2RLXhzfJj1ahY=
k8s.io/component-helpers v0.33.0-beta.0 h1:K0ehdYOLgvS0o7pNJ1fxn1IvDT7Vnnc5IjQde9FioOk=
k8s.io/component-helpers v0.33.0-beta.0/go.mod h1:YiRLGS1YwfPKgRty5KPVgJdNgROn8btJ2KmfiDiIbxw=
k8s.io/controller-manager v0.33.0-beta.0 h1:iDBYPfvB1U5Z+E2baBwcU1ZBEPIMQ1Vna5ZODiuMtQU=
k8s.io/controller-manager v0.33.0-beta.0/go.mod h1:9YW7KwP/UENsnNGaD9+AoW+A4qorwzOj2HjO3+C1L0s=
k8s.io/cri-api v0.33.0-beta.0 h1:kUm8dbrFJhq433uCHbPUcPigcMhr+pJSQLssDQQ6qvU=
k8s.io/cri-api v0.33.0-beta.0/go.mod h1:AWeYLzfWgDAsuMDuL4Cdv4QN6w8I38Skhl7VL5Kt88Y=
k8s.io/cri-client v0.33.0-beta.0 h1:qXUQfgZjVOJJZi5da3sSLpSrzRhr0Fl3D9nUZNYjxRk=
k8s.io/cri-client v0.33.0-beta.0/go.mod h1:9+kkuFbEClFEziyj3+Ia4Tt3cGQpdL1QMAn5Jxp7i6Q=
k8s.io/csi-translation-lib v0.33.0-beta.0 h1:JMdubqoa2pBK7aT/7V7rbHEJZPjOWiy0rsl2EiT3Pi0=
k8s.io/csi-translation-lib v0.33.0-beta.0/go.mod h1:WAdrPAfrlpX+JzvZp03eBu0k+vxc+KztWOmQ83bXdC0=
k8s.io/dynamic-resource-allocation v0.33.0-beta.0 h1:RS+awbea4tBwdPSJCy+eOrBU7CU243uaQ78MYjIjPbU=
k8s.io/dynamic-resource-allocation v0.33.0-beta.0/go.mod h1:TMF5neWMSImYXSKmtGcBxi8+3r0r6zTKmWDq09uu354=
k8s.io/code-generator v0.34.0-alpha.0 h1:aM4APBz/eAR8Qw4RWiCpfocZ2O2UUTi0UqTfvalouHc=
k8s.io/code-generator v0.34.0-alpha.0/go.mod h1:lwzb0eIHnmHnkhcHbxXf87XR512Xm7mF2RHtDKEW71c=
k8s.io/component-base v0.34.0-alpha.0 h1:K/EyE1SX52rDrb+cpRn4MYh2pIJNzxMVqiiJbss5gFo=
k8s.io/component-base v0.34.0-alpha.0/go.mod h1:AwuvLTXn5h+ijia1CUBszmsbDNkOkipcwqz0IjGwUuU=
k8s.io/component-helpers v0.34.0-alpha.0 h1:/XBER9s8XN1dZdMjArYj+WvwKLy1U8pKBA5YLYyGC8Q=
k8s.io/component-helpers v0.34.0-alpha.0/go.mod h1:7v3yLCKYXbXWZV2Zx19k3WzKgmmjJaHJKyUiUuWr3vg=
k8s.io/controller-manager v0.34.0-alpha.0 h1:vdxEOA97ADUjIIXklwRAK/eWVJYLaqoCVDOXAEIo7YE=
k8s.io/controller-manager v0.34.0-alpha.0/go.mod h1:N+4fMmhvvwStvBV0cRA4fDWO41dRnFx7WKtnvZz6PpM=
k8s.io/cri-api v0.34.0-alpha.0 h1:s0rfuGqBJObds8cWrq5uNPWcUrC707NQv+JPb9x61Es=
k8s.io/cri-api v0.34.0-alpha.0/go.mod h1:OLQvT45OpIA+tv91ZrpuFIGY+Y2Ho23poS7n115Aocs=
k8s.io/cri-client v0.34.0-alpha.0 h1:rYxCLNZlecNrrUp++MGUSZa9vxacVcgYBHDlzEWjG9s=
k8s.io/cri-client v0.34.0-alpha.0/go.mod h1:Kd0X9qvWgiH1t4R43Br69c/GnFinKKDeah8dKU1NJAk=
k8s.io/csi-translation-lib v0.34.0-alpha.0 h1:IY8WTHF4tShtdq4Bhhz9MK55YETZediGLGjeqFNw4p4=
k8s.io/csi-translation-lib v0.34.0-alpha.0/go.mod h1:K8LDx63jcQWhWpgsl9LXzz3epM08y+BqB2hIC7vjLrs=
k8s.io/dynamic-resource-allocation v0.34.0-alpha.0 h1:xsDXrWpd++6RZM9Hv6dN7OWHWfbEXyI8kx9u0R/tzeI=
k8s.io/dynamic-resource-allocation v0.34.0-alpha.0/go.mod h1:3XaIVAbt0QApbx+AZRSvnJ5pN7N/ipXMo1KY1eCU86M=
k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7 h1:2OX19X59HxDprNCVrWi6jb7LW1PoqTlYqEq5H2oetog=
k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kms v0.33.0-beta.0 h1:vuDqh9eIXJ8GdAekbWBTJ1zbGpZmn5455QE9W+Ynl1c=
k8s.io/kms v0.33.0-beta.0/go.mod h1:Y4hMZ7VWEUZIR0X6fX4HcoOaIFL7k/1sJUJp1kVu8ig=
k8s.io/kube-openapi v0.0.0-20250304201544-e5f78fe3ede9 h1:t0huyHnz6HsokckRxAF1bY0cqPFwzINKCL7yltEjZQc=
k8s.io/kube-openapi v0.0.0-20250304201544-e5f78fe3ede9/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
k8s.io/kube-scheduler v0.33.0-beta.0 h1:h79DT1YNh7K8LGhsmZpRCPIuC086APmoKnFHvCEXOUo=
k8s.io/kube-scheduler v0.33.0-beta.0/go.mod h1:X1icvrNj/YJL5nG2p4nvgntJfz9++bRircE/dTHmofc=
k8s.io/kubectl v0.33.0-beta.0 h1:iUj3GRRJoNYDs+33Ty6OO+dk/oMui8g3fgyenx2H0as=
k8s.io/kubectl v0.33.0-beta.0/go.mod h1:nq1RKFPc1wBQKbqV4hKRfCCt+ND46a0Q1moiCm3+EE4=
k8s.io/kubelet v0.33.0-beta.0 h1:Lvv7unOCBbq2Pat3nhos7G7IO8FAxnV9ipu72t8wDHU=
k8s.io/kubelet v0.33.0-beta.0/go.mod h1:ZEs0MMzwF9iz3h+jYtRPRWzVWCDPyV2yBBzWgRB+gQw=
k8s.io/kubernetes v1.33.0-beta.0 h1:h27iikxaUwfS4Hx+owBk1XMkBGKWdW3HKNguibK+kjM=
k8s.io/kubernetes v1.33.0-beta.0/go.mod h1:/Ko9OUJBoF0BzbR/kPMr88qES8PeZ5Uw6H0yyKc/U+Y=
k8s.io/mount-utils v0.33.0-beta.0 h1:WfUvSZ+UX1jC1LAo2S53fa8PsnD+awl5owAOatxiX8s=
k8s.io/mount-utils v0.33.0-beta.0/go.mod h1:eYstRv7n35VQLbGB31hX5DepuowfDaOM/FulA1GTn9A=
k8s.io/kms v0.34.0-alpha.0 h1:URGYBkKIJW9+RzS3ayTKbELow8pfDrCxwnLzW8Nfsqk=
k8s.io/kms v0.34.0-alpha.0/go.mod h1:C1I8mjFFBNzfUZXYt9FZVJ8MJl7ynFbGgZFbBzkBJ3E=
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
k8s.io/kube-scheduler v0.34.0-alpha.0 h1:KNKwFwyQWKj4CvIQiXyNhZThBYfQFOkn4XGZme1e4M8=
k8s.io/kube-scheduler v0.34.0-alpha.0/go.mod h1:8QwR6p8Gn64s5q24o4aDylieHJVkSdi7o+lrtsdBM3U=
k8s.io/kubectl v0.34.0-alpha.0 h1:j9e39A0PtVRTtkl5CvAmpowKD/ZT3dL6KgF/DNBHQ14=
k8s.io/kubectl v0.34.0-alpha.0/go.mod h1:Gte7DASB26vNZKWwipr8DDCxscefflco+uaW16/TLZ8=
k8s.io/kubelet v0.34.0-alpha.0 h1:N0VWnJB3XRLpdRy3FX4/CUD/nODpjyI/7ab0HvrK1Pc=
k8s.io/kubelet v0.34.0-alpha.0/go.mod h1:51ZqBsNuEIg/L675e4ddKY+gXxibR1e6jBAvwEmQW4M=
k8s.io/kubernetes v1.34.0-alpha.0 h1:CQgvuSXe2bBsVVnANySuiwHe/nW7orvxrLhMTUHWji0=
k8s.io/kubernetes v1.34.0-alpha.0/go.mod h1:0n2XbxETvcqjlkOAxsWMdi82xaVVrbv9iDm1IB4EkW4=
k8s.io/mount-utils v0.34.0-alpha.0 h1:iVBW1y5GZHFWyN811CBj1QOIUSE3SkvFkYWgnBRpLeo=
k8s.io/mount-utils v0.34.0-alpha.0/go.mod h1:1JR4rKymg8B8bCPo618hpSAdrpO6XLh0Acqok/xVwPE=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM=

View File

@@ -118,12 +118,6 @@ func buildAutoscaler(context ctx.Context, debuggingSnapshotter debuggingsnapshot
drainabilityRules := rules.Default(deleteOptions)
var snapshotStore clustersnapshot.ClusterSnapshotStore = store.NewDeltaSnapshotStore(autoscalingOptions.ClusterSnapshotParallelism)
if autoscalingOptions.DynamicResourceAllocationEnabled {
// TODO(DRA): Remove this once DeltaSnapshotStore is integrated with DRA.
klog.Warningf("Using BasicSnapshotStore instead of DeltaSnapshotStore because DRA is enabled. Autoscaling performance/scalability might be decreased.")
snapshotStore = store.NewBasicSnapshotStore()
}
opts := core.AutoscalerOptions{
AutoscalingOptions: autoscalingOptions,
FrameworkHandle: fwHandle,

View File

@@ -20,6 +20,7 @@ import (
apiv1 "k8s.io/api/core/v1"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
"k8s.io/autoscaler/cluster-autoscaler/context"
drasnapshot "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot"
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
)
@@ -35,14 +36,9 @@ type CustomResourceTarget struct {
type CustomResourcesProcessor interface {
// FilterOutNodesWithUnreadyResources removes nodes that should have a custom resource but don't yet
// report it as allocatable: such nodes are dropped from the ready nodes list and marked unready in the all nodes list.
FilterOutNodesWithUnreadyResources(context *context.AutoscalingContext, allNodes, readyNodes []*apiv1.Node) ([]*apiv1.Node, []*apiv1.Node)
FilterOutNodesWithUnreadyResources(context *context.AutoscalingContext, allNodes, readyNodes []*apiv1.Node, draSnapshot *drasnapshot.Snapshot) ([]*apiv1.Node, []*apiv1.Node)
// GetNodeResourceTargets returns mapping of resource names to their targets.
GetNodeResourceTargets(context *context.AutoscalingContext, node *apiv1.Node, nodeGroup cloudprovider.NodeGroup) ([]CustomResourceTarget, errors.AutoscalerError)
// CleanUp cleans up processor's internal structures.
CleanUp()
}
// NewDefaultCustomResourcesProcessor returns a default instance of CustomResourcesProcessor.
func NewDefaultCustomResourcesProcessor() CustomResourcesProcessor {
return &GpuCustomResourcesProcessor{}
}
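
For anyone maintaining an out-of-tree CustomResourcesProcessor, this interface change means every implementation must now accept the DRA snapshot parameter even if it ignores it. A minimal conforming sketch against the import paths shown in this diff (the type itself is hypothetical, not part of this PR):

package customresources

import (
	apiv1 "k8s.io/api/core/v1"
	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
	"k8s.io/autoscaler/cluster-autoscaler/context"
	drasnapshot "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot"
	"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
)

// noopCustomResourcesProcessor satisfies the updated interface while ignoring
// the DRA snapshot entirely (hypothetical example).
type noopCustomResourcesProcessor struct{}

func (p *noopCustomResourcesProcessor) FilterOutNodesWithUnreadyResources(_ *context.AutoscalingContext, allNodes, readyNodes []*apiv1.Node, _ *drasnapshot.Snapshot) ([]*apiv1.Node, []*apiv1.Node) {
	return allNodes, readyNodes // nothing filtered
}

func (p *noopCustomResourcesProcessor) GetNodeResourceTargets(_ *context.AutoscalingContext, _ *apiv1.Node, _ cloudprovider.NodeGroup) ([]CustomResourceTarget, errors.AutoscalerError) {
	return nil, nil // no custom resource targets
}

func (p *noopCustomResourcesProcessor) CleanUp() {}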

View File

@@ -0,0 +1,70 @@
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package customresources
import (
apiv1 "k8s.io/api/core/v1"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
"k8s.io/autoscaler/cluster-autoscaler/context"
drasnapshot "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot"
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
)
// DefaultCustomResourcesProcessor handles multiple custom resource processors and
// executes them in order.
type DefaultCustomResourcesProcessor struct {
customResourcesProcessors []CustomResourcesProcessor
}
// NewDefaultCustomResourcesProcessor returns an instance of DefaultCustomResourcesProcessor.
func NewDefaultCustomResourcesProcessor(draEnabled bool) CustomResourcesProcessor {
customProcessors := []CustomResourcesProcessor{&GpuCustomResourcesProcessor{}}
if draEnabled {
customProcessors = append(customProcessors, &DraCustomResourcesProcessor{})
}
return &DefaultCustomResourcesProcessor{customProcessors}
}
// FilterOutNodesWithUnreadyResources calls the corresponding method for internal custom resources processors in order.
func (p *DefaultCustomResourcesProcessor) FilterOutNodesWithUnreadyResources(context *context.AutoscalingContext, allNodes, readyNodes []*apiv1.Node, draSnapshot *drasnapshot.Snapshot) ([]*apiv1.Node, []*apiv1.Node) {
newAllNodes := allNodes
newReadyNodes := readyNodes
for _, processor := range p.customResourcesProcessors {
newAllNodes, newReadyNodes = processor.FilterOutNodesWithUnreadyResources(context, newAllNodes, newReadyNodes, draSnapshot)
}
return newAllNodes, newReadyNodes
}
// GetNodeResourceTargets calls the corresponding method for internal custom resources processors in order.
func (p *DefaultCustomResourcesProcessor) GetNodeResourceTargets(context *context.AutoscalingContext, node *apiv1.Node, nodeGroup cloudprovider.NodeGroup) ([]CustomResourceTarget, errors.AutoscalerError) {
customResourcesTargets := []CustomResourceTarget{}
for _, processor := range p.customResourcesProcessors {
targets, err := processor.GetNodeResourceTargets(context, node, nodeGroup)
if err != nil {
return nil, err
}
customResourcesTargets = append(customResourcesTargets, targets...)
}
return customResourcesTargets, nil
}
// CleanUp cleans up all internal custom resources processors.
func (p *DefaultCustomResourcesProcessor) CleanUp() {
for _, processor := range p.customResourcesProcessors {
processor.CleanUp()
}
}
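
The constructor is the single decision point: NewDefaultCustomResourcesProcessor always installs the GPU processor and appends the DRA processor only when draEnabled is true, so GPU readiness filtering always runs before the DRA check. A wiring sketch, assuming the package lives at k8s.io/autoscaler/cluster-autoscaler/processors/customresources:

package main

import (
	"fmt"

	"k8s.io/autoscaler/cluster-autoscaler/processors/customresources"
)

func main() {
	// draEnabled would normally come from
	// AutoscalingOptions.DynamicResourceAllocationEnabled.
	proc := customresources.NewDefaultCustomResourcesProcessor(true)
	defer proc.CleanUp()

	// proc fans FilterOutNodesWithUnreadyResources and GetNodeResourceTargets
	// out to the GPU processor first, then the DRA processor.
	fmt.Printf("%T\n", proc)
}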

View File

@@ -0,0 +1,198 @@
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package customresources
import (
"strings"
"testing"
"github.com/stretchr/testify/assert"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
drasnapshot "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot"
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
apiv1 "k8s.io/api/core/v1"
"k8s.io/autoscaler/cluster-autoscaler/context"
utils "k8s.io/autoscaler/cluster-autoscaler/utils/test"
)
func TestDefaultProcessorFilterOut(t *testing.T) {
processor := DefaultCustomResourcesProcessor{[]CustomResourcesProcessor{
&mockCustomResourcesProcessor{nodeMark: "p1"},
&mockCustomResourcesProcessor{nodeMark: "p2"},
&mockCustomResourcesProcessor{nodeMark: "p3"},
}}
testCases := map[string]struct {
allNodes []*apiv1.Node
nodesInitialReadiness map[string]bool
expectedReadyNodes map[string]bool
}{
"filtering one node by one processor": {
allNodes: []*apiv1.Node{
utils.BuildTestNode("p1_node_1", 500, 100),
utils.BuildTestNode("node_2", 500, 100),
},
nodesInitialReadiness: map[string]bool{
"p1_node_1": true,
"node_2": true,
},
expectedReadyNodes: map[string]bool{
"node_2": true,
},
},
"filtering multiple nodes by one processor": {
allNodes: []*apiv1.Node{
utils.BuildTestNode("p1_node_1", 500, 100),
utils.BuildTestNode("p1_node_2", 500, 100),
utils.BuildTestNode("node_3", 500, 100),
},
nodesInitialReadiness: map[string]bool{
"p1_node_1": true,
"p1_node_2": true,
"node_3": false,
},
expectedReadyNodes: map[string]bool{},
},
"filtering one node by multiple processors": {
allNodes: []*apiv1.Node{
utils.BuildTestNode("p1_p3_node_1", 500, 100),
utils.BuildTestNode("p1_node_2", 500, 100),
utils.BuildTestNode("node_3", 500, 100),
},
nodesInitialReadiness: map[string]bool{
"p1_node_1": true,
"p1_node_2": false,
"node_3": false,
},
expectedReadyNodes: map[string]bool{},
},
"filtering multiple nodes by multiple processor": {
allNodes: []*apiv1.Node{
utils.BuildTestNode("p1_node_1", 500, 100),
utils.BuildTestNode("p1_node_2", 500, 100),
utils.BuildTestNode("node_3", 500, 100),
utils.BuildTestNode("node_4", 500, 100),
utils.BuildTestNode("p2_node_5", 500, 100),
utils.BuildTestNode("p3_node_6", 500, 100),
},
nodesInitialReadiness: map[string]bool{
"p1_node_1": false,
"p1_node_2": true,
"node_3": false,
"node_4": true,
"p2_node_5": true,
"p3_node_6": true,
},
expectedReadyNodes: map[string]bool{
"node_4": true,
},
},
}
for tcName, tc := range testCases {
t.Run(tcName, func(t *testing.T) {
readyNodes := []*apiv1.Node{}
for _, node := range tc.allNodes {
if tc.nodesInitialReadiness[node.Name] {
readyNodes = append(readyNodes, node)
}
}
resultedAllNodes, resultedReadyNodes := processor.FilterOutNodesWithUnreadyResources(nil, tc.allNodes, readyNodes, nil)
assert.ElementsMatch(t, tc.allNodes, resultedAllNodes)
assert.True(t, len(resultedReadyNodes) == len(tc.expectedReadyNodes))
for _, node := range resultedReadyNodes {
assert.True(t, tc.expectedReadyNodes[node.Name])
}
})
}
}
func TestDefaultProcessorGetNodeResourceTargets(t *testing.T) {
processor := DefaultCustomResourcesProcessor{[]CustomResourcesProcessor{
&mockCustomResourcesProcessor{nodeMark: "p1", customResourceTargetsToAdd: []string{"p1_R1", "p1_R2"}, customResourceTargetsQuantity: 1},
&mockCustomResourcesProcessor{nodeMark: "p2", customResourceTargetsToAdd: []string{"p2_R1"}, customResourceTargetsQuantity: 2},
&mockCustomResourcesProcessor{nodeMark: "p3", customResourceTargetsToAdd: []string{"p3_R1"}, customResourceTargetsQuantity: 3},
}}
testCases := map[string]struct {
node *apiv1.Node
expectedResources []CustomResourceTarget
}{
"single processor": {
node: utils.BuildTestNode("p1", 500, 100),
expectedResources: []CustomResourceTarget{
{ResourceType: "p1_R1", ResourceCount: 1},
{ResourceType: "p1_R2", ResourceCount: 1},
},
},
"many processors": {
node: utils.BuildTestNode("p1_p3", 500, 100),
expectedResources: []CustomResourceTarget{
{ResourceType: "p1_R1", ResourceCount: 1},
{ResourceType: "p1_R2", ResourceCount: 1},
{ResourceType: "p3_R1", ResourceCount: 3},
},
},
"all processors": {
node: utils.BuildTestNode("p1_p2_p3", 500, 100),
expectedResources: []CustomResourceTarget{
{ResourceType: "p1_R1", ResourceCount: 1},
{ResourceType: "p1_R2", ResourceCount: 1},
{ResourceType: "p2_R1", ResourceCount: 2},
{ResourceType: "p3_R1", ResourceCount: 3},
},
},
}
for tcName, tc := range testCases {
t.Run(tcName, func(t *testing.T) {
customResourceTarget, _ := processor.GetNodeResourceTargets(nil, tc.node, nil)
assert.ElementsMatch(t, customResourceTarget, tc.expectedResources)
})
}
}
type mockCustomResourcesProcessor struct {
nodeMark string
customResourceTargetsToAdd []string
customResourceTargetsQuantity int64
}
func (m *mockCustomResourcesProcessor) FilterOutNodesWithUnreadyResources(_ *context.AutoscalingContext, allNodes, readyNodes []*apiv1.Node, _ *drasnapshot.Snapshot) ([]*apiv1.Node, []*apiv1.Node) {
filteredReadyNodes := []*apiv1.Node{}
for _, node := range readyNodes {
if !strings.Contains(node.Name, m.nodeMark) {
filteredReadyNodes = append(filteredReadyNodes, node)
}
}
return allNodes, filteredReadyNodes
}
func (m *mockCustomResourcesProcessor) GetNodeResourceTargets(_ *context.AutoscalingContext, node *apiv1.Node, _ cloudprovider.NodeGroup) ([]CustomResourceTarget, errors.AutoscalerError) {
result := []CustomResourceTarget{}
if strings.Contains(node.Name, m.nodeMark) {
for _, rt := range m.customResourceTargetsToAdd {
result = append(result, CustomResourceTarget{ResourceType: rt, ResourceCount: m.customResourceTargetsQuantity})
}
}
return result, nil
}
func (m *mockCustomResourcesProcessor) CleanUp() {
}

View File

@@ -0,0 +1,139 @@
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package customresources
import (
apiv1 "k8s.io/api/core/v1"
"k8s.io/api/resource/v1beta1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
"k8s.io/autoscaler/cluster-autoscaler/context"
"k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot"
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
"k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
"k8s.io/klog/v2"
)
// DraCustomResourcesProcessor handles DRA custom resources. It assumes
// that DRA resources may not become allocatable immediately after node creation.
type DraCustomResourcesProcessor struct {
}
// FilterOutNodesWithUnreadyResources removes nodes that should have DRA resources but haven't yet
// published the expected ResourceSlices: such nodes are dropped from the ready nodes list and marked unready in the all nodes list.
func (p *DraCustomResourcesProcessor) FilterOutNodesWithUnreadyResources(context *context.AutoscalingContext, allNodes, readyNodes []*apiv1.Node, draSnapshot *snapshot.Snapshot) ([]*apiv1.Node, []*apiv1.Node) {
newAllNodes := make([]*apiv1.Node, 0)
newReadyNodes := make([]*apiv1.Node, 0)
nodesWithUnreadyDraResources := make(map[string]*apiv1.Node)
if draSnapshot == nil {
klog.Warningf("Cannot filter out nodes with unready DRA resources. The DRA snapshot is nil. Processing will be skipped.")
return allNodes, readyNodes
}
for _, node := range readyNodes {
ng, err := context.CloudProvider.NodeGroupForNode(node)
if err != nil {
newReadyNodes = append(newReadyNodes, node)
klog.Warningf("Failed to get node group for node %s, Skipping DRA readiness check and keeping node in ready list. Error: %v", node.Name, err)
continue
}
if ng == nil {
newReadyNodes = append(newReadyNodes, node)
continue
}
nodeInfo, err := ng.TemplateNodeInfo()
if err != nil {
newReadyNodes = append(newReadyNodes, node)
klog.Warningf("Failed to get template node info for node group %s with error: %v", ng.Id(), err)
continue
}
nodeResourcesSlices, _ := draSnapshot.NodeResourceSlices(node.Name)
if isEqualResourceSlices(nodeResourcesSlices, nodeInfo.LocalResourceSlices) {
newReadyNodes = append(newReadyNodes, node)
} else {
nodesWithUnreadyDraResources[node.Name] = kubernetes.GetUnreadyNodeCopy(node, kubernetes.ResourceUnready)
}
}
// Override any node with unready DRA resources with its "unready" copy
for _, node := range allNodes {
if newNode, found := nodesWithUnreadyDraResources[node.Name]; found {
newAllNodes = append(newAllNodes, newNode)
} else {
newAllNodes = append(newAllNodes, node)
}
}
return newAllNodes, newReadyNodes
}
type resourceSliceSpecs struct {
driver string
pool string
}
func isEqualResourceSlices(nodeResourcesSlices []*v1beta1.ResourceSlice, templateResourcesSlices []*v1beta1.ResourceSlice) bool {
tempSlicesByPools := getDevicesBySpecs(templateResourcesSlices)
nodeSlicesByPools := getDevicesBySpecs(nodeResourcesSlices)
for templSpecs, tempDevicesSet := range tempSlicesByPools {
matched := false
for nodeSpecs, nodeDevicesSet := range nodeSlicesByPools {
if templSpecs.driver == nodeSpecs.driver && nodeDevicesSet.Equal(tempDevicesSet) {
delete(nodeSlicesByPools, nodeSpecs)
matched = true
break
}
}
if !matched {
return false
}
}
return true
}
func getDevicesBySpecs(resourcesSlices []*v1beta1.ResourceSlice) map[resourceSliceSpecs]sets.Set[string] {
slicesGroupedByPoolAndDriver := make(map[resourceSliceSpecs]sets.Set[string])
for _, rs := range resourcesSlices {
rsSpecs := resourceSliceSpecs{
pool: rs.Spec.Pool.Name,
driver: rs.Spec.Driver,
}
slicesGroupedByPoolAndDriver[rsSpecs] = getResourceSliceDevicesSet(rs)
}
return slicesGroupedByPoolAndDriver
}
func getResourceSliceDevicesSet(resourcesSlice *v1beta1.ResourceSlice) sets.Set[string] {
devices := sets.New[string]()
for _, device := range resourcesSlice.Spec.Devices {
devices.Insert(device.Name)
}
return devices
}
// GetNodeResourceTargets returns the resource targets for DRA resource slices, not implemented.
func (p *DraCustomResourcesProcessor) GetNodeResourceTargets(_ *context.AutoscalingContext, _ *apiv1.Node, _ cloudprovider.NodeGroup) ([]CustomResourceTarget, errors.AutoscalerError) {
// TODO(DRA): Figure out resource limits for DRA here.
return []CustomResourceTarget{}, nil
}
// CleanUp cleans up processor's internal structures.
func (p *DraCustomResourcesProcessor) CleanUp() {
}
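
isEqualResourceSlices intentionally never compares pool names: template slices carry pools generated for the template node while real nodes publish pools named after themselves, so a template pool counts as matched when some node pool has the same driver and an identical device-name set, and surplus node pools are tolerated (see the "more pools published including template pools" case in the tests below). A standalone restatement of that rule, with hypothetical helper names:

package main

import (
	"fmt"

	resourceapi "k8s.io/api/resource/v1beta1"
	"k8s.io/apimachinery/pkg/util/sets"
)

type sliceKey struct{ driver, pool string }

// devicesByPool groups device names by (driver, pool), like getDevicesBySpecs.
func devicesByPool(slices []*resourceapi.ResourceSlice) map[sliceKey]sets.Set[string] {
	out := map[sliceKey]sets.Set[string]{}
	for _, rs := range slices {
		s := sets.New[string]()
		for _, d := range rs.Spec.Devices {
			s.Insert(d.Name)
		}
		out[sliceKey{rs.Spec.Driver, rs.Spec.Pool.Name}] = s
	}
	return out
}

// equalSlices restates the matching rule: every template pool must find some
// node pool with the same driver and the same device-name set; pool names are
// never compared, and surplus node pools are allowed.
func equalSlices(node, template []*resourceapi.ResourceSlice) bool {
	nodePools := devicesByPool(node)
	for tk, tset := range devicesByPool(template) {
		matched := false
		for nk, nset := range nodePools {
			if tk.driver == nk.driver && nset.Equal(tset) {
				delete(nodePools, nk)
				matched = true
				break
			}
		}
		if !matched {
			return false
		}
	}
	return true
}

func makeSlice(driver, pool string, devices ...string) *resourceapi.ResourceSlice {
	rs := &resourceapi.ResourceSlice{}
	rs.Spec.Driver = driver
	rs.Spec.Pool.Name = pool
	for _, d := range devices {
		rs.Spec.Devices = append(rs.Spec.Devices, resourceapi.Device{Name: d})
	}
	return rs
}

func main() {
	template := []*resourceapi.ResourceSlice{makeSlice("gpu.example.com", "ng1-template", "gpu-0", "gpu-1")}
	ready := []*resourceapi.ResourceSlice{
		makeSlice("gpu.example.com", "node-1", "gpu-0", "gpu-1"), // same driver + device set
		makeSlice("nic.example.com", "node-1-extra", "nic-0"),    // surplus pool: tolerated
	}
	notReady := []*resourceapi.ResourceSlice{makeSlice("gpu.example.com", "node-2", "gpu-0")} // device missing
	fmt.Println(equalSlices(ready, template))    // true
	fmt.Println(equalSlices(notReady, template)) // false
}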

View File

@@ -0,0 +1,399 @@
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package customresources
import (
"fmt"
"testing"
"time"
resourceapi "k8s.io/api/resource/v1beta1"
"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot/store"
"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot/testsnapshot"
drasnapshot "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot"
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
"github.com/stretchr/testify/assert"
apiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test"
"k8s.io/autoscaler/cluster-autoscaler/context"
utils "k8s.io/autoscaler/cluster-autoscaler/utils/test"
)
func TestFilterOutNodesWithUnreadyDRAResources(t *testing.T) {
testCases := map[string]struct {
nodeGroupsAllNodes map[string][]*apiv1.Node
nodeGroupsTemplatesSlices map[string][]*resourceapi.ResourceSlice
nodesSlices map[string][]*resourceapi.ResourceSlice
expectedNodesReadiness map[string]bool
}{
"1 DRA node group all totally ready": {
nodeGroupsAllNodes: map[string][]*apiv1.Node{
"ng1": {
buildTestNode("node_1_Dra_Ready", true),
buildTestNode("node_2_Dra_Ready", true),
},
},
nodeGroupsTemplatesSlices: map[string][]*resourceapi.ResourceSlice{
"ng1": createNodeResourceSlices("ng1_template", []int{1, 1}),
},
nodesSlices: map[string][]*resourceapi.ResourceSlice{
"node_1_Dra_Ready": createNodeResourceSlices("node_1_Dra_Ready", []int{1, 1}),
"node_2_Dra_Ready": createNodeResourceSlices("node_2_Dra_Ready", []int{1, 1}),
},
expectedNodesReadiness: map[string]bool{
"node_1_Dra_Ready": true,
"node_2_Dra_Ready": true,
},
},
"1 DRA node group, one initially unready": {
nodeGroupsAllNodes: map[string][]*apiv1.Node{
"ng1": {
buildTestNode("node_1_Dra_Ready", true),
buildTestNode("node_2_Dra_Ready", false),
},
},
nodeGroupsTemplatesSlices: map[string][]*resourceapi.ResourceSlice{
"ng1": createNodeResourceSlices("ng1_template", []int{1, 1}),
},
nodesSlices: map[string][]*resourceapi.ResourceSlice{
"node_1_Dra_Ready": createNodeResourceSlices("node_1_Dra_Ready", []int{1, 1}),
"node_2_Dra_Ready": createNodeResourceSlices("node_2_Dra_Ready", []int{1, 1}),
},
expectedNodesReadiness: map[string]bool{
"node_1_Dra_Ready": true,
"node_2_Dra_Ready": false,
},
},
"1 DRA node group, one initially ready with unready reasource": {
nodeGroupsAllNodes: map[string][]*apiv1.Node{
"ng1": {
buildTestNode("node_1_Dra_Ready", true),
buildTestNode("node_2_Dra_Ready", true),
},
},
nodeGroupsTemplatesSlices: map[string][]*resourceapi.ResourceSlice{
"ng1": createNodeResourceSlices("ng1_template", []int{1, 1}),
},
nodesSlices: map[string][]*resourceapi.ResourceSlice{
"node_1_Dra_Ready": createNodeResourceSlices("node_1_Dra_Ready", []int{1, 1}),
"node_2_Dra_Ready": createNodeResourceSlices("node_2_Dra_Ready", []int{1, 0}),
},
expectedNodesReadiness: map[string]bool{
"node_1_Dra_Ready": true,
"node_2_Dra_Ready": false,
},
},
"1 DRA node group, one initially ready with more reasources than expected": {
nodeGroupsAllNodes: map[string][]*apiv1.Node{
"ng1": {
buildTestNode("node_1_Dra_Ready", true),
buildTestNode("node_2_Dra_Ready", true),
},
},
nodeGroupsTemplatesSlices: map[string][]*resourceapi.ResourceSlice{
"ng1": createNodeResourceSlices("ng1_template", []int{1, 1}),
},
nodesSlices: map[string][]*resourceapi.ResourceSlice{
"node_1_Dra_Ready": createNodeResourceSlices("node_1_Dra_Ready", []int{1, 1}),
"node_2_Dra_Ready": createNodeResourceSlices("node_2_Dra_Ready", []int{1, 3}),
},
expectedNodesReadiness: map[string]bool{
"node_1_Dra_Ready": true,
"node_2_Dra_Ready": false,
},
},
"1 DRA node group, one initially ready with no slices": {
nodeGroupsAllNodes: map[string][]*apiv1.Node{
"ng1": {
buildTestNode("node_1_Dra_Ready", true),
buildTestNode("node_2_Dra_Ready", true),
},
},
nodeGroupsTemplatesSlices: map[string][]*resourceapi.ResourceSlice{
"ng1": createNodeResourceSlices("ng1_template", []int{1, 1}),
},
nodesSlices: map[string][]*resourceapi.ResourceSlice{
"node_1_Dra_Ready": {},
"node_2_Dra_Ready": createNodeResourceSlices("node_2_Dra_Ready", []int{1, 3}),
},
expectedNodesReadiness: map[string]bool{
"node_1_Dra_Ready": false,
"node_2_Dra_Ready": false,
},
},
"1 DRA node group, single driver multiple pools, only one published": {
nodeGroupsAllNodes: map[string][]*apiv1.Node{
"ng1": {
buildTestNode("node_1_Dra_Ready", true),
},
},
nodeGroupsTemplatesSlices: map[string][]*resourceapi.ResourceSlice{
"ng1": buildNodeResourceSlices("ng1_template", "driver", []int{2, 2, 2}),
},
nodesSlices: map[string][]*resourceapi.ResourceSlice{
"node_1_Dra_Ready": buildNodeResourceSlices("node_2_Dra_Ready", "driver", []int{2}),
},
expectedNodesReadiness: map[string]bool{
"node_1_Dra_Ready": false,
},
},
"1 DRA node group, single driver multiple pools, more pools published including template pools": {
nodeGroupsAllNodes: map[string][]*apiv1.Node{
"ng1": {
buildTestNode("node_2_Dra_Ready", true),
},
},
nodeGroupsTemplatesSlices: map[string][]*resourceapi.ResourceSlice{
"ng1": buildNodeResourceSlices("ng1_template", "driver", []int{2, 2, 2}),
},
nodesSlices: map[string][]*resourceapi.ResourceSlice{
"node_2_Dra_Ready": buildNodeResourceSlices("node_2_Dra_Ready", "driver", []int{2, 2, 2, 2}),
},
expectedNodesReadiness: map[string]bool{
"node_2_Dra_Ready": true,
},
},
"1 DRA node group, single driver multiple pools, more pools published not including template pools": {
nodeGroupsAllNodes: map[string][]*apiv1.Node{
"ng1": {
buildTestNode("node_1_Dra_Ready", true),
},
},
nodeGroupsTemplatesSlices: map[string][]*resourceapi.ResourceSlice{
"ng1": buildNodeResourceSlices("ng1_template", "driver", []int{2, 2, 2}),
},
nodesSlices: map[string][]*resourceapi.ResourceSlice{
"node_1_Dra_Ready": buildNodeResourceSlices("node_1_Dra_Ready", "driver", []int{2, 2, 1, 2}),
},
expectedNodesReadiness: map[string]bool{
"node_1_Dra_Ready": false,
},
},
"2 node groups, one DRA with 1 reasource unready node": {
nodeGroupsAllNodes: map[string][]*apiv1.Node{
"ng1": {
buildTestNode("node_1_Dra_Ready", true),
buildTestNode("node_2_Dra_Ready", true),
buildTestNode("node_3_Dra_Unready", true),
},
"ng2": {
buildTestNode("node_4_NonDra_Ready", true),
buildTestNode("node_5_NonDra_Unready", false),
},
},
nodeGroupsTemplatesSlices: map[string][]*resourceapi.ResourceSlice{
"ng1": createNodeResourceSlices("ng1_template", []int{2, 2}),
},
nodesSlices: map[string][]*resourceapi.ResourceSlice{
"node_1_Dra_Ready": createNodeResourceSlices("node_1_Dra_Ready", []int{2, 2}),
"node_2_Dra_Ready": createNodeResourceSlices("node_2_Dra_Ready", []int{2, 2}),
"node_3_Dra_Unready": createNodeResourceSlices("node_3_Dra_Unready", []int{2, 1}),
},
expectedNodesReadiness: map[string]bool{
"node_1_Dra_Ready": true,
"node_2_Dra_Ready": true,
"node_3_Dra_Unready": false,
"node_4_NonDra_Ready": true,
"node_5_NonDra_Unready": false,
},
},
"2 DRA node groups, each with 1 reasource unready node": {
nodeGroupsAllNodes: map[string][]*apiv1.Node{
"ng1": {
buildTestNode("node_1_Dra_Ready", true),
buildTestNode("node_2_Dra_Ready", true),
buildTestNode("node_3_Dra_Unready", true),
},
"ng2": {
buildTestNode("node_4_Dra_Ready", true),
buildTestNode("node_5_Dra_Unready", true),
},
},
nodeGroupsTemplatesSlices: map[string][]*resourceapi.ResourceSlice{
"ng1": createNodeResourceSlices("ng1_template", []int{2, 2}),
"ng2": createNodeResourceSlices("ng2_template", []int{3, 3}),
},
nodesSlices: map[string][]*resourceapi.ResourceSlice{
"node_1_Dra_Ready": createNodeResourceSlices("node_1_Dra_Ready", []int{2, 2}),
"node_2_Dra_Ready": createNodeResourceSlices("node_2_Dra_Ready", []int{2, 2}),
"node_3_Dra_Unready": createNodeResourceSlices("node_3_Dra_Unready", []int{2, 1}),
"node_4_Dra_Ready": createNodeResourceSlices("node_4_Dra_Ready", []int{3, 3}),
"node_5_Dra_Unready": createNodeResourceSlices("node_5_Dra_Unready", []int{2, 1}),
},
expectedNodesReadiness: map[string]bool{
"node_1_Dra_Ready": true,
"node_2_Dra_Ready": true,
"node_3_Dra_Unready": false,
"node_4_Dra_Ready": true,
"node_5_Dra_Unready": false,
},
},
"2 DRA node group, single driver multiple pools, more pools published including template pools": {
nodeGroupsAllNodes: map[string][]*apiv1.Node{
"ng1": {
buildTestNode("node_1_Dra_Ready", true),
buildTestNode("node_2_Dra_Ready", true),
},
"ng2": {
buildTestNode("node_3_Dra_Ready", true),
},
},
nodeGroupsTemplatesSlices: map[string][]*resourceapi.ResourceSlice{
"ng1": buildNodeResourceSlices("ng1_template", "driver", []int{2, 2, 2}),
"ng2": buildNodeResourceSlices("ng2_template", "driver", []int{1, 1}),
},
nodesSlices: map[string][]*resourceapi.ResourceSlice{
"node_1_Dra_Ready": buildNodeResourceSlices("node_1_Dra_Ready", "driver", []int{2, 2, 2, 2}),
"node_2_Dra_Ready": buildNodeResourceSlices("node_2_Dra_Ready", "driver", []int{2, 2, 2}),
"node_3_Dra_Ready": buildNodeResourceSlices("node_3_Dra_Ready", "driver", []int{1, 1, 1}),
},
expectedNodesReadiness: map[string]bool{
"node_1_Dra_Ready": true,
"node_2_Dra_Ready": true,
"node_3_Dra_Ready": true,
},
},
"All together": {
nodeGroupsAllNodes: map[string][]*apiv1.Node{
"ng1": {
buildTestNode("node_1", true),
buildTestNode("node_2", true),
buildTestNode("node_3", true),
},
"ng2": {
buildTestNode("node_4", false),
buildTestNode("node_5", true),
},
"ng3": {
buildTestNode("node_6", false),
buildTestNode("node_7", true),
},
},
nodeGroupsTemplatesSlices: map[string][]*resourceapi.ResourceSlice{
"ng1": createNodeResourceSlices("ng1_template", []int{2, 2}),
"ng2": createNodeResourceSlices("ng2_template", []int{3, 3}),
},
nodesSlices: map[string][]*resourceapi.ResourceSlice{
"node_1": createNodeResourceSlices("node_1", []int{2, 2, 2}),
"node_2": createNodeResourceSlices("node_2", []int{1}),
"node_3": createNodeResourceSlices("node_3", []int{1, 2}),
"node_4": createNodeResourceSlices("node_4", []int{3, 3}),
"node_5": {},
},
expectedNodesReadiness: map[string]bool{
"node_1": true,
"node_2": false,
"node_3": false,
"node_4": false,
"node_5": false,
"node_6": false,
"node_7": true,
},
},
}
for tcName, tc := range testCases {
t.Run(tcName, func(t *testing.T) {
provider := testprovider.NewTestCloudProviderBuilder().Build()
machineTemplates := map[string]*framework.NodeInfo{}
initialAllNodes := []*apiv1.Node{}
initialReadyNodes := []*apiv1.Node{}
for ng, nodes := range tc.nodeGroupsAllNodes {
machineName := fmt.Sprintf("%s_machine_template", ng)
if rs, found := tc.nodeGroupsTemplatesSlices[ng]; found {
machineTemplates[machineName] = framework.NewNodeInfo(buildTestNode(fmt.Sprintf("%s_template", ng), true), rs)
} else {
machineTemplates[machineName] = framework.NewTestNodeInfo(buildTestNode(fmt.Sprintf("%s_template", ng), true))
}
provider.AddAutoprovisionedNodeGroup(ng, 0, 20, len(nodes), machineName)
for _, node := range nodes {
initialAllNodes = append(initialAllNodes, node)
if getNodeReadiness(node) {
initialReadyNodes = append(initialReadyNodes, node)
}
provider.AddNode(ng, node)
}
}
provider.SetMachineTemplates(machineTemplates)
draSnapshot := drasnapshot.NewSnapshot(nil, tc.nodesSlices, nil, nil)
clusterSnapshotStore := store.NewBasicSnapshotStore()
clusterSnapshotStore.SetClusterState([]*apiv1.Node{}, []*apiv1.Pod{}, draSnapshot)
clusterSnapshot, _, _ := testsnapshot.NewCustomTestSnapshotAndHandle(clusterSnapshotStore)
ctx := &context.AutoscalingContext{CloudProvider: provider, ClusterSnapshot: clusterSnapshot}
processor := DraCustomResourcesProcessor{}
newAllNodes, newReadyNodes := processor.FilterOutNodesWithUnreadyResources(ctx, initialAllNodes, initialReadyNodes, draSnapshot)
readyNodes := make(map[string]bool)
for _, node := range newReadyNodes {
readyNodes[node.Name] = true
}
assert.Len(t, newAllNodes, len(initialAllNodes), "Total number of nodes should not change")
for _, node := range newAllNodes {
gotReadiness := getNodeReadiness(node)
assert.Equal(t, tc.expectedNodesReadiness[node.Name], gotReadiness)
assert.Equal(t, gotReadiness, readyNodes[node.Name])
}
})
}
}
func createNodeResourceSlices(nodeName string, numberOfDevicesInSlices []int) []*resourceapi.ResourceSlice {
return buildNodeResourceSlices(nodeName, "", numberOfDevicesInSlices)
}
func buildNodeResourceSlices(nodeName, driverName string, numberOfDevicesInSlices []int) []*resourceapi.ResourceSlice {
numberOfSlices := len(numberOfDevicesInSlices)
resourceSlices := []*resourceapi.ResourceSlice{}
for sliceIndex := range numberOfSlices {
devices := []resourceapi.Device{}
for deviceIndex := range numberOfDevicesInSlices[sliceIndex] {
devices = append(devices, resourceapi.Device{Name: fmt.Sprintf("%d_%d", sliceIndex, deviceIndex)})
}
if driverName == "" {
driverName = fmt.Sprintf("driver_%d", sliceIndex)
}
spec := resourceapi.ResourceSliceSpec{
NodeName: nodeName,
Driver: driverName,
Pool: resourceapi.ResourcePool{Name: fmt.Sprintf("%s_pool_%d", nodeName, sliceIndex)},
Devices: devices,
}
resourceSlices = append(resourceSlices, &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: nodeName}, Spec: spec})
}
return resourceSlices
}
func buildTestNode(nodeName string, ready bool) *apiv1.Node {
node := utils.BuildTestNode(nodeName, 500, 100)
utils.SetNodeReadyState(node, ready, time.Now().Add(-5*time.Minute))
return node
}
func getNodeReadiness(node *apiv1.Node) bool {
for i := range node.Status.Conditions {
if node.Status.Conditions[i].Type == apiv1.NodeReady {
return node.Status.Conditions[i].Status == apiv1.ConditionTrue
}
}
return false
}
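
(Sketch: every case above boils down to comparing per-driver, per-pool device counts between a node group's template slices and what the node actually publishes. A minimal reduction helper in that spirit — assumed for illustration, not part of the diff:)

// devicesPerPool collapses ResourceSlices into device counts keyed by driver,
// then by pool name.
func devicesPerPool(slices []*resourceapi.ResourceSlice) map[string]map[string]int {
	counts := map[string]map[string]int{}
	for _, slice := range slices {
		perDriver := counts[slice.Spec.Driver]
		if perDriver == nil {
			perDriver = map[string]int{}
			counts[slice.Spec.Driver] = perDriver
		}
		perDriver[slice.Spec.Pool.Name] += len(slice.Spec.Devices)
	}
	return counts
}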

View File

@@ -20,6 +20,7 @@ import (
apiv1 "k8s.io/api/core/v1"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
"k8s.io/autoscaler/cluster-autoscaler/context"
drasnapshot "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot"
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
"k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
@@ -36,7 +37,7 @@ type GpuCustomResourcesProcessor struct {
// it in allocatable from ready nodes list and updates their status to unready on all nodes list.
// This is a hack/workaround for nodes with GPU coming up without installed drivers, resulting
// in GPU missing from their allocatable and capacity.
func (p *GpuCustomResourcesProcessor) FilterOutNodesWithUnreadyResources(context *context.AutoscalingContext, allNodes, readyNodes []*apiv1.Node) ([]*apiv1.Node, []*apiv1.Node) {
func (p *GpuCustomResourcesProcessor) FilterOutNodesWithUnreadyResources(context *context.AutoscalingContext, allNodes, readyNodes []*apiv1.Node, _ *drasnapshot.Snapshot) ([]*apiv1.Node, []*apiv1.Node) {
newAllNodes := make([]*apiv1.Node, 0)
newReadyNodes := make([]*apiv1.Node, 0)
nodesWithUnreadyGpu := make(map[string]*apiv1.Node)
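
(Note: the extra trailing parameter comes from the shared CustomResourcesProcessor interface, which the GPU processor ignores and the DRA processor uses. A reconstruction of the implied method set — the exact declaration is not shown in this diff:)

type CustomResourcesProcessor interface {
	// FilterOutNodesWithUnreadyResources marks nodes whose custom resources
	// are not yet ready; DRA-aware implementations consult the snapshot.
	FilterOutNodesWithUnreadyResources(ctx *context.AutoscalingContext, allNodes, readyNodes []*apiv1.Node, draSnapshot *drasnapshot.Snapshot) ([]*apiv1.Node, []*apiv1.Node)
	// GetNodeResourceTargets returns the custom resource targets for a node.
	GetNodeResourceTargets(ctx *context.AutoscalingContext, node *apiv1.Node, nodeGroup cloudprovider.NodeGroup) ([]CustomResourceTarget, errors.AutoscalerError)
	// CleanUp cleans up the processor's internal structures.
	CleanUp()
}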

View File

@@ -170,10 +170,10 @@ func TestFilterOutNodesWithUnreadyResources(t *testing.T) {
nodeNoGpuUnready,
}
processor := NewDefaultCustomResourcesProcessor()
processor := GpuCustomResourcesProcessor{}
provider := testprovider.NewTestCloudProviderBuilder().Build()
ctx := &context.AutoscalingContext{CloudProvider: provider}
newAllNodes, newReadyNodes := processor.FilterOutNodesWithUnreadyResources(ctx, initialAllNodes, initialReadyNodes)
newAllNodes, newReadyNodes := processor.FilterOutNodesWithUnreadyResources(ctx, initialAllNodes, initialReadyNodes, nil)
foundInReady := make(map[string]bool)
for _, node := range newReadyNodes {

View File

@@ -97,7 +97,7 @@ func DefaultProcessors(options config.AutoscalingOptions) *AutoscalingProcessors
NodeGroupManager: nodegroups.NewDefaultNodeGroupManager(),
AsyncNodeGroupStateChecker: asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker(),
NodeGroupConfigProcessor: nodegroupconfig.NewDefaultNodeGroupConfigProcessor(options.NodeGroupDefaults),
CustomResourcesProcessor: customresources.NewDefaultCustomResourcesProcessor(),
CustomResourcesProcessor: customresources.NewDefaultCustomResourcesProcessor(options.DynamicResourceAllocationEnabled),
ActionableClusterProcessor: actionablecluster.NewDefaultActionableClusterProcessor(),
TemplateNodeInfoProvider: nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false),
ScaleDownCandidatesNotifier: scaledowncandidates.NewObserversList(),
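
(Sketch: the constructor now takes the DRA feature flag. A plausible shape for the selection logic — the combined wrapper type is an assumption, not confirmed by this diff:)

func NewDefaultCustomResourcesProcessor(draEnabled bool) CustomResourcesProcessor {
	if draEnabled {
		// Run the GPU workaround first, then the DRA slice check.
		return &combinedCustomResourcesProcessor{processors: []CustomResourcesProcessor{
			&GpuCustomResourcesProcessor{},
			&DraCustomResourcesProcessor{},
		}}
	}
	return &GpuCustomResourcesProcessor{}
}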

View File

@@ -52,7 +52,7 @@ func NewTestProcessors(context *context.AutoscalingContext) *processors.Autoscal
NodeGroupManager: nodegroups.NewDefaultNodeGroupManager(),
TemplateNodeInfoProvider: nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false),
NodeGroupConfigProcessor: nodegroupconfig.NewDefaultNodeGroupConfigProcessor(context.NodeGroupDefaults),
CustomResourcesProcessor: customresources.NewDefaultCustomResourcesProcessor(),
CustomResourcesProcessor: customresources.NewDefaultCustomResourcesProcessor(true),
ActionableClusterProcessor: actionablecluster.NewDefaultActionableClusterProcessor(),
ScaleDownCandidatesNotifier: scaledowncandidates.NewObserversList(),
ScaleStateNotifier: nodegroupchange.NewNodeGroupChangeObserversList(),

View File

@@ -102,11 +102,11 @@ func compareStates(t *testing.T, a, b snapshotState) {
t.Errorf("ResourceClaims().List(): unexpected diff (-want +got): %s", diff)
}
aSlices, err := a.draSnapshot.ResourceSlices().List()
aSlices, err := a.draSnapshot.ResourceSlices().ListWithDeviceTaintRules()
if err != nil {
t.Errorf("ResourceSlices().List(): unexpected error: %v", err)
}
bSlices, err := b.draSnapshot.ResourceSlices().List()
bSlices, err := b.draSnapshot.ResourceSlices().ListWithDeviceTaintRules()
if err != nil {
t.Errorf("ResourceSlices().List(): unexpected error: %v", err)
}

View File

@@ -24,6 +24,7 @@ type snapshotSliceLister struct {
snapshot *Snapshot
}
func (sl snapshotSliceLister) List() ([]*resourceapi.ResourceSlice, error) {
// TODO(DRA): Actually handle the taint rules.
func (sl snapshotSliceLister) ListWithDeviceTaintRules() ([]*resourceapi.ResourceSlice, error) {
return sl.snapshot.listResourceSlices(), nil
}
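
(Sketch of where the TODO points: honoring the new method name would mean overlaying DeviceTaintRule objects onto the slices before returning them. Both names below — the accessor and the apply helper — are hypothetical:)

func (sl snapshotSliceLister) ListWithDeviceTaintRules() ([]*resourceapi.ResourceSlice, error) {
	slices := sl.snapshot.listResourceSlices()
	for _, rule := range sl.snapshot.deviceTaintRules() { // hypothetical accessor
		for _, slice := range slices {
			applyDeviceTaintRule(slice, rule) // hypothetical: taint matching devices
		}
	}
	return slices, nil
}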

View File

@@ -76,7 +76,7 @@ func TestSnapshotSliceListerList(t *testing.T) {
t.Run(tc.testName, func(t *testing.T) {
snapshot := NewSnapshot(nil, tc.localSlices, tc.globalSlices, nil)
var resourceSliceLister schedulerframework.ResourceSliceLister = snapshot.ResourceSlices()
slices, err := resourceSliceLister.List()
slices, err := resourceSliceLister.ListWithDeviceTaintRules()
if err != nil {
t.Fatalf("snapshotSliceLister.List(): got unexpected error: %v", err)
}

View File

@@ -483,7 +483,7 @@ func TestSnapshotResourceSlices(t *testing.T) {
}
if tc.wantAllSlices != nil {
allSlices, err := snapshot.ResourceSlices().List()
allSlices, err := snapshot.ResourceSlices().ListWithDeviceTaintRules()
if err != nil {
t.Fatalf("ResourceSlices().List(): unexpected error: %v", err)
}

View File

@@ -145,7 +145,7 @@ func (u unsetResourceClaimTracker) AssumedClaimRestore(namespace, claimName stri
klog.Errorf("lister not set in delegate")
}
func (u unsetResourceSliceLister) List() ([]*resourceapi.ResourceSlice, error) {
func (u unsetResourceSliceLister) ListWithDeviceTaintRules() ([]*resourceapi.ResourceSlice, error) {
return nil, fmt.Errorf("lister not set in delegate")
}

View File

@@ -17,4 +17,4 @@ limitations under the License.
package version
// ClusterAutoscalerVersion contains version of CA.
const ClusterAutoscalerVersion = "1.33.0-beta.0"
const ClusterAutoscalerVersion = "1.34.0-alpha.0"