Fix unit-tests

Bartłomiej Wróblewski 2025-09-15 15:31:11 +00:00
parent 3e0578bdeb
commit a366e629ce
14 changed files with 141 additions and 121 deletions

View File

@@ -316,7 +316,7 @@ func TestTemplateNodeInfo(t *testing.T) {
 Return(fakeServerTypezx45f(), nil)
 obj, err := makeFakeNodeGroup(t, testclient).TemplateNodeInfo()
 require.NoError(t, err)
-assert.Equal(t, fakeResource(), obj.ToScheduler().Allocatable)
+assert.Equal(t, fakeResource(), obj.ToScheduler().GetAllocatable())
 }
 func TestNodeGroupErrors(t *testing.T) {

View File

@@ -1648,8 +1648,8 @@ func TestNodeGroupTemplateNodeInfo(t *testing.T) {
 t.Errorf("Expected the number of DRA devices in ResourceSlice to have: %d, but got: %d", config.expectedResourceSlice.gpuCount, len(resourceslice.Spec.Devices))
 }
 for _, device := range resourceslice.Spec.Devices {
-if *device.Basic.Attributes["type"].StringValue != config.expectedResourceSlice.deviceType {
-t.Errorf("Expected device type to have: %s, but got: %s", config.expectedResourceSlice.deviceType, *device.Basic.Attributes["type"].StringValue)
+if *device.Attributes["type"].StringValue != config.expectedResourceSlice.deviceType {
+t.Errorf("Expected device type to have: %s, but got: %s", config.expectedResourceSlice.deviceType, *device.Attributes["type"].StringValue)
 }
 }
 }
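This hunk, and several below, are one mechanical change: in the v1 resource API the BasicDevice wrapper is gone, and Attributes/Capacity sit directly on Device. A minimal sketch of the before/after shape, assuming the v1 types as imported elsewhere in this commit; the "gpu" value is illustrative, not from the patch:

package main

import (
	"fmt"

	resourceapi "k8s.io/api/resource/v1"
	"k8s.io/utils/ptr"
)

func main() {
	// Pre-v1 shape, for contrast:
	//   device.Basic.Attributes["type"].StringValue
	// v1 shape: attributes hang directly off the Device.
	device := resourceapi.Device{
		Name: "gpu-0",
		Attributes: map[resourceapi.QualifiedName]resourceapi.DeviceAttribute{
			"type": {StringValue: ptr.To("gpu")},
		},
	}
	fmt.Println(*device.Attributes["type"].StringValue) // prints "gpu"
}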

View File

@@ -336,18 +336,16 @@ func TestAnnotations(t *testing.T) {
 },
 Spec: resourceapi.ResourceSliceSpec{
 Driver: draDriver,
-NodeName: testNodeName,
+NodeName: &testNodeName,
 Pool: resourceapi.ResourcePool{
 Name: testNodeName,
 },
 Devices: []resourceapi.Device{
 {
 Name: "gpu-0",
-Basic: &resourceapi.BasicDevice{
-Attributes: map[resourceapi.QualifiedName]resourceapi.DeviceAttribute{
-"type": {
-StringValue: ptr.To(GpuDeviceType),
-},
-},
-},
+Attributes: map[resourceapi.QualifiedName]resourceapi.DeviceAttribute{
+"type": {
+StringValue: ptr.To(GpuDeviceType),
+},
+},
 },

View File

@@ -396,19 +396,19 @@ func TestTemplateNodeInfo(t *testing.T) {
 t.Fatal(err)
 }
-if nodeInfo.ToScheduler().Allocatable.MilliCPU != ng.resources.Cpu().MilliValue() {
+if nodeInfo.ToScheduler().GetAllocatable().GetMilliCPU() != ng.resources.Cpu().MilliValue() {
 t.Fatalf("expected nodeInfo to have %v MilliCPU, got %v",
-ng.resources.Cpu().MilliValue(), nodeInfo.ToScheduler().Allocatable.MilliCPU)
+ng.resources.Cpu().MilliValue(), nodeInfo.ToScheduler().GetAllocatable().GetMilliCPU())
 }
-if nodeInfo.ToScheduler().Allocatable.Memory != ng.resources.Memory().Value() {
+if nodeInfo.ToScheduler().GetAllocatable().GetMemory() != ng.resources.Memory().Value() {
 t.Fatalf("expected nodeInfo to have %v Memory, got %v",
-ng.resources.Memory().Value(), nodeInfo.ToScheduler().Allocatable.Memory)
+ng.resources.Memory().Value(), nodeInfo.ToScheduler().GetAllocatable().GetMemory())
 }
-if nodeInfo.ToScheduler().Allocatable.EphemeralStorage != ng.resources.StorageEphemeral().Value() {
+if nodeInfo.ToScheduler().GetAllocatable().GetEphemeralStorage() != ng.resources.StorageEphemeral().Value() {
 t.Fatalf("expected nodeInfo to have %v ephemeral storage, got %v",
-ng.resources.StorageEphemeral().Value(), nodeInfo.ToScheduler().Allocatable.EphemeralStorage)
+ng.resources.StorageEphemeral().Value(), nodeInfo.ToScheduler().GetAllocatable().GetEphemeralStorage())
 }
 }
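As in the first file, the allocatable reads switch from exported struct fields to getters, consistent with the scheduler framework interfaces (imported as fwk later in this commit). A sketch of the accessor chain, assuming GetAllocatable and the int64-valued getters are exposed on the fwk.NodeInfo interface as these call sites suggest; the helper name is hypothetical:

package example

import fwk "k8s.io/kube-scheduler/framework"

// assertAllocatable shows the new accessor chain; before this change the
// reads were info.Allocatable.MilliCPU and info.Allocatable.Memory.
func assertAllocatable(info fwk.NodeInfo, wantMilliCPU, wantMemory int64) bool {
	alloc := info.GetAllocatable()
	return alloc.GetMilliCPU() == wantMilliCPU && alloc.GetMemory() == wantMemory
}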

View File

@@ -603,15 +603,17 @@ func testResourceClaim(claimName string, owningPod *apiv1.Pod, nodeName string,
 selectors = append(selectors, resourceapi.DeviceSelector{CEL: &resourceapi.CELDeviceSelector{Expression: selector}})
 }
 deviceRequest := resourceapi.DeviceRequest{
-Name: request.name,
-DeviceClassName: "default-class",
-Selectors: selectors,
+Name: request.name,
+Exactly: &resourceapi.ExactDeviceRequest{
+DeviceClassName: "default-class",
+Selectors: selectors,
+},
 }
 if request.all {
-deviceRequest.AllocationMode = resourceapi.DeviceAllocationModeAll
+deviceRequest.Exactly.AllocationMode = resourceapi.DeviceAllocationModeAll
 } else {
-deviceRequest.AllocationMode = resourceapi.DeviceAllocationModeExactCount
-deviceRequest.Count = request.count
+deviceRequest.Exactly.AllocationMode = resourceapi.DeviceAllocationModeExactCount
+deviceRequest.Exactly.Count = request.count
 }
 deviceRequests = append(deviceRequests, deviceRequest)
 }
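The DeviceRequest fields (DeviceClassName, Selectors, AllocationMode, Count) now live under the nested Exactly *ExactDeviceRequest, which is why the helper mutates deviceRequest.Exactly.* after construction. A self-contained sketch of the same construction; only "default-class" and the type names are taken from the hunk, the rest is illustrative:

package main

import resourceapi "k8s.io/api/resource/v1"

// buildRequest mirrors the v1 request shape; before this API bump the
// class/selectors/mode/count fields sat directly on DeviceRequest.
func buildRequest(name string, all bool, count int64) resourceapi.DeviceRequest {
	req := resourceapi.DeviceRequest{
		Name: name,
		Exactly: &resourceapi.ExactDeviceRequest{
			DeviceClassName: "default-class",
		},
	}
	if all {
		req.Exactly.AllocationMode = resourceapi.DeviceAllocationModeAll
	} else {
		req.Exactly.AllocationMode = resourceapi.DeviceAllocationModeExactCount
		req.Exactly.Count = count
	}
	return req
}

func main() { _ = buildRequest("req-0", false, 2) }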
@@ -711,9 +713,10 @@ func testResourceSlices(driver, poolName string, poolSliceCount, poolGen int64,
 }
 if avail.node != "" {
-slice.Spec.NodeName = avail.node
+slice.Spec.NodeName = &avail.node
 } else if avail.all {
-slice.Spec.AllNodes = true
+v := true
+slice.Spec.AllNodes = &v
 } else if len(avail.nodes) > 0 {
 slice.Spec.NodeSelector = &apiv1.NodeSelector{
 NodeSelectorTerms: []apiv1.NodeSelectorTerm{
@@ -728,18 +731,16 @@ func testResourceSlices(driver, poolName string, poolSliceCount, poolGen int64,
 var devices []resourceapi.Device
 for _, deviceDef := range deviceDefs {
 device := resourceapi.Device{
-Name: deviceDef.name,
-Basic: &resourceapi.BasicDevice{
-Attributes: map[resourceapi.QualifiedName]resourceapi.DeviceAttribute{},
-Capacity: map[resourceapi.QualifiedName]resourceapi.DeviceCapacity{},
-},
+Name: deviceDef.name,
+Attributes: map[resourceapi.QualifiedName]resourceapi.DeviceAttribute{},
+Capacity: map[resourceapi.QualifiedName]resourceapi.DeviceCapacity{},
 }
 for name, val := range deviceDef.attributes {
 val := val
-device.Basic.Attributes[resourceapi.QualifiedName(driver+"/"+name)] = resourceapi.DeviceAttribute{StringValue: &val}
+device.Attributes[resourceapi.QualifiedName(driver+"/"+name)] = resourceapi.DeviceAttribute{StringValue: &val}
 }
 for name, quantity := range deviceDef.capacity {
-device.Basic.Capacity[resourceapi.QualifiedName(name)] = resourceapi.DeviceCapacity{Value: resource.MustParse(quantity)}
+device.Capacity[resourceapi.QualifiedName(name)] = resourceapi.DeviceCapacity{Value: resource.MustParse(quantity)}
 }
 devices = append(devices, device)
 }

View File

@@ -35,21 +35,20 @@ import (
 . "k8s.io/autoscaler/cluster-autoscaler/utils/test"
 )
-func createTestResourceSlice(nodeName string, devicesPerSlice int, slicesPerNode int, driver string, device resourceapi.BasicDevice) *resourceapi.ResourceSlice {
+func createTestResourceSlice(nodeName string, devicesPerSlice int, slicesPerNode int, driver string) *resourceapi.ResourceSlice {
 sliceId := uuid.New().String()
 name := fmt.Sprintf("rs-%s", sliceId)
 uid := types.UID(fmt.Sprintf("rs-%s-uid", sliceId))
 devices := make([]resourceapi.Device, devicesPerSlice)
 for deviceIndex := 0; deviceIndex < devicesPerSlice; deviceIndex++ {
 deviceName := fmt.Sprintf("rs-dev-%s-%d", sliceId, deviceIndex)
-deviceCopy := device
-devices[deviceIndex] = resourceapi.Device{Name: deviceName, Basic: &deviceCopy}
+devices[deviceIndex] = resourceapi.Device{Name: deviceName}
 }
 return &resourceapi.ResourceSlice{
 ObjectMeta: metav1.ObjectMeta{Name: name, UID: uid},
 Spec: resourceapi.ResourceSliceSpec{
-NodeName: nodeName,
+NodeName: &nodeName,
 Driver: driver,
 Pool: resourceapi.ResourcePool{
 Name: nodeName,
@@ -69,11 +68,13 @@ func createTestResourceClaim(requestsPerClaim int, devicesPerRequest int, driver
 requests := make([]resourceapi.DeviceRequest, requestsPerClaim)
 for requestIndex := 0; requestIndex < requestsPerClaim; requestIndex++ {
 requests[requestIndex] = resourceapi.DeviceRequest{
-Name: fmt.Sprintf("deviceRequest-%d", requestIndex),
-DeviceClassName: deviceClass,
-Selectors: []resourceapi.DeviceSelector{{CEL: &resourceapi.CELDeviceSelector{Expression: expression}}},
-AllocationMode: resourceapi.DeviceAllocationModeExactCount,
-Count: int64(devicesPerRequest),
+Name: fmt.Sprintf("deviceRequest-%d", requestIndex),
+Exactly: &resourceapi.ExactDeviceRequest{
+DeviceClassName: deviceClass,
+Selectors: []resourceapi.DeviceSelector{{CEL: &resourceapi.CELDeviceSelector{Expression: expression}}},
+AllocationMode: resourceapi.DeviceAllocationModeExactCount,
+Count: int64(devicesPerRequest),
+},
 }
 }
@@ -102,7 +103,7 @@ func allocateResourceSlicesForClaim(claim *resourceapi.ResourceClaim, nodeName s
 allocationLoop:
 for _, request := range claim.Spec.Devices.Requests {
-for devicesRequired := request.Count; devicesRequired > 0; devicesRequired-- {
+for devicesRequired := request.Exactly.Count; devicesRequired > 0; devicesRequired-- {
 // Skipping resource slices until we find one with at least a single device available
 for sliceIndex < len(slices) && deviceIndex >= len(slices[sliceIndex].Spec.Devices) {
 sliceIndex++
@@ -272,7 +273,7 @@ func BenchmarkScheduleRevert(b *testing.B) {
 for nodeIndex := 0; nodeIndex < maxNodesCount; nodeIndex++ {
 nodeName := fmt.Sprintf("node-%d", nodeIndex)
 node := BuildTestNode(nodeName, 10000, 10000)
-nodeSlice := createTestResourceSlice(node.Name, devicesPerSlice, 1, driverName, resourceapi.BasicDevice{})
+nodeSlice := createTestResourceSlice(node.Name, devicesPerSlice, 1, driverName)
 nodeInfo := framework.NewNodeInfo(node, []*resourceapi.ResourceSlice{nodeSlice})
 sharedClaim := createTestResourceClaim(devicesPerSlice, 1, driverName, deviceClassName)

View File

@@ -204,32 +204,32 @@ func validTestCases(t *testing.T, snapshotName string) []modificationTestCase {
 {
 ObjectMeta: metav1.ObjectMeta{Name: "slice1", UID: "slice1Uid"},
 Spec: resourceapi.ResourceSliceSpec{
-NodeName: node.Name,
+NodeName: &node.Name,
 Driver: "driver.foo.com",
 Pool: resourceapi.ResourcePool{
 Name: "pool1",
 ResourceSliceCount: 1,
 },
 Devices: []resourceapi.Device{
-{Name: "dev1", Basic: &resourceapi.BasicDevice{}},
-{Name: "dev2", Basic: &resourceapi.BasicDevice{}},
-{Name: "dev3", Basic: &resourceapi.BasicDevice{}},
+{Name: "dev1"},
+{Name: "dev2"},
+{Name: "dev3"},
 },
 },
 },
 {
 ObjectMeta: metav1.ObjectMeta{Name: "slice2", UID: "slice2Uid"},
 Spec: resourceapi.ResourceSliceSpec{
-NodeName: node.Name,
+NodeName: &node.Name,
 Driver: "driver.bar.com",
 Pool: resourceapi.ResourcePool{
 Name: "pool2",
 ResourceSliceCount: 1,
 },
 Devices: []resourceapi.Device{
-{Name: "dev1", Basic: &resourceapi.BasicDevice{}},
-{Name: "dev2", Basic: &resourceapi.BasicDevice{}},
-{Name: "dev3", Basic: &resourceapi.BasicDevice{}},
+{Name: "dev1"},
+{Name: "dev2"},
+{Name: "dev3"},
 },
 },
 },
@@ -247,11 +247,13 @@ func validTestCases(t *testing.T, snapshotName string) []modificationTestCase {
 Devices: resourceapi.DeviceClaim{
 Requests: []resourceapi.DeviceRequest{
 {
-Name: "req1",
-DeviceClassName: "defaultClass",
-Selectors: []resourceapi.DeviceSelector{{CEL: &resourceapi.CELDeviceSelector{Expression: `device.driver == "driver.foo.com"`}}},
-AllocationMode: resourceapi.DeviceAllocationModeExactCount,
-Count: 3,
+Name: "req1",
+Exactly: &resourceapi.ExactDeviceRequest{
+DeviceClassName: "defaultClass",
+Selectors: []resourceapi.DeviceSelector{{CEL: &resourceapi.CELDeviceSelector{Expression: `device.driver == "driver.foo.com"`}}},
+AllocationMode: resourceapi.DeviceAllocationModeExactCount,
+Count: 3,
+},
 },
 },
 },
@@ -284,10 +286,12 @@ func validTestCases(t *testing.T, snapshotName string) []modificationTestCase {
 Devices: resourceapi.DeviceClaim{
 Requests: []resourceapi.DeviceRequest{
 {
-Name: "req1",
-DeviceClassName: "defaultClass",
-Selectors: []resourceapi.DeviceSelector{{CEL: &resourceapi.CELDeviceSelector{Expression: `device.driver == "driver.bar.com"`}}},
-AllocationMode: resourceapi.DeviceAllocationModeAll,
+Name: "req1",
+Exactly: &resourceapi.ExactDeviceRequest{
+DeviceClassName: "defaultClass",
+Selectors: []resourceapi.DeviceSelector{{CEL: &resourceapi.CELDeviceSelector{Expression: `device.driver == "driver.bar.com"`}}},
+AllocationMode: resourceapi.DeviceAllocationModeAll,
+},
 },
 },
 },

View File

@@ -39,12 +39,16 @@
 claim2 = &resourceapi.ResourceClaim{ObjectMeta: metav1.ObjectMeta{Name: "claim-2", UID: "claim-2"}}
 claim3 = &resourceapi.ResourceClaim{ObjectMeta: metav1.ObjectMeta{Name: "claim-3", UID: "claim-3"}}
-localSlice1 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "local-slice-1", UID: "local-slice-1"}, Spec: resourceapi.ResourceSliceSpec{NodeName: "n1"}}
-localSlice2 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "local-slice-2", UID: "local-slice-2"}, Spec: resourceapi.ResourceSliceSpec{NodeName: "n1"}}
-localSlice3 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "local-slice-3", UID: "local-slice-3"}, Spec: resourceapi.ResourceSliceSpec{NodeName: "n2"}}
-localSlice4 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "local-slice-4", UID: "local-slice-4"}, Spec: resourceapi.ResourceSliceSpec{NodeName: "n2"}}
-globalSlice1 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "global-slice-1", UID: "global-slice-1"}, Spec: resourceapi.ResourceSliceSpec{AllNodes: true}}
-globalSlice2 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "global-slice-2", UID: "global-slice-2"}, Spec: resourceapi.ResourceSliceSpec{AllNodes: true}}
+n1Name = "n1"
+n2Name = "n2"
+trueValue = true
+localSlice1 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "local-slice-1", UID: "local-slice-1"}, Spec: resourceapi.ResourceSliceSpec{NodeName: &n1Name}}
+localSlice2 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "local-slice-2", UID: "local-slice-2"}, Spec: resourceapi.ResourceSliceSpec{NodeName: &n1Name}}
+localSlice3 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "local-slice-3", UID: "local-slice-3"}, Spec: resourceapi.ResourceSliceSpec{NodeName: &n2Name}}
+localSlice4 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "local-slice-4", UID: "local-slice-4"}, Spec: resourceapi.ResourceSliceSpec{NodeName: &n2Name}}
+globalSlice1 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "global-slice-1", UID: "global-slice-1"}, Spec: resourceapi.ResourceSliceSpec{AllNodes: &trueValue}}
+globalSlice2 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "global-slice-2", UID: "global-slice-2"}, Spec: resourceapi.ResourceSliceSpec{AllNodes: &trueValue}}
 globalSlice3 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "global-slice-3", UID: "global-slice-3"}, Spec: resourceapi.ResourceSliceSpec{NodeSelector: &apiv1.NodeSelector{}}}
 class1 = &resourceapi.DeviceClass{ObjectMeta: metav1.ObjectMeta{Name: "class-1", UID: "class-1"}}
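ResourceSliceSpec.NodeName is now *string and AllNodes is *bool, hence the new n1Name/n2Name/trueValue variables for the fixtures to take addresses of. In files that already use k8s.io/utils/ptr (as the annotations test above does), ptr.To expresses the same fixtures without the helper variables; a sketch:

package example

import (
	resourceapi "k8s.io/api/resource/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/ptr"
)

var (
	// Same shapes as localSlice1/globalSlice1 above, without shared locals.
	localSlice1  = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "local-slice-1", UID: "local-slice-1"}, Spec: resourceapi.ResourceSliceSpec{NodeName: ptr.To("n1")}}
	globalSlice1 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "global-slice-1", UID: "global-slice-1"}, Spec: resourceapi.ResourceSliceSpec{AllNodes: ptr.To(true)}}
)

A side effect of the shared variables is that every fixture holds a pointer to the same string; ptr.To gives each literal its own allocation, which can matter if a test mutates the value through the pointer.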

View File

@@ -31,12 +31,15 @@ import (
 func TestSnapshotSliceListerList(t *testing.T) {
 var (
-localSlice1 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "local-slice-1", UID: "local-slice-1"}, Spec: resourceapi.ResourceSliceSpec{NodeName: "n1"}}
-localSlice2 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "local-slice-2", UID: "local-slice-2"}, Spec: resourceapi.ResourceSliceSpec{NodeName: "n1"}}
-localSlice3 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "local-slice-3", UID: "local-slice-3"}, Spec: resourceapi.ResourceSliceSpec{NodeName: "n2"}}
-localSlice4 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "local-slice-4", UID: "local-slice-4"}, Spec: resourceapi.ResourceSliceSpec{NodeName: "n2"}}
-globalSlice1 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "global-slice-1", UID: "global-slice-1"}, Spec: resourceapi.ResourceSliceSpec{AllNodes: true}}
-globalSlice2 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "global-slice-2", UID: "global-slice-2"}, Spec: resourceapi.ResourceSliceSpec{AllNodes: true}}
+n1Name = "n1"
+n2Name = "n2"
+trueValue = true
+localSlice1 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "local-slice-1", UID: "local-slice-1"}, Spec: resourceapi.ResourceSliceSpec{NodeName: &n1Name}}
+localSlice2 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "local-slice-2", UID: "local-slice-2"}, Spec: resourceapi.ResourceSliceSpec{NodeName: &n1Name}}
+localSlice3 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "local-slice-3", UID: "local-slice-3"}, Spec: resourceapi.ResourceSliceSpec{NodeName: &n2Name}}
+localSlice4 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "local-slice-4", UID: "local-slice-4"}, Spec: resourceapi.ResourceSliceSpec{NodeName: &n2Name}}
+globalSlice1 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "global-slice-1", UID: "global-slice-1"}, Spec: resourceapi.ResourceSliceSpec{AllNodes: &trueValue}}
+globalSlice2 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "global-slice-2", UID: "global-slice-2"}, Spec: resourceapi.ResourceSliceSpec{AllNodes: &trueValue}}
 globalSlice3 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "global-slice-3", UID: "global-slice-3"}, Spec: resourceapi.ResourceSliceSpec{NodeSelector: &apiv1.NodeSelector{}}}
 )

View File

@@ -34,14 +34,19 @@ import (
 )
 var (
-node1Slice1 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "local-slice-1", UID: "local-slice-1"}, Spec: resourceapi.ResourceSliceSpec{NodeName: "node1"}}
-node1Slice2 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "local-slice-2", UID: "local-slice-2"}, Spec: resourceapi.ResourceSliceSpec{NodeName: "node1"}}
-node2Slice1 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "local-slice-3", UID: "local-slice-3"}, Spec: resourceapi.ResourceSliceSpec{NodeName: "node2"}}
-node2Slice2 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "local-slice-4", UID: "local-slice-4"}, Spec: resourceapi.ResourceSliceSpec{NodeName: "node2"}}
-node3Slice1 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "local-slice-5", UID: "local-slice-5"}, Spec: resourceapi.ResourceSliceSpec{NodeName: "node3"}}
-node3Slice2 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "local-slice-6", UID: "local-slice-6"}, Spec: resourceapi.ResourceSliceSpec{NodeName: "node3"}}
-globalSlice1 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "global-slice-1", UID: "global-slice-1"}, Spec: resourceapi.ResourceSliceSpec{AllNodes: true}}
-globalSlice2 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "global-slice-2", UID: "global-slice-2"}, Spec: resourceapi.ResourceSliceSpec{AllNodes: true}}
+node1Name = "node1"
+node2Name = "node2"
+node3Name = "node3"
+trueValue = true
+node1Slice1 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "local-slice-1", UID: "local-slice-1"}, Spec: resourceapi.ResourceSliceSpec{NodeName: &node1Name}}
+node1Slice2 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "local-slice-2", UID: "local-slice-2"}, Spec: resourceapi.ResourceSliceSpec{NodeName: &node1Name}}
+node2Slice1 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "local-slice-3", UID: "local-slice-3"}, Spec: resourceapi.ResourceSliceSpec{NodeName: &node2Name}}
+node2Slice2 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "local-slice-4", UID: "local-slice-4"}, Spec: resourceapi.ResourceSliceSpec{NodeName: &node2Name}}
+node3Slice1 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "local-slice-5", UID: "local-slice-5"}, Spec: resourceapi.ResourceSliceSpec{NodeName: &node3Name}}
+node3Slice2 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "local-slice-6", UID: "local-slice-6"}, Spec: resourceapi.ResourceSliceSpec{NodeName: &node3Name}}
+globalSlice1 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "global-slice-1", UID: "global-slice-1"}, Spec: resourceapi.ResourceSliceSpec{AllNodes: &trueValue}}
+globalSlice2 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "global-slice-2", UID: "global-slice-2"}, Spec: resourceapi.ResourceSliceSpec{AllNodes: &trueValue}}
 node1 = test.BuildTestNode("node1", 1000, 1000)
 pod1 = test.BuildTestPod("pod1", 1, 1,
@@ -600,7 +605,7 @@ func TestSnapshotForkCommitRevert(t *testing.T) {
 GetClaimId(pod1OwnClaim2): pod1OwnClaim2.DeepCopy(),
 }
 initialDeviceClasses := map[string]*resourceapi.DeviceClass{deviceClass1.Name: deviceClass1.DeepCopy(), deviceClass2.Name: deviceClass2.DeepCopy()}
-initialLocalSlices := map[string][]*resourceapi.ResourceSlice{node1Slice1.Spec.NodeName: {node1Slice1.DeepCopy()}}
+initialLocalSlices := map[string][]*resourceapi.ResourceSlice{*node1Slice1.Spec.NodeName: {node1Slice1.DeepCopy()}}
 initialGlobalSlices := []*resourceapi.ResourceSlice{globalSlice1.DeepCopy(), globalSlice2.DeepCopy()}
 initialState := NewSnapshot(initialClaims, initialLocalSlices, initialGlobalSlices, initialDeviceClasses)
@@ -614,7 +619,7 @@ func TestSnapshotForkCommitRevert(t *testing.T) {
 GetClaimId(pod1OwnClaim1): drautils.TestClaimWithPodReservations(pod1OwnClaim1, podToReserve),
 GetClaimId(pod1OwnClaim2): drautils.TestClaimWithPodReservations(pod1OwnClaim2, podToReserve),
 }
-modifiedLocalSlices := map[string][]*resourceapi.ResourceSlice{addedNodeSlice.Spec.NodeName: {addedNodeSlice.DeepCopy()}}
+modifiedLocalSlices := map[string][]*resourceapi.ResourceSlice{*addedNodeSlice.Spec.NodeName: {addedNodeSlice.DeepCopy()}}
 // Expected state after modifications are applied
 modifiedState := NewSnapshot(
 modifiedClaims,
@@ -627,7 +632,7 @@ func TestSnapshotForkCommitRevert(t *testing.T) {
 t.Helper()
 addedSlices := []*resourceapi.ResourceSlice{addedNodeSlice.DeepCopy()}
-if err := s.AddNodeResourceSlices(addedNodeSlice.Spec.NodeName, addedSlices); err != nil {
+if err := s.AddNodeResourceSlices(*addedNodeSlice.Spec.NodeName, addedSlices); err != nil {
 t.Fatalf("failed to add %s resource slices: %v", addedNodeSlice.Spec.NodeName, err)
 }
 if err := s.AddClaims([]*resourceapi.ResourceClaim{addedClaim}); err != nil {
@@ -637,7 +642,7 @@ func TestSnapshotForkCommitRevert(t *testing.T) {
 t.Fatalf("failed to reserve claim %s for pod %s: %v", sharedClaim1.Name, podToReserve.Name, err)
 }
-s.RemoveNodeResourceSlices(node1Slice1.Spec.NodeName)
+s.RemoveNodeResourceSlices(*node1Slice1.Spec.NodeName)
 }
 compareSnapshots := func(t *testing.T, want, got *Snapshot, msg string) {
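With NodeName a pointer, the call sites above dereference it for map keys and arguments (*node1Slice1.Spec.NodeName), which is safe here because every local-slice fixture sets the field. Code that can also encounter global slices (AllNodes or NodeSelector, where NodeName is nil) would want a guard; a hedged sketch, not part of the patch:

package example

import resourceapi "k8s.io/api/resource/v1"

// nodeNameOf returns the node a slice is local to, or "" for global slices
// whose Spec.NodeName is nil (AllNodes or NodeSelector slices).
func nodeNameOf(slice *resourceapi.ResourceSlice) string {
	if slice.Spec.NodeName == nil {
		return ""
	}
	return *slice.Spec.NodeName
}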

View File

@@ -30,8 +30,10 @@ import (
 )
 func TestSanitizedNodeResourceSlices(t *testing.T) {
 oldNodeName := "oldNode"
+newNodeName := "newNode"
 nameSuffix := "abc"
+trueValue := true
 device1 := resourceapi.Device{Name: "device1"}
 device2 := resourceapi.Device{Name: "device2"}
@@ -40,7 +42,7 @@ func TestSanitizedNodeResourceSlices(t *testing.T) {
 allNodesSlice := &resourceapi.ResourceSlice{
 ObjectMeta: metav1.ObjectMeta{Name: "allNodesSlice", UID: "allNodesSlice"},
 Spec: resourceapi.ResourceSliceSpec{
-AllNodes: true,
+AllNodes: &trueValue,
 Driver: "driver.example.com",
 Pool: resourceapi.ResourcePool{Name: "all-nodes-pool1", Generation: 13, ResourceSliceCount: 37},
 Devices: devices,
@@ -58,7 +60,7 @@ func TestSanitizedNodeResourceSlices(t *testing.T) {
 pool1Slice1 := &resourceapi.ResourceSlice{
 ObjectMeta: metav1.ObjectMeta{Name: "pool1Slice1", UID: "pool1Slice1"},
 Spec: resourceapi.ResourceSliceSpec{
-NodeName: "oldNode",
+NodeName: &oldNodeName,
 Driver: "driver.example.com",
 Pool: resourceapi.ResourcePool{Name: "oldNode-pool1", Generation: 13, ResourceSliceCount: 37},
 Devices: devices,
@@ -67,7 +69,7 @@ func TestSanitizedNodeResourceSlices(t *testing.T) {
 pool1Slice2 := &resourceapi.ResourceSlice{
 ObjectMeta: metav1.ObjectMeta{Name: "pool1Slice2", UID: "pool1Slice2"},
 Spec: resourceapi.ResourceSliceSpec{
-NodeName: "oldNode",
+NodeName: &oldNodeName,
 Driver: "driver.example.com",
 Pool: resourceapi.ResourcePool{Name: "oldNode-pool1", Generation: 13, ResourceSliceCount: 37},
 Devices: devices,
@@ -76,7 +78,7 @@ func TestSanitizedNodeResourceSlices(t *testing.T) {
 pool2Slice1 := &resourceapi.ResourceSlice{
 ObjectMeta: metav1.ObjectMeta{Name: "pool2Slice1", UID: "pool2Slice1"},
 Spec: resourceapi.ResourceSliceSpec{
-NodeName: "oldNode",
+NodeName: &oldNodeName,
 Driver: "driver.example.com",
 Pool: resourceapi.ResourcePool{Name: "oldNode-pool2", Generation: 13, ResourceSliceCount: 37},
 Devices: devices,
@@ -85,7 +87,7 @@ func TestSanitizedNodeResourceSlices(t *testing.T) {
 pool2Slice2 := &resourceapi.ResourceSlice{
 ObjectMeta: metav1.ObjectMeta{Name: "pool2Slice2", UID: "pool2Slice2"},
 Spec: resourceapi.ResourceSliceSpec{
-NodeName: "oldNode",
+NodeName: &oldNodeName,
 Driver: "driver.example.com",
 Pool: resourceapi.ResourcePool{Name: "oldNode-pool2", Generation: 13, ResourceSliceCount: 37},
 Devices: devices,
@@ -94,7 +96,7 @@ func TestSanitizedNodeResourceSlices(t *testing.T) {
 pool1Slice1Sanitized := &resourceapi.ResourceSlice{
 ObjectMeta: metav1.ObjectMeta{Name: "pool1Slice1-abc"},
 Spec: resourceapi.ResourceSliceSpec{
-NodeName: "newNode",
+NodeName: &newNodeName,
 Driver: "driver.example.com",
 Pool: resourceapi.ResourcePool{Name: "oldNode-pool1-abc", Generation: 13, ResourceSliceCount: 37},
 Devices: devices,
@@ -103,7 +105,7 @@ func TestSanitizedNodeResourceSlices(t *testing.T) {
 pool1Slice2Sanitized := &resourceapi.ResourceSlice{
 ObjectMeta: metav1.ObjectMeta{Name: "pool1Slice2-abc"},
 Spec: resourceapi.ResourceSliceSpec{
-NodeName: "newNode",
+NodeName: &newNodeName,
 Driver: "driver.example.com",
 Pool: resourceapi.ResourcePool{Name: "oldNode-pool1-abc", Generation: 13, ResourceSliceCount: 37},
 Devices: devices,
@@ -112,7 +114,7 @@ func TestSanitizedNodeResourceSlices(t *testing.T) {
 pool2Slice1Sanitized := &resourceapi.ResourceSlice{
 ObjectMeta: metav1.ObjectMeta{Name: "pool2Slice1-abc"},
 Spec: resourceapi.ResourceSliceSpec{
-NodeName: "newNode",
+NodeName: &newNodeName,
 Driver: "driver.example.com",
 Pool: resourceapi.ResourcePool{Name: "oldNode-pool2-abc", Generation: 13, ResourceSliceCount: 37},
 Devices: devices,
@@ -121,7 +123,7 @@ func TestSanitizedNodeResourceSlices(t *testing.T) {
 pool2Slice2Sanitized := &resourceapi.ResourceSlice{
 ObjectMeta: metav1.ObjectMeta{Name: "pool2Slice2-abc"},
 Spec: resourceapi.ResourceSliceSpec{
-NodeName: "newNode",
+NodeName: &newNodeName,
 Driver: "driver.example.com",
 Pool: resourceapi.ResourcePool{Name: "oldNode-pool2-abc", Generation: 13, ResourceSliceCount: 37},
 Devices: devices,

View File

@@ -181,7 +181,7 @@ func testResourceSlices(driverName, poolName, nodeName string, poolGen, deviceCo
 ObjectMeta: metav1.ObjectMeta{Name: sliceName, UID: types.UID(sliceName)},
 Spec: resourceapi.ResourceSliceSpec{
 Driver: driverName,
-NodeName: nodeName,
+NodeName: &nodeName,
 Pool: resourceapi.ResourcePool{Name: poolName, Generation: poolGen, ResourceSliceCount: sliceCount},
 Devices: devices,
 },

View File

@@ -22,6 +22,7 @@ import (
 "github.com/google/go-cmp/cmp"
 "github.com/google/go-cmp/cmp/cmpopts"
+fwk "k8s.io/kube-scheduler/framework"
 apiv1 "k8s.io/api/core/v1"
 resourceapi "k8s.io/api/resource/v1"
@@ -32,7 +33,8 @@
 )
 func TestNodeInfo(t *testing.T) {
-node := test.BuildTestNode("test-node", 1000, 1024)
+nodeName := "test-node"
+node := test.BuildTestNode(nodeName, 1000, 1024)
 pods := []*apiv1.Pod{
 // Use pods requesting host-ports to make sure that NodeInfo fields other than node and Pods also
 // get set correctly (in this case - the UsedPorts field).
@@ -51,9 +53,9 @@
 Name: "test-node-slice-0",
 },
 Spec: resourceapi.ResourceSliceSpec{
-NodeName: "test-node",
+NodeName: &nodeName,
 Driver: "test.driver.com",
-Pool: resourceapi.ResourcePool{Name: "test-node", Generation: 13, ResourceSliceCount: 2},
+Pool: resourceapi.ResourcePool{Name: nodeName, Generation: 13, ResourceSliceCount: 2},
 Devices: []resourceapi.Device{{Name: "device-0"}, {Name: "device-1"}},
 }},
 {
@@ -61,9 +63,9 @@
 Name: "test-node-slice-1",
 },
 Spec: resourceapi.ResourceSliceSpec{
-NodeName: "test-node",
+NodeName: &nodeName,
 Driver: "test.driver.com",
-Pool: resourceapi.ResourcePool{Name: "test-node", Generation: 13, ResourceSliceCount: 2},
+Pool: resourceapi.ResourcePool{Name: nodeName, Generation: 13, ResourceSliceCount: 2},
 Devices: []resourceapi.Device{{Name: "device-2"}, {Name: "device-3"}},
 },
 },
@@ -71,14 +73,14 @@
 for _, tc := range []struct {
 testName string
-modFn func(info *schedulerframework.NodeInfo) *NodeInfo
-wantSchedNodeInfo *schedulerframework.NodeInfo
+modFn func(info fwk.NodeInfo) *NodeInfo
+wantSchedNodeInfo fwk.NodeInfo
 wantLocalResourceSlices []*resourceapi.ResourceSlice
 wantPods []*PodInfo
 }{
 {
 testName: "wrapping via NewNodeInfo",
-modFn: func(info *schedulerframework.NodeInfo) *NodeInfo {
+modFn: func(info fwk.NodeInfo) *NodeInfo {
 return NewNodeInfo(info.Node(), nil, testPodInfos(pods, false)...)
 },
 wantSchedNodeInfo: schedulerNodeInfo,
@@ -86,7 +88,7 @@
 },
 {
 testName: "wrapping via NewNodeInfo with DRA objects",
-modFn: func(info *schedulerframework.NodeInfo) *NodeInfo {
+modFn: func(info fwk.NodeInfo) *NodeInfo {
 return NewNodeInfo(info.Node(), slices, testPodInfos(pods, true)...)
 },
 wantSchedNodeInfo: schedulerNodeInfo,
@@ -95,10 +97,10 @@
 },
 {
 testName: "wrapping via NewTestNodeInfo",
-modFn: func(info *schedulerframework.NodeInfo) *NodeInfo {
+modFn: func(info fwk.NodeInfo) *NodeInfo {
 var pods []*apiv1.Pod
-for _, pod := range info.Pods {
-pods = append(pods, pod.Pod)
+for _, pod := range info.GetPods() {
+pods = append(pods, pod.GetPod())
 }
 return NewTestNodeInfo(info.Node(), pods...)
 },
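Throughout this table the modFn signature moves from the concrete *schedulerframework.NodeInfo to the fwk.NodeInfo interface, so pods are reached through GetPods() and the API object through GetPod() rather than the exported Pods/Pod fields. The recurring loop, factored into a sketch using only the interface methods shown in these hunks:

package example

import (
	apiv1 "k8s.io/api/core/v1"
	fwk "k8s.io/kube-scheduler/framework"
)

// podsOf collects the raw API pods from a framework NodeInfo; with the
// concrete type this used to read info.Pods and pod.Pod directly.
func podsOf(info fwk.NodeInfo) []*apiv1.Pod {
	var pods []*apiv1.Pod
	for _, podInfo := range info.GetPods() {
		pods = append(pods, podInfo.GetPod())
	}
	return pods
}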
@@ -107,7 +109,7 @@
 },
 {
 testName: "wrapping via WrapSchedulerNodeInfo",
-modFn: func(info *schedulerframework.NodeInfo) *NodeInfo {
+modFn: func(info fwk.NodeInfo) *NodeInfo {
 return WrapSchedulerNodeInfo(info, nil, nil)
 },
 wantSchedNodeInfo: schedulerNodeInfo,
@@ -115,7 +117,7 @@
 },
 {
 testName: "wrapping via WrapSchedulerNodeInfo with DRA objects",
-modFn: func(info *schedulerframework.NodeInfo) *NodeInfo {
+modFn: func(info fwk.NodeInfo) *NodeInfo {
 podInfos := testPodInfos(pods, true)
 extraInfos := make(map[types.UID]PodExtraInfo)
 for _, podInfo := range podInfos {
@@ -129,11 +131,11 @@
 },
 {
 testName: "wrapping via SetNode+AddPod",
-modFn: func(info *schedulerframework.NodeInfo) *NodeInfo {
+modFn: func(info fwk.NodeInfo) *NodeInfo {
 result := NewNodeInfo(nil, nil)
 result.SetNode(info.Node())
-for _, pod := range info.Pods {
-result.AddPod(&PodInfo{Pod: pod.Pod})
+for _, pod := range info.GetPods() {
+result.AddPod(&PodInfo{Pod: pod.GetPod()})
 }
 return result
 },
@@ -142,7 +144,7 @@
 },
 {
 testName: "wrapping via SetNode+AddPod with DRA objects",
-modFn: func(info *schedulerframework.NodeInfo) *NodeInfo {
+modFn: func(info fwk.NodeInfo) *NodeInfo {
 result := NewNodeInfo(nil, nil)
 result.LocalResourceSlices = slices
 result.SetNode(info.Node())
@@ -157,7 +159,7 @@
 },
 {
 testName: "removing pods",
-modFn: func(info *schedulerframework.NodeInfo) *NodeInfo {
+modFn: func(info fwk.NodeInfo) *NodeInfo {
 result := NewNodeInfo(info.Node(), slices, testPodInfos(pods, true)...)
 for _, pod := range []*apiv1.Pod{pods[0], pods[2], pods[4]} {
 if err := result.RemovePod(pod); err != nil {
@@ -172,7 +174,7 @@
 },
 {
 testName: "wrapping via WrapSchedulerNodeInfo and adding more pods",
-modFn: func(info *schedulerframework.NodeInfo) *NodeInfo {
+modFn: func(info fwk.NodeInfo) *NodeInfo {
 result := WrapSchedulerNodeInfo(info, nil, nil)
 result.AddPod(testPodInfos([]*apiv1.Pod{extraPod}, false)[0])
 return result
@@ -182,7 +184,7 @@
 },
 {
 testName: "wrapping via WrapSchedulerNodeInfo and adding more pods using DRA",
-modFn: func(info *schedulerframework.NodeInfo) *NodeInfo {
+modFn: func(info fwk.NodeInfo) *NodeInfo {
 result := WrapSchedulerNodeInfo(info, nil, nil)
 result.AddPod(testPodInfos([]*apiv1.Pod{extraPod}, true)[0])
 return result
@@ -193,7 +195,6 @@
 } {
 t.Run(tc.testName, func(t *testing.T) {
 wrappedNodeInfo := tc.modFn(schedulerNodeInfo.Snapshot())
 // Assert that the scheduler NodeInfo object is as expected.
 nodeInfoCmpOpts := []cmp.Option{
 // The Node is the only unexported field in this type, and we want to compare it.
@@ -202,8 +203,8 @@
 cmpopts.IgnoreFields(schedulerframework.NodeInfo{}, "Generation"),
 // The pod order changes in a particular way whenever schedulerframework.RemovePod() is called. Instead of
 // relying on that schedulerframework implementation detail in assertions, just ignore the order.
-cmpopts.SortSlices(func(p1, p2 *schedulerframework.PodInfo) bool {
-return p1.Pod.Name < p2.Pod.Name
+cmpopts.SortSlices(func(p1, p2 fwk.PodInfo) bool {
+return p1.GetPod().Name < p2.GetPod().Name
 }),
 cmpopts.IgnoreUnexported(schedulerframework.PodInfo{}),
 }
@@ -247,7 +248,8 @@
 }
 func TestDeepCopyNodeInfo(t *testing.T) {
-node := test.BuildTestNode("node", 1000, 1000)
+nodeName := "node"
+node := test.BuildTestNode(nodeName, 1000, 1000)
 pods := []*PodInfo{
 {Pod: test.BuildTestPod("p1", 80, 0, test.WithNodeName(node.Name))},
 {
@@ -261,8 +263,8 @@
 },
 }
 slices := []*resourceapi.ResourceSlice{
-{ObjectMeta: v1.ObjectMeta{Name: "slice1"}, Spec: resourceapi.ResourceSliceSpec{NodeName: "node"}},
-{ObjectMeta: v1.ObjectMeta{Name: "slice2"}, Spec: resourceapi.ResourceSliceSpec{NodeName: "node"}},
+{ObjectMeta: v1.ObjectMeta{Name: "slice1"}, Spec: resourceapi.ResourceSliceSpec{NodeName: &nodeName}},
+{ObjectMeta: v1.ObjectMeta{Name: "slice2"}, Spec: resourceapi.ResourceSliceSpec{NodeName: &nodeName}},
 }
 for _, tc := range []struct {

View File

@@ -396,7 +396,7 @@ func TestCreateSanitizedNodeInfo(t *testing.T) {
 {
 ObjectMeta: metav1.ObjectMeta{Name: "slice1", UID: "slice1Uid"},
 Spec: resourceapi.ResourceSliceSpec{
-NodeName: oldNodeName,
+NodeName: &oldNodeName,
 Pool: resourceapi.ResourcePool{
 Name: "pool1",
 ResourceSliceCount: 1,
@@ -406,7 +406,7 @@
 {
 ObjectMeta: metav1.ObjectMeta{Name: "slice2", UID: "slice2Uid"},
 Spec: resourceapi.ResourceSliceSpec{
-NodeName: oldNodeName,
+NodeName: &oldNodeName,
 Pool: resourceapi.ResourcePool{
 Name: "pool2",
 ResourceSliceCount: 1,