CA: implement dynamicresources.Snapshot for storing and modifying the state of DRA objects

The Snapshot can hold all DRA objects in the cluster, and expose them
to the scheduler framework via the SharedDRAManager interface.

The state of the objects can be modified during autoscaling simulations
using the provided methods.
Kuba Tużnik 2024-12-09 13:43:51 +01:00
parent 66d0aeb3cb
commit 377639a8dc
11 changed files with 1626 additions and 16 deletions
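
(For orientation, a minimal usage sketch — not part of this commit — of how the new Snapshot could be driven in a simulation. It assumes the package lives at k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot, next to the utils package imported in the diff below, and it only uses constructors and methods introduced here; "some-node" is a placeholder.)

package example

import (
	resourceapi "k8s.io/api/resource/v1beta1"

	drasnapshot "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot"
)

func simulate(claims map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim,
	nodeLocalSlices map[string][]*resourceapi.ResourceSlice,
	globalSlices []*resourceapi.ResourceSlice,
	classes map[string]*resourceapi.DeviceClass) {

	snap := drasnapshot.NewSnapshot(claims, nodeLocalSlices, globalSlices, classes)

	// The scheduler framework interacts with the snapshot through these three views.
	_ = snap.ResourceClaims() // schedulerframework.ResourceClaimTracker
	_ = snap.ResourceSlices() // schedulerframework.ResourceSliceLister
	_ = snap.DeviceClasses()  // schedulerframework.DeviceClassLister

	// Simulations fork the state before mutating it; ResourceClaims are deep-copied,
	// so changes to the fork don't leak into the original snapshot.
	forked := snap.Clone()
	forked.RemoveNodeResourceSlices("some-node")
}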

@@ -17,32 +17,248 @@ limitations under the License.
package snapshot
import (
"fmt"
apiv1 "k8s.io/api/core/v1"
resourceapi "k8s.io/api/resource/v1beta1"
"k8s.io/apimachinery/pkg/types"
drautils "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/utils"
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
)
// ResourceClaimId is a unique identifier for a ResourceClaim.
type ResourceClaimId struct {
Name string
Namespace string
}
// GetClaimId returns the unique identifier for a ResourceClaim.
func GetClaimId(claim *resourceapi.ResourceClaim) ResourceClaimId {
return ResourceClaimId{Name: claim.Name, Namespace: claim.Namespace}
}
// Snapshot contains a snapshot of all DRA objects taken at a ~single point in time. The Snapshot should be
// obtained via the Provider. Then, it can be modified using the exposed methods, to simulate scheduling actions
// in the cluster.
type Snapshot struct {
resourceClaimsById map[ResourceClaimId]*resourceapi.ResourceClaim
resourceSlicesByNodeName map[string][]*resourceapi.ResourceSlice
nonNodeLocalResourceSlices []*resourceapi.ResourceSlice
deviceClasses map[string]*resourceapi.DeviceClass
}
// NewSnapshot returns a Snapshot created from the provided data.
func NewSnapshot(claims map[ResourceClaimId]*resourceapi.ResourceClaim, nodeLocalSlices map[string][]*resourceapi.ResourceSlice, globalSlices []*resourceapi.ResourceSlice, deviceClasses map[string]*resourceapi.DeviceClass) Snapshot {
return Snapshot{
resourceClaimsById: claims,
resourceSlicesByNodeName: nodeLocalSlices,
nonNodeLocalResourceSlices: globalSlices,
deviceClasses: deviceClasses,
}
}
// ResourceClaims exposes the Snapshot as schedulerframework.ResourceClaimTracker, in order to interact with
// the scheduler framework.
func (s Snapshot) ResourceClaims() schedulerframework.ResourceClaimTracker {
return snapshotClaimTracker(s)
}
// ResourceSlices exposes the Snapshot as schedulerframework.ResourceSliceLister, in order to interact with
// the scheduler framework.
func (s Snapshot) ResourceSlices() schedulerframework.ResourceSliceLister {
return snapshotSliceLister(s)
}
// DeviceClasses exposes the Snapshot as schedulerframework.DeviceClassLister, in order to interact with
// the scheduler framework.
func (s Snapshot) DeviceClasses() schedulerframework.DeviceClassLister {
return snapshotClassLister(s)
}
// WrapSchedulerNodeInfo wraps the provided *schedulerframework.NodeInfo into an internal *framework.NodeInfo, adding
// dra information. Node-local ResourceSlices are added to the NodeInfo, and all ResourceClaims referenced by each Pod
// are added to each PodInfo. Returns an error if any of the Pods is missing a ResourceClaim.
func (s Snapshot) WrapSchedulerNodeInfo(schedNodeInfo *schedulerframework.NodeInfo) (*framework.NodeInfo, error) {
podExtraInfos := map[types.UID]framework.PodExtraInfo{}
for _, pod := range schedNodeInfo.Pods {
podClaims, err := s.PodClaims(pod.Pod)
if err != nil {
return nil, err
}
if len(podClaims) > 0 {
podExtraInfos[pod.Pod.UID] = framework.PodExtraInfo{NeededResourceClaims: podClaims}
}
}
nodeSlices, _ := s.NodeResourceSlices(schedNodeInfo.Node().Name)
return framework.WrapSchedulerNodeInfo(schedNodeInfo, nodeSlices, podExtraInfos), nil
}
// Clone returns a copy of this Snapshot that can be independently modified without affecting this Snapshot.
// The only mutable objects in the Snapshot are ResourceClaims, so they are deep-copied. The rest is only a
// shallow copy.
func (s Snapshot) Clone() Snapshot {
result := Snapshot{
resourceClaimsById: map[ResourceClaimId]*resourceapi.ResourceClaim{},
resourceSlicesByNodeName: map[string][]*resourceapi.ResourceSlice{},
deviceClasses: map[string]*resourceapi.DeviceClass{},
}
// The claims are mutable, so they have to be deep-copied.
for id, claim := range s.resourceClaimsById {
result.resourceClaimsById[id] = claim.DeepCopy()
}
// The rest of the objects aren't mutable, so a shallow copy should be enough.
for nodeName, slices := range s.resourceSlicesByNodeName {
for _, slice := range slices {
result.resourceSlicesByNodeName[nodeName] = append(result.resourceSlicesByNodeName[nodeName], slice)
}
}
for _, slice := range s.nonNodeLocalResourceSlices {
result.nonNodeLocalResourceSlices = append(result.nonNodeLocalResourceSlices, slice)
}
for className, class := range s.deviceClasses {
result.deviceClasses[className] = class
}
return result
}
// AddClaims adds additional ResourceClaims to the Snapshot. It can be used e.g. if we need to duplicate a Pod that
// owns ResourceClaims. Returns an error if any of the claims is already tracked in the snapshot.
func (s Snapshot) AddClaims(newClaims []*resourceapi.ResourceClaim) error {
for _, claim := range newClaims {
if _, found := s.resourceClaimsById[GetClaimId(claim)]; found {
return fmt.Errorf("claim %s/%s already tracked in the snapshot", claim.Namespace, claim.Name)
}
}
for _, claim := range newClaims {
s.resourceClaimsById[GetClaimId(claim)] = claim
}
return nil
}
// PodClaims returns ResourceClaims objects for all claims referenced by the Pod. If any of the referenced claims
// isn't tracked in the Snapshot, an error is returned.
func (s Snapshot) PodClaims(pod *apiv1.Pod) ([]*resourceapi.ResourceClaim, error) {
var result []*resourceapi.ResourceClaim
for _, claimRef := range pod.Spec.ResourceClaims {
claim, err := s.claimForPod(pod, claimRef)
if err != nil {
return nil, fmt.Errorf("error while obtaining ResourceClaim %s for pod %s/%s: %v", claimRef.Name, pod.Namespace, pod.Name, err)
}
result = append(result, claim)
}
return result, nil
}
// RemovePodOwnedClaims iterates over all the claims referenced by the Pod, and removes the ones owned by the Pod from the Snapshot.
// Claims referenced by the Pod but not owned by it are not removed, but the Pod's reservation is removed from them.
// This method removes all relevant claims that are in the snapshot, and doesn't error out if any of the claims are missing.
func (s Snapshot) RemovePodOwnedClaims(pod *apiv1.Pod) {
for _, podClaimRef := range pod.Spec.ResourceClaims {
claimName := claimRefToName(pod, podClaimRef)
if claimName == "" {
// This most likely means that the Claim hasn't actually been created. Nothing to remove/modify, so continue to the next claim.
continue
}
claimId := ResourceClaimId{Name: claimName, Namespace: pod.Namespace}
claim, found := s.resourceClaimsById[claimId]
if !found {
// The claim isn't tracked in the snapshot for some reason. Nothing to remove/modify, so continue to the next claim.
continue
}
if ownerName, ownerUid := drautils.ClaimOwningPod(claim); ownerName == pod.Name && ownerUid == pod.UID {
delete(s.resourceClaimsById, claimId)
} else {
drautils.ClearPodReservationInPlace(claim, pod)
}
}
}
// ReservePodClaims adds a reservation for the provided Pod to all the claims it references. If any of the referenced
// claims isn't tracked in the Snapshot, or if any of the claims are already at maximum reservation count, an error is
// returned.
func (s Snapshot) ReservePodClaims(pod *apiv1.Pod) error {
claims, err := s.PodClaims(pod)
if err != nil {
return err
}
for _, claim := range claims {
if drautils.ClaimFullyReserved(claim) && !drautils.ClaimReservedForPod(claim, pod) {
return fmt.Errorf("claim %s/%s already has max number of reservations set, can't add more", claim.Namespace, claim.Name)
}
}
for _, claim := range claims {
drautils.AddPodReservationInPlace(claim, pod)
}
return nil
}
// UnreservePodClaims removes reservations for the provided Pod from all the claims it references. If any of the referenced
// claims isn't tracked in the Snapshot, an error is returned. If a claim is owned by the pod, or if the claim has no more reservations,
// its allocation is cleared.
func (s Snapshot) UnreservePodClaims(pod *apiv1.Pod) error {
claims, err := s.PodClaims(pod)
if err != nil {
return err
}
for _, claim := range claims {
ownerPodName, ownerPodUid := drautils.ClaimOwningPod(claim)
podOwnedClaim := ownerPodName == pod.Name && ownerPodUid == pod.UID
drautils.ClearPodReservationInPlace(claim, pod)
if podOwnedClaim || !drautils.ClaimInUse(claim) {
drautils.DeallocateClaimInPlace(claim)
}
}
return nil
}
// NodeResourceSlices returns all node-local ResourceSlices for the given Node.
func (s Snapshot) NodeResourceSlices(nodeName string) ([]*resourceapi.ResourceSlice, bool) {
slices, found := s.resourceSlicesByNodeName[nodeName]
return slices, found
}
// AddNodeResourceSlices adds additional node-local ResourceSlices to the Snapshot. This should be used whenever a Node with
// node-local ResourceSlices is duplicated in the cluster snapshot.
func (s Snapshot) AddNodeResourceSlices(nodeName string, slices []*resourceapi.ResourceSlice) error {
if _, alreadyInSnapshot := s.resourceSlicesByNodeName[nodeName]; alreadyInSnapshot {
return fmt.Errorf("node %q ResourceSlices already present", nodeName)
}
s.resourceSlicesByNodeName[nodeName] = slices
return nil
}
// RemoveNodeResourceSlices removes all node-local ResourceSlices for the Node with the given nodeName.
// It's a no-op if there aren't any slices to remove.
func (s Snapshot) RemoveNodeResourceSlices(nodeName string) {
delete(s.resourceSlicesByNodeName, nodeName)
}
func (s Snapshot) claimForPod(pod *apiv1.Pod, claimRef apiv1.PodResourceClaim) (*resourceapi.ResourceClaim, error) {
claimName := claimRefToName(pod, claimRef)
if claimName == "" {
return nil, fmt.Errorf("couldn't determine ResourceClaim name")
}
claim, found := s.resourceClaimsById[ResourceClaimId{Name: claimName, Namespace: pod.Namespace}]
if !found {
return nil, fmt.Errorf("couldn't find ResourceClaim %q", claimName)
}
return claim, nil
}
func claimRefToName(pod *apiv1.Pod, claimRef apiv1.PodResourceClaim) string {
if claimRef.ResourceClaimName != nil {
return *claimRef.ResourceClaimName
}
for _, claimStatus := range pod.Status.ResourceClaimStatuses {
if claimStatus.Name == claimRef.Name && claimStatus.ResourceClaimName != nil {
return *claimStatus.ResourceClaimName
}
}
return ""
}
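
(Illustrative sketch, not part of the diff: how the claim-mutating methods above are meant to compose when a simulation schedules and then removes a duplicated pod. The function name and arguments are placeholders and error handling is trimmed; the import path is assumed as in the earlier sketch.)

package example

import (
	apiv1 "k8s.io/api/core/v1"
	resourceapi "k8s.io/api/resource/v1beta1"

	drasnapshot "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot"
)

func simulatePodLifecycle(snap drasnapshot.Snapshot, pod *apiv1.Pod, duplicatedOwnedClaims []*resourceapi.ResourceClaim) error {
	// "Scheduling" the duplicated pod: track its duplicated owned claims first...
	if err := snap.AddClaims(duplicatedOwnedClaims); err != nil {
		return err
	}
	// ...then record its reservation on every claim it references (owned and shared).
	if err := snap.ReservePodClaims(pod); err != nil {
		return err
	}

	// "Unscheduling" it again: drop the reservations and stale allocations...
	if err := snap.UnreservePodClaims(pod); err != nil {
		return err
	}
	// ...and remove the claims owned by the pod, clearing its reservations on shared ones.
	snap.RemovePodOwnedClaims(pod)
	return nil
}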

@@ -0,0 +1,112 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package snapshot
import (
"fmt"
resourceapi "k8s.io/api/resource/v1beta1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/dynamic-resource-allocation/structured"
)
type snapshotClaimTracker Snapshot
func (s snapshotClaimTracker) List() ([]*resourceapi.ResourceClaim, error) {
var result []*resourceapi.ResourceClaim
for _, claim := range s.resourceClaimsById {
result = append(result, claim)
}
return result, nil
}
func (s snapshotClaimTracker) Get(namespace, claimName string) (*resourceapi.ResourceClaim, error) {
claim, found := s.resourceClaimsById[ResourceClaimId{Name: claimName, Namespace: namespace}]
if !found {
return nil, fmt.Errorf("claim %s/%s not found", namespace, claimName)
}
return claim, nil
}
func (s snapshotClaimTracker) ListAllAllocatedDevices() (sets.Set[structured.DeviceID], error) {
result := sets.New[structured.DeviceID]()
for _, claim := range s.resourceClaimsById {
result = result.Union(claimAllocatedDevices(claim))
}
return result, nil
}
func (s snapshotClaimTracker) SignalClaimPendingAllocation(claimUid types.UID, allocatedClaim *resourceapi.ResourceClaim) error {
// The DRA scheduler plugin calls this at the end of the scheduling phase, in Reserve. Then, the allocation is persisted via an API
// call during the binding phase.
//
// In Cluster Autoscaler only the scheduling phase is run, so SignalClaimPendingAllocation() is used to obtain the allocation
// and persist it in-memory in the snapshot.
ref := ResourceClaimId{Name: allocatedClaim.Name, Namespace: allocatedClaim.Namespace}
claim, found := s.resourceClaimsById[ref]
if !found {
return fmt.Errorf("claim %s/%s not found", allocatedClaim.Namespace, allocatedClaim.Name)
}
if claim.UID != claimUid {
return fmt.Errorf("claim %s/%s: snapshot has UID %q, allocation came for UID %q - shouldn't happenn", allocatedClaim.Namespace, allocatedClaim.Name, claim.UID, claimUid)
}
s.resourceClaimsById[ref] = allocatedClaim
return nil
}
func (s snapshotClaimTracker) ClaimHasPendingAllocation(claimUid types.UID) bool {
// The DRA scheduler plugin calls this at the beginning of Filter, and fails the filter if true is returned to handle race conditions.
//
// In the scheduler implementation, ClaimHasPendingAllocation() starts answering true after SignalClaimPendingAllocation()
// is called at the end of the scheduling phase, until RemoveClaimPendingAllocation() is called after the allocation API call
// is made in the asynchronous bind phase.
//
// In Cluster Autoscaler only the scheduling phase is run, and SignalClaimPendingAllocation() synchronously persists the allocation
// in-memory. So the race conditions don't apply, and this should always return false so as not to block the Filter.
return false
}
func (s snapshotClaimTracker) RemoveClaimPendingAllocation(claimUid types.UID) (deleted bool) {
// This method is only called during the Bind phase of scheduler framework, which is never run by CA. We need to implement
// it to satisfy the interface, but it should never be called.
panic("snapshotClaimTracker.RemoveClaimPendingAllocation() was called - this should never happen")
}
func (s snapshotClaimTracker) AssumeClaimAfterAPICall(claim *resourceapi.ResourceClaim) error {
// This method is only called during the Bind phase of scheduler framework, which is never run by CA. We need to implement
// it to satisfy the interface, but it should never be called.
panic("snapshotClaimTracker.AssumeClaimAfterAPICall() was called - this should never happen")
}
func (s snapshotClaimTracker) AssumedClaimRestore(namespace, claimName string) {
// This method is only called during the Bind phase of scheduler framework, which is never run by CA. We need to implement
// it to satisfy the interface, but it should never be called.
panic("snapshotClaimTracker.AssumedClaimRestore() was called - this should never happen")
}
// claimAllocatedDevices returns ids of all devices allocated in the provided claim.
func claimAllocatedDevices(claim *resourceapi.ResourceClaim) sets.Set[structured.DeviceID] {
if claim.Status.Allocation == nil {
return nil
}
result := sets.New[structured.DeviceID]()
for _, device := range claim.Status.Allocation.Devices.Results {
result.Insert(structured.MakeDeviceID(device.Driver, device.Pool, device.Device))
}
return result
}
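
(Illustrative sketch, not part of the diff: the in-memory flow described in the comments above, seen through the schedulerframework.ResourceClaimTracker returned by Snapshot.ResourceClaims(). The function name is hypothetical and allocatedClaim stands in for the allocated claim the DRA scheduler plugin produces during Reserve; the import path is assumed as before.)

package example

import (
	resourceapi "k8s.io/api/resource/v1beta1"

	drasnapshot "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot"
)

func persistAllocation(snap drasnapshot.Snapshot, allocatedClaim *resourceapi.ResourceClaim) error {
	tracker := snap.ResourceClaims()

	// CA never reaches the binding phase, so the allocation is persisted in-memory here.
	if err := tracker.SignalClaimPendingAllocation(allocatedClaim.UID, allocatedClaim); err != nil {
		return err
	}

	// Subsequent reads observe the allocated version of the claim...
	if _, err := tracker.Get(allocatedClaim.Namespace, allocatedClaim.Name); err != nil {
		return err
	}
	// ...and Filter is never blocked, because no allocation is ever left "pending" in CA.
	_ = tracker.ClaimHasPendingAllocation(allocatedClaim.UID)
	return nil
}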

@@ -0,0 +1,246 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package snapshot
import (
"testing"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
resourceapi "k8s.io/api/resource/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/autoscaler/cluster-autoscaler/utils/test"
"k8s.io/dynamic-resource-allocation/structured"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
)
var (
claim1 = &resourceapi.ResourceClaim{ObjectMeta: metav1.ObjectMeta{Name: "claim-1", UID: "claim-1", Namespace: "default"}}
claim2 = &resourceapi.ResourceClaim{ObjectMeta: metav1.ObjectMeta{Name: "claim-2", UID: "claim-2", Namespace: "default"}}
claim3 = &resourceapi.ResourceClaim{ObjectMeta: metav1.ObjectMeta{Name: "claim-3", UID: "claim-3", Namespace: "default"}}
allocatedClaim1 = &resourceapi.ResourceClaim{
ObjectMeta: metav1.ObjectMeta{Name: "claim-1", UID: "claim-1", Namespace: "default"},
Status: resourceapi.ResourceClaimStatus{
Allocation: &resourceapi.AllocationResult{
Devices: resourceapi.DeviceAllocationResult{
Results: []resourceapi.DeviceRequestAllocationResult{
{Request: "req-1", Driver: "driver.example.com", Pool: "pool-1", Device: "device-1"},
{Request: "req-2", Driver: "driver.example.com", Pool: "pool-1", Device: "device-2"},
},
},
},
},
}
allocatedClaim2 = &resourceapi.ResourceClaim{
ObjectMeta: metav1.ObjectMeta{Name: "claim-2", UID: "claim-2", Namespace: "default"},
Status: resourceapi.ResourceClaimStatus{
Allocation: &resourceapi.AllocationResult{
Devices: resourceapi.DeviceAllocationResult{
Results: []resourceapi.DeviceRequestAllocationResult{
{Request: "req-1", Driver: "driver.example.com", Pool: "pool-1", Device: "device-3"},
{Request: "req-2", Driver: "driver2.example.com", Pool: "pool-2", Device: "device-1"},
},
},
},
},
}
)
func TestSnapshotClaimTrackerList(t *testing.T) {
for _, tc := range []struct {
testName string
claims map[ResourceClaimId]*resourceapi.ResourceClaim
wantClaims []*resourceapi.ResourceClaim
}{
{
testName: "no claims in snapshot",
wantClaims: []*resourceapi.ResourceClaim{},
},
{
testName: "claims present in snapshot",
claims: map[ResourceClaimId]*resourceapi.ResourceClaim{
GetClaimId(claim1): claim1,
GetClaimId(claim2): claim2,
GetClaimId(claim3): claim3,
},
wantClaims: []*resourceapi.ResourceClaim{claim1, claim2, claim3},
},
} {
t.Run(tc.testName, func(t *testing.T) {
snapshot := NewSnapshot(tc.claims, nil, nil, nil)
var resourceClaimTracker schedulerframework.ResourceClaimTracker = snapshot.ResourceClaims()
claims, err := resourceClaimTracker.List()
if err != nil {
t.Fatalf("snapshotClaimTracker.List(): got unexpected error: %v", err)
}
if diff := cmp.Diff(tc.wantClaims, claims, cmpopts.EquateEmpty(), test.IgnoreObjectOrder[*resourceapi.ResourceClaim]()); diff != "" {
t.Errorf("snapshotClaimTracker.List(): unexpected output (-want +got): %s", diff)
}
})
}
}
func TestSnapshotClaimTrackerGet(t *testing.T) {
for _, tc := range []struct {
testName string
claimName string
claimNamespace string
wantClaim *resourceapi.ResourceClaim
wantErr error
}{
{
testName: "claim present in snapshot",
claimName: "claim-2",
claimNamespace: "default",
wantClaim: claim2,
},
{
testName: "claim not present in snapshot (wrong name)",
claimName: "claim-1337",
claimNamespace: "default",
wantErr: cmpopts.AnyError,
},
{
testName: "claim not present in snapshot (wrong namespace)",
claimName: "claim-2",
claimNamespace: "non-default",
wantErr: cmpopts.AnyError,
},
} {
t.Run(tc.testName, func(t *testing.T) {
claims := map[ResourceClaimId]*resourceapi.ResourceClaim{
GetClaimId(claim1): claim1,
GetClaimId(claim2): claim2,
GetClaimId(claim3): claim3,
}
snapshot := NewSnapshot(claims, nil, nil, nil)
var resourceClaimTracker schedulerframework.ResourceClaimTracker = snapshot.ResourceClaims()
claim, err := resourceClaimTracker.Get(tc.claimNamespace, tc.claimName)
if diff := cmp.Diff(tc.wantErr, err, cmpopts.EquateErrors()); diff != "" {
t.Fatalf("snapshotClaimTracker.Get(): unexpected error (-want +got): %s", diff)
}
if diff := cmp.Diff(tc.wantClaim, claim); diff != "" {
t.Errorf("snapshotClaimTracker.Get(): unexpected output (-want +got): %s", diff)
}
})
}
}
func TestSnapshotClaimTrackerListAllAllocatedDevices(t *testing.T) {
for _, tc := range []struct {
testName string
claims map[ResourceClaimId]*resourceapi.ResourceClaim
wantDevices sets.Set[structured.DeviceID]
}{
{
testName: "no claims in snapshot",
wantDevices: sets.New[structured.DeviceID](),
},
{
testName: "claims present in snapshot, all unallocated",
claims: map[ResourceClaimId]*resourceapi.ResourceClaim{
GetClaimId(claim1): claim1,
GetClaimId(claim2): claim2,
GetClaimId(claim3): claim3,
},
wantDevices: sets.New[structured.DeviceID](),
},
{
testName: "claims present in snapshot, some allocated",
claims: map[ResourceClaimId]*resourceapi.ResourceClaim{
GetClaimId(allocatedClaim1): allocatedClaim1,
GetClaimId(allocatedClaim2): allocatedClaim2,
GetClaimId(claim3): claim3,
},
wantDevices: sets.New[structured.DeviceID](
structured.MakeDeviceID("driver.example.com", "pool-1", "device-1"),
structured.MakeDeviceID("driver.example.com", "pool-1", "device-2"),
structured.MakeDeviceID("driver.example.com", "pool-1", "device-3"),
structured.MakeDeviceID("driver2.example.com", "pool-2", "device-1"),
),
},
} {
t.Run(tc.testName, func(t *testing.T) {
snapshot := NewSnapshot(tc.claims, nil, nil, nil)
var resourceClaimTracker schedulerframework.ResourceClaimTracker = snapshot.ResourceClaims()
devices, err := resourceClaimTracker.ListAllAllocatedDevices()
if err != nil {
t.Fatalf("snapshotClaimTracker.ListAllAllocatedDevices(): got unexpected error: %v", err)
}
if diff := cmp.Diff(tc.wantDevices, devices, cmpopts.EquateEmpty()); diff != "" {
t.Errorf("snapshotClaimTracker.ListAllAllocatedDevices(): unexpected output (-want +got): %s", diff)
}
})
}
}
func TestSnapshotClaimTrackerSignalClaimPendingAllocation(t *testing.T) {
for _, tc := range []struct {
testName string
claimUid types.UID
allocatedClaim *resourceapi.ResourceClaim
wantErr error
}{
{
testName: "claim not present in snapshot",
claimUid: "bad-name",
allocatedClaim: &resourceapi.ResourceClaim{ObjectMeta: metav1.ObjectMeta{Name: "bad-name", UID: "bad-name", Namespace: "default"}},
wantErr: cmpopts.AnyError,
},
{
testName: "provided UIDs don't match",
claimUid: "bad-name",
allocatedClaim: allocatedClaim2,
wantErr: cmpopts.AnyError,
},
{
testName: "claim correctly modified",
claimUid: "claim-2",
allocatedClaim: allocatedClaim2,
},
} {
t.Run(tc.testName, func(t *testing.T) {
claims := map[ResourceClaimId]*resourceapi.ResourceClaim{
GetClaimId(claim1): claim1,
GetClaimId(claim2): claim2,
GetClaimId(claim3): claim3,
}
snapshot := NewSnapshot(claims, nil, nil, nil)
var resourceClaimTracker schedulerframework.ResourceClaimTracker = snapshot.ResourceClaims()
err := resourceClaimTracker.SignalClaimPendingAllocation(tc.claimUid, tc.allocatedClaim)
if diff := cmp.Diff(tc.wantErr, err, cmpopts.EquateErrors()); diff != "" {
t.Fatalf("snapshotClaimTracker.SignalClaimPendingAllocation(): unexpected error (-want +got): %s", diff)
}
if tc.wantErr != nil {
return
}
claimAfterSignal, err := resourceClaimTracker.Get(tc.allocatedClaim.Namespace, tc.allocatedClaim.Name)
if err != nil {
t.Fatalf("snapshotClaimTracker.Get(): got unexpected error: %v", err)
}
if diff := cmp.Diff(tc.allocatedClaim, claimAfterSignal); diff != "" {
t.Errorf("Claim in unexpected state after snapshotClaimTracker.SignalClaimPendingAllocation() (-want +got): %s", diff)
}
})
}
}

@@ -0,0 +1,41 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package snapshot
import (
"fmt"
resourceapi "k8s.io/api/resource/v1beta1"
)
type snapshotClassLister Snapshot
func (s snapshotClassLister) List() ([]*resourceapi.DeviceClass, error) {
var result []*resourceapi.DeviceClass
for _, class := range s.deviceClasses {
result = append(result, class)
}
return result, nil
}
func (s snapshotClassLister) Get(className string) (*resourceapi.DeviceClass, error) {
class, found := s.deviceClasses[className]
if !found {
return nil, fmt.Errorf("DeviceClass %q not found", className)
}
return class, nil
}

@@ -0,0 +1,99 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package snapshot
import (
"testing"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
resourceapi "k8s.io/api/resource/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/autoscaler/cluster-autoscaler/utils/test"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
)
var (
class1 = &resourceapi.DeviceClass{ObjectMeta: metav1.ObjectMeta{Name: "class-1", UID: "class-1"}}
class2 = &resourceapi.DeviceClass{ObjectMeta: metav1.ObjectMeta{Name: "class-2", UID: "class-2"}}
class3 = &resourceapi.DeviceClass{ObjectMeta: metav1.ObjectMeta{Name: "class-3", UID: "class-3"}}
)
func TestSnapshotClassListerList(t *testing.T) {
for _, tc := range []struct {
testName string
classes map[string]*resourceapi.DeviceClass
wantClasses []*resourceapi.DeviceClass
}{
{
testName: "no classes in snapshot",
wantClasses: []*resourceapi.DeviceClass{},
},
{
testName: "classes present in snapshot",
classes: map[string]*resourceapi.DeviceClass{"class-1": class1, "class-2": class2, "class-3": class3},
wantClasses: []*resourceapi.DeviceClass{class1, class2, class3},
},
} {
t.Run(tc.testName, func(t *testing.T) {
snapshot := NewSnapshot(nil, nil, nil, tc.classes)
var deviceClassLister schedulerframework.DeviceClassLister = snapshot.DeviceClasses()
classes, err := deviceClassLister.List()
if err != nil {
t.Fatalf("snapshotClassLister.List(): got unexpected error: %v", err)
}
if diff := cmp.Diff(tc.wantClasses, classes, cmpopts.EquateEmpty(), test.IgnoreObjectOrder[*resourceapi.DeviceClass]()); diff != "" {
t.Errorf("snapshotClassLister.List(): unexpected output (-want +got): %s", diff)
}
})
}
}
func TestSnapshotClassListerGet(t *testing.T) {
for _, tc := range []struct {
testName string
classes map[string]*resourceapi.DeviceClass
className string
wantClass *resourceapi.DeviceClass
wantErr error
}{
{
testName: "class present in snapshot",
className: "class-2",
wantClass: class2,
},
{
testName: "class not present in snapshot",
className: "class-1337",
wantErr: cmpopts.AnyError,
},
} {
t.Run(tc.testName, func(t *testing.T) {
classes := map[string]*resourceapi.DeviceClass{"class-1": class1, "class-2": class2, "class-3": class3}
snapshot := NewSnapshot(nil, nil, nil, classes)
var deviceClassLister schedulerframework.DeviceClassLister = snapshot.DeviceClasses()
class, err := deviceClassLister.Get(tc.className)
if diff := cmp.Diff(tc.wantErr, err, cmpopts.EquateErrors()); diff != "" {
t.Fatalf("snapshotClassLister.Get(): unexpected error (-want +got): %s", diff)
}
if diff := cmp.Diff(tc.wantClass, class); diff != "" {
t.Errorf("snapshotClassLister.Get(): unexpected output (-want +got): %s", diff)
}
})
}
}

@@ -0,0 +1,34 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package snapshot
import (
resourceapi "k8s.io/api/resource/v1beta1"
)
type snapshotSliceLister Snapshot
func (s snapshotSliceLister) List() ([]*resourceapi.ResourceSlice, error) {
var result []*resourceapi.ResourceSlice
for _, slices := range s.resourceSlicesByNodeName {
for _, slice := range slices {
result = append(result, slice)
}
}
result = append(result, s.nonNodeLocalResourceSlices...)
return result, nil
}

@@ -0,0 +1,88 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package snapshot
import (
"testing"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
apiv1 "k8s.io/api/core/v1"
resourceapi "k8s.io/api/resource/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/autoscaler/cluster-autoscaler/utils/test"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
)
func TestSnapshotSliceListerList(t *testing.T) {
var (
localSlice1 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "local-slice-1", UID: "local-slice-1"}, Spec: resourceapi.ResourceSliceSpec{NodeName: "n1"}}
localSlice2 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "local-slice-2", UID: "local-slice-2"}, Spec: resourceapi.ResourceSliceSpec{NodeName: "n1"}}
localSlice3 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "local-slice-3", UID: "local-slice-3"}, Spec: resourceapi.ResourceSliceSpec{NodeName: "n2"}}
localSlice4 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "local-slice-4", UID: "local-slice-4"}, Spec: resourceapi.ResourceSliceSpec{NodeName: "n2"}}
globalSlice1 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "global-slice-1", UID: "global-slice-1"}, Spec: resourceapi.ResourceSliceSpec{AllNodes: true}}
globalSlice2 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "global-slice-2", UID: "global-slice-2"}, Spec: resourceapi.ResourceSliceSpec{AllNodes: true}}
globalSlice3 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "global-slice-3", UID: "global-slice-3"}, Spec: resourceapi.ResourceSliceSpec{NodeSelector: &apiv1.NodeSelector{}}}
)
for _, tc := range []struct {
testName string
localSlices map[string][]*resourceapi.ResourceSlice
globalSlices []*resourceapi.ResourceSlice
wantSlices []*resourceapi.ResourceSlice
}{
{
testName: "no slices in snapshot",
wantSlices: []*resourceapi.ResourceSlice{},
},
{
testName: "local slices in snapshot",
localSlices: map[string][]*resourceapi.ResourceSlice{
"n1": {localSlice1, localSlice2},
"n2": {localSlice3, localSlice4},
},
wantSlices: []*resourceapi.ResourceSlice{localSlice1, localSlice2, localSlice3, localSlice4},
},
{
testName: "global slices in snapshot",
globalSlices: []*resourceapi.ResourceSlice{globalSlice1, globalSlice2, globalSlice3},
wantSlices: []*resourceapi.ResourceSlice{globalSlice1, globalSlice2, globalSlice3},
},
{
testName: "global and local slices in snapshot",
localSlices: map[string][]*resourceapi.ResourceSlice{
"n1": {localSlice1, localSlice2},
"n2": {localSlice3, localSlice4},
},
globalSlices: []*resourceapi.ResourceSlice{globalSlice1, globalSlice2, globalSlice3},
wantSlices: []*resourceapi.ResourceSlice{localSlice1, localSlice2, localSlice3, localSlice4, globalSlice1, globalSlice2, globalSlice3},
},
} {
t.Run(tc.testName, func(t *testing.T) {
snapshot := NewSnapshot(nil, tc.localSlices, tc.globalSlices, nil)
var resourceSliceLister schedulerframework.ResourceSliceLister = snapshot.ResourceSlices()
slices, err := resourceSliceLister.List()
if err != nil {
t.Fatalf("snapshotSliceLister.List(): got unexpected error: %v", err)
}
if diff := cmp.Diff(tc.wantSlices, slices, cmpopts.EquateEmpty(), test.IgnoreObjectOrder[*resourceapi.ResourceSlice]()); diff != "" {
t.Errorf("snapshotSliceLister.List(): unexpected output (-want +got): %s", diff)
}
})
}
}

@@ -0,0 +1,704 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package snapshot
import (
"fmt"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
apiv1 "k8s.io/api/core/v1"
resourceapi "k8s.io/api/resource/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
drautils "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/utils"
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
"k8s.io/autoscaler/cluster-autoscaler/utils/test"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
)
var (
node1Slice1 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "local-slice-1", UID: "local-slice-1"}, Spec: resourceapi.ResourceSliceSpec{NodeName: "node1"}}
node1Slice2 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "local-slice-2", UID: "local-slice-2"}, Spec: resourceapi.ResourceSliceSpec{NodeName: "node1"}}
node2Slice1 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "local-slice-3", UID: "local-slice-3"}, Spec: resourceapi.ResourceSliceSpec{NodeName: "node2"}}
node2Slice2 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "local-slice-4", UID: "local-slice-4"}, Spec: resourceapi.ResourceSliceSpec{NodeName: "node2"}}
node3Slice1 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "local-slice-5", UID: "local-slice-5"}, Spec: resourceapi.ResourceSliceSpec{NodeName: "node3"}}
node3Slice2 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "local-slice-6", UID: "local-slice-6"}, Spec: resourceapi.ResourceSliceSpec{NodeName: "node3"}}
globalSlice1 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "global-slice-1", UID: "global-slice-1"}, Spec: resourceapi.ResourceSliceSpec{AllNodes: true}}
globalSlice2 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "global-slice-2", UID: "global-slice-2"}, Spec: resourceapi.ResourceSliceSpec{AllNodes: true}}
node1 = test.BuildTestNode("node1", 1000, 1000)
pod1 = test.BuildTestPod("pod1", 1, 1,
test.WithResourceClaim("ownClaim1", "pod1-ownClaim1-abc", "pod1-ownClaim1"),
test.WithResourceClaim("ownClaim2", "pod1-ownClaim2-abc", "pod1-ownClaim2"),
test.WithResourceClaim("sharedClaim1", "sharedClaim1", ""),
test.WithResourceClaim("sharedClaim2", "sharedClaim2", ""),
)
pod2 = test.BuildTestPod("pod2", 1, 1,
test.WithResourceClaim("ownClaim1", "pod2-ownClaim1-abc", "pod1-ownClaim1"),
test.WithResourceClaim("sharedClaim1", "sharedClaim1", ""),
test.WithResourceClaim("sharedClaim3", "sharedClaim3", ""),
)
sharedClaim1 = &resourceapi.ResourceClaim{ObjectMeta: metav1.ObjectMeta{Name: "sharedClaim1", UID: "sharedClaim1", Namespace: "default"}}
sharedClaim2 = &resourceapi.ResourceClaim{ObjectMeta: metav1.ObjectMeta{Name: "sharedClaim2", UID: "sharedClaim2", Namespace: "default"}}
sharedClaim3 = &resourceapi.ResourceClaim{ObjectMeta: metav1.ObjectMeta{Name: "sharedClaim3", UID: "sharedClaim3", Namespace: "default"}}
pod1OwnClaim1 = drautils.TestClaimWithPodOwnership(pod1,
&resourceapi.ResourceClaim{ObjectMeta: metav1.ObjectMeta{Name: "pod1-ownClaim1-abc", UID: "pod1-ownClaim1-abc", Namespace: "default"}},
)
pod1OwnClaim2 = drautils.TestClaimWithPodOwnership(pod1,
&resourceapi.ResourceClaim{ObjectMeta: metav1.ObjectMeta{Name: "pod1-ownClaim2-abc", UID: "pod1-ownClaim2-abc", Namespace: "default"}},
)
pod2OwnClaim1 = drautils.TestClaimWithPodOwnership(pod2,
&resourceapi.ResourceClaim{ObjectMeta: metav1.ObjectMeta{Name: "pod2-ownClaim1-abc", UID: "pod2-ownClaim1-abc", Namespace: "default"}},
)
)
func TestSnapshotResourceClaims(t *testing.T) {
pod1NoClaimsInStatus := pod1.DeepCopy()
pod1NoClaimsInStatus.Status.ResourceClaimStatuses = nil
for _, tc := range []struct {
testName string
claims map[ResourceClaimId]*resourceapi.ResourceClaim
claimsModFun func(snapshot Snapshot) error
wantClaimsModFunErr error
pod *apiv1.Pod
wantPodClaims []*resourceapi.ResourceClaim
wantPodClaimsErr error
wantAllClaims []*resourceapi.ResourceClaim
}{
{
testName: "PodClaims(): missing pod-owned claim referenced by pod is an error",
claims: map[ResourceClaimId]*resourceapi.ResourceClaim{
GetClaimId(sharedClaim1): sharedClaim1.DeepCopy(),
GetClaimId(sharedClaim2): sharedClaim2.DeepCopy(),
GetClaimId(pod2OwnClaim1): pod2OwnClaim1.DeepCopy(),
GetClaimId(pod1OwnClaim1): pod1OwnClaim1.DeepCopy(),
},
pod: pod1,
wantPodClaimsErr: cmpopts.AnyError,
},
{
testName: "PodClaims(): missing shared claim referenced by pod is an error",
claims: map[ResourceClaimId]*resourceapi.ResourceClaim{
GetClaimId(sharedClaim1): sharedClaim1.DeepCopy(),
GetClaimId(pod2OwnClaim1): pod2OwnClaim1.DeepCopy(),
GetClaimId(pod1OwnClaim1): pod1OwnClaim1.DeepCopy(),
GetClaimId(pod1OwnClaim2): pod1OwnClaim2.DeepCopy(),
},
pod: pod1,
wantPodClaimsErr: cmpopts.AnyError,
},
{
testName: "PodClaims(): claim template set but no claim name in status is an error",
claims: map[ResourceClaimId]*resourceapi.ResourceClaim{
GetClaimId(sharedClaim1): sharedClaim1.DeepCopy(),
GetClaimId(sharedClaim2): sharedClaim2.DeepCopy(),
GetClaimId(pod2OwnClaim1): pod2OwnClaim1.DeepCopy(),
GetClaimId(pod1OwnClaim1): pod1OwnClaim1.DeepCopy(),
GetClaimId(pod1OwnClaim2): pod1OwnClaim2.DeepCopy(),
},
pod: pod1NoClaimsInStatus,
wantPodClaimsErr: cmpopts.AnyError,
},
{
testName: "PodClaims(): all shared and pod-owned claims are returned for a pod",
claims: map[ResourceClaimId]*resourceapi.ResourceClaim{
GetClaimId(sharedClaim1): sharedClaim1.DeepCopy(),
GetClaimId(sharedClaim2): sharedClaim2.DeepCopy(),
GetClaimId(sharedClaim3): sharedClaim3.DeepCopy(),
GetClaimId(pod2OwnClaim1): pod2OwnClaim1.DeepCopy(),
GetClaimId(pod1OwnClaim1): pod1OwnClaim1.DeepCopy(),
GetClaimId(pod1OwnClaim2): pod1OwnClaim2.DeepCopy(),
},
pod: pod1,
wantPodClaims: []*resourceapi.ResourceClaim{sharedClaim1, sharedClaim2, pod1OwnClaim1, pod1OwnClaim2},
},
{
testName: "AddClaims(): trying to add a duplicate claim is an error",
claims: map[ResourceClaimId]*resourceapi.ResourceClaim{
GetClaimId(sharedClaim1): sharedClaim1.DeepCopy(),
GetClaimId(pod1OwnClaim1): pod1OwnClaim1.DeepCopy(),
},
claimsModFun: func(snapshot Snapshot) error {
return snapshot.AddClaims([]*resourceapi.ResourceClaim{sharedClaim2.DeepCopy(), sharedClaim1.DeepCopy()})
},
wantClaimsModFunErr: cmpopts.AnyError,
wantAllClaims: []*resourceapi.ResourceClaim{sharedClaim1, pod1OwnClaim1}, // unchanged on error
},
{
testName: "AddClaims(): new claims are correctly added",
claims: map[ResourceClaimId]*resourceapi.ResourceClaim{
GetClaimId(sharedClaim1): sharedClaim1.DeepCopy(),
GetClaimId(pod1OwnClaim1): pod1OwnClaim1.DeepCopy(),
},
claimsModFun: func(snapshot Snapshot) error {
if err := snapshot.AddClaims([]*resourceapi.ResourceClaim{sharedClaim2.DeepCopy(), pod2OwnClaim1.DeepCopy()}); err != nil {
return err
}
return snapshot.AddClaims([]*resourceapi.ResourceClaim{sharedClaim3.DeepCopy(), pod1OwnClaim2.DeepCopy()})
},
wantAllClaims: []*resourceapi.ResourceClaim{sharedClaim1, sharedClaim2, sharedClaim3, pod1OwnClaim1, pod1OwnClaim2, pod2OwnClaim1}, // 4 new claims added
},
{
testName: "RemovePodOwnedClaims(): pod-owned claims are correctly removed",
claims: map[ResourceClaimId]*resourceapi.ResourceClaim{
GetClaimId(sharedClaim1): sharedClaim1.DeepCopy(),
GetClaimId(sharedClaim2): sharedClaim2.DeepCopy(),
GetClaimId(sharedClaim3): sharedClaim3.DeepCopy(),
GetClaimId(pod2OwnClaim1): pod2OwnClaim1.DeepCopy(),
GetClaimId(pod1OwnClaim1): pod1OwnClaim1.DeepCopy(),
GetClaimId(pod1OwnClaim2): pod1OwnClaim2.DeepCopy(),
},
claimsModFun: func(snapshot Snapshot) error {
snapshot.RemovePodOwnedClaims(pod1)
return nil
},
pod: pod1,
wantPodClaimsErr: cmpopts.AnyError,
wantAllClaims: []*resourceapi.ResourceClaim{sharedClaim1, sharedClaim2, sharedClaim3, pod2OwnClaim1}, // pod1OwnClaim1 and pod1OwnClaim2 removed
},
{
testName: "RemovePodOwnedClaims(): pod reservations in shared claims are correctly removed",
claims: map[ResourceClaimId]*resourceapi.ResourceClaim{
GetClaimId(sharedClaim1): drautils.TestClaimWithPodReservations(sharedClaim1, pod1, pod2),
GetClaimId(sharedClaim2): drautils.TestClaimWithPodReservations(sharedClaim2, pod1),
GetClaimId(sharedClaim3): drautils.TestClaimWithPodReservations(sharedClaim3, pod2),
GetClaimId(pod2OwnClaim1): pod2OwnClaim1.DeepCopy(),
GetClaimId(pod1OwnClaim1): pod1OwnClaim1.DeepCopy(),
GetClaimId(pod1OwnClaim2): pod1OwnClaim2.DeepCopy(),
},
claimsModFun: func(snapshot Snapshot) error {
snapshot.RemovePodOwnedClaims(pod1)
return nil
},
pod: pod1,
wantPodClaimsErr: cmpopts.AnyError,
wantAllClaims: []*resourceapi.ResourceClaim{
drautils.TestClaimWithPodReservations(sharedClaim1, pod2), // pod1 reservation removed
sharedClaim2, // pod1 reservation removed
drautils.TestClaimWithPodReservations(sharedClaim3, pod2), // unchanged
pod2OwnClaim1, // unchanged
},
},
{
testName: "ReservePodClaims(): missing claims are an error",
claims: map[ResourceClaimId]*resourceapi.ResourceClaim{
GetClaimId(sharedClaim1): drautils.TestClaimWithPodReservations(sharedClaim1, pod2),
GetClaimId(sharedClaim3): drautils.TestClaimWithPodReservations(sharedClaim3, pod2),
GetClaimId(pod2OwnClaim1): pod2OwnClaim1.DeepCopy(),
GetClaimId(pod1OwnClaim1): drautils.TestClaimWithPodReservations(pod1OwnClaim1, pod1),
GetClaimId(pod1OwnClaim2): pod1OwnClaim2.DeepCopy(),
},
claimsModFun: func(snapshot Snapshot) error {
// sharedClaim2 is missing, so this should be an error.
return snapshot.ReservePodClaims(pod1)
},
wantClaimsModFunErr: cmpopts.AnyError,
wantAllClaims: []*resourceapi.ResourceClaim{ // unchanged on error
drautils.TestClaimWithPodReservations(sharedClaim1, pod2),
drautils.TestClaimWithPodReservations(sharedClaim3, pod2),
pod2OwnClaim1,
drautils.TestClaimWithPodReservations(pod1OwnClaim1, pod1),
pod1OwnClaim2,
},
},
{
testName: "ReservePodClaims(): trying to exceed max reservation limit is an error",
claims: map[ResourceClaimId]*resourceapi.ResourceClaim{
GetClaimId(sharedClaim1): drautils.TestClaimWithPodReservations(sharedClaim1, pod2),
GetClaimId(sharedClaim2): drautils.TestClaimWithPodReservations(sharedClaim2, testPods(resourceapi.ResourceClaimReservedForMaxSize)...),
GetClaimId(sharedClaim3): drautils.TestClaimWithPodReservations(sharedClaim3, pod2),
GetClaimId(pod2OwnClaim1): pod2OwnClaim1.DeepCopy(),
GetClaimId(pod1OwnClaim1): drautils.TestClaimWithPodReservations(pod1OwnClaim1, pod1),
GetClaimId(pod1OwnClaim2): pod1OwnClaim2.DeepCopy(),
},
claimsModFun: func(snapshot Snapshot) error {
// sharedClaim2 is already reserved by the maximum number of pods, so this should be an error.
return snapshot.ReservePodClaims(pod1)
},
wantClaimsModFunErr: cmpopts.AnyError,
wantAllClaims: []*resourceapi.ResourceClaim{ // unchanged on error
drautils.TestClaimWithPodReservations(sharedClaim1, pod2),
drautils.TestClaimWithPodReservations(sharedClaim2, testPods(resourceapi.ResourceClaimReservedForMaxSize)...),
drautils.TestClaimWithPodReservations(sharedClaim3, pod2),
pod2OwnClaim1,
drautils.TestClaimWithPodReservations(pod1OwnClaim1, pod1),
pod1OwnClaim2,
},
},
{
testName: "ReservePodClaims(): pod reservations are correctly added",
claims: map[ResourceClaimId]*resourceapi.ResourceClaim{
GetClaimId(sharedClaim1): drautils.TestClaimWithPodReservations(sharedClaim1, pod2),
GetClaimId(sharedClaim2): sharedClaim2.DeepCopy(),
GetClaimId(sharedClaim3): drautils.TestClaimWithPodReservations(sharedClaim3, pod2),
GetClaimId(pod2OwnClaim1): pod2OwnClaim1.DeepCopy(),
GetClaimId(pod1OwnClaim1): drautils.TestClaimWithPodReservations(pod1OwnClaim1, pod1),
GetClaimId(pod1OwnClaim2): pod1OwnClaim2.DeepCopy(),
},
claimsModFun: func(snapshot Snapshot) error {
return snapshot.ReservePodClaims(pod1)
},
pod: pod1,
wantPodClaims: []*resourceapi.ResourceClaim{
drautils.TestClaimWithPodReservations(sharedClaim1, pod2, pod1),
drautils.TestClaimWithPodReservations(sharedClaim2, pod1),
drautils.TestClaimWithPodReservations(pod1OwnClaim1, pod1),
drautils.TestClaimWithPodReservations(pod1OwnClaim2, pod1),
},
wantAllClaims: []*resourceapi.ResourceClaim{
drautils.TestClaimWithPodReservations(sharedClaim1, pod2, pod1), // pod1 reservation added to another reservation in a shared claim
drautils.TestClaimWithPodReservations(sharedClaim2, pod1), // pod1 reservation added to a shared claim
drautils.TestClaimWithPodReservations(sharedClaim3, pod2), // unchanged
pod2OwnClaim1, // unchanged
drautils.TestClaimWithPodReservations(pod1OwnClaim1, pod1), // unchanged, pod1 reservation already present
drautils.TestClaimWithPodReservations(pod1OwnClaim2, pod1), // pod1 reservation added to its own claim
},
},
{
testName: "UnreservePodClaims(): missing claim is an error",
claims: map[ResourceClaimId]*resourceapi.ResourceClaim{
GetClaimId(sharedClaim1): drautils.TestClaimWithPodReservations(sharedClaim1, pod1, pod2),
GetClaimId(sharedClaim3): drautils.TestClaimWithPodReservations(sharedClaim3, pod2),
GetClaimId(pod2OwnClaim1): pod2OwnClaim1.DeepCopy(),
GetClaimId(pod1OwnClaim1): drautils.TestClaimWithPodReservations(pod1OwnClaim1, pod1),
GetClaimId(pod1OwnClaim2): pod1OwnClaim2.DeepCopy(),
},
claimsModFun: func(snapshot Snapshot) error {
// sharedClaim2 is missing in claims above, so this should be an error.
return snapshot.UnreservePodClaims(pod1)
},
wantClaimsModFunErr: cmpopts.AnyError,
wantAllClaims: []*resourceapi.ResourceClaim{ // unchanged on error
drautils.TestClaimWithPodReservations(sharedClaim1, pod1, pod2),
drautils.TestClaimWithPodReservations(sharedClaim3, pod2),
pod2OwnClaim1,
drautils.TestClaimWithPodReservations(pod1OwnClaim1, pod1),
pod1OwnClaim2,
},
},
{
testName: "UnreservePodClaims(): correctly removes reservations from pod-owned and shared claims",
claims: map[ResourceClaimId]*resourceapi.ResourceClaim{
GetClaimId(sharedClaim1): drautils.TestClaimWithPodReservations(sharedClaim1, pod1, pod2),
GetClaimId(sharedClaim2): drautils.TestClaimWithPodReservations(sharedClaim2, pod1),
GetClaimId(sharedClaim3): drautils.TestClaimWithPodReservations(sharedClaim3, pod2),
GetClaimId(pod2OwnClaim1): pod2OwnClaim1.DeepCopy(),
GetClaimId(pod1OwnClaim1): drautils.TestClaimWithPodReservations(pod1OwnClaim1, pod1),
GetClaimId(pod1OwnClaim2): pod1OwnClaim2.DeepCopy(),
},
claimsModFun: func(snapshot Snapshot) error {
return snapshot.UnreservePodClaims(pod1)
},
pod: pod1,
wantPodClaims: []*resourceapi.ResourceClaim{
drautils.TestClaimWithPodReservations(sharedClaim1, pod2),
sharedClaim2,
pod1OwnClaim1,
pod1OwnClaim2,
},
wantAllClaims: []*resourceapi.ResourceClaim{
drautils.TestClaimWithPodReservations(sharedClaim1, pod2), // pod1 reservation removed, pod2 left
sharedClaim2, // pod1 reservation removed, none left
drautils.TestClaimWithPodReservations(sharedClaim3, pod2), // unchanged
pod2OwnClaim1, // unchanged
pod1OwnClaim1, // pod1 reservation removed
pod1OwnClaim2, // unchanged
},
},
{
testName: "UnreservePodClaims(): correctly clears allocations from pod-owned and unused shared claims",
claims: map[ResourceClaimId]*resourceapi.ResourceClaim{
GetClaimId(sharedClaim1): drautils.TestClaimWithAllocation(drautils.TestClaimWithPodReservations(sharedClaim1, pod1, pod2), nil),
GetClaimId(sharedClaim2): drautils.TestClaimWithAllocation(drautils.TestClaimWithPodReservations(sharedClaim2, pod1), nil),
GetClaimId(sharedClaim3): drautils.TestClaimWithAllocation(drautils.TestClaimWithPodReservations(sharedClaim3, pod2), nil),
GetClaimId(pod2OwnClaim1): drautils.TestClaimWithAllocation(pod2OwnClaim1.DeepCopy(), nil),
GetClaimId(pod1OwnClaim1): drautils.TestClaimWithAllocation(drautils.TestClaimWithPodReservations(pod1OwnClaim1, pod1), nil),
GetClaimId(pod1OwnClaim2): drautils.TestClaimWithAllocation(pod1OwnClaim2.DeepCopy(), nil),
},
claimsModFun: func(snapshot Snapshot) error {
return snapshot.UnreservePodClaims(pod1)
},
pod: pod1,
wantPodClaims: []*resourceapi.ResourceClaim{
drautils.TestClaimWithAllocation(drautils.TestClaimWithPodReservations(sharedClaim1, pod2), nil),
sharedClaim2,
pod1OwnClaim1,
pod1OwnClaim2,
},
wantAllClaims: []*resourceapi.ResourceClaim{
drautils.TestClaimWithAllocation(drautils.TestClaimWithPodReservations(sharedClaim1, pod2), nil), // sharedClaim1 still in use by pod2, so allocation kept
sharedClaim2, // pod1 reservation removed, none left so allocation cleared
drautils.TestClaimWithAllocation(drautils.TestClaimWithPodReservations(sharedClaim3, pod2), nil), // unchanged
drautils.TestClaimWithAllocation(pod2OwnClaim1, nil), // unchanged
pod1OwnClaim1, // pod1 reservation removed, allocation cleared
pod1OwnClaim2, // allocation cleared despite lack of reservation
},
},
} {
t.Run(tc.testName, func(t *testing.T) {
snapshot := NewSnapshot(tc.claims, nil, nil, nil)
if tc.claimsModFun != nil {
err := tc.claimsModFun(snapshot)
if diff := cmp.Diff(tc.wantClaimsModFunErr, err, cmpopts.EquateErrors()); diff != "" {
t.Fatalf("Snapshot modification: unexpected error (-want +got): %s", diff)
}
}
if tc.pod != nil {
podClaims, err := snapshot.PodClaims(tc.pod)
if diff := cmp.Diff(tc.wantPodClaimsErr, err, cmpopts.EquateErrors()); diff != "" {
t.Fatalf("Snapshot.PodClaims(): unexpected error (-want +got): %s", diff)
}
if diff := cmp.Diff(tc.wantPodClaims, podClaims, cmpopts.EquateEmpty(), test.IgnoreObjectOrder[*resourceapi.ResourceClaim]()); diff != "" {
t.Errorf("Snapshot.PodClaims(): unexpected output (-want +got): %s", diff)
}
}
if tc.wantAllClaims != nil {
allClaims, err := snapshot.ResourceClaims().List()
if err != nil {
t.Fatalf("ResourceClaims().List(): unexpected error: %v", err)
}
if diff := cmp.Diff(tc.wantAllClaims, allClaims, cmpopts.EquateEmpty(), test.IgnoreObjectOrder[*resourceapi.ResourceClaim]()); diff != "" {
t.Errorf("Snapshot: unexpected ResourceClaim state (-want +got): %s", diff)
}
}
})
}
}
func TestSnapshotResourceSlices(t *testing.T) {
localSlices := map[string][]*resourceapi.ResourceSlice{
"node1": {node1Slice1, node1Slice2},
"node2": {node2Slice1, node2Slice2},
}
globalSlices := []*resourceapi.ResourceSlice{globalSlice1, globalSlice2}
allSlices := append(globalSlices, node1Slice1, node1Slice2, node2Slice1, node2Slice2)
extraNode3Slice1 := node3Slice1
extraNode3Slice2 := node3Slice2
for _, tc := range []struct {
testName string
slicesModFun func(snapshot Snapshot) error
wantSlicesModFunErr error
nodeName string
wantNodeSlices []*resourceapi.ResourceSlice
wantNodeSlicesFound bool
wantAllSlices []*resourceapi.ResourceSlice
}{
{
testName: "NodeResourceSlices(): unknown nodeName results in found=false",
nodeName: "node3",
wantNodeSlicesFound: false,
},
{
testName: "NodeResourceSlices(): all node-local slices are correctly returned",
nodeName: "node2",
wantNodeSlicesFound: true,
wantNodeSlices: []*resourceapi.ResourceSlice{node2Slice1, node2Slice2},
},
{
testName: "AddNodeResourceSlices(): adding slices for a Node that already has slices tracked is an error",
slicesModFun: func(snapshot Snapshot) error {
return snapshot.AddNodeResourceSlices("node1", []*resourceapi.ResourceSlice{node1Slice1})
},
wantSlicesModFunErr: cmpopts.AnyError,
wantAllSlices: allSlices,
},
{
testName: "AddNodeResourceSlices(): adding slices for a new Node works correctly",
slicesModFun: func(snapshot Snapshot) error {
return snapshot.AddNodeResourceSlices("node3", []*resourceapi.ResourceSlice{extraNode3Slice1, extraNode3Slice2})
},
nodeName: "node3",
wantNodeSlicesFound: true,
wantNodeSlices: []*resourceapi.ResourceSlice{extraNode3Slice1, extraNode3Slice2},
wantAllSlices: append(allSlices, extraNode3Slice1, extraNode3Slice2),
},
{
testName: "RemoveNodeResourceSlices(): removing slices for a non-existing Node is a no-op",
slicesModFun: func(snapshot Snapshot) error {
snapshot.RemoveNodeResourceSlices("node3")
return nil
},
wantAllSlices: allSlices,
},
{
testName: "RemoveNodeResourceSlices(): removing slices for an existing Node works correctly",
slicesModFun: func(snapshot Snapshot) error {
snapshot.RemoveNodeResourceSlices("node2")
return nil
},
wantAllSlices: []*resourceapi.ResourceSlice{node1Slice1, node1Slice2, globalSlice1, globalSlice2},
},
} {
t.Run(tc.testName, func(t *testing.T) {
snapshot := NewSnapshot(nil, localSlices, globalSlices, nil)
if tc.slicesModFun != nil {
err := tc.slicesModFun(snapshot)
if diff := cmp.Diff(tc.wantSlicesModFunErr, err, cmpopts.EquateErrors()); diff != "" {
t.Fatalf("Snapshot modification: unexpected error (-want +got): %s", diff)
}
}
if tc.nodeName != "" {
nodeSlices, found := snapshot.NodeResourceSlices(tc.nodeName)
if tc.wantNodeSlicesFound != found {
t.Fatalf("Snapshot.NodeResourceSlices(): unexpected found value: want %v, got %v", tc.wantNodeSlicesFound, found)
}
if diff := cmp.Diff(tc.wantNodeSlices, nodeSlices, cmpopts.EquateEmpty(), test.IgnoreObjectOrder[*resourceapi.ResourceSlice]()); diff != "" {
t.Errorf("Snapshot.NodeResourceSlices(): unexpected output (-want +got): %s", diff)
}
}
if tc.wantAllSlices != nil {
allSlices, err := snapshot.ResourceSlices().List()
if err != nil {
t.Fatalf("ResourceSlices().List(): unexpected error: %v", err)
}
if diff := cmp.Diff(tc.wantAllSlices, allSlices, cmpopts.EquateEmpty(), test.IgnoreObjectOrder[*resourceapi.ResourceSlice]()); diff != "" {
t.Errorf("Snapshot: unexpected ResourceSlice state (-want +got): %s", diff)
}
}
})
}
}
func TestSnapshotWrapSchedulerNodeInfo(t *testing.T) {
noClaimsPod1 := test.BuildTestPod("noClaimsPod1", 1, 1)
noClaimsPod2 := test.BuildTestPod("noClaimsPod2", 1, 1)
missingClaimPod := test.BuildTestPod("missingClaimPod", 1, 1, test.WithResourceClaim("ref1", "missing-claim-abc", "missing-claim"))
noSlicesNode := test.BuildTestNode("noSlicesNode", 1000, 1000)
noDraNodeInfo := schedulerframework.NewNodeInfo(noClaimsPod1, noClaimsPod2)
noDraNodeInfo.SetNode(noSlicesNode)
resourceSlicesNodeInfo := schedulerframework.NewNodeInfo(noClaimsPod1, noClaimsPod2)
resourceSlicesNodeInfo.SetNode(node1)
resourceClaimsNodeInfo := schedulerframework.NewNodeInfo(pod1, pod2, noClaimsPod1, noClaimsPod2)
resourceClaimsNodeInfo.SetNode(noSlicesNode)
fullDraNodeInfo := schedulerframework.NewNodeInfo(pod1, pod2, noClaimsPod1, noClaimsPod2)
fullDraNodeInfo.SetNode(node1)
missingClaimNodeInfo := schedulerframework.NewNodeInfo(pod1, pod2, noClaimsPod1, noClaimsPod2, missingClaimPod)
missingClaimNodeInfo.SetNode(node1)
for _, tc := range []struct {
testName string
schedNodeInfo *schedulerframework.NodeInfo
wantNodeInfo *framework.NodeInfo
wantErr error
}{
{
testName: "no data to add to the wrapper",
schedNodeInfo: noDraNodeInfo,
wantNodeInfo: framework.WrapSchedulerNodeInfo(noDraNodeInfo, nil, nil),
},
{
testName: "ResourceSlices added to the wrapper",
schedNodeInfo: resourceSlicesNodeInfo,
wantNodeInfo: framework.WrapSchedulerNodeInfo(resourceSlicesNodeInfo, []*resourceapi.ResourceSlice{node1Slice1, node1Slice2}, nil),
},
{
testName: "ResourceClaims added to the wrapper",
schedNodeInfo: resourceClaimsNodeInfo,
wantNodeInfo: framework.WrapSchedulerNodeInfo(resourceClaimsNodeInfo, nil, map[types.UID]framework.PodExtraInfo{
"pod1": {NeededResourceClaims: []*resourceapi.ResourceClaim{pod1OwnClaim1, pod1OwnClaim2, sharedClaim1, sharedClaim2}},
"pod2": {NeededResourceClaims: []*resourceapi.ResourceClaim{pod2OwnClaim1, sharedClaim1, sharedClaim3}},
}),
},
{
testName: "ResourceSlices and ResourceClaims added to the wrapper",
schedNodeInfo: fullDraNodeInfo,
wantNodeInfo: framework.WrapSchedulerNodeInfo(fullDraNodeInfo, []*resourceapi.ResourceSlice{node1Slice1, node1Slice2}, map[types.UID]framework.PodExtraInfo{
"pod1": {NeededResourceClaims: []*resourceapi.ResourceClaim{pod1OwnClaim1, pod1OwnClaim2, sharedClaim1, sharedClaim2}},
"pod2": {NeededResourceClaims: []*resourceapi.ResourceClaim{pod2OwnClaim1, sharedClaim1, sharedClaim3}},
}),
},
{
testName: "pod in NodeInfo with a missing claim is an error",
schedNodeInfo: missingClaimNodeInfo,
wantNodeInfo: nil,
wantErr: cmpopts.AnyError,
},
} {
t.Run(tc.testName, func(t *testing.T) {
claims := map[ResourceClaimId]*resourceapi.ResourceClaim{
GetClaimId(sharedClaim1): sharedClaim1,
GetClaimId(sharedClaim2): sharedClaim2,
GetClaimId(sharedClaim3): sharedClaim3,
GetClaimId(pod2OwnClaim1): pod2OwnClaim1,
GetClaimId(pod1OwnClaim1): pod1OwnClaim1,
GetClaimId(pod1OwnClaim2): pod1OwnClaim2,
}
localSlices := map[string][]*resourceapi.ResourceSlice{
"node1": {node1Slice1, node1Slice2},
"node2": {node2Slice1, node2Slice2},
}
globalSlices := []*resourceapi.ResourceSlice{globalSlice1, globalSlice2}
snapshot := NewSnapshot(claims, localSlices, globalSlices, nil)
nodeInfo, err := snapshot.WrapSchedulerNodeInfo(tc.schedNodeInfo)
if diff := cmp.Diff(tc.wantErr, err, cmpopts.EquateErrors()); diff != "" {
t.Fatalf("Snapshot.WrapSchedulerNodeInfo(): unexpected error (-want +got): %s", diff)
}
cmpOpts := []cmp.Option{cmpopts.EquateEmpty(), cmp.AllowUnexported(framework.NodeInfo{}, schedulerframework.NodeInfo{}),
test.IgnoreObjectOrder[*resourceapi.ResourceClaim](), test.IgnoreObjectOrder[*resourceapi.ResourceSlice]()}
if diff := cmp.Diff(tc.wantNodeInfo, nodeInfo, cmpOpts...); diff != "" {
t.Errorf("Snapshot.WrapSchedulerNodeInfo(): unexpected output (-want +got): %s", diff)
}
})
}
}

func TestSnapshotClone(t *testing.T) {
for _, tc := range []struct {
testName string
snapshot Snapshot
cloneModFun func(snapshot Snapshot) error
wantModifiedClaims []*resourceapi.ResourceClaim
wantModifiedSlices []*resourceapi.ResourceSlice
}{
{
testName: "empty snapshot",
snapshot: Snapshot{},
cloneModFun: func(snapshot Snapshot) error {
if err := snapshot.AddClaims([]*resourceapi.ResourceClaim{pod1OwnClaim1.DeepCopy(), pod1OwnClaim2.DeepCopy()}); err != nil {
return err
}
return snapshot.AddNodeResourceSlices("node1", []*resourceapi.ResourceSlice{node1Slice1, node1Slice2})
},
wantModifiedClaims: []*resourceapi.ResourceClaim{pod1OwnClaim1, pod1OwnClaim2},
wantModifiedSlices: []*resourceapi.ResourceSlice{node1Slice1, node1Slice2},
},
{
testName: "non-empty snapshot",
snapshot: NewSnapshot(
map[ResourceClaimId]*resourceapi.ResourceClaim{
GetClaimId(sharedClaim1): drautils.TestClaimWithPodReservations(sharedClaim1, pod2),
GetClaimId(sharedClaim2): sharedClaim2.DeepCopy(),
GetClaimId(sharedClaim3): drautils.TestClaimWithPodReservations(sharedClaim3, pod2),
GetClaimId(pod2OwnClaim1): drautils.TestClaimWithPodOwnership(pod2, drautils.TestClaimWithPodReservations(pod2OwnClaim1, pod2)),
},
map[string][]*resourceapi.ResourceSlice{
"node1": {node1Slice1, node1Slice2},
"node2": {node2Slice1, node2Slice2},
},
[]*resourceapi.ResourceSlice{globalSlice1, globalSlice2}, nil),
cloneModFun: func(snapshot Snapshot) error {
if err := snapshot.AddNodeResourceSlices("node3", []*resourceapi.ResourceSlice{node3Slice1, node3Slice2}); err != nil {
return err
}
if err := snapshot.AddClaims([]*resourceapi.ResourceClaim{pod1OwnClaim1.DeepCopy(), pod1OwnClaim2.DeepCopy()}); err != nil {
return err
}
if err := snapshot.ReservePodClaims(pod1); err != nil {
return err
}
snapshot.RemovePodOwnedClaims(pod2)
snapshot.RemoveNodeResourceSlices("node1")
return nil
},
wantModifiedSlices: []*resourceapi.ResourceSlice{node2Slice1, node2Slice2, node3Slice1, node3Slice2, globalSlice1, globalSlice2},
wantModifiedClaims: []*resourceapi.ResourceClaim{
drautils.TestClaimWithPodReservations(pod1OwnClaim1, pod1),
drautils.TestClaimWithPodReservations(pod1OwnClaim2, pod1),
drautils.TestClaimWithPodReservations(sharedClaim1, pod1),
drautils.TestClaimWithPodReservations(sharedClaim2, pod1),
sharedClaim3,
},
},
} {
t.Run(tc.testName, func(t *testing.T) {
// Grab the initial state of the snapshot to verify that it doesn't change when the clone is modified.
initialClaims, err := tc.snapshot.ResourceClaims().List()
if err != nil {
t.Fatalf("ResourceClaims().List(): unexpected error: %v", err)
}
initialSlices, err := tc.snapshot.ResourceSlices().List()
if err != nil {
t.Fatalf("ResourceSlices().List(): unexpected error: %v", err)
}
// Clone and verify that the clone is identical to the original.
snapshotClone := tc.snapshot.Clone()
if diff := cmp.Diff(tc.snapshot, snapshotClone, cmpopts.EquateEmpty(), cmp.AllowUnexported(Snapshot{}, framework.NodeInfo{}, schedulerframework.NodeInfo{})); diff != "" {
t.Fatalf("Snapshot.Clone(): snapshot not identical after cloning (-want +got): %s", diff)
}
// Modify the clone.
if err := tc.cloneModFun(snapshotClone); err != nil {
t.Fatalf("Snapshot: unexpected error during snapshot modification: %v", err)
}
// Verify that the clone changed as expected.
modifiedClaims, err := snapshotClone.ResourceClaims().List()
if err != nil {
t.Fatalf("ResourceClaims().List(): unexpected error: %v", err)
}
modifiedSlices, err := snapshotClone.ResourceSlices().List()
if err != nil {
t.Fatalf("ResourceSlices().List(): unexpected error: %v", err)
}
if diff := cmp.Diff(tc.wantModifiedClaims, modifiedClaims, cmpopts.EquateEmpty(), test.IgnoreObjectOrder[*resourceapi.ResourceClaim]()); diff != "" {
t.Errorf("Snapshot: unexpected ResourceClaim state after modifications (-want +got): %s", diff)
}
if diff := cmp.Diff(tc.wantModifiedSlices, modifiedSlices, cmpopts.EquateEmpty(), test.IgnoreObjectOrder[*resourceapi.ResourceSlice]()); diff != "" {
t.Errorf("Snapshot: unexpected ResourceSlice state after modifications (-want +got): %s", diff)
}
// Verify that the original hasn't changed during clone modifications.
initialClaimsAfterCloneMod, err := tc.snapshot.ResourceClaims().List()
if err != nil {
t.Fatalf("ResourceClaims().List(): unexpected error: %v", err)
}
initialSlicesAfterCloneMod, err := tc.snapshot.ResourceSlices().List()
if err != nil {
t.Fatalf("ResourceSlices().List(): unexpected error: %v", err)
}
if diff := cmp.Diff(initialClaims, initialClaimsAfterCloneMod, cmpopts.EquateEmpty(), test.IgnoreObjectOrder[*resourceapi.ResourceClaim]()); diff != "" {
t.Errorf("Snapshot: ResourceClaim state changed in original snapshot during modifications on Clone (-want +got): %s", diff)
}
if diff := cmp.Diff(initialSlices, initialSlicesAfterCloneMod, cmpopts.EquateEmpty(), test.IgnoreObjectOrder[*resourceapi.ResourceSlice]()); diff != "" {
t.Errorf("Snapshot: ResourceSlice state changed in original snapshot during modifications on Clone (-want +got): %s", diff)
}
})
}
}
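// A hedged sketch (simulateOnFork, pod and podClaims are hypothetical, not part of this change): the
// clone-then-modify pattern exercised above is how simulated scheduling actions can be applied without
// touching the original snapshot - modify a fork, and simply discard it to roll the changes back.
func simulateOnFork(snapshot Snapshot, pod *apiv1.Pod, podClaims []*resourceapi.ResourceClaim) error {
	fork := snapshot.Clone()
	if err := fork.AddClaims(podClaims); err != nil {
		return err
	}
	// Mark the pod's claims as reserved for it, as they would be after the pod is scheduled in the simulation.
	if err := fork.ReservePodClaims(pod); err != nil {
		return err
	}
	// ...run further checks against fork.ResourceClaims() / fork.ResourceSlices()...
	// The original snapshot is untouched; dropping fork reverts everything.
	return nil
}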
// testPods returns count test pods named "test-pod-<i>".
func testPods(count int) []*apiv1.Pod {
var result []*apiv1.Pod
for i := range count {
result = append(result, test.BuildTestPod(fmt.Sprintf("test-pod-%d", i), 1, 1))
}
return result
}

View File

@ -17,7 +17,6 @@ limitations under the License.
package utils
import (
"fmt"
"testing"
"github.com/google/go-cmp/cmp"
@ -723,13 +722,3 @@ func TestAddPodReservationInPlace(t *testing.T) {
})
}
}
func testClaimReservations(count int) []resourceapi.ResourceClaimConsumerReference {
var result []resourceapi.ResourceClaimConsumerReference
for i := range count {
podName := fmt.Sprintf("pod-%d", i)
result = append(result, resourceapi.ResourceClaimConsumerReference{Resource: "pods",
Name: podName, UID: types.UID(podName + "Uid")})
}
return result
}

View File

@ -0,0 +1,71 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils

import (
"fmt"
apiv1 "k8s.io/api/core/v1"
resourceapi "k8s.io/api/resource/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
)

// TestClaimWithPodOwnership returns a copy of the provided claim with OwnerReferences set up so that
// the claim is owned by the provided pod.
func TestClaimWithPodOwnership(pod *apiv1.Pod, claim *resourceapi.ResourceClaim) *resourceapi.ResourceClaim {
result := claim.DeepCopy()
result.OwnerReferences = []metav1.OwnerReference{PodClaimOwnerReference(pod)}
return result
}

// TestClaimWithPodReservations returns a copy of the provided claim with reservations for the provided pods
// appended to its Status.ReservedFor list.
func TestClaimWithPodReservations(claim *resourceapi.ResourceClaim, pods ...*apiv1.Pod) *resourceapi.ResourceClaim {
result := claim.DeepCopy()
for _, pod := range pods {
result.Status.ReservedFor = append(result.Status.ReservedFor, PodClaimConsumerReference(pod))
}
return result
}

// TestClaimWithAllocation returns a copy of the provided claim with the provided allocation set in its status.
// If allocation is nil, a default single-device allocation is used.
func TestClaimWithAllocation(claim *resourceapi.ResourceClaim, allocation *resourceapi.AllocationResult) *resourceapi.ResourceClaim {
result := claim.DeepCopy()
defaultAlloc := &resourceapi.AllocationResult{
Devices: resourceapi.DeviceAllocationResult{
Results: []resourceapi.DeviceRequestAllocationResult{
{Request: "req1", Driver: "driver.example.com", Pool: "pool1", Device: "device1"},
},
},
}
if allocation == nil {
allocation = defaultAlloc
}
result.Status.Allocation = allocation
return result
}

// testClaimReservations returns count ResourceClaimConsumerReferences, one per synthetic test pod.
func testClaimReservations(count int) []resourceapi.ResourceClaimConsumerReference {
var result []resourceapi.ResourceClaimConsumerReference
for i := range count {
podName := fmt.Sprintf("pod-%d", i)
result = append(result, resourceapi.ResourceClaimConsumerReference{Resource: "pods",
Name: podName, UID: types.UID(podName + "Uid")})
}
return result
}
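// A hedged composition sketch (boundClaimForPod is hypothetical, not part of this change): the helpers above
// are meant to be chained, so a test can build e.g. a pod-owned claim that is already allocated and reserved
// for its owner in a single expression.
func boundClaimForPod(pod *apiv1.Pod, claim *resourceapi.ResourceClaim) *resourceapi.ResourceClaim {
	// Each helper deep-copies its input, so the original claim is left untouched.
	return TestClaimWithPodReservations(TestClaimWithAllocation(TestClaimWithPodOwnership(pod, claim), nil), pod)
}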

View File

@ -23,6 +23,8 @@ import (
"strings"
"time"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/stretchr/testify/mock"
apiv1 "k8s.io/api/core/v1"
@ -526,3 +528,11 @@ func (l *HttpServerMock) handle(req *http.Request, w http.ResponseWriter, server
}
return response
}

// IgnoreObjectOrder returns a cmp.Option that ignores the order of elements when comparing slices of K8s objects
// of type T, sorting them by their GetName() method before comparison.
func IgnoreObjectOrder[T interface{ GetName() string }]() cmp.Option {
return cmpopts.SortSlices(func(c1, c2 T) bool {
return c1.GetName() < c2.GetName()
})
}
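// A hedged usage sketch (diffObjectSets is hypothetical, not part of this change): IgnoreObjectOrder is meant
// to be combined with other cmp options so that tests don't depend on the ordering of returned object slices.
func diffObjectSets[T interface{ GetName() string }](want, got []T) string {
	// An empty result means the two slices hold the same objects, regardless of order.
	return cmp.Diff(want, got, cmpopts.EquateEmpty(), IgnoreObjectOrder[T]())
}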