DRA: introduce internal NodeInfo/PodInfo with DRA objects attached
Methods to interact with the new internal types are added to ClusterSnapshot. Cluster Autoscaler code will be migrated to use only these methods and to work on the internal types instead of using the framework types directly. The new types are designed to stay as close to the framework types as possible, which should keep the migration manageable.

This allows additional data to be attached to the Nodes and Pods tracked in ClusterSnapshot without having to change the scheduler framework. That will be needed to support DRA, as we'll need to track ResourceSlices and ResourceClaims.
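To make the intended end state concrete, here is a minimal sketch of what a consumer could look like after the migration. The helper and the local interface are hypothetical and only for illustration; ListNodeInfos, the framework.NodeInfo accessors, LocalResourceSlices and NeededResourceClaims are the ones introduced in this commit.

package example

import (
	"fmt"

	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

// nodeInfoLister is a narrow, local stand-in for the ClusterSnapshot interface,
// limited to the single method used in this sketch.
type nodeInfoLister interface {
	ListNodeInfos() ([]*framework.NodeInfo, error)
}

// logDRAState is a hypothetical helper: it walks the internal NodeInfos and
// reports the DRA objects attached to them.
func logDRAState(snapshot nodeInfoLister) error {
	nodeInfos, err := snapshot.ListNodeInfos()
	if err != nil {
		return err
	}
	for _, nodeInfo := range nodeInfos {
		// LocalResourceSlices and NeededResourceClaims are the DRA additions;
		// Node() and Pods() mirror the scheduler framework accessors.
		fmt.Printf("node %s exposes %d local ResourceSlices\n", nodeInfo.Node().Name, len(nodeInfo.LocalResourceSlices))
		for _, podInfo := range nodeInfo.Pods() {
			fmt.Printf("\tpod %s needs %d ResourceClaims\n", podInfo.Name, len(podInfo.NeededResourceClaims))
		}
	}
	return nil
}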
This commit is contained in:
parent 358f8c0d21
commit a329ac6601
@@ -20,6 +20,7 @@ import (
 	"fmt"
 
 	apiv1 "k8s.io/api/core/v1"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	"k8s.io/klog/v2"
 	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )

@@ -35,12 +36,12 @@ type internalBasicSnapshotData struct {
 	pvcNamespacePodMap map[string]map[string]bool
 }
 
-func (data *internalBasicSnapshotData) listNodeInfos() ([]*schedulerframework.NodeInfo, error) {
+func (data *internalBasicSnapshotData) listNodeInfos() []*schedulerframework.NodeInfo {
 	nodeInfoList := make([]*schedulerframework.NodeInfo, 0, len(data.nodeInfoMap))
 	for _, v := range data.nodeInfoMap {
 		nodeInfoList = append(nodeInfoList, v)
 	}
-	return nodeInfoList, nil
+	return nodeInfoList
 }
 
 func (data *internalBasicSnapshotData) listNodeInfosThatHavePodsWithAffinityList() ([]*schedulerframework.NodeInfo, error) {

@@ -212,6 +213,34 @@ func (snapshot *BasicClusterSnapshot) getInternalData() *internalBasicSnapshotDa
 	return snapshot.data[len(snapshot.data)-1]
 }
 
+// GetNodeInfo gets a NodeInfo.
+func (snapshot *BasicClusterSnapshot) GetNodeInfo(nodeName string) (*framework.NodeInfo, error) {
+	schedNodeInfo, err := snapshot.getInternalData().getNodeInfo(nodeName)
+	if err != nil {
+		return nil, err
+	}
+	return framework.WrapSchedulerNodeInfo(schedNodeInfo), nil
+}
+
+// ListNodeInfos lists NodeInfos.
+func (snapshot *BasicClusterSnapshot) ListNodeInfos() ([]*framework.NodeInfo, error) {
+	schedNodeInfos := snapshot.getInternalData().listNodeInfos()
+	return framework.WrapSchedulerNodeInfos(schedNodeInfos), nil
+}
+
+// AddNodeInfo adds a NodeInfo.
+func (snapshot *BasicClusterSnapshot) AddNodeInfo(nodeInfo *framework.NodeInfo) error {
+	if err := snapshot.getInternalData().addNode(nodeInfo.Node()); err != nil {
+		return err
+	}
+	for _, podInfo := range nodeInfo.Pods() {
+		if err := snapshot.getInternalData().addPod(podInfo.Pod, nodeInfo.Node().Name); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
 // AddNode adds node to the snapshot.
 func (snapshot *BasicClusterSnapshot) AddNode(node *apiv1.Node) error {
 	return snapshot.getInternalData().addNode(node)

@@ -302,7 +331,7 @@ func (snapshot *BasicClusterSnapshot) StorageInfos() schedulerframework.StorageI
 
 // List returns the list of nodes in the snapshot.
 func (snapshot *basicClusterSnapshotNodeLister) List() ([]*schedulerframework.NodeInfo, error) {
-	return (*BasicClusterSnapshot)(snapshot).getInternalData().listNodeInfos()
+	return (*BasicClusterSnapshot)(snapshot).getInternalData().listNodeInfos(), nil
 }
 
 // HavePodsWithAffinityList returns the list of nodes with at least one pods with inter-pod affinity

@@ -20,6 +20,7 @@ import (
 	"errors"
 
 	apiv1 "k8s.io/api/core/v1"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	"k8s.io/klog/v2"
 	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )

@@ -43,6 +44,16 @@ type ClusterSnapshot interface {
 	// IsPVCUsedByPods returns if the pvc is used by any pod, key = <namespace>/<pvc_name>
 	IsPVCUsedByPods(key string) bool
 
+	// AddNodeInfo adds the given NodeInfo to the snapshot. The Node and the Pods are added, as well as
+	// any DRA objects passed along them.
+	AddNodeInfo(nodeInfo *framework.NodeInfo) error
+	// GetNodeInfo returns an internal NodeInfo for a given Node - all information about the Node tracked in the snapshot.
+	// This means the Node itself, its scheduled Pods, as well as all relevant DRA objects. The internal NodeInfos
+	// obtained via this method should always be used in CA code instead of directly using *schedulerframework.NodeInfo.
+	GetNodeInfo(nodeName string) (*framework.NodeInfo, error)
+	// ListNodeInfos returns internal NodeInfos for all Nodes tracked in the snapshot. See the comment on GetNodeInfo.
+	ListNodeInfos() ([]*framework.NodeInfo, error)
+
 	// Fork creates a fork of snapshot state. All modifications can later be reverted to moment of forking via Revert().
 	// Use WithForkedSnapshot() helper function instead if possible.
 	Fork()

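The interface comment above describes AddNodeInfo as taking the Node, its Pods, and any DRA objects passed along with them. A hedged sketch of the intended calling pattern follows; the helper name, the claimsForPod parameter, and the local interface are assumptions made for the example, while AddNodeInfo, NewNodeInfo and PodInfo come from this commit.

package example

import (
	apiv1 "k8s.io/api/core/v1"
	resourceapi "k8s.io/api/resource/v1alpha3"

	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

// nodeInfoAdder is a narrow, local stand-in for the ClusterSnapshot interface.
type nodeInfoAdder interface {
	AddNodeInfo(nodeInfo *framework.NodeInfo) error
}

// addNodeWithDRA is a hypothetical helper: it bundles a Node, its node-local
// ResourceSlices and the scheduled Pods (with their ResourceClaims) into one
// internal NodeInfo and hands it to the snapshot in a single call.
func addNodeWithDRA(snapshot nodeInfoAdder, node *apiv1.Node, slices []*resourceapi.ResourceSlice,
	pods []*apiv1.Pod, claimsForPod map[string][]*resourceapi.ResourceClaim) error {
	var podInfos []*framework.PodInfo
	for _, pod := range pods {
		podInfos = append(podInfos, &framework.PodInfo{Pod: pod, NeededResourceClaims: claimsForPod[pod.Name]})
	}
	return snapshot.AddNodeInfo(framework.NewNodeInfo(node, slices, podInfos...))
}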
@@ -20,6 +20,7 @@ import (
 	"fmt"
 
 	apiv1 "k8s.io/api/core/v1"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	"k8s.io/klog/v2"
 	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )

@@ -402,6 +403,34 @@ func NewDeltaClusterSnapshot() *DeltaClusterSnapshot {
 	return snapshot
 }
 
+// GetNodeInfo gets a NodeInfo.
+func (snapshot *DeltaClusterSnapshot) GetNodeInfo(nodeName string) (*framework.NodeInfo, error) {
+	schedNodeInfo, err := snapshot.getNodeInfo(nodeName)
+	if err != nil {
+		return nil, err
+	}
+	return framework.WrapSchedulerNodeInfo(schedNodeInfo), nil
+}
+
+// ListNodeInfos lists NodeInfos.
+func (snapshot *DeltaClusterSnapshot) ListNodeInfos() ([]*framework.NodeInfo, error) {
+	schedNodeInfos := snapshot.data.getNodeInfoList()
+	return framework.WrapSchedulerNodeInfos(schedNodeInfos), nil
+}
+
+// AddNodeInfo adds a NodeInfo.
+func (snapshot *DeltaClusterSnapshot) AddNodeInfo(nodeInfo *framework.NodeInfo) error {
+	if err := snapshot.data.addNode(nodeInfo.Node()); err != nil {
+		return err
+	}
+	for _, podInfo := range nodeInfo.Pods() {
+		if err := snapshot.data.addPod(podInfo.Pod, nodeInfo.Node().Name); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
 // AddNode adds node to the snapshot.
 func (snapshot *DeltaClusterSnapshot) AddNode(node *apiv1.Node) error {
 	return snapshot.data.addNode(node)

@@ -0,0 +1,127 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package framework
+
+import (
+	apiv1 "k8s.io/api/core/v1"
+	resourceapi "k8s.io/api/resource/v1alpha3"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/klog/v2"
+	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
+)
+
+// PodInfo contains all necessary information about a Pod that Cluster Autoscaler needs to track.
+type PodInfo struct {
+	// This type embeds *apiv1.Pod to make the accesses easier - most of the code just needs to access the Pod.
+	*apiv1.Pod
+
+	// NeededResourceClaims contains ResourceClaim objects needed by the Pod.
+	NeededResourceClaims []*resourceapi.ResourceClaim
+}
+
+type podExtraInfo struct {
+	neededResourceClaims []*resourceapi.ResourceClaim
+}
+
+// NodeInfo contains all necessary information about a Node that Cluster Autoscaler needs to track.
+// It's essentially a wrapper around schedulerframework.NodeInfo, with extra data on top.
+type NodeInfo struct {
+	// schedNodeInfo is the part of information needed by the scheduler.
+	schedNodeInfo *schedulerframework.NodeInfo
+	// podsExtraInfo contains extra pod-level data needed only by CA.
+	podsExtraInfo map[types.UID]podExtraInfo
+
+	// Extra node-level data needed only by CA below.
+
+	// LocalResourceSlices contains all node-local ResourceSlices exposed by this Node.
+	LocalResourceSlices []*resourceapi.ResourceSlice
+}
+
+// SetNode sets the Node in this NodeInfo
+func (n *NodeInfo) SetNode(node *apiv1.Node) {
+	n.schedNodeInfo.SetNode(node)
+}
+
+// Node returns the Node set in this NodeInfo.
+func (n *NodeInfo) Node() *apiv1.Node {
+	return n.schedNodeInfo.Node()
+}
+
+// Pods returns the Pods scheduled on this NodeInfo, along with all their associated data.
+func (n *NodeInfo) Pods() []*PodInfo {
+	var result []*PodInfo
+	for _, pod := range n.schedNodeInfo.Pods {
+		extraInfo := n.podsExtraInfo[pod.Pod.UID]
+		podInfo := &PodInfo{Pod: pod.Pod, NeededResourceClaims: extraInfo.neededResourceClaims}
+		result = append(result, podInfo)
+	}
+	return result
+}
+
+// AddPod adds the given Pod and associated data to the NodeInfo.
+func (n *NodeInfo) AddPod(pod *PodInfo) {
+	n.schedNodeInfo.AddPod(pod.Pod)
+	n.podsExtraInfo[pod.UID] = podExtraInfo{neededResourceClaims: pod.NeededResourceClaims}
+}
+
+// RemovePod removes the given pod and its associated data from the NodeInfo.
+func (n *NodeInfo) RemovePod(pod *apiv1.Pod) error {
+	err := n.schedNodeInfo.RemovePod(klog.Background(), pod)
+	if err != nil {
+		return err
+	}
+	delete(n.podsExtraInfo, pod.UID)
+	return nil
+}
+
+// ToScheduler returns the embedded *schedulerframework.NodeInfo portion of the tracked data.
+func (n *NodeInfo) ToScheduler() *schedulerframework.NodeInfo {
+	return n.schedNodeInfo
+}
+
+// NewNodeInfo returns a new internal NodeInfo from the provided data.
+func NewNodeInfo(node *apiv1.Node, slices []*resourceapi.ResourceSlice, pods ...*PodInfo) *NodeInfo {
+	result := &NodeInfo{
+		schedNodeInfo:       schedulerframework.NewNodeInfo(),
+		podsExtraInfo:       map[types.UID]podExtraInfo{},
+		LocalResourceSlices: slices,
+	}
+	if node != nil {
+		result.schedNodeInfo.SetNode(node)
+	}
+	for _, pod := range pods {
+		result.AddPod(pod)
+	}
+	return result
+}
+
+// WrapSchedulerNodeInfo wraps a *schedulerframework.NodeInfo into an internal *NodeInfo.
+func WrapSchedulerNodeInfo(schedNodeInfo *schedulerframework.NodeInfo) *NodeInfo {
+	return &NodeInfo{
+		schedNodeInfo: schedNodeInfo,
+		podsExtraInfo: map[types.UID]podExtraInfo{},
+	}
+}
+
+// WrapSchedulerNodeInfos wraps a list of *schedulerframework.NodeInfos into internal *NodeInfos.
+func WrapSchedulerNodeInfos(schedNodeInfos []*schedulerframework.NodeInfo) []*NodeInfo {
+	var result []*NodeInfo
+	for _, schedNodeInfo := range schedNodeInfos {
+		result = append(result, WrapSchedulerNodeInfo(schedNodeInfo))
+	}
+	return result
+}

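Because the wrapper only layers extra data over the scheduler object, already-migrated code can keep interoperating with code that still expects *schedulerframework.NodeInfo. A hedged sketch of that round trip, using only the functions defined in the new file above (the helper itself is hypothetical):

package example

import (
	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
)

// wrapAndUnwrap is a hypothetical helper showing the migration-friendly round trip:
// wrap a scheduler NodeInfo to use the internal accessors, then hand the embedded
// scheduler object back to code that still wants the framework type.
func wrapAndUnwrap(schedNodeInfo *schedulerframework.NodeInfo) (int, *schedulerframework.NodeInfo) {
	wrapped := framework.WrapSchedulerNodeInfo(schedNodeInfo)
	// A NodeInfo obtained by wrapping has no DRA data attached, so the PodInfos
	// returned by Pods() carry empty NeededResourceClaims.
	return len(wrapped.Pods()), wrapped.ToScheduler()
}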
@@ -0,0 +1,241 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package framework
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/google/go-cmp/cmp"
+	"github.com/google/go-cmp/cmp/cmpopts"
+
+	apiv1 "k8s.io/api/core/v1"
+	resourceapi "k8s.io/api/resource/v1alpha3"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/autoscaler/cluster-autoscaler/utils/test"
+	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
+)
+
+func TestNodeInfo(t *testing.T) {
+	node := test.BuildTestNode("test-node", 1000, 1024)
+	pods := []*apiv1.Pod{
+		// Use pods requesting host-ports to make sure that NodeInfo fields other than node and Pods also
+		// get set correctly (in this case - the UsedPorts field).
+		test.BuildTestPod("hostport-pod-0", 100, 16, test.WithHostPort(1337)),
+		test.BuildTestPod("hostport-pod-1", 100, 16, test.WithHostPort(1338)),
+		test.BuildTestPod("hostport-pod-2", 100, 16, test.WithHostPort(1339)),
+		test.BuildTestPod("regular-pod-0", 100, 16),
+		test.BuildTestPod("regular-pod-1", 100, 16),
+		test.BuildTestPod("regular-pod-2", 100, 16),
+	}
+	schedulerNodeInfo := newSchedNodeInfo(node, pods)
+	slices := []*resourceapi.ResourceSlice{
+		{
+			ObjectMeta: v1.ObjectMeta{
+				Name: "test-node-slice-0",
+			},
+			Spec: resourceapi.ResourceSliceSpec{
+				NodeName: "test-node",
+				Driver:   "test.driver.com",
+				Pool:     resourceapi.ResourcePool{Name: "test-node", Generation: 13, ResourceSliceCount: 2},
+				Devices:  []resourceapi.Device{{Name: "device-0"}, {Name: "device-1"}},
+			}},
+		{
+			ObjectMeta: v1.ObjectMeta{
+				Name: "test-node-slice-1",
+			},
+			Spec: resourceapi.ResourceSliceSpec{
+				NodeName: "test-node",
+				Driver:   "test.driver.com",
+				Pool:     resourceapi.ResourcePool{Name: "test-node", Generation: 13, ResourceSliceCount: 2},
+				Devices:  []resourceapi.Device{{Name: "device-2"}, {Name: "device-3"}},
+			},
+		},
+	}
+
+	for _, tc := range []struct {
+		testName                string
+		modFn                   func(info *schedulerframework.NodeInfo) *NodeInfo
+		wantSchedNodeInfo       *schedulerframework.NodeInfo
+		wantLocalResourceSlices []*resourceapi.ResourceSlice
+		wantPods                []*PodInfo
+	}{
+		{
+			testName: "wrapping via NewNodeInfo",
+			modFn: func(info *schedulerframework.NodeInfo) *NodeInfo {
+				return NewNodeInfo(info.Node(), nil, testPodInfos(pods, false)...)
+			},
+			wantSchedNodeInfo: schedulerNodeInfo,
+			wantPods:          testPodInfos(pods, false),
+		},
+		{
+			testName: "wrapping via NewNodeInfo with DRA objects",
+			modFn: func(info *schedulerframework.NodeInfo) *NodeInfo {
+				return NewNodeInfo(info.Node(), slices, testPodInfos(pods, true)...)
+			},
+			wantSchedNodeInfo:       schedulerNodeInfo,
+			wantLocalResourceSlices: slices,
+			wantPods:                testPodInfos(pods, true),
+		},
+		{
+			testName: "wrapping via NewTestNodeInfo",
+			modFn: func(info *schedulerframework.NodeInfo) *NodeInfo {
+				var pods []*apiv1.Pod
+				for _, pod := range info.Pods {
+					pods = append(pods, pod.Pod)
+				}
+				return NewTestNodeInfo(info.Node(), pods...)
+			},
+			wantSchedNodeInfo: schedulerNodeInfo,
+			wantPods:          testPodInfos(pods, false),
+		},
+		{
+			testName:          "wrapping via WrapSchedulerNodeInfo",
+			modFn:             WrapSchedulerNodeInfo,
+			wantSchedNodeInfo: schedulerNodeInfo,
+			wantPods:          testPodInfos(pods, false),
+		},
+		{
+			testName: "wrapping via SetNode+AddPod",
+			modFn: func(info *schedulerframework.NodeInfo) *NodeInfo {
+				result := NewNodeInfo(nil, nil)
+				result.SetNode(info.Node())
+				for _, pod := range info.Pods {
+					result.AddPod(&PodInfo{Pod: pod.Pod})
+				}
+				return result
+			},
+			wantSchedNodeInfo: schedulerNodeInfo,
+			wantPods:          testPodInfos(pods, false),
+		},
+		{
+			testName: "wrapping via SetNode+AddPod with DRA objects",
+			modFn: func(info *schedulerframework.NodeInfo) *NodeInfo {
+				result := NewNodeInfo(nil, nil)
+				result.LocalResourceSlices = slices
+				result.SetNode(info.Node())
+				for _, podInfo := range testPodInfos(pods, true) {
+					result.AddPod(podInfo)
+				}
+				return result
+			},
+			wantSchedNodeInfo:       schedulerNodeInfo,
+			wantLocalResourceSlices: slices,
+			wantPods:                testPodInfos(pods, true),
+		},
+		{
+			testName: "removing pods",
+			modFn: func(info *schedulerframework.NodeInfo) *NodeInfo {
+				result := NewNodeInfo(info.Node(), slices, testPodInfos(pods, true)...)
+				for _, pod := range []*apiv1.Pod{pods[0], pods[2], pods[4]} {
+					if err := result.RemovePod(pod); err != nil {
+						t.Errorf("RemovePod unexpected error: %v", err)
+					}
+				}
+				return result
+			},
+			wantSchedNodeInfo:       newSchedNodeInfo(node, []*apiv1.Pod{pods[1], pods[3], pods[5]}),
+			wantLocalResourceSlices: slices,
+			wantPods:                testPodInfos([]*apiv1.Pod{pods[1], pods[3], pods[5]}, true),
+		},
+	} {
+		t.Run(tc.testName, func(t *testing.T) {
+			wrappedNodeInfo := tc.modFn(schedulerNodeInfo)
+
+			// Assert that the scheduler NodeInfo object is as expected.
+			nodeInfoCmpOpts := []cmp.Option{
+				// The Node is the only unexported field in this type, and we want to compare it.
+				cmp.AllowUnexported(schedulerframework.NodeInfo{}),
+				// Generation is expected to be different.
+				cmpopts.IgnoreFields(schedulerframework.NodeInfo{}, "Generation"),
+				// The pod order changes in a particular way whenever schedulerframework.RemovePod() is called. Instead of
+				// relying on that schedulerframework implementation detail in assertions, just ignore the order.
+				cmpopts.SortSlices(func(p1, p2 *schedulerframework.PodInfo) bool {
+					return p1.Pod.Name < p2.Pod.Name
+				}),
+			}
+			if diff := cmp.Diff(tc.wantSchedNodeInfo, wrappedNodeInfo.ToScheduler(), nodeInfoCmpOpts...); diff != "" {
+				t.Errorf("ToScheduler() output differs from expected, diff (-want +got): %s", diff)
+			}
+
+			// Assert that the Node() method matches the scheduler object.
+			if diff := cmp.Diff(tc.wantSchedNodeInfo.Node(), wrappedNodeInfo.Node()); diff != "" {
+				t.Errorf("Node() output differs from expected, diff  (-want +got): %s", diff)
+			}
+
+			// Assert that LocalResourceSlices are as expected.
+			if diff := cmp.Diff(tc.wantLocalResourceSlices, wrappedNodeInfo.LocalResourceSlices); diff != "" {
+				t.Errorf("LocalResourceSlices differ from expected, diff  (-want +got): %s", diff)
+			}
+
+			// Assert that the pods list in the wrapper is as expected.
+			// The pod order changes in a particular way whenever schedulerframework.RemovePod() is called. Instead of
+			// relying on that schedulerframework implementation detail in assertions, just ignore the order.
+			podsInfosIgnoreOrderOpt := cmpopts.SortSlices(func(p1, p2 *PodInfo) bool {
+				return p1.Name < p2.Name
+			})
+			if diff := cmp.Diff(tc.wantPods, wrappedNodeInfo.Pods(), podsInfosIgnoreOrderOpt); diff != "" {
+				t.Errorf("Pods() output differs from expected, diff (-want +got): %s", diff)
+			}
+
+			// Assert that the extra info map only contains information about pods in the list. This verifies that
+			// the map is properly cleaned up during RemovePod.
+			possiblePodUids := make(map[types.UID]bool)
+			for _, pod := range tc.wantPods {
+				possiblePodUids[pod.UID] = true
+			}
+			for podUid := range wrappedNodeInfo.podsExtraInfo {
+				if !possiblePodUids[podUid] {
+					t.Errorf("podsExtraInfo contains entry for unexpected UID %q", podUid)
+				}
+			}
+		})
+	}
+}
+
+func testPodInfos(pods []*apiv1.Pod, addClaims bool) []*PodInfo {
+	var result []*PodInfo
+	for _, pod := range pods {
+		podInfo := &PodInfo{Pod: pod}
+		if addClaims {
+			for i := range 3 {
+				podInfo.NeededResourceClaims = append(podInfo.NeededResourceClaims, &resourceapi.ResourceClaim{
+					ObjectMeta: v1.ObjectMeta{
+						Name: fmt.Sprintf("%s-claim-%d", pod.Name, i),
+					},
+					Spec: resourceapi.ResourceClaimSpec{
+						Devices: resourceapi.DeviceClaim{
+							Requests: []resourceapi.DeviceRequest{
+								{Name: "request-0"},
+								{Name: "request-1"},
+							},
+						},
+					},
+				})
+			}
+		}
+		result = append(result, podInfo)
+	}
+	return result
+}
+
+func newSchedNodeInfo(node *apiv1.Node, pods []*apiv1.Pod) *schedulerframework.NodeInfo {
+	result := schedulerframework.NewNodeInfo(pods...)
+	result.SetNode(node)
+	return result
+}

@@ -0,0 +1,31 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package framework
+
+import (
+	apiv1 "k8s.io/api/core/v1"
+)
+
+// NewTestNodeInfo returns a new NodeInfo without any DRA information - only to be used in test code.
+// Production code should always take DRA objects into account.
+func NewTestNodeInfo(node *apiv1.Node, pods ...*apiv1.Pod) *NodeInfo {
+	nodeInfo := NewNodeInfo(node, nil)
+	for _, pod := range pods {
+		nodeInfo.AddPod(&PodInfo{Pod: pod, NeededResourceClaims: nil})
+	}
+	return nodeInfo
+}

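As a usage note, a hedged sketch of how other test code could use NewTestNodeInfo; the test name and package are hypothetical, while BuildTestNode and BuildTestPod are the existing CA test helpers already used in the test file above.

package example

import (
	"testing"

	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
	"k8s.io/autoscaler/cluster-autoscaler/utils/test"
)

// TestNewTestNodeInfoUsage is a hypothetical example of building an internal
// NodeInfo in tests without providing any DRA information.
func TestNewTestNodeInfoUsage(t *testing.T) {
	node := test.BuildTestNode("example-node", 1000, 1024)
	pod := test.BuildTestPod("example-pod", 100, 16)

	nodeInfo := framework.NewTestNodeInfo(node, pod)
	if got := len(nodeInfo.Pods()); got != 1 {
		t.Errorf("expected 1 pod on the NodeInfo, got %d", got)
	}
	if len(nodeInfo.LocalResourceSlices) != 0 {
		t.Errorf("expected no ResourceSlices on a test NodeInfo")
	}
}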