CA: extend WrapSchedulerNodeInfo to allow passing DRA objects
This should be a no-op, as no DRA objects are passed yet.
parent d0338fa301
commit 1e560274d5
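For orientation, the signature change can be summarized with the sketch below. It is not code from this commit; the import paths and helper names are assumptions and may differ between autoscaler versions. WrapSchedulerNodeInfo now also accepts node-local ResourceSlices and per-pod PodExtraInfo keyed by pod UID; passing nil for both keeps the old behaviour, which is exactly what every call site in this commit does.

// Sketch only, not part of the commit; import paths are assumptions.
package example

import (
	resourceapi "k8s.io/api/resource/v1beta1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
)

// wrapWithDRA attaches DRA objects while wrapping a scheduler NodeInfo.
func wrapWithDRA(sched *schedulerframework.NodeInfo, slices []*resourceapi.ResourceSlice, extra map[types.UID]framework.PodExtraInfo) *framework.NodeInfo {
	return framework.WrapSchedulerNodeInfo(sched, slices, extra)
}

// wrapWithoutDRA matches the call sites in the diff below: nil, nil preserves
// the previous behaviour, so the commit is a functional no-op.
func wrapWithoutDRA(sched *schedulerframework.NodeInfo) *framework.NodeInfo {
	return framework.WrapSchedulerNodeInfo(sched, nil, nil)
}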
@@ -211,13 +211,17 @@ func (snapshot *BasicSnapshotStore) GetNodeInfo(nodeName string) (*framework.Nod
 	if err != nil {
 		return nil, err
 	}
-	return framework.WrapSchedulerNodeInfo(schedNodeInfo), nil
+	return framework.WrapSchedulerNodeInfo(schedNodeInfo, nil, nil), nil
 }
 
 // ListNodeInfos lists NodeInfos.
 func (snapshot *BasicSnapshotStore) ListNodeInfos() ([]*framework.NodeInfo, error) {
 	schedNodeInfos := snapshot.getInternalData().listNodeInfos()
-	return framework.WrapSchedulerNodeInfos(schedNodeInfos), nil
+	var result []*framework.NodeInfo
+	for _, schedNodeInfo := range schedNodeInfos {
+		result = append(result, framework.WrapSchedulerNodeInfo(schedNodeInfo, nil, nil))
+	}
+	return result, nil
 }
 
 // AddNodeInfo adds a NodeInfo.
@@ -415,13 +415,17 @@ func (snapshot *DeltaSnapshotStore) GetNodeInfo(nodeName string) (*framework.Nod
 	if err != nil {
 		return nil, err
 	}
-	return framework.WrapSchedulerNodeInfo(schedNodeInfo), nil
+	return framework.WrapSchedulerNodeInfo(schedNodeInfo, nil, nil), nil
 }
 
 // ListNodeInfos lists NodeInfos.
 func (snapshot *DeltaSnapshotStore) ListNodeInfos() ([]*framework.NodeInfo, error) {
 	schedNodeInfos := snapshot.data.getNodeInfoList()
-	return framework.WrapSchedulerNodeInfos(schedNodeInfos), nil
+	var result []*framework.NodeInfo
+	for _, schedNodeInfo := range schedNodeInfos {
+		result = append(result, framework.WrapSchedulerNodeInfo(schedNodeInfo, nil, nil))
+	}
+	return result, nil
 }
 
 // AddNodeInfo adds a NodeInfo.
@@ -28,13 +28,15 @@ import (
 type PodInfo struct {
 	// This type embeds *apiv1.Pod to make the accesses easier - most of the code just needs to access the Pod.
 	*apiv1.Pod
-	// NeededResourceClaims contains ResourceClaim objects needed by the Pod.
-	NeededResourceClaims []*resourceapi.ResourceClaim
+	// PodExtraInfo is an embedded struct containing all additional information that CA needs to track about a Pod.
+	PodExtraInfo
 }
 
-type podExtraInfo struct {
-	neededResourceClaims []*resourceapi.ResourceClaim
+// PodExtraInfo contains all necessary information about a Pod that Cluster Autoscaler needs to track, apart from the Pod itself.
+// This is extracted from PodInfo so that it can be stored separately from the Pod.
+type PodExtraInfo struct {
+	// NeededResourceClaims contains ResourceClaim objects needed by the Pod.
+	NeededResourceClaims []*resourceapi.ResourceClaim
 }
 
 // NodeInfo contains all necessary information about a Node that Cluster Autoscaler needs to track.
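Since PodExtraInfo is embedded in PodInfo, its fields are promoted: existing accesses such as podInfo.NeededResourceClaims keep compiling, which is why the DeepCopy hunk further down still ranges over that field directly. A minimal sketch of the promotion (not part of the commit), assuming hypothetical pod and claims variables:

// Sketch only: field promotion through the embedded PodExtraInfo struct.
// "pod" (*apiv1.Pod) and "claims" ([]*resourceapi.ResourceClaim) are
// hypothetical variables assumed to be in scope.
pi := &PodInfo{Pod: pod, PodExtraInfo: PodExtraInfo{NeededResourceClaims: claims}}
promoted := pi.NeededResourceClaims              // promoted access
explicit := pi.PodExtraInfo.NeededResourceClaims // explicit form, same value
_, _ = promoted, explicit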
@@ -43,7 +45,7 @@ type NodeInfo struct {
 	// schedNodeInfo is the part of information needed by the scheduler.
 	schedNodeInfo *schedulerframework.NodeInfo
 	// podsExtraInfo contains extra pod-level data needed only by CA.
-	podsExtraInfo map[types.UID]podExtraInfo
+	podsExtraInfo map[types.UID]PodExtraInfo
 
 	// Extra node-level data needed only by CA below.
 
@@ -66,7 +68,7 @@ func (n *NodeInfo) Pods() []*PodInfo {
 	var result []*PodInfo
 	for _, pod := range n.schedNodeInfo.Pods {
 		extraInfo := n.podsExtraInfo[pod.Pod.UID]
-		podInfo := &PodInfo{Pod: pod.Pod, NeededResourceClaims: extraInfo.neededResourceClaims}
+		podInfo := &PodInfo{Pod: pod.Pod, PodExtraInfo: extraInfo}
 		result = append(result, podInfo)
 	}
 	return result
@@ -75,7 +77,7 @@ func (n *NodeInfo) Pods() []*PodInfo {
 // AddPod adds the given Pod and associated data to the NodeInfo.
 func (n *NodeInfo) AddPod(pod *PodInfo) {
 	n.schedNodeInfo.AddPod(pod.Pod)
-	n.podsExtraInfo[pod.UID] = podExtraInfo{neededResourceClaims: pod.NeededResourceClaims}
+	n.podsExtraInfo[pod.UID] = pod.PodExtraInfo
 }
 
 // RemovePod removes the given pod and its associated data from the NodeInfo.
@@ -101,7 +103,7 @@ func (n *NodeInfo) DeepCopy() *NodeInfo {
 		for _, claim := range podInfo.NeededResourceClaims {
 			newClaims = append(newClaims, claim.DeepCopy())
 		}
-		newPods = append(newPods, &PodInfo{Pod: podInfo.Pod.DeepCopy(), NeededResourceClaims: newClaims})
+		newPods = append(newPods, &PodInfo{Pod: podInfo.Pod.DeepCopy(), PodExtraInfo: PodExtraInfo{NeededResourceClaims: newClaims}})
 	}
 	var newSlices []*resourceapi.ResourceSlice
 	for _, slice := range n.LocalResourceSlices {
@@ -115,7 +117,7 @@ func (n *NodeInfo) DeepCopy() *NodeInfo {
 func NewNodeInfo(node *apiv1.Node, slices []*resourceapi.ResourceSlice, pods ...*PodInfo) *NodeInfo {
 	result := &NodeInfo{
 		schedNodeInfo: schedulerframework.NewNodeInfo(),
-		podsExtraInfo: map[types.UID]podExtraInfo{},
+		podsExtraInfo: map[types.UID]PodExtraInfo{},
 		LocalResourceSlices: slices,
 	}
 	if node != nil {
@@ -128,18 +130,15 @@ func NewNodeInfo(node *apiv1.Node, slices []*resourceapi.ResourceSlice, pods ...
 }
 
 // WrapSchedulerNodeInfo wraps a *schedulerframework.NodeInfo into an internal *NodeInfo.
-func WrapSchedulerNodeInfo(schedNodeInfo *schedulerframework.NodeInfo) *NodeInfo {
+func WrapSchedulerNodeInfo(schedNodeInfo *schedulerframework.NodeInfo, slices []*resourceapi.ResourceSlice, podExtraInfos map[types.UID]PodExtraInfo) *NodeInfo {
 	return &NodeInfo{
 		schedNodeInfo: schedNodeInfo,
-		podsExtraInfo: map[types.UID]podExtraInfo{},
+		podsExtraInfo: podExtraInfos,
+		LocalResourceSlices: slices,
 	}
 }
 
-// WrapSchedulerNodeInfos wraps a list of *schedulerframework.NodeInfos into internal *NodeInfos.
-func WrapSchedulerNodeInfos(schedNodeInfos []*schedulerframework.NodeInfo) []*NodeInfo {
-	var result []*NodeInfo
-	for _, schedNodeInfo := range schedNodeInfos {
-		result = append(result, WrapSchedulerNodeInfo(schedNodeInfo))
-	}
-	return result
+// NewPodInfo is a convenience function for creating new PodInfos without typing out the "PodExtraInfo" part.
+func NewPodInfo(pod *apiv1.Pod, claims []*resourceapi.ResourceClaim) *PodInfo {
+	return &PodInfo{Pod: pod, PodExtraInfo: PodExtraInfo{NeededResourceClaims: claims}}
 }
 
@@ -105,11 +105,27 @@ func TestNodeInfo(t *testing.T) {
 			wantPods: testPodInfos(pods, false),
 		},
 		{
 			testName: "wrapping via WrapSchedulerNodeInfo",
-			modFn: WrapSchedulerNodeInfo,
+			modFn: func(info *schedulerframework.NodeInfo) *NodeInfo {
+				return WrapSchedulerNodeInfo(info, nil, nil)
+			},
 			wantSchedNodeInfo: schedulerNodeInfo,
 			wantPods: testPodInfos(pods, false),
 		},
+		{
+			testName: "wrapping via WrapSchedulerNodeInfo with DRA objects",
+			modFn: func(info *schedulerframework.NodeInfo) *NodeInfo {
+				podInfos := testPodInfos(pods, true)
+				extraInfos := make(map[types.UID]PodExtraInfo)
+				for _, podInfo := range podInfos {
+					extraInfos[podInfo.Pod.UID] = podInfo.PodExtraInfo
+				}
+				return WrapSchedulerNodeInfo(schedulerNodeInfo, slices, extraInfos)
+			},
+			wantSchedNodeInfo: schedulerNodeInfo,
+			wantLocalResourceSlices: slices,
+			wantPods: testPodInfos(pods, true),
+		},
 		{
 			testName: "wrapping via SetNode+AddPod",
 			modFn: func(info *schedulerframework.NodeInfo) *NodeInfo {
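The new test case above also shows the pattern a caller would use to assemble the podExtraInfos argument from PodInfos it already holds. As a hedged helper sketch of that pattern (not part of the commit, the function name is illustrative):

// Sketch only: build the map[types.UID]PodExtraInfo argument for
// WrapSchedulerNodeInfo from a slice of PodInfos, as the test does inline.
func extraInfosByUID(podInfos []*PodInfo) map[types.UID]PodExtraInfo {
	out := make(map[types.UID]PodExtraInfo, len(podInfos))
	for _, pi := range podInfos {
		out[pi.Pod.UID] = pi.PodExtraInfo
	}
	return out
}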
@@ -214,9 +230,11 @@ func TestDeepCopyNodeInfo(t *testing.T) {
 				{Pod: test.BuildTestPod("p1", 80, 0, test.WithNodeName(node.Name))},
 				{
 					Pod: test.BuildTestPod("p2", 80, 0, test.WithNodeName(node.Name)),
-					NeededResourceClaims: []*resourceapi.ResourceClaim{
-						{ObjectMeta: v1.ObjectMeta{Name: "claim1"}, Spec: resourceapi.ResourceClaimSpec{Devices: resourceapi.DeviceClaim{Requests: []resourceapi.DeviceRequest{{Name: "req1"}}}}},
-						{ObjectMeta: v1.ObjectMeta{Name: "claim2"}, Spec: resourceapi.ResourceClaimSpec{Devices: resourceapi.DeviceClaim{Requests: []resourceapi.DeviceRequest{{Name: "req2"}}}}},
+					PodExtraInfo: PodExtraInfo{
+						NeededResourceClaims: []*resourceapi.ResourceClaim{
+							{ObjectMeta: v1.ObjectMeta{Name: "claim1"}, Spec: resourceapi.ResourceClaimSpec{Devices: resourceapi.DeviceClaim{Requests: []resourceapi.DeviceRequest{{Name: "req1"}}}}},
+							{ObjectMeta: v1.ObjectMeta{Name: "claim2"}, Spec: resourceapi.ResourceClaimSpec{Devices: resourceapi.DeviceClaim{Requests: []resourceapi.DeviceRequest{{Name: "req2"}}}}},
+						},
 					},
 				},
 			}
@@ -254,7 +272,7 @@ func TestDeepCopyNodeInfo(t *testing.T) {
 			// Verify that the contents are identical after copying.
 			nodeInfoCopy := tc.nodeInfo.DeepCopy()
 			if diff := cmp.Diff(tc.nodeInfo, nodeInfoCopy,
-				cmp.AllowUnexported(schedulerframework.NodeInfo{}, NodeInfo{}, PodInfo{}, podExtraInfo{}),
+				cmp.AllowUnexported(schedulerframework.NodeInfo{}, NodeInfo{}),
 				// We don't care about this field staying the same, and it differs because it's a global counter bumped
 				// on every AddPod.
 				cmpopts.IgnoreFields(schedulerframework.NodeInfo{}, "Generation"),
@@ -33,7 +33,7 @@ type testFailer interface {
 func NewTestNodeInfo(node *apiv1.Node, pods ...*apiv1.Pod) *NodeInfo {
 	nodeInfo := NewNodeInfo(node, nil)
 	for _, pod := range pods {
-		nodeInfo.AddPod(&PodInfo{Pod: pod, NeededResourceClaims: nil})
+		nodeInfo.AddPod(NewPodInfo(pod, nil))
 	}
 	return nodeInfo
 }
@@ -82,7 +82,7 @@ func sanitizeNodeInfo(nodeInfo *framework.NodeInfo, newNodeNameBase string, name
 
 	for _, podInfo := range nodeInfo.Pods() {
 		freshPod := sanitizePod(podInfo.Pod, freshNode.Name, namesSuffix)
-		result.AddPod(&framework.PodInfo{Pod: freshPod})
+		result.AddPod(framework.NewPodInfo(freshPod, nil))
 	}
 	return result
 }