nit: when scheduling fails on topology constraints, skip the last node that failed scheduling
Signed-off-by: MenD32 <amit.mendelevitch@gmail.com>
parent 8fd9e1f04d
commit 0002157b3a
@@ -190,7 +190,7 @@ func (e *BinpackingNodeEstimator) tryToScheduleOnNewNodes(
 	// If the pod can't be scheduled on the last node because of topology constraints, we can stop binpacking.
 	// The pod can't be scheduled on any new node either, because it has the same topology constraints.
 	nodeName, err := e.clusterSnapshot.SchedulePodOnAnyNodeMatching(pod, func(nodeInfo *framework.NodeInfo) bool {
-		return true // Node scale-up can cause old nodes to become schedulable, so we check all nodes.
+		return nodeInfo.Node().Name != estimationState.lastNodeName // only skip the last node that failed scheduling
 	})
 	if err != nil && err.Type() == clustersnapshot.SchedulingInternalError {
 		// Unexpected error.
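
For illustration, a minimal, self-contained Go sketch of what the predicate change does. The Node type and the firstMatching helper below are hypothetical stand-ins for *framework.NodeInfo and the node filtering done by SchedulePodOnAnyNodeMatching; lastNodeName mirrors estimationState.lastNodeName from the diff.

package main

import "fmt"

// Node is a hypothetical stand-in for the *framework.NodeInfo type in the real code.
type Node struct{ Name string }

// firstMatching mimics the filtering contract assumed for SchedulePodOnAnyNodeMatching:
// it returns the name of the first node the predicate accepts.
func firstMatching(nodes []Node, match func(Node) bool) (string, bool) {
	for _, n := range nodes {
		if match(n) {
			return n.Name, true
		}
	}
	return "", false
}

func main() {
	nodes := []Node{{"node-a"}, {"node-b"}, {"node-c"}}
	lastNodeName := "node-a" // the node the pod just failed to schedule on

	// Old predicate: consider every node, including the one that just failed.
	before, _ := firstMatching(nodes, func(n Node) bool { return true })

	// New predicate: skip only the last node that failed scheduling.
	after, _ := firstMatching(nodes, func(n Node) bool { return n.Name != lastNodeName })

	fmt.Println(before, after) // node-a node-b
}

The old predicate re-examined the node the pod had just failed to schedule on; the new one excludes exactly that node while still checking every other node, preserving the earlier rationale that scale-up can make old nodes schedulable.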