DRA: migrate all of CA to use the new internal NodeInfo/PodInfo
The new wrapper types should behave like the direct schedulerframework types for most purposes, so most of the migration is just a change of the imported package. Constructors look slightly different, so call sites have to be adapted, mostly in test code. Accesses to the Pods field have to be changed to a Pods() method call. After this, the schedulerframework types are only used inside the new wrappers and in the parts of simulator/ that directly interact with the scheduler framework. The rest of the CA codebase operates on the new wrapper types.
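The adapted call sites all follow the same shape. A minimal, self-contained Go sketch of the pattern (illustrative only; templateNodeInfo and countTemplatePods are placeholder helpers, not code from this commit):

package example

import (
    apiv1 "k8s.io/api/core/v1"

    "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
    "k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

// templateNodeInfo shows the new constructor shape: the Node and its startup pods are
// passed up front, instead of NewNodeInfo(pods...) followed by a separate SetNode(node).
func templateNodeInfo(node *apiv1.Node, groupID string) *framework.NodeInfo {
    return framework.NewNodeInfo(node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(groupID)})
}

// countTemplatePods shows the Pods field becoming a Pods() method call; callers that still
// need the raw scheduler type go through ToScheduler(), as the migrated tests do.
func countTemplatePods(nodeInfo *framework.NodeInfo) int {
    _ = nodeInfo.ToScheduler().Allocatable // raw schedulerframework view, used only where the scheduler framework is invoked directly
    return len(nodeInfo.Pods())
}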
parent a329ac6601
commit 879c6a84a4
@@ -22,8 +22,8 @@ import (
 apiv1 "k8s.io/api/core/v1"
 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 "k8s.io/autoscaler/cluster-autoscaler/config"
+"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 klog "k8s.io/klog/v2"
-schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )

 // Asg implements NodeGroup interface.
@@ -179,7 +179,7 @@ func (asg *Asg) Nodes() ([]cloudprovider.Instance, error) {
 }

 // TemplateNodeInfo returns a node template for this node group.
-func (asg *Asg) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (asg *Asg) TemplateNodeInfo() (*framework.NodeInfo, error) {
 template, err := asg.manager.getAsgTemplate(asg.id)
 if err != nil {
 return nil, err
@@ -191,8 +191,7 @@ func (asg *Asg) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
 return nil, err
 }

-nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(asg.id))
-nodeInfo.SetNode(node)
+nodeInfo := framework.NewNodeInfo(node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(asg.id)})
 return nodeInfo, nil
 }
@@ -27,10 +27,10 @@ import (
 "k8s.io/apimachinery/pkg/api/resource"
 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 "k8s.io/autoscaler/cluster-autoscaler/config"
+"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 "k8s.io/autoscaler/cluster-autoscaler/utils/errors"
 "k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
 klog "k8s.io/klog/v2"
-schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )

 const (
@@ -392,7 +392,7 @@ func (ng *AwsNodeGroup) Nodes() ([]cloudprovider.Instance, error) {
 }

 // TemplateNodeInfo returns a node template for this node group.
-func (ng *AwsNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (ng *AwsNodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
 template, err := ng.awsManager.getAsgTemplate(ng.asg)
 if err != nil {
 return nil, err
@@ -403,8 +403,7 @@ func (ng *AwsNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error)
 return nil, err
 }

-nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(ng.asg.Name))
-nodeInfo.SetNode(node)
+nodeInfo := framework.NewNodeInfo(node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(ng.asg.Name)})
 return nodeInfo, nil
 }
@@ -34,8 +34,8 @@ import (
 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 "k8s.io/autoscaler/cluster-autoscaler/config"
 "k8s.io/autoscaler/cluster-autoscaler/config/dynamic"
+"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 klog "k8s.io/klog/v2"
-schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )

 const (
@@ -477,7 +477,7 @@ func (as *AgentPool) Debug() string {
 }

 // TemplateNodeInfo returns a node template for this agent pool.
-func (as *AgentPool) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (as *AgentPool) TemplateNodeInfo() (*framework.NodeInfo, error) {
 return nil, cloudprovider.ErrNotImplemented
 }
@@ -27,8 +27,8 @@ import (
 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 "k8s.io/autoscaler/cluster-autoscaler/config"
 "k8s.io/autoscaler/cluster-autoscaler/config/dynamic"
+"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 klog "k8s.io/klog/v2"
-schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 "sigs.k8s.io/cloud-provider-azure/pkg/retry"

 "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute"
@@ -627,7 +627,7 @@ func (scaleSet *ScaleSet) Debug() string {
 }

 // TemplateNodeInfo returns a node template for this scale set.
-func (scaleSet *ScaleSet) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (scaleSet *ScaleSet) TemplateNodeInfo() (*framework.NodeInfo, error) {
 template, err := scaleSet.getVMSSFromCache()
 if err != nil {
 return nil, err
@@ -641,8 +641,7 @@ func (scaleSet *ScaleSet) TemplateNodeInfo() (*schedulerframework.NodeInfo, erro
 return nil, err
 }

-nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(scaleSet.Name))
-nodeInfo.SetNode(node)
+nodeInfo := framework.NewNodeInfo(node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(scaleSet.Name)})
 return nodeInfo, nil
 }
@@ -1120,7 +1120,7 @@ func TestTemplateNodeInfo(t *testing.T) {
 nodeInfo, err := asg.TemplateNodeInfo()
 assert.NoError(t, err)
 assert.NotNil(t, nodeInfo)
-assert.NotEmpty(t, nodeInfo.Pods)
+assert.NotEmpty(t, nodeInfo.Pods())
 })

 // Properly testing dynamic SKU list through skewer is not possible,
@@ -1143,7 +1143,7 @@ func TestTemplateNodeInfo(t *testing.T) {
 assert.Equal(t, *nodeInfo.Node().Status.Capacity.Memory(), *resource.NewQuantity(3*1024*1024, resource.DecimalSI))
 assert.NoError(t, err)
 assert.NotNil(t, nodeInfo)
-assert.NotEmpty(t, nodeInfo.Pods)
+assert.NotEmpty(t, nodeInfo.Pods())
 })

 t.Run("Checking static workflow if dynamic fails", func(t *testing.T) {
@@ -1164,7 +1164,7 @@ func TestTemplateNodeInfo(t *testing.T) {
 assert.Equal(t, *nodeInfo.Node().Status.Capacity.Memory(), *resource.NewQuantity(3*1024*1024, resource.DecimalSI))
 assert.NoError(t, err)
 assert.NotNil(t, nodeInfo)
-assert.NotEmpty(t, nodeInfo.Pods)
+assert.NotEmpty(t, nodeInfo.Pods())
 })

 t.Run("Fails to find vmss instance information using static and dynamic workflow, instance not supported", func(t *testing.T) {
@@ -1198,7 +1198,7 @@ func TestTemplateNodeInfo(t *testing.T) {
 assert.Equal(t, *nodeInfo.Node().Status.Capacity.Memory(), *resource.NewQuantity(3*1024*1024, resource.DecimalSI))
 assert.NoError(t, err)
 assert.NotNil(t, nodeInfo)
-assert.NotEmpty(t, nodeInfo.Pods)
+assert.NotEmpty(t, nodeInfo.Pods())
 })

 t.Run("Checking static-only workflow with built-in SKU list", func(t *testing.T) {
@@ -1207,7 +1207,7 @@ func TestTemplateNodeInfo(t *testing.T) {
 nodeInfo, err := asg.TemplateNodeInfo()
 assert.NoError(t, err)
 assert.NotNil(t, nodeInfo)
-assert.NotEmpty(t, nodeInfo.Pods)
+assert.NotEmpty(t, nodeInfo.Pods())
 })

 }
@@ -24,7 +24,7 @@ import (
 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 "k8s.io/autoscaler/cluster-autoscaler/config"
 "k8s.io/autoscaler/cluster-autoscaler/config/dynamic"
-schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
+"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 )

 // VMsPool is single instance VM pool
@@ -169,7 +169,7 @@ func (agentPool *VMsPool) Nodes() ([]cloudprovider.Instance, error) {
 }

 // TemplateNodeInfo is not implemented.
-func (agentPool *VMsPool) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (agentPool *VMsPool) TemplateNodeInfo() (*framework.NodeInfo, error) {
 return nil, cloudprovider.ErrNotImplemented
 }
@@ -27,10 +27,10 @@ import (
 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 "k8s.io/autoscaler/cluster-autoscaler/config"
 "k8s.io/autoscaler/cluster-autoscaler/config/dynamic"
+"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 "k8s.io/autoscaler/cluster-autoscaler/utils/errors"
 "k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
 klog "k8s.io/klog/v2"
-schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )

 const (
@@ -365,13 +365,13 @@ func (asg *Asg) Nodes() ([]cloudprovider.Instance, error) {
 return instances, nil
 }

-// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty
+// TemplateNodeInfo returns a framework.NodeInfo structure of an empty
 // (as if just started) node. This will be used in scale-up simulations to
 // predict what would a new node look like if a node group was expanded. The returned
 // NodeInfo is expected to have a fully populated Node object, with all of the labels,
 // capacity and allocatable information as well as all pods that are started on
 // the node by default, using manifest (most likely only kube-proxy). Implementation optional.
-func (asg *Asg) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (asg *Asg) TemplateNodeInfo() (*framework.NodeInfo, error) {
 template, err := asg.baiducloudManager.getAsgTemplate(asg.Name)
 if err != nil {
 return nil, err
@@ -380,8 +380,7 @@ func (asg *Asg) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
 if err != nil {
 return nil, err
 }
-nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(asg.Name))
-nodeInfo.SetNode(node)
+nodeInfo := framework.NewNodeInfo(node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(asg.Name)})
 return nodeInfo, nil
 }
@@ -26,7 +26,7 @@ import (

 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 "k8s.io/autoscaler/cluster-autoscaler/config"
-schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
+"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 )

 const (
@@ -183,14 +183,14 @@ func (n *NodeGroup) Nodes() ([]cloudprovider.Instance, error) {
 return toInstances(n.nodePool.Nodes), nil
 }

-// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty
+// TemplateNodeInfo returns a framework.NodeInfo structure of an empty
 // (as if just started) node. This will be used in scale-up simulations to
 // predict what would a new node look like if a node group was expanded. The
 // returned NodeInfo is expected to have a fully populated Node object, with
 // all of the labels, capacity and allocatable information as well as all pods
 // that are started on the node by default, using manifest (most likely only
 // kube-proxy). Implementation optional.
-func (n *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (n *NodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
 return nil, cloudprovider.ErrNotImplemented
 }
@@ -31,6 +31,7 @@ import (
 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/status"
 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/k8ssdk"
 "k8s.io/autoscaler/cluster-autoscaler/config"
+"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 klog "k8s.io/klog/v2"
 v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
 schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
@@ -239,13 +240,13 @@ func (ng *brightboxNodeGroup) Exist() bool {
 return err == nil
 }

-// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty
+// TemplateNodeInfo returns a framework.NodeInfo structure of an empty
 // (as if just started) node. This will be used in scale-up simulations to
 // predict what would a new node look like if a node group was expanded. The returned
 // NodeInfo is expected to have a fully populated Node object, with all of the labels,
 // capacity and allocatable information as well as all pods that are started on
 // the node by default, using manifest (most likely only kube-proxy). Implementation optional.
-func (ng *brightboxNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (ng *brightboxNodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
 klog.V(4).Info("TemplateNodeInfo")
 klog.V(4).Infof("Looking for server type %q", ng.serverOptions.ServerType)
 serverType, err := ng.findServerType()
@@ -268,8 +269,7 @@ func (ng *brightboxNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo,
 Conditions: cloudprovider.BuildReadyConditions(),
 },
 }
-nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(ng.Id()))
-nodeInfo.SetNode(&node)
+nodeInfo := framework.NewNodeInfo(&node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(ng.Id())})
 return nodeInfo, nil
 }
@@ -316,7 +316,7 @@ func TestTemplateNodeInfo(t *testing.T) {
 Return(fakeServerTypezx45f(), nil)
 obj, err := makeFakeNodeGroup(t, testclient).TemplateNodeInfo()
 require.NoError(t, err)
-assert.Equal(t, fakeResource(), obj.Allocatable)
+assert.Equal(t, fakeResource(), obj.ToScheduler().Allocatable)
 }

 func TestNodeGroupErrors(t *testing.T) {
@@ -23,7 +23,7 @@ import (

 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 "k8s.io/autoscaler/cluster-autoscaler/config"
-schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
+"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 )

 const (
@@ -45,7 +45,7 @@ type cherryManager interface {
 getNodes(nodegroup string) ([]string, error)
 getNodeNames(nodegroup string) ([]string, error)
 deleteNodes(nodegroup string, nodes []NodeRef, updatedNodeCount int) error
-templateNodeInfo(nodegroup string) (*schedulerframework.NodeInfo, error)
+templateNodeInfo(nodegroup string) (*framework.NodeInfo, error)
 NodeGroupForNode(labels map[string]string, nodeId string) (string, error)
 }
@@ -42,10 +42,10 @@ import (
 utilerrors "k8s.io/apimachinery/pkg/util/errors"
 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 "k8s.io/autoscaler/cluster-autoscaler/config"
+"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 "k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
 "k8s.io/autoscaler/cluster-autoscaler/version"
 klog "k8s.io/klog/v2"
-schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )

 const (
@@ -618,7 +618,7 @@ func BuildGenericLabels(nodegroup string, plan *Plan) map[string]string {

 // templateNodeInfo returns a NodeInfo with a node template based on the Cherry Servers plan
 // that is used to create nodes in a given node group.
-func (mgr *cherryManagerRest) templateNodeInfo(nodegroup string) (*schedulerframework.NodeInfo, error) {
+func (mgr *cherryManagerRest) templateNodeInfo(nodegroup string) (*framework.NodeInfo, error) {
 node := apiv1.Node{}
 nodeName := fmt.Sprintf("%s-asg-%d", nodegroup, rand.Int63())
 node.ObjectMeta = metav1.ObjectMeta{
@@ -664,8 +664,7 @@ func (mgr *cherryManagerRest) templateNodeInfo(nodegroup string) (*schedulerfram
 // GenericLabels
 node.Labels = cloudprovider.JoinStringMaps(node.Labels, BuildGenericLabels(nodegroup, cherryPlan))

-nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(nodegroup))
-nodeInfo.SetNode(&node)
+nodeInfo := framework.NewNodeInfo(&node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(nodegroup)})
 return nodeInfo, nil
 }
@@ -25,8 +25,8 @@ import (
 v1 "k8s.io/api/core/v1"
 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 "k8s.io/autoscaler/cluster-autoscaler/config"
+"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 klog "k8s.io/klog/v2"
-schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )

 const (
@@ -269,7 +269,7 @@ func (ng *cherryNodeGroup) Nodes() ([]cloudprovider.Instance, error) {
 }

 // TemplateNodeInfo returns a node template for this node group.
-func (ng *cherryNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (ng *cherryNodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
 return ng.cherryManager.templateNodeInfo(ng.id)
 }
@@ -28,9 +28,9 @@ import (
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 autoscaler "k8s.io/autoscaler/cluster-autoscaler/config"
+"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 "k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
 "k8s.io/klog/v2"
-schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )

 // NodeGroup implements cloudprovider.NodeGroup interface. NodeGroup contains
@@ -208,15 +208,13 @@ func (n *NodeGroup) Nodes() ([]cloudprovider.Instance, error) {
 // all of the labels, capacity and allocatable information as well as all pods
 // that are started on the node by default, using manifest (most likely only
 // kube-proxy). Implementation optional.
-func (n *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (n *NodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
 node, err := n.buildNodeFromTemplate(n.Id(), n.nodeTemplate)
 if err != nil {
 return nil, fmt.Errorf("failed to build node from template")
 }

-nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(n.Id()))
-nodeInfo.SetNode(node)
-
+nodeInfo := framework.NewNodeInfo(node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(n.Id())})
 return nodeInfo, nil
 }
@@ -540,7 +540,7 @@ func TestNodeGroup_TemplateNodeInfo(t *testing.T) {

 nodeInfo, err := ng.TemplateNodeInfo()
 assert.NoError(t, err)
-assert.Equal(t, len(nodeInfo.Pods), 1, "should have one template pod")
+assert.Equal(t, len(nodeInfo.Pods()), 1, "should have one template pod")
 assert.Equal(t, nodeInfo.Node().Status.Capacity.Cpu().ToDec().Value(), int64(1000), "should match cpu capacity ")
 assert.Equal(t, nodeInfo.Node().Status.Capacity.Memory().ToDec().Value(), int64(1073741824), "should match memory capacity")
 assert.Equal(t, nodeInfo.Node().Status.Capacity.StorageEphemeral().ToDec().Value(), int64(21474836480), "should match epheral storage capacity")
@@ -23,8 +23,8 @@ import (
 apiv1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/resource"
 "k8s.io/autoscaler/cluster-autoscaler/config"
+"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 "k8s.io/autoscaler/cluster-autoscaler/utils/errors"
-schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )

 const (
@@ -214,13 +214,13 @@ type NodeGroup interface {
 // This list should include also instances that might have not become a kubernetes node yet.
 Nodes() ([]Instance, error)

-// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty
+// TemplateNodeInfo returns a framework.NodeInfo structure of an empty
 // (as if just started) node. This will be used in scale-up simulations to
 // predict what would a new node look like if a node group was expanded. The returned
 // NodeInfo is expected to have a fully populated Node object, with all of the labels,
 // capacity and allocatable information as well as all pods that are started on
 // the node by default, using manifest (most likely only kube-proxy). Implementation optional.
-TemplateNodeInfo() (*schedulerframework.NodeInfo, error)
+TemplateNodeInfo() (*framework.NodeInfo, error)

 // Exist checks if the node group really exists on the cloud provider side. Allows to tell the
 // theoretical node group from the real one. Implementation required.
@@ -26,8 +26,8 @@ import (
 "k8s.io/autoscaler/cluster-autoscaler/utils/errors"

 apiv1 "k8s.io/api/core/v1"
+"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 klog "k8s.io/klog/v2"
-schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )

 // asg implements NodeGroup interface.
@@ -168,7 +168,7 @@ func (asg *asg) Delete() error {
 }

 // TemplateNodeInfo returns a node template for this node group.
-func (asg *asg) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (asg *asg) TemplateNodeInfo() (*framework.NodeInfo, error) {
 return nil, cloudprovider.ErrNotImplemented
 }
@@ -29,7 +29,7 @@ import (
 corev1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
-schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
+"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"

 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 "k8s.io/autoscaler/cluster-autoscaler/config"
@@ -250,7 +250,7 @@ func (ng *nodegroup) Nodes() ([]cloudprovider.Instance, error) {
 // allocatable information as well as all pods that are started on the
 // node by default, using manifest (most likely only kube-proxy).
 // Implementation optional.
-func (ng *nodegroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (ng *nodegroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
 if !ng.scalableResource.CanScaleFromZero() {
 return nil, cloudprovider.ErrNotImplemented
 }
@@ -278,9 +278,7 @@ func (ng *nodegroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
 return nil, err
 }

-nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(ng.scalableResource.Name()))
-nodeInfo.SetNode(&node)
-
+nodeInfo := framework.NewNodeInfo(&node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(ng.scalableResource.Name())})
 return nodeInfo, nil
 }
@@ -26,7 +26,7 @@ import (

 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 "k8s.io/autoscaler/cluster-autoscaler/config"
-schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
+"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 )

 const (
@@ -200,14 +200,14 @@ func (n *NodeGroup) Nodes() ([]cloudprovider.Instance, error) {
 return toInstances(n.nodePool.Nodes), nil
 }

-// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty
+// TemplateNodeInfo returns a framework.NodeInfo structure of an empty
 // (as if just started) node. This will be used in scale-up simulations to
 // predict what would a new node look like if a node group was expanded. The
 // returned NodeInfo is expected to have a fully populated Node object, with
 // all of the labels, capacity and allocatable information as well as all pods
 // that are started on the node by default, using manifest (most likely only
 // kube-proxy). Implementation optional.
-func (n *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (n *NodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
 return nil, cloudprovider.ErrNotImplemented
 }
@@ -23,7 +23,7 @@ import (

 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 "k8s.io/autoscaler/cluster-autoscaler/config"
-schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
+"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 )

 const (
@@ -45,7 +45,7 @@ type equinixMetalManager interface {
 getNodes(nodegroup string) ([]string, error)
 getNodeNames(nodegroup string) ([]string, error)
 deleteNodes(nodegroup string, nodes []NodeRef, updatedNodeCount int) error
-templateNodeInfo(nodegroup string) (*schedulerframework.NodeInfo, error)
+templateNodeInfo(nodegroup string) (*framework.NodeInfo, error)
 NodeGroupForNode(labels map[string]string, nodeId string) (string, error)
 }
@@ -38,10 +38,10 @@ import (
 utilerrors "k8s.io/apimachinery/pkg/util/errors"
 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 "k8s.io/autoscaler/cluster-autoscaler/config"
+"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 "k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
 "k8s.io/autoscaler/cluster-autoscaler/version"
 klog "k8s.io/klog/v2"
-schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )

 const (
@@ -689,7 +689,7 @@ func BuildGenericLabels(nodegroup string, instanceType string) map[string]string

 // templateNodeInfo returns a NodeInfo with a node template based on the equinix metal plan
 // that is used to create nodes in a given node group.
-func (mgr *equinixMetalManagerRest) templateNodeInfo(nodegroup string) (*schedulerframework.NodeInfo, error) {
+func (mgr *equinixMetalManagerRest) templateNodeInfo(nodegroup string) (*framework.NodeInfo, error) {
 node := apiv1.Node{}
 nodeName := fmt.Sprintf("%s-asg-%d", nodegroup, rand.Int63())
 node.ObjectMeta = metav1.ObjectMeta{
@@ -716,8 +716,7 @@ func (mgr *equinixMetalManagerRest) templateNodeInfo(nodegroup string) (*schedul
 // GenericLabels
 node.Labels = cloudprovider.JoinStringMaps(node.Labels, BuildGenericLabels(nodegroup, mgr.getNodePoolDefinition(nodegroup).plan))

-nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(nodegroup))
-nodeInfo.SetNode(&node)
+nodeInfo := framework.NewNodeInfo(&node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(nodegroup)})
 return nodeInfo, nil
 }
@@ -24,8 +24,8 @@ import (
 apiv1 "k8s.io/api/core/v1"
 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 "k8s.io/autoscaler/cluster-autoscaler/config"
+"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 klog "k8s.io/klog/v2"
-schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )

 // equinixMetalNodeGroup implements NodeGroup interface from cluster-autoscaler/cloudprovider.
@@ -260,7 +260,7 @@ func (ng *equinixMetalNodeGroup) Nodes() ([]cloudprovider.Instance, error) {
 }

 // TemplateNodeInfo returns a node template for this node group.
-func (ng *equinixMetalNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (ng *equinixMetalNodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
 return ng.equinixMetalManager.templateNodeInfo(ng.id)
 }
@@ -26,7 +26,7 @@ import (
 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 egoscale "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/exoscale/internal/github.com/exoscale/egoscale/v2"
 "k8s.io/autoscaler/cluster-autoscaler/config"
-schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
+"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 )

 // instancePoolNodeGroup implements cloudprovider.NodeGroup interface for Exoscale Instance Pools.
@@ -170,13 +170,13 @@ func (n *instancePoolNodeGroup) Nodes() ([]cloudprovider.Instance, error) {
 return nodes, nil
 }

-// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty
+// TemplateNodeInfo returns a framework.NodeInfo structure of an empty
 // (as if just started) node. This will be used in scale-up simulations to
 // predict what would a new node look like if a node group was expanded. The returned
 // NodeInfo is expected to have a fully populated Node object, with all of the labels,
 // capacity and allocatable information as well as all pods that are started on
 // the node by default, using manifest (most likely only kube-proxy). Implementation optional.
-func (n *instancePoolNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (n *instancePoolNodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
 return nil, cloudprovider.ErrNotImplemented
 }
@@ -25,7 +25,7 @@ import (
 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 egoscale "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/exoscale/internal/github.com/exoscale/egoscale/v2"
 "k8s.io/autoscaler/cluster-autoscaler/config"
-schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
+"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 )

 const (
@@ -187,13 +187,13 @@ func (n *sksNodepoolNodeGroup) Nodes() ([]cloudprovider.Instance, error) {
 return nodes, nil
 }

-// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty
+// TemplateNodeInfo returns a framework.NodeInfo structure of an empty
 // (as if just started) node. This will be used in scale-up simulations to
 // predict what would a new node look like if a node group was expanded. The returned
 // NodeInfo is expected to have a fully populated Node object, with all of the labels,
 // capacity and allocatable information as well as all pods that are started on
 // the node by default, using manifest (most likely only kube-proxy). Implementation optional.
-func (n *sksNodepoolNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (n *sksNodepoolNodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
 return nil, cloudprovider.ErrNotImplemented
 }
@@ -28,8 +28,8 @@ import (
 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/externalgrpc/protos"
 "k8s.io/autoscaler/cluster-autoscaler/config"
+"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 klog "k8s.io/klog/v2"
-schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )

 // NodeGroup implements cloudprovider.NodeGroup interface. NodeGroup contains
@@ -44,7 +44,7 @@ type NodeGroup struct {
 grpcTimeout time.Duration

 mutex sync.Mutex
-nodeInfo **schedulerframework.NodeInfo // used to cache NodeGroupTemplateNodeInfo() grpc calls
+nodeInfo **framework.NodeInfo // used to cache NodeGroupTemplateNodeInfo() grpc calls
 }

 // MaxSize returns maximum size of the node group.
@@ -188,7 +188,7 @@ func (n *NodeGroup) Nodes() ([]cloudprovider.Instance, error) {
 return instances, nil
 }

-// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty
+// TemplateNodeInfo returns a framework.NodeInfo structure of an empty
 // (as if just started) node. This will be used in scale-up simulations to
 // predict what would a new node look like if a node group was expanded. The
 // returned NodeInfo is expected to have a fully populated Node object, with
@@ -200,7 +200,7 @@ func (n *NodeGroup) Nodes() ([]cloudprovider.Instance, error) {
 // complex approach and does not cover all the scenarios. For the sake of simplicity,
 // the `nodeInfo` is defined as a Kubernetes `k8s.io.api.core.v1.Node` type
 // where the system could still extract certain info about the node.
-func (n *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (n *NodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
 n.mutex.Lock()
 defer n.mutex.Unlock()

@@ -224,11 +224,10 @@ func (n *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
 }
 pbNodeInfo := res.GetNodeInfo()
 if pbNodeInfo == nil {
-n.nodeInfo = new(*schedulerframework.NodeInfo)
+n.nodeInfo = new(*framework.NodeInfo)
 return nil, nil
 }
-nodeInfo := schedulerframework.NewNodeInfo()
-nodeInfo.SetNode(pbNodeInfo)
+nodeInfo := framework.NewNodeInfo(pbNodeInfo, nil)
 n.nodeInfo = &nodeInfo
 return nodeInfo, nil
 }
@@ -26,10 +26,10 @@ import (
 "k8s.io/apimachinery/pkg/api/resource"
 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 "k8s.io/autoscaler/cluster-autoscaler/config"
+"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 "k8s.io/autoscaler/cluster-autoscaler/utils/errors"
 "k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
 klog "k8s.io/klog/v2"
-schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )

 const (
@@ -361,13 +361,12 @@ func (mig *gceMig) GetOptions(defaults config.NodeGroupAutoscalingOptions) (*con
 }

 // TemplateNodeInfo returns a node template for this node group.
-func (mig *gceMig) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (mig *gceMig) TemplateNodeInfo() (*framework.NodeInfo, error) {
 node, err := mig.gceManager.GetMigTemplateNode(mig)
 if err != nil {
 return nil, err
 }
-nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(mig.Id()))
-nodeInfo.SetNode(node)
+nodeInfo := framework.NewNodeInfo(node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(mig.Id())})
 return nodeInfo, nil
 }
@@ -31,8 +31,8 @@ import (
 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/hetzner/hcloud-go/hcloud"
 "k8s.io/autoscaler/cluster-autoscaler/config"
+"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 "k8s.io/klog/v2"
-schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )

 // hetznerNodeGroup implements cloudprovider.NodeGroup interface. hetznerNodeGroup contains
@@ -251,14 +251,14 @@ func (n *hetznerNodeGroup) Nodes() ([]cloudprovider.Instance, error) {
 return instances, nil
 }

-// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty
+// TemplateNodeInfo returns a framework.NodeInfo structure of an empty
 // (as if just started) node. This will be used in scale-up simulations to
 // predict what would a new node look like if a node group was expanded. The
 // returned NodeInfo is expected to have a fully populated Node object, with
 // all of the labels, capacity and allocatable information as well as all pods
 // that are started on the node by default, using manifest (most likely only
 // kube-proxy). Implementation optional.
-func (n *hetznerNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (n *hetznerNodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
 resourceList, err := getMachineTypeResourceList(n.manager, n.instanceType)
 if err != nil {
 return nil, fmt.Errorf("failed to create resource list for node group %s error: %v", n.id, err)
@@ -297,9 +297,7 @@ func (n *hetznerNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, err
 }
 }

-nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(n.id))
-nodeInfo.SetNode(&node)
-
+nodeInfo := framework.NewNodeInfo(&node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(n.id)})
 return nodeInfo, nil
 }
@@ -26,10 +26,10 @@ import (
 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 huaweicloudsdkasmodel "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/huaweicloud/huaweicloud-sdk-go-v3/services/as/v1/model"
 "k8s.io/autoscaler/cluster-autoscaler/config"
+"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 "k8s.io/client-go/kubernetes"
 "k8s.io/client-go/tools/clientcmd"
 "k8s.io/klog/v2"
-schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )

 // AutoScalingGroup represents a HuaweiCloud's 'Auto Scaling Group' which also can be treated as a node group.
@@ -180,13 +180,13 @@ func (asg *AutoScalingGroup) Nodes() ([]cloudprovider.Instance, error) {
 return instances, nil
 }

-// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty
+// TemplateNodeInfo returns a framework.NodeInfo structure of an empty
 // (as if just started) node. This will be used in scale-up simulations to
 // predict what would a new node look like if a node group was expanded. The returned
 // NodeInfo is expected to have a fully populated Node object, with all of the labels,
 // capacity and allocatable information as well as all pods that are started on
 // the node by default, using manifest (most likely only kube-proxy). Implementation optional.
-func (asg *AutoScalingGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (asg *AutoScalingGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
 template, err := asg.cloudServiceManager.getAsgTemplate(asg.groupID)
 if err != nil {
 return nil, err
@@ -195,8 +195,7 @@ func (asg *AutoScalingGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, e
 if err != nil {
 return nil, err
 }
-nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(asg.groupName))
-nodeInfo.SetNode(node)
+nodeInfo := framework.NewNodeInfo(node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(asg.groupName)})
 return nodeInfo, nil
 }
@@ -24,10 +24,10 @@ import (
 "k8s.io/apimachinery/pkg/api/resource"
 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 "k8s.io/autoscaler/cluster-autoscaler/config"
+"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 caerrors "k8s.io/autoscaler/cluster-autoscaler/utils/errors"
 "k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
 "k8s.io/klog/v2"
-schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )

 const (
@@ -144,14 +144,14 @@ func (n *nodePool) Nodes() ([]cloudprovider.Instance, error) {
 return n.manager.GetInstancesForNodeGroup(n)
 }

-// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty
+// TemplateNodeInfo returns a framework.NodeInfo structure of an empty
 // (as if just started) node. This will be used in scale-up simulations to
 // predict what would a new node look like if a node group was expanded. The
 // returned NodeInfo is expected to have a fully populated Node object, with
 // all of the labels, capacity and allocatable information as well as all pods
 // that are started on the node by default, using manifest (most likely only
 // kube-proxy). Implementation optional.
-func (n *nodePool) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (n *nodePool) TemplateNodeInfo() (*framework.NodeInfo, error) {
 return nil, cloudprovider.ErrNotImplemented
 }
@@ -19,16 +19,17 @@ package kamatera
 import (
 "context"
 "fmt"
+"strconv"
+"strings"

 apiv1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 "k8s.io/autoscaler/cluster-autoscaler/config"
+"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 "k8s.io/client-go/kubernetes"
 "k8s.io/klog/v2"
-schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
-"strconv"
-"strings"
 )

 // NodeGroup implements cloudprovider.NodeGroup interface. NodeGroup contains
@@ -147,13 +148,13 @@ func (n *NodeGroup) Nodes() ([]cloudprovider.Instance, error) {
 return instances, nil
 }

-// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty
+// TemplateNodeInfo returns a framework.NodeInfo structure of an empty
 // (as if just started) node. This will be used in scale-up simulations to
 // predict what would a new node look like if a node group was expanded. The returned
 // NodeInfo is expected to have a fully populated Node object, with all of the labels,
 // capacity and allocatable information as well as all pods that are started on
 // the node by default, using manifest (most likely only kube-proxy). Implementation optional.
-func (n *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (n *NodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
 resourceList, err := n.getResourceList()
 if err != nil {
 return nil, fmt.Errorf("failed to create resource list for node group %s error: %v", n.id, err)
@@ -171,9 +172,7 @@ func (n *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
 node.Status.Allocatable = node.Status.Capacity
 node.Status.Conditions = cloudprovider.BuildReadyConditions()

-nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(n.id))
-nodeInfo.SetNode(&node)
-
+nodeInfo := framework.NewNodeInfo(&node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(n.id)})
 return nodeInfo, nil
 }
@@ -32,6 +32,7 @@ import (
 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 "k8s.io/autoscaler/cluster-autoscaler/config"
 "k8s.io/autoscaler/cluster-autoscaler/config/dynamic"
+"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 "k8s.io/autoscaler/cluster-autoscaler/utils/errors"
 "k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
 "k8s.io/client-go/informers"
@@ -39,7 +40,6 @@ import (
 "k8s.io/client-go/rest"
 "k8s.io/client-go/tools/clientcmd"
 "k8s.io/kubernetes/pkg/kubemark"
-schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"

 klog "k8s.io/klog/v2"
 )
@@ -290,7 +290,7 @@ func (nodeGroup *NodeGroup) DecreaseTargetSize(delta int) error {
 }

 // TemplateNodeInfo returns a node template for this node group.
-func (nodeGroup *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (nodeGroup *NodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
 return nil, cloudprovider.ErrNotImplemented
 }
@@ -25,8 +25,8 @@ import (
 "k8s.io/apimachinery/pkg/util/rand"
 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 "k8s.io/autoscaler/cluster-autoscaler/config"
+"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 klog "k8s.io/klog/v2"
-schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )

 var (
@@ -186,10 +186,8 @@ func (nodeGroup *NodeGroup) Nodes() ([]cloudprovider.Instance, error) {
 }

 // TemplateNodeInfo returns a node template for this node group.
-func (nodeGroup *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
-nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(nodeGroup.Id()))
-nodeInfo.SetNode(nodeGroup.nodeTemplate)
-
+func (nodeGroup *NodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
+nodeInfo := framework.NewNodeInfo(nodeGroup.nodeTemplate, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(nodeGroup.Id())})
 return nodeInfo, nil
 }
@@ -305,8 +305,8 @@ func TestTemplateNodeInfo(t *testing.T) {
 ti, err := ng.TemplateNodeInfo()
 assert.Nil(t, err)
 assert.NotNil(t, ti)
-assert.Len(t, ti.Pods, 1)
-assert.Contains(t, ti.Pods[0].Pod.Name, fmt.Sprintf("kube-proxy-%s", ng.name))
+assert.Len(t, ti.Pods(), 1)
+assert.Contains(t, ti.Pods()[0].Pod.Name, fmt.Sprintf("kube-proxy-%s", ng.name))
 assert.Equal(t, ng.nodeTemplate, ti.Node())

 }
@@ -26,8 +26,8 @@ import (
 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/linode/linodego"
 "k8s.io/autoscaler/cluster-autoscaler/config"
+"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 klog "k8s.io/klog/v2"
-schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )

 const (
@@ -186,14 +186,14 @@ func (n *NodeGroup) Nodes() ([]cloudprovider.Instance, error) {
 return nodes, nil
 }

-// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty
+// TemplateNodeInfo returns a framework.NodeInfo structure of an empty
 // (as if just started) node. This will be used in scale-up simulations to
 // predict what would a new node look like if a node group was expanded. The
 // returned NodeInfo is expected to have a fully populated Node object, with
 // all of the labels, capacity and allocatable information as well as all pods
 // that are started on the node by default, using manifest (most likely only
 // kube-proxy). Implementation optional.
-func (n *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (n *NodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
 return nil, cloudprovider.ErrNotImplemented
 }
@@ -24,8 +24,8 @@ import (
 apiv1 "k8s.io/api/core/v1"
 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 "k8s.io/autoscaler/cluster-autoscaler/config"
+"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 klog "k8s.io/klog/v2"
-schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )

 // How long to sleep after deleting nodes, to ensure that multiple requests arrive in order.
@@ -206,7 +206,7 @@ func (ng *magnumNodeGroup) Nodes() ([]cloudprovider.Instance, error) {
 }

 // TemplateNodeInfo returns a node template for this node group.
-func (ng *magnumNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (ng *magnumNodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
 return nil, cloudprovider.ErrNotImplemented
 }
@@ -20,7 +20,7 @@ import (
 cloudprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 config "k8s.io/autoscaler/cluster-autoscaler/config"

-framework "k8s.io/kubernetes/pkg/scheduler/framework"
+"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"

 mock "github.com/stretchr/testify/mock"
@@ -6,15 +6,16 @@ package instancepools

 import (
 "fmt"

 "github.com/pkg/errors"
 apiv1 "k8s.io/api/core/v1"
 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/oci/common"
 ocicommon "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/oci/common"
 "k8s.io/autoscaler/cluster-autoscaler/config"
+"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 "k8s.io/client-go/kubernetes"
 "k8s.io/klog/v2"
-schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )

 // InstancePoolNodeGroup implements the NodeGroup interface using OCI instance pools.
@@ -172,23 +173,23 @@ func (ip *InstancePoolNodeGroup) Nodes() ([]cloudprovider.Instance, error) {
 return ip.manager.GetInstancePoolNodes(*ip)
 }

-// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty
+// TemplateNodeInfo returns a framework.NodeInfo structure of an empty
 // (as if just started) node. This will be used in scale-up simulations to
 // predict what would a new node look like if a instance-pool was expanded. The returned
 // NodeInfo is expected to have a fully populated Node object, with all of the labels,
 // capacity and allocatable information as well as all pods that are started on
 // the node by default, using manifest (most likely only kube-proxy). Implementation optional.
-func (ip *InstancePoolNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (ip *InstancePoolNodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
 node, err := ip.manager.GetInstancePoolTemplateNode(*ip)
 if err != nil {
 return nil, errors.Wrap(err, "unable to build node info template")
 }

-nodeInfo := schedulerframework.NewNodeInfo(
-cloudprovider.BuildKubeProxy(ip.id),
-ocicommon.BuildCSINodePod(),
+nodeInfo := framework.NewNodeInfo(
+node, nil,
+&framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(ip.id)},
+&framework.PodInfo{Pod: ocicommon.BuildCSINodePod()},
 )
-nodeInfo.SetNode(node)
 return nodeInfo, nil
 }
@@ -18,9 +18,9 @@ import (
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 "k8s.io/autoscaler/cluster-autoscaler/config"
+"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 "k8s.io/client-go/kubernetes"
 klog "k8s.io/klog/v2"
-schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"

 ocicommon "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/oci/common"
 )
@@ -273,24 +273,24 @@ func (np *nodePool) Nodes() ([]cloudprovider.Instance, error) {
 return np.manager.GetNodePoolNodes(np)
 }

-// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty
+// TemplateNodeInfo returns a framework.NodeInfo structure of an empty
 // (as if just started) node. This will be used in scale-up simulations to
 // predict what would a new node look like if a node group was expanded. The returned
 // NodeInfo is expected to have a fully populated Node object, with all of the labels,
 // capacity and allocatable information as well as all pods that are started on
 // the node by default, using manifest (most likely only kube-proxy). Implementation optional.
-func (np *nodePool) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (np *nodePool) TemplateNodeInfo() (*framework.NodeInfo, error) {
 node, err := np.manager.GetNodePoolTemplateNode(np)
 if err != nil {
 return nil, errors.Wrap(err, "unable to build node pool template")
 }

-nodeInfo := schedulerframework.NewNodeInfo(
-cloudprovider.BuildKubeProxy(np.id),
-ocicommon.BuildFlannelPod(),
-ocicommon.BuildProxymuxClientPod(),
+nodeInfo := framework.NewNodeInfo(
+node, nil,
+&framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(np.id)},
+&framework.PodInfo{Pod: ocicommon.BuildFlannelPod()},
+&framework.PodInfo{Pod: ocicommon.BuildProxymuxClientPod()},
 )
-nodeInfo.SetNode(node)
 return nodeInfo, nil
 }
@@ -28,8 +28,8 @@ import (
 apiv1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 "k8s.io/klog/v2"
-schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"

 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/ovhcloud/sdk"
@@ -215,7 +215,7 @@ func (ng *NodeGroup) Nodes() ([]cloudprovider.Instance, error) {
 }

 // TemplateNodeInfo returns a node template for this node group.
-func (ng *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (ng *NodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
 // Forge node template in a node group
 node := &apiv1.Node{
 ObjectMeta: metav1.ObjectMeta{
@@ -252,9 +252,7 @@ func (ng *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
 node.Status.Allocatable = node.Status.Capacity

 // Setup node info template
-nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(ng.Id()))
-nodeInfo.SetNode(node)
-
+nodeInfo := framework.NewNodeInfo(node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(ng.Id())})
 return nodeInfo, nil
 }
@@ -31,8 +31,8 @@ import (
 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 provisioningv1 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/rancher/provisioning.cattle.io/v1"
 "k8s.io/autoscaler/cluster-autoscaler/config"
+"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 klog "k8s.io/klog/v2"
-schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 "k8s.io/utils/pointer"
 )

@@ -196,7 +196,7 @@ func (ng *nodeGroup) DecreaseTargetSize(delta int) error {
 }

 // TemplateNodeInfo returns a node template for this node group.
-func (ng *nodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (ng *nodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
 node := &corev1.Node{
 ObjectMeta: metav1.ObjectMeta{
 Name: fmt.Sprintf("%s-%s-%d", ng.provider.config.ClusterName, ng.Id(), rand.Int63()),
@@ -216,9 +216,7 @@ func (ng *nodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
 node.Status.Allocatable = node.Status.Capacity

 // Setup node info template
-nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(ng.Id()))
-nodeInfo.SetNode(node)
-
+nodeInfo := framework.NewNodeInfo(node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(ng.Id())})
 return nodeInfo, nil
 }
@ -396,19 +396,19 @@ func TestTemplateNodeInfo(t *testing.T) {
t.Fatal(err)
}

if nodeInfo.Allocatable.MilliCPU != ng.resources.Cpu().MilliValue() {
if nodeInfo.ToScheduler().Allocatable.MilliCPU != ng.resources.Cpu().MilliValue() {
t.Fatalf("expected nodeInfo to have %v MilliCPU, got %v",
ng.resources.Cpu().MilliValue(), nodeInfo.Allocatable.MilliCPU)
ng.resources.Cpu().MilliValue(), nodeInfo.ToScheduler().Allocatable.MilliCPU)
}

if nodeInfo.Allocatable.Memory != ng.resources.Memory().Value() {
if nodeInfo.ToScheduler().Allocatable.Memory != ng.resources.Memory().Value() {
t.Fatalf("expected nodeInfo to have %v Memory, got %v",
ng.resources.Memory().Value(), nodeInfo.Allocatable.Memory)
ng.resources.Memory().Value(), nodeInfo.ToScheduler().Allocatable.Memory)
}

if nodeInfo.Allocatable.EphemeralStorage != ng.resources.StorageEphemeral().Value() {
if nodeInfo.ToScheduler().Allocatable.EphemeralStorage != ng.resources.StorageEphemeral().Value() {
t.Fatalf("expected nodeInfo to have %v ephemeral storage, got %v",
ng.resources.StorageEphemeral().Value(), nodeInfo.Allocatable.EphemeralStorage)
ng.resources.StorageEphemeral().Value(), nodeInfo.ToScheduler().Allocatable.EphemeralStorage)
}
}
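Direct reads of scheduler-level fields such as Allocatable now go through the wrapper's ToScheduler() accessor, as the assertions above show. A small sketch under that assumption; checkAllocatable is a hypothetical test helper, not part of the commit.

package example

import (
	"testing"

	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

// checkAllocatable reads scheduler-level resources through the wrapper.
func checkAllocatable(t *testing.T, nodeInfo *framework.NodeInfo, wantMilliCPU, wantMemory int64) {
	alloc := nodeInfo.ToScheduler().Allocatable // the underlying scheduler NodeInfo's resources
	if alloc.MilliCPU != wantMilliCPU {
		t.Errorf("expected %d MilliCPU, got %d", wantMilliCPU, alloc.MilliCPU)
	}
	if alloc.Memory != wantMemory {
		t.Errorf("expected %d bytes of memory, got %d", wantMemory, alloc.Memory)
	}
}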
@ -20,16 +20,17 @@ import (
|
|||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/scaleway/scalewaygo"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
||||
"k8s.io/klog/v2"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// NodeGroup implements cloudprovider.NodeGroup interface.
|
||||
|
|
@ -198,13 +199,13 @@ func (ng *NodeGroup) Nodes() ([]cloudprovider.Instance, error) {
|
|||
return nodes, nil
|
||||
}
|
||||
|
||||
// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty
|
||||
// TemplateNodeInfo returns a framework.NodeInfo structure of an empty
|
||||
// (as if just started) node. This will be used in scale-up simulations to
|
||||
// predict what would a new node look like if a node group was expanded. The returned
|
||||
// NodeInfo is expected to have a fully populated Node object, with all of the labels,
|
||||
// capacity and allocatable information as well as all pods that are started on
|
||||
// the node by default, using manifest (most likely only kube-proxy).
|
||||
func (ng *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
|
||||
func (ng *NodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
|
||||
klog.V(4).Infof("TemplateNodeInfo,PoolID=%s", ng.p.ID)
|
||||
node := apiv1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
|
|
@ -235,8 +236,7 @@ func (ng *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
|
|||
node.Status.Conditions = cloudprovider.BuildReadyConditions()
|
||||
node.Spec.Taints = parseTaints(ng.specs.Taints)
|
||||
|
||||
nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(ng.p.Name))
|
||||
nodeInfo.SetNode(&node)
|
||||
nodeInfo := framework.NewNodeInfo(&node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(ng.p.Name)})
|
||||
return nodeInfo, nil
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -26,7 +26,7 @@ import (
|
|||
|
||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||
)
|
||||
|
||||
// TcRef contains a reference to some entity in Tencentcloud/TKE world.
|
||||
|
|
@ -247,15 +247,14 @@ func (asg *tcAsg) Nodes() ([]cloudprovider.Instance, error) {
|
|||
}
|
||||
|
||||
// TemplateNodeInfo returns a node template for this node group.
|
||||
func (asg *tcAsg) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
|
||||
func (asg *tcAsg) TemplateNodeInfo() (*framework.NodeInfo, error) {
|
||||
node, err := asg.tencentcloudManager.GetAsgTemplateNode(asg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
klog.V(4).Infof("Generate tencentcloud template: labels=%v taints=%v allocatable=%v", node.Labels, node.Spec.Taints, node.Status.Allocatable)
|
||||
|
||||
nodeInfo := schedulerframework.NewNodeInfo()
|
||||
nodeInfo.SetNode(node)
|
||||
nodeInfo := framework.NewNodeInfo(node, nil)
|
||||
return nodeInfo, nil
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -24,9 +24,9 @@ import (
|
|||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
||||
)
|
||||
|
||||
// OnScaleUpFunc is a function called on node group increase in TestCloudProvider.
|
||||
|
|
@ -56,7 +56,7 @@ type TestCloudProvider struct {
|
|||
onNodeGroupDelete func(string) error
|
||||
hasInstance func(string) (bool, error)
|
||||
machineTypes []string
|
||||
machineTemplates map[string]*schedulerframework.NodeInfo
|
||||
machineTemplates map[string]*framework.NodeInfo
|
||||
priceModel cloudprovider.PricingModel
|
||||
resourceLimiter *cloudprovider.ResourceLimiter
|
||||
}
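The test cloud provider now stores its machine templates as wrapper NodeInfos, and the constructor in the next hunk takes the same map type. A hedged sketch of wiring it up with the NewTestNodeInfo helper used elsewhere in this commit's tests; the testprovider import path and the nil callbacks are assumptions made for the example.

package example

import (
	apiv1 "k8s.io/api/core/v1"
	testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

// newProviderWithTemplate registers a single "T1" machine type whose template
// is a wrapper NodeInfo built from the given node.
func newProviderWithTemplate(templateNode *apiv1.Node) *testprovider.TestCloudProvider {
	templates := map[string]*framework.NodeInfo{
		"T1": framework.NewTestNodeInfo(templateNode),
	}
	return testprovider.NewTestAutoprovisioningCloudProvider(
		nil, nil, nil, nil, // scale-up/down and node-group create/delete callbacks unused here
		[]string{"T1"}, templates)
}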
@ -75,7 +75,7 @@ func NewTestCloudProvider(onScaleUp OnScaleUpFunc, onScaleDown OnScaleDownFunc)
|
|||
// NewTestAutoprovisioningCloudProvider builds new TestCloudProvider with autoprovisioning support
|
||||
func NewTestAutoprovisioningCloudProvider(onScaleUp OnScaleUpFunc, onScaleDown OnScaleDownFunc,
|
||||
onNodeGroupCreate OnNodeGroupCreateFunc, onNodeGroupDelete OnNodeGroupDeleteFunc,
|
||||
machineTypes []string, machineTemplates map[string]*schedulerframework.NodeInfo) *TestCloudProvider {
|
||||
machineTypes []string, machineTemplates map[string]*framework.NodeInfo) *TestCloudProvider {
|
||||
return &TestCloudProvider{
|
||||
nodes: make(map[string]string),
|
||||
groups: make(map[string]cloudprovider.NodeGroup),
|
||||
|
|
@ -494,7 +494,7 @@ func (tng *TestNodeGroup) Autoprovisioned() bool {
|
|||
}
|
||||
|
||||
// TemplateNodeInfo returns a node template for this node group.
|
||||
func (tng *TestNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
|
||||
func (tng *TestNodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
|
||||
if tng.cloudProvider.machineTemplates == nil {
|
||||
return nil, cloudprovider.ErrNotImplemented
|
||||
}
|
||||
|
|
|
|||
|
|
@ -22,8 +22,8 @@ import (
|
|||
apiv1 "k8s.io/api/core/v1"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||
"k8s.io/klog/v2"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
||||
)
|
||||
|
||||
// AutoScalingGroup represents a Volcengine 'Auto Scaling Group' which also can be treated as a node group.
|
||||
|
|
@ -169,13 +169,13 @@ func (asg *AutoScalingGroup) Nodes() ([]cloudprovider.Instance, error) {
|
|||
return nodes, nil
|
||||
}
|
||||
|
||||
// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty
|
||||
// TemplateNodeInfo returns a framework.NodeInfo structure of an empty
|
||||
// (as if just started) node. This will be used in scale-up simulations to
|
||||
// predict what would a new node look like if a node group was expanded. The returned
|
||||
// NodeInfo is expected to have a fully populated Node object, with all of the labels,
|
||||
// capacity and allocatable information as well as all pods that are started on
|
||||
// the node by default, using manifest (most likely only kube-proxy). Implementation optional.
|
||||
func (asg *AutoScalingGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
|
||||
func (asg *AutoScalingGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
|
||||
template, err := asg.manager.getAsgTemplate(asg.asgId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
@ -184,8 +184,7 @@ func (asg *AutoScalingGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, e
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(asg.asgId))
|
||||
nodeInfo.SetNode(node)
|
||||
nodeInfo := framework.NewNodeInfo(node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(asg.asgId)})
|
||||
return nodeInfo, nil
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -25,7 +25,7 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/vultr/govultr"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||
)
|
||||
|
||||
const (
|
||||
|
|
@ -193,14 +193,14 @@ func (n *NodeGroup) Nodes() ([]cloudprovider.Instance, error) {
|
|||
|
||||
}
|
||||
|
||||
// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty
|
||||
// TemplateNodeInfo returns a framework.NodeInfo structure of an empty
|
||||
// (as if just started) node. This will be used in scale-up simulations to
|
||||
// predict what would a new node look like if a node group was expanded. The
|
||||
// returned NodeInfo is expected to have a fully populated Node object, with
|
||||
// all of the labels, capacity and allocatable information as well as all pods
|
||||
// that are started on the node by default, using manifest (most likely only
|
||||
// kube-proxy). Implementation optional.
|
||||
func (n *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
|
||||
func (n *NodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
|
||||
return nil, cloudprovider.ErrNotImplemented
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -30,6 +30,7 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/metrics"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/processors/nodegroupconfig"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/processors/nodegroups/asyncnodegroups"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/backoff"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
||||
kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
|
||||
|
|
@ -38,7 +39,6 @@ import (
|
|||
apiv1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
||||
|
||||
klog "k8s.io/klog/v2"
|
||||
)
|
||||
|
|
@ -124,7 +124,7 @@ type ClusterStateRegistry struct {
|
|||
scaleUpRequests map[string]*ScaleUpRequest // nodeGroupName -> ScaleUpRequest
|
||||
scaleDownRequests []*ScaleDownRequest
|
||||
nodes []*apiv1.Node
|
||||
nodeInfosForGroups map[string]*schedulerframework.NodeInfo
|
||||
nodeInfosForGroups map[string]*framework.NodeInfo
|
||||
cloudProvider cloudprovider.CloudProvider
|
||||
perNodeGroupReadiness map[string]Readiness
|
||||
totalReadiness Readiness
|
||||
|
|
@ -338,7 +338,7 @@ func (csr *ClusterStateRegistry) registerFailedScaleUpNoLock(nodeGroup cloudprov
|
|||
}
|
||||
|
||||
// UpdateNodes updates the state of the nodes in the ClusterStateRegistry and recalculates the stats
|
||||
func (csr *ClusterStateRegistry) UpdateNodes(nodes []*apiv1.Node, nodeInfosForGroups map[string]*schedulerframework.NodeInfo, currentTime time.Time) error {
|
||||
func (csr *ClusterStateRegistry) UpdateNodes(nodes []*apiv1.Node, nodeInfosForGroups map[string]*framework.NodeInfo, currentTime time.Time) error {
|
||||
csr.updateNodeGroupMetrics()
|
||||
targetSizes, err := getTargetSizes(csr.cloudProvider)
|
||||
if err != nil {
|
||||
|
|
|
|||
|
|
@ -45,12 +45,12 @@ func currentlyDrainedPods(context *context.AutoscalingContext) []*apiv1.Pod {
var pods []*apiv1.Pod
_, nodeNames := context.ScaleDownActuator.CheckStatus().DeletionsInProgress()
for _, nodeName := range nodeNames {
nodeInfo, err := context.ClusterSnapshot.NodeInfos().Get(nodeName)
nodeInfo, err := context.ClusterSnapshot.GetNodeInfo(nodeName)
if err != nil {
klog.Warningf("Couldn't get node %v info, assuming the node got deleted already: %v", nodeName, err)
continue
}
for _, podInfo := range nodeInfo.Pods {
for _, podInfo := range nodeInfo.Pods() {
// Filter out pods that has deletion timestamp set
if podInfo.Pod.DeletionTimestamp != nil {
klog.Infof("Pod %v has deletion timestamp set, skipping injection to unschedulable pods list", podInfo.Pod.Name)
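Both halves of the wrapper migration show up in this hunk: snapshot lookups move from NodeInfos().Get(name) to GetNodeInfo(name), and Pods becomes a method returning the wrapper's pod infos. A sketch of the same access pattern, assuming only the ClusterSnapshot and NodeInfo methods visible in this diff; collectRunningPods is a hypothetical helper.

package example

import (
	apiv1 "k8s.io/api/core/v1"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
)

// collectRunningPods gathers pods from the named nodes, skipping nodes that are
// already gone from the snapshot and pods that are being deleted.
func collectRunningPods(snapshot clustersnapshot.ClusterSnapshot, nodeNames []string) []*apiv1.Pod {
	var pods []*apiv1.Pod
	for _, name := range nodeNames {
		nodeInfo, err := snapshot.GetNodeInfo(name) // previously snapshot.NodeInfos().Get(name)
		if err != nil {
			continue
		}
		for _, podInfo := range nodeInfo.Pods() { // Pods is now a method call
			if podInfo.Pod.DeletionTimestamp == nil {
				pods = append(pods, podInfo.Pod)
			}
		}
	}
	return pods
}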
@ -125,13 +125,12 @@ func TestFilterOutExpendable(t *testing.T) {
|
|||
assert.ElementsMatch(t, tc.wantPods, pods)
|
||||
|
||||
var podsInSnapshot []*apiv1.Pod
|
||||
nodeInfoLister := snapshot.NodeInfos()
|
||||
// Get pods in snapshot
|
||||
for _, n := range tc.nodes {
|
||||
nodeInfo, err := nodeInfoLister.Get(n.Name)
|
||||
nodeInfo, err := snapshot.GetNodeInfo(n.Name)
|
||||
assert.NoError(t, err)
|
||||
assert.NotEqual(t, nodeInfo.Pods, nil)
|
||||
for _, podInfo := range nodeInfo.Pods {
|
||||
assert.NotEqual(t, nodeInfo.Pods(), nil)
|
||||
for _, podInfo := range nodeInfo.Pods() {
|
||||
podsInSnapshot = append(podsInSnapshot, podInfo.Pod)
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -25,20 +25,20 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/context"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/metrics"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/scheduling"
|
||||
corev1helpers "k8s.io/component-helpers/scheduling/corev1"
|
||||
klog "k8s.io/klog/v2"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
||||
)
|
||||
|
||||
type filterOutSchedulablePodListProcessor struct {
|
||||
schedulingSimulator *scheduling.HintingSimulator
|
||||
nodeFilter func(*schedulerframework.NodeInfo) bool
|
||||
nodeFilter func(*framework.NodeInfo) bool
|
||||
}
|
||||
|
||||
// NewFilterOutSchedulablePodListProcessor creates a PodListProcessor filtering out schedulable pods
|
||||
func NewFilterOutSchedulablePodListProcessor(predicateChecker predicatechecker.PredicateChecker, nodeFilter func(*schedulerframework.NodeInfo) bool) *filterOutSchedulablePodListProcessor {
|
||||
func NewFilterOutSchedulablePodListProcessor(predicateChecker predicatechecker.PredicateChecker, nodeFilter func(*framework.NodeInfo) bool) *filterOutSchedulablePodListProcessor {
|
||||
return &filterOutSchedulablePodListProcessor{
|
||||
schedulingSimulator: scheduling.NewHintingSimulator(predicateChecker),
|
||||
nodeFilter: nodeFilter,
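The nodeFilter callbacks threaded through the pod list processors keep the same shape; only the parameter type changes to the wrapper. An illustrative predicate under that signature; the annotation key is invented for the example, and Node() is the accessor to the underlying *apiv1.Node used elsewhere in this diff.

package example

import "k8s.io/autoscaler/cluster-autoscaler/simulator/framework"

// skipExcludedNodes is an example func(*framework.NodeInfo) bool filter: label
// and annotation checks still run against the plain Kubernetes Node object.
func skipExcludedNodes(nodeInfo *framework.NodeInfo) bool {
	_, excluded := nodeInfo.Node().Annotations["example.com/exclude-from-simulation"]
	return !excluded
}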
@ -24,10 +24,10 @@ import (
|
|||
"github.com/stretchr/testify/assert"
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/scheduling"
|
||||
. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
||||
schedulermetrics "k8s.io/kubernetes/pkg/scheduler/metrics"
|
||||
)
|
||||
|
||||
|
|
@ -35,15 +35,15 @@ func TestFilterOutSchedulable(t *testing.T) {
|
|||
schedulermetrics.Register()
|
||||
|
||||
node := buildReadyTestNode("node", 2000, 100)
|
||||
matchesAllNodes := func(*schedulerframework.NodeInfo) bool { return true }
|
||||
matchesNoNodes := func(*schedulerframework.NodeInfo) bool { return false }
|
||||
matchesAllNodes := func(*framework.NodeInfo) bool { return true }
|
||||
matchesNoNodes := func(*framework.NodeInfo) bool { return false }
|
||||
|
||||
testCases := map[string]struct {
|
||||
nodesWithPods map[*apiv1.Node][]*apiv1.Pod
|
||||
unschedulableCandidates []*apiv1.Pod
|
||||
expectedScheduledPods []*apiv1.Pod
|
||||
expectedUnscheduledPods []*apiv1.Pod
|
||||
nodeFilter func(*schedulerframework.NodeInfo) bool
|
||||
nodeFilter func(*framework.NodeInfo) bool
|
||||
}{
|
||||
"single empty node, no pods": {
|
||||
nodesWithPods: map[*apiv1.Node][]*apiv1.Pod{node: {}},
|
||||
|
|
@ -203,11 +203,11 @@ func TestFilterOutSchedulable(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
assert.ElementsMatch(t, unschedulablePods, tc.expectedUnscheduledPods, "unschedulable pods differ")
|
||||
|
||||
nodeInfos, err := clusterSnapshot.NodeInfos().List()
|
||||
nodeInfos, err := clusterSnapshot.ListNodeInfos()
|
||||
assert.NoError(t, err)
|
||||
var scheduledPods []*apiv1.Pod
|
||||
for _, nodeInfo := range nodeInfos {
|
||||
for _, podInfo := range nodeInfo.Pods {
|
||||
for _, podInfo := range nodeInfo.Pods() {
|
||||
scheduledPods = append(scheduledPods, podInfo.Pod)
|
||||
}
|
||||
}
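Listing the whole snapshot changes the same way: NodeInfos().List() becomes ListNodeInfos(), and pods are read through the Pods() method. A small sketch using only those calls; allScheduledPods is a hypothetical helper.

package example

import (
	apiv1 "k8s.io/api/core/v1"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
)

// allScheduledPods flattens every pod currently placed on any node in the snapshot.
func allScheduledPods(snapshot clustersnapshot.ClusterSnapshot) ([]*apiv1.Pod, error) {
	nodeInfos, err := snapshot.ListNodeInfos() // previously snapshot.NodeInfos().List()
	if err != nil {
		return nil, err
	}
	var scheduled []*apiv1.Pod
	for _, nodeInfo := range nodeInfos {
		for _, podInfo := range nodeInfo.Pods() {
			scheduled = append(scheduled, podInfo.Pod)
		}
	}
	return scheduled, nil
}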
@ -18,13 +18,13 @@ package podlistprocessor
|
|||
|
||||
import (
|
||||
"k8s.io/autoscaler/cluster-autoscaler/processors/pods"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
||||
)
|
||||
|
||||
// NewDefaultPodListProcessor returns a default implementation of the pod list
|
||||
// processor, which wraps and sequentially runs other sub-processors.
|
||||
func NewDefaultPodListProcessor(predicateChecker predicatechecker.PredicateChecker, nodeFilter func(*schedulerframework.NodeInfo) bool) *pods.CombinedPodListProcessor {
|
||||
func NewDefaultPodListProcessor(predicateChecker predicatechecker.PredicateChecker, nodeFilter func(*framework.NodeInfo) bool) *pods.CombinedPodListProcessor {
|
||||
return pods.NewCombinedPodListProcessor([]pods.PodListProcessor{
|
||||
NewClearTPURequestsPodListProcessor(),
|
||||
NewFilterOutExpendablePodListProcessor(),
|
||||
|
|
|
|||
|
|
@ -285,7 +285,7 @@ func (a *Actuator) deleteNodesAsync(nodes []*apiv1.Node, nodeGroup cloudprovider
|
|||
}
|
||||
|
||||
for _, node := range nodes {
|
||||
nodeInfo, err := clusterSnapshot.NodeInfos().Get(node.Name)
|
||||
nodeInfo, err := clusterSnapshot.GetNodeInfo(node.Name)
|
||||
if err != nil {
|
||||
klog.Errorf("Scale-down: can't retrieve node %q from snapshot, err: %v", node.Name, err)
|
||||
nodeDeleteResult := status.NodeDeleteResult{ResultType: status.NodeDeleteErrorInternal, Err: errors.NewAutoscalerError(errors.InternalError, "nodeInfos.Get for %q returned error: %v", node.Name, err)}
|
||||
|
|
@ -317,7 +317,7 @@ func (a *Actuator) scaleDownNodeToReport(node *apiv1.Node, drain bool) (*status.
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nodeInfo, err := a.ctx.ClusterSnapshot.NodeInfos().Get(node.Name)
|
||||
nodeInfo, err := a.ctx.ClusterSnapshot.GetNodeInfo(node.Name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
@ -27,6 +27,7 @@ import (
|
|||
kube_errors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/metrics"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||
"k8s.io/klog/v2"
|
||||
kubelet_config "k8s.io/kubernetes/pkg/kubelet/apis/config"
|
||||
|
||||
|
|
@ -35,7 +36,6 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/utils/daemonset"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||
pod_util "k8s.io/autoscaler/cluster-autoscaler/utils/pod"
|
||||
"k8s.io/kubernetes/pkg/scheduler/framework"
|
||||
)
|
||||
|
||||
const (
|
||||
|
|
@ -251,7 +251,7 @@ func (e Evictor) evictPod(ctx *acontext.AutoscalingContext, podToEvict *apiv1.Po
|
|||
}
|
||||
|
||||
func podsToEvict(nodeInfo *framework.NodeInfo, evictDsByDefault bool) (dsPods, nonDsPods []*apiv1.Pod) {
|
||||
for _, podInfo := range nodeInfo.Pods {
|
||||
for _, podInfo := range nodeInfo.Pods() {
|
||||
if pod_util.IsMirrorPod(podInfo.Pod) {
|
||||
continue
|
||||
} else if pod_util.IsDaemonSetPod(podInfo.Pod) {
|
||||
|
|
|
|||
|
|
@ -146,7 +146,7 @@ func TestDaemonSetEvictionForEmptyNodes(t *testing.T) {
|
|||
EvictionRetryTime: waitBetweenRetries,
|
||||
shutdownGracePeriodByPodPriority: drainConfig,
|
||||
}
|
||||
nodeInfo, err := context.ClusterSnapshot.NodeInfos().Get(n1.Name)
|
||||
nodeInfo, err := context.ClusterSnapshot.GetNodeInfo(n1.Name)
|
||||
assert.NoError(t, err)
|
||||
_, err = evictor.EvictDaemonSetPods(&context, nodeInfo)
|
||||
if scenario.err != nil {
|
||||
|
|
@ -213,7 +213,7 @@ func TestDrainNodeWithPods(t *testing.T) {
|
|||
shutdownGracePeriodByPodPriority: legacyFlagDrainConfig,
|
||||
}
|
||||
clustersnapshot.InitializeClusterSnapshotOrDie(t, ctx.ClusterSnapshot, []*apiv1.Node{n1}, []*apiv1.Pod{p1, p2, d1})
|
||||
nodeInfo, err := ctx.ClusterSnapshot.NodeInfos().Get(n1.Name)
|
||||
nodeInfo, err := ctx.ClusterSnapshot.GetNodeInfo(n1.Name)
|
||||
assert.NoError(t, err)
|
||||
_, err = evictor.DrainNode(&ctx, nodeInfo)
|
||||
assert.NoError(t, err)
|
||||
|
|
@ -277,7 +277,7 @@ func TestDrainNodeWithPodsWithRescheduled(t *testing.T) {
|
|||
shutdownGracePeriodByPodPriority: legacyFlagDrainConfig,
|
||||
}
|
||||
clustersnapshot.InitializeClusterSnapshotOrDie(t, ctx.ClusterSnapshot, []*apiv1.Node{n1}, []*apiv1.Pod{p1, p2})
|
||||
nodeInfo, err := ctx.ClusterSnapshot.NodeInfos().Get(n1.Name)
|
||||
nodeInfo, err := ctx.ClusterSnapshot.GetNodeInfo(n1.Name)
|
||||
assert.NoError(t, err)
|
||||
_, err = evictor.DrainNode(&ctx, nodeInfo)
|
||||
assert.NoError(t, err)
|
||||
|
|
@ -346,7 +346,7 @@ func TestDrainNodeWithPodsWithRetries(t *testing.T) {
|
|||
shutdownGracePeriodByPodPriority: legacyFlagDrainConfig,
|
||||
}
|
||||
clustersnapshot.InitializeClusterSnapshotOrDie(t, ctx.ClusterSnapshot, []*apiv1.Node{n1}, []*apiv1.Pod{p1, p2, p3, d1})
|
||||
nodeInfo, err := ctx.ClusterSnapshot.NodeInfos().Get(n1.Name)
|
||||
nodeInfo, err := ctx.ClusterSnapshot.GetNodeInfo(n1.Name)
|
||||
assert.NoError(t, err)
|
||||
_, err = evictor.DrainNode(&ctx, nodeInfo)
|
||||
assert.NoError(t, err)
|
||||
|
|
@ -409,7 +409,7 @@ func TestDrainNodeWithPodsDaemonSetEvictionFailure(t *testing.T) {
|
|||
shutdownGracePeriodByPodPriority: legacyFlagDrainConfig,
|
||||
}
|
||||
clustersnapshot.InitializeClusterSnapshotOrDie(t, ctx.ClusterSnapshot, []*apiv1.Node{n1}, []*apiv1.Pod{p1, p2, d1, d2})
|
||||
nodeInfo, err := ctx.ClusterSnapshot.NodeInfos().Get(n1.Name)
|
||||
nodeInfo, err := ctx.ClusterSnapshot.GetNodeInfo(n1.Name)
|
||||
assert.NoError(t, err)
|
||||
evictionResults, err := evictor.DrainNode(&ctx, nodeInfo)
|
||||
assert.NoError(t, err)
|
||||
|
|
@ -470,7 +470,7 @@ func TestDrainNodeWithPodsEvictionFailure(t *testing.T) {
|
|||
shutdownGracePeriodByPodPriority: legacyFlagDrainConfig,
|
||||
}
|
||||
clustersnapshot.InitializeClusterSnapshotOrDie(t, ctx.ClusterSnapshot, []*apiv1.Node{n1}, []*apiv1.Pod{p1, p2, p3, p4})
|
||||
nodeInfo, err := ctx.ClusterSnapshot.NodeInfos().Get(n1.Name)
|
||||
nodeInfo, err := ctx.ClusterSnapshot.GetNodeInfo(n1.Name)
|
||||
assert.NoError(t, err)
|
||||
evictionResults, err := evictor.DrainNode(&ctx, nodeInfo)
|
||||
assert.Error(t, err)
|
||||
|
|
@ -536,7 +536,7 @@ func TestDrainWithPodsNodeDisappearanceFailure(t *testing.T) {
|
|||
shutdownGracePeriodByPodPriority: legacyFlagDrainConfig,
|
||||
}
|
||||
clustersnapshot.InitializeClusterSnapshotOrDie(t, ctx.ClusterSnapshot, []*apiv1.Node{n1}, []*apiv1.Pod{p1, p2, p3, p4})
|
||||
nodeInfo, err := ctx.ClusterSnapshot.NodeInfos().Get(n1.Name)
|
||||
nodeInfo, err := ctx.ClusterSnapshot.GetNodeInfo(n1.Name)
|
||||
assert.NoError(t, err)
|
||||
evictionResults, err := evictor.DrainNode(&ctx, nodeInfo)
|
||||
assert.Error(t, err)
|
||||
|
|
@ -626,9 +626,9 @@ func TestPodsToEvict(t *testing.T) {
|
|||
if tc.nodeNameOverwrite != "" {
|
||||
nodeName = tc.nodeNameOverwrite
|
||||
}
|
||||
nodeInfo, err := snapshot.NodeInfos().Get(nodeName)
|
||||
nodeInfo, err := snapshot.GetNodeInfo(nodeName)
|
||||
if err != nil {
|
||||
t.Fatalf("NodeInfos().Get() unexpected error: %v", err)
|
||||
t.Fatalf("GetNodeInfo() unexpected error: %v", err)
|
||||
}
|
||||
gotDsPods, gotNonDsPods := podsToEvict(nodeInfo, ctx.DaemonSetEvictionForOccupiedNodes)
|
||||
if diff := cmp.Diff(tc.wantDsPods, gotDsPods, cmpopts.EquateEmpty()); diff != "" {
@ -20,8 +20,8 @@ import (
|
|||
"sync"
|
||||
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||
"k8s.io/klog/v2"
|
||||
"k8s.io/kubernetes/pkg/scheduler/framework"
|
||||
|
||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||
|
|
|
|||
|
|
@ -33,10 +33,9 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/core/scaledown/deletiontracker"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/core/scaledown/status"
|
||||
. "k8s.io/autoscaler/cluster-autoscaler/core/test"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||
kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
"k8s.io/kubernetes/pkg/scheduler/framework"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
||||
)
|
||||
|
||||
type testIteration struct {
|
||||
|
|
@ -215,18 +214,12 @@ func scheduleAll(toSchedule []*budgets.NodeGroupView, scheduler *GroupDeletionSc
|
|||
return fmt.Errorf("failed to get target size for node group %q: %s", bucket.Group.Id(), err)
|
||||
}
|
||||
for _, node := range bucket.Nodes {
|
||||
scheduler.ScheduleDeletion(infoForNode(node), bucket.Group, bucketSize, false)
|
||||
scheduler.ScheduleDeletion(framework.NewTestNodeInfo(node), bucket.Group, bucketSize, false)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func infoForNode(n *apiv1.Node) *framework.NodeInfo {
|
||||
info := schedulerframework.NewNodeInfo()
|
||||
info.SetNode(n)
|
||||
return info
|
||||
}
|
||||
|
||||
func mergeLists(lists ...[]*budgets.NodeGroupView) []*budgets.NodeGroupView {
|
||||
merged := []*budgets.NodeGroupView{}
|
||||
for _, l := range lists {
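Ad-hoc helpers like the removed infoForNode above are replaced by the framework.NewTestNodeInfo constructor throughout the tests touched by this commit. A sketch of the equivalent setup code, assuming only that constructor; testNodeInfos is a hypothetical helper.

package example

import (
	apiv1 "k8s.io/api/core/v1"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

// testNodeInfos wraps plain nodes for test fixtures.
// Old per-node setup, for comparison:
//   info := schedulerframework.NewNodeInfo()
//   info.SetNode(n)
func testNodeInfos(nodes []*apiv1.Node) []*framework.NodeInfo {
	infos := make([]*framework.NodeInfo, 0, len(nodes))
	for _, n := range nodes {
		infos = append(infos, framework.NewTestNodeInfo(n))
	}
	return infos
}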
@ -96,7 +96,7 @@ func TestPriorityEvictor(t *testing.T) {
|
|||
fullDsEviction: true,
|
||||
}
|
||||
clustersnapshot.InitializeClusterSnapshotOrDie(t, ctx.ClusterSnapshot, []*apiv1.Node{n1}, []*apiv1.Pod{p1, p2, p3})
|
||||
nodeInfo, err := ctx.ClusterSnapshot.NodeInfos().Get(n1.Name)
|
||||
nodeInfo, err := ctx.ClusterSnapshot.GetNodeInfo(n1.Name)
|
||||
assert.NoError(t, err)
|
||||
_, err = evictor.DrainNode(&ctx, nodeInfo)
|
||||
assert.NoError(t, err)
|
||||
|
|
|
|||
|
|
@ -25,13 +25,13 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/core/scaledown/actuation"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/core/scaledown/unremovable"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/utilization"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/klogx"
|
||||
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
|
||||
klog "k8s.io/klog/v2"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
||||
)
|
||||
|
||||
const (
|
||||
|
|
@ -73,7 +73,7 @@ func (c *Checker) FilterOutUnremovable(context *context.AutoscalingContext, scal
|
|||
utilLogsQuota := klogx.NewLoggingQuota(20)
|
||||
|
||||
for _, node := range scaleDownCandidates {
|
||||
nodeInfo, err := context.ClusterSnapshot.NodeInfos().Get(node.Name)
|
||||
nodeInfo, err := context.ClusterSnapshot.GetNodeInfo(node.Name)
|
||||
if err != nil {
|
||||
klog.Errorf("Can't retrieve scale-down candidate %s from snapshot, err: %v", node.Name, err)
|
||||
ineligible = append(ineligible, &simulator.UnremovableNode{Node: node, Reason: simulator.UnexpectedError})
|
||||
|
|
@ -106,7 +106,7 @@ func (c *Checker) FilterOutUnremovable(context *context.AutoscalingContext, scal
|
|||
return currentlyUnneededNodeNames, utilizationMap, ineligible
|
||||
}
|
||||
|
||||
func (c *Checker) unremovableReasonAndNodeUtilization(context *context.AutoscalingContext, timestamp time.Time, nodeInfo *schedulerframework.NodeInfo, utilLogsQuota *klogx.Quota) (simulator.UnremovableReason, *utilization.Info) {
|
||||
func (c *Checker) unremovableReasonAndNodeUtilization(context *context.AutoscalingContext, timestamp time.Time, nodeInfo *framework.NodeInfo, utilLogsQuota *klogx.Quota) (simulator.UnremovableReason, *utilization.Info) {
|
||||
node := nodeInfo.Node()
|
||||
|
||||
if actuation.IsNodeBeingDeleted(node, timestamp) {
|
||||
|
|
|
|||
|
|
@ -176,7 +176,7 @@ func (p *Planner) addUnremovableNodes(unremovableNodes []simulator.UnremovableNo
|
|||
}
|
||||
|
||||
func allNodes(s clustersnapshot.ClusterSnapshot) ([]*apiv1.Node, error) {
|
||||
nodeInfos, err := s.NodeInfos().List()
|
||||
nodeInfos, err := s.ListNodeInfos()
|
||||
if err != nil {
|
||||
// This should never happen, List() returns err only because scheduler interface requires it.
|
||||
return nil, err
|
||||
|
|
@ -264,7 +264,7 @@ func (p *Planner) categorizeNodes(podDestinations map[string]bool, scaleDownCand
|
|||
unremovableCount := 0
|
||||
var removableList []simulator.NodeToBeRemoved
|
||||
atomicScaleDownNodesCount := 0
|
||||
p.unremovableNodes.Update(p.context.ClusterSnapshot.NodeInfos(), p.latestUpdate)
|
||||
p.unremovableNodes.Update(p.context.ClusterSnapshot, p.latestUpdate)
|
||||
currentlyUnneededNodeNames, utilizationMap, ineligible := p.eligibilityChecker.FilterOutUnremovable(p.context, scaleDownCandidates, p.latestUpdate, p.unremovableNodes)
|
||||
for _, n := range ineligible {
|
||||
p.unremovableNodes.Add(n)
|
||||
|
|
|
|||
|
|
@ -20,10 +20,10 @@ import (
|
|||
"time"
|
||||
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
klog "k8s.io/klog/v2"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
||||
)
|
||||
|
||||
// Nodes tracks the state of cluster nodes that cannot be removed.
|
||||
|
|
@ -40,21 +40,21 @@ func NewNodes() *Nodes {
|
|||
}
|
||||
}
|
||||
|
||||
// NodeInfoGetter is anything that can return NodeInfo object by name.
|
||||
type NodeInfoGetter interface {
|
||||
Get(name string) (*schedulerframework.NodeInfo, error)
|
||||
// nodeInfoGetter is anything that can return NodeInfo object by name.
|
||||
type nodeInfoGetter interface {
|
||||
GetNodeInfo(name string) (*framework.NodeInfo, error)
|
||||
}
|
||||
|
||||
// Update updates the internal structure according to current state of the
|
||||
// cluster. Removes the nodes that are no longer in the nodes list.
|
||||
func (n *Nodes) Update(nodeInfos NodeInfoGetter, timestamp time.Time) {
|
||||
func (n *Nodes) Update(nodeInfos nodeInfoGetter, timestamp time.Time) {
|
||||
n.reasons = make(map[string]*simulator.UnremovableNode)
|
||||
if len(n.ttls) <= 0 {
|
||||
return
|
||||
}
|
||||
newTTLs := make(map[string]time.Time, len(n.ttls))
|
||||
for name, ttl := range n.ttls {
|
||||
if _, err := nodeInfos.Get(name); err != nil {
|
||||
if _, err := nodeInfos.GetNodeInfo(name); err != nil {
|
||||
// Not logging on error level as most likely cause is that node is no longer in the cluster.
|
||||
klog.Infof("Can't retrieve node %s from snapshot, removing from unremovable nodes, err: %v", name, err)
|
||||
continue
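Narrowing the exported NodeInfoGetter to a package-private nodeInfoGetter with a GetNodeInfo method lets the ClusterSnapshot itself be passed to Update, as the planner change earlier in this diff does. A minimal illustrative implementation of that interface, in the spirit of the fake used by this commit's tests.

package example

import (
	"fmt"

	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

// mapGetter serves NodeInfos from a plain map; anything with this method,
// including the cluster snapshot, satisfies the unremovable package's interface.
type mapGetter struct {
	infos map[string]*framework.NodeInfo
}

func (m *mapGetter) GetNodeInfo(name string) (*framework.NodeInfo, error) {
	info, ok := m.infos[name]
	if !ok {
		return nil, fmt.Errorf("node %q not found", name)
	}
	return info, nil
}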
@ -22,11 +22,11 @@ import (
|
|||
"time"
|
||||
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
||||
)
|
||||
|
||||
var (
|
||||
|
|
@ -107,7 +107,7 @@ type fakeNodeInfoGetter struct {
|
|||
names map[string]bool
|
||||
}
|
||||
|
||||
func (f *fakeNodeInfoGetter) Get(name string) (*schedulerframework.NodeInfo, error) {
|
||||
func (f *fakeNodeInfoGetter) GetNodeInfo(name string) (*framework.NodeInfo, error) {
|
||||
// We don't actually care about the node info object itself, just its presence.
|
||||
_, found := f.names[name]
|
||||
if found {
@ -22,7 +22,6 @@ import (
|
|||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
"k8s.io/klog/v2"
|
||||
"k8s.io/kubernetes/pkg/scheduler/framework"
|
||||
|
||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/context"
|
||||
|
|
@ -30,6 +29,7 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/processors/nodegroups"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/processors/nodegroupset"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/processors/status"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/taints"
|
||||
)
|
||||
|
|
|
|||
|
|
@ -24,8 +24,8 @@ import (
|
|||
"time"
|
||||
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||
"k8s.io/klog/v2"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
||||
|
||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/context"
|
||||
|
|
@ -63,7 +63,7 @@ func newScaleUpExecutor(
|
|||
// If there were multiple concurrent errors one combined error is returned.
|
||||
func (e *scaleUpExecutor) ExecuteScaleUps(
|
||||
scaleUpInfos []nodegroupset.ScaleUpInfo,
|
||||
nodeInfos map[string]*schedulerframework.NodeInfo,
|
||||
nodeInfos map[string]*framework.NodeInfo,
|
||||
now time.Time,
|
||||
atomic bool,
|
||||
) (errors.AutoscalerError, []cloudprovider.NodeGroup) {
|
||||
|
|
@ -76,7 +76,7 @@ func (e *scaleUpExecutor) ExecuteScaleUps(
|
|||
|
||||
func (e *scaleUpExecutor) executeScaleUpsSync(
|
||||
scaleUpInfos []nodegroupset.ScaleUpInfo,
|
||||
nodeInfos map[string]*schedulerframework.NodeInfo,
|
||||
nodeInfos map[string]*framework.NodeInfo,
|
||||
now time.Time,
|
||||
atomic bool,
|
||||
) (errors.AutoscalerError, []cloudprovider.NodeGroup) {
|
||||
|
|
@ -96,7 +96,7 @@ func (e *scaleUpExecutor) executeScaleUpsSync(
|
|||
|
||||
func (e *scaleUpExecutor) executeScaleUpsParallel(
|
||||
scaleUpInfos []nodegroupset.ScaleUpInfo,
|
||||
nodeInfos map[string]*schedulerframework.NodeInfo,
|
||||
nodeInfos map[string]*framework.NodeInfo,
|
||||
now time.Time,
|
||||
atomic bool,
|
||||
) (errors.AutoscalerError, []cloudprovider.NodeGroup) {
|
||||
|
|
@ -156,7 +156,7 @@ func (e *scaleUpExecutor) increaseSize(nodeGroup cloudprovider.NodeGroup, increa
|
|||
|
||||
func (e *scaleUpExecutor) executeScaleUp(
|
||||
info nodegroupset.ScaleUpInfo,
|
||||
nodeInfo *schedulerframework.NodeInfo,
|
||||
nodeInfo *framework.NodeInfo,
|
||||
availableGPUTypes map[string]struct{},
|
||||
now time.Time,
|
||||
atomic bool,
|
||||
|
|
|
|||
|
|
@ -22,25 +22,24 @@ import (
|
|||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/estimator"
|
||||
"k8s.io/klog/v2"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
||||
|
||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/clusterstate"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/context"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/core/scaleup/equivalence"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/core/scaleup/resource"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/core/utils"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/estimator"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/expander"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/metrics"
|
||||
ca_processors "k8s.io/autoscaler/cluster-autoscaler/processors"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/processors/nodegroups"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/processors/nodegroupset"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/processors/status"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/klogx"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/taints"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
// ScaleUpOrchestrator implements scaleup.Orchestrator interface.
|
||||
|
|
@ -87,7 +86,7 @@ func (o *ScaleUpOrchestrator) ScaleUp(
|
|||
unschedulablePods []*apiv1.Pod,
|
||||
nodes []*apiv1.Node,
|
||||
daemonSets []*appsv1.DaemonSet,
|
||||
nodeInfos map[string]*schedulerframework.NodeInfo,
|
||||
nodeInfos map[string]*framework.NodeInfo,
|
||||
allOrNothing bool, // Either request enough capacity for all unschedulablePods, or don't request it at all.
|
||||
) (*status.ScaleUpStatus, errors.AutoscalerError) {
|
||||
if !o.initialized {
|
||||
|
|
@ -277,7 +276,7 @@ func (o *ScaleUpOrchestrator) ScaleUp(
|
|||
}, nil
|
||||
}
|
||||
|
||||
func (o *ScaleUpOrchestrator) applyLimits(newNodes int, resourcesLeft resource.Limits, nodeGroup cloudprovider.NodeGroup, nodeInfos map[string]*schedulerframework.NodeInfo) (int, errors.AutoscalerError) {
|
||||
func (o *ScaleUpOrchestrator) applyLimits(newNodes int, resourcesLeft resource.Limits, nodeGroup cloudprovider.NodeGroup, nodeInfos map[string]*framework.NodeInfo) (int, errors.AutoscalerError) {
|
||||
nodeInfo, found := nodeInfos[nodeGroup.Id()]
|
||||
if !found {
|
||||
// This should never happen, as we already should have retrieved nodeInfo for any considered nodegroup.
|
||||
|
|
@ -293,7 +292,7 @@ func (o *ScaleUpOrchestrator) applyLimits(newNodes int, resourcesLeft resource.L
|
|||
// appropriate status or error if an unexpected error occurred.
|
||||
func (o *ScaleUpOrchestrator) ScaleUpToNodeGroupMinSize(
|
||||
nodes []*apiv1.Node,
|
||||
nodeInfos map[string]*schedulerframework.NodeInfo,
|
||||
nodeInfos map[string]*framework.NodeInfo,
|
||||
) (*status.ScaleUpStatus, errors.AutoscalerError) {
|
||||
if !o.initialized {
|
||||
return status.UpdateScaleUpError(&status.ScaleUpStatus{}, errors.NewAutoscalerError(errors.InternalError, "ScaleUpOrchestrator is not initialized"))
|
||||
|
|
@ -390,7 +389,7 @@ func (o *ScaleUpOrchestrator) ScaleUpToNodeGroupMinSize(
|
|||
// filterValidScaleUpNodeGroups filters the node groups that are valid for scale-up
|
||||
func (o *ScaleUpOrchestrator) filterValidScaleUpNodeGroups(
|
||||
nodeGroups []cloudprovider.NodeGroup,
|
||||
nodeInfos map[string]*schedulerframework.NodeInfo,
|
||||
nodeInfos map[string]*framework.NodeInfo,
|
||||
resourcesLeft resource.Limits,
|
||||
currentNodeCount int,
|
||||
now time.Time,
|
||||
|
|
@ -449,7 +448,7 @@ func (o *ScaleUpOrchestrator) filterValidScaleUpNodeGroups(
|
|||
func (o *ScaleUpOrchestrator) ComputeExpansionOption(
|
||||
nodeGroup cloudprovider.NodeGroup,
|
||||
schedulablePodGroups map[string][]estimator.PodEquivalenceGroup,
|
||||
nodeInfos map[string]*schedulerframework.NodeInfo,
|
||||
nodeInfos map[string]*framework.NodeInfo,
|
||||
currentNodeCount int,
|
||||
now time.Time,
|
||||
allOrNothing bool,
|
||||
|
|
@ -499,7 +498,7 @@ func (o *ScaleUpOrchestrator) ComputeExpansionOption(
|
|||
// CreateNodeGroup will try to create a new node group based on the initialOption.
|
||||
func (o *ScaleUpOrchestrator) CreateNodeGroup(
|
||||
initialOption *expander.Option,
|
||||
nodeInfos map[string]*schedulerframework.NodeInfo,
|
||||
nodeInfos map[string]*framework.NodeInfo,
|
||||
schedulablePodGroups map[string][]estimator.PodEquivalenceGroup,
|
||||
podEquivalenceGroups []*equivalence.PodGroup,
|
||||
daemonSets []*appsv1.DaemonSet,
|
||||
|
|
@ -564,14 +563,14 @@ func (o *ScaleUpOrchestrator) CreateNodeGroup(
|
|||
func (o *ScaleUpOrchestrator) SchedulablePodGroups(
|
||||
podEquivalenceGroups []*equivalence.PodGroup,
|
||||
nodeGroup cloudprovider.NodeGroup,
|
||||
nodeInfo *schedulerframework.NodeInfo,
|
||||
nodeInfo *framework.NodeInfo,
|
||||
) []estimator.PodEquivalenceGroup {
|
||||
o.autoscalingContext.ClusterSnapshot.Fork()
|
||||
defer o.autoscalingContext.ClusterSnapshot.Revert()
|
||||
|
||||
// Add test node to snapshot.
|
||||
var allPods []*apiv1.Pod
|
||||
for _, podInfo := range nodeInfo.Pods {
|
||||
for _, podInfo := range nodeInfo.Pods() {
|
||||
allPods = append(allPods, podInfo.Pod)
|
||||
}
|
||||
if err := o.autoscalingContext.ClusterSnapshot.AddNodeWithPods(nodeInfo.Node(), allPods); err != nil {
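The orchestrator still hands the snapshot a bare Node plus its pods; only the unpacking of the wrapper changes, since Pods() is now a method. A hedged helper showing that step in isolation; addTemplateToSnapshot is hypothetical.

package example

import (
	apiv1 "k8s.io/api/core/v1"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

// addTemplateToSnapshot unpacks a template NodeInfo and injects it into the snapshot.
func addTemplateToSnapshot(snapshot clustersnapshot.ClusterSnapshot, nodeInfo *framework.NodeInfo) error {
	var pods []*apiv1.Pod
	for _, podInfo := range nodeInfo.Pods() {
		pods = append(pods, podInfo.Pod)
	}
	return snapshot.AddNodeWithPods(nodeInfo.Node(), pods)
}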
@ -603,9 +602,9 @@ func (o *ScaleUpOrchestrator) SchedulablePodGroups(
|
|||
}
|
||||
|
||||
// UpcomingNodes returns a list of nodes that are not ready but should be.
|
||||
func (o *ScaleUpOrchestrator) UpcomingNodes(nodeInfos map[string]*schedulerframework.NodeInfo) ([]*schedulerframework.NodeInfo, errors.AutoscalerError) {
|
||||
func (o *ScaleUpOrchestrator) UpcomingNodes(nodeInfos map[string]*framework.NodeInfo) ([]*framework.NodeInfo, errors.AutoscalerError) {
|
||||
upcomingCounts, _ := o.clusterStateRegistry.GetUpcomingNodes()
|
||||
upcomingNodes := make([]*schedulerframework.NodeInfo, 0)
|
||||
upcomingNodes := make([]*framework.NodeInfo, 0)
|
||||
for nodeGroup, numberOfNodes := range upcomingCounts {
|
||||
nodeTemplate, found := nodeInfos[nodeGroup]
|
||||
if !found {
|
||||
|
|
@ -636,7 +635,7 @@ func (o *ScaleUpOrchestrator) IsNodeGroupReadyToScaleUp(nodeGroup cloudprovider.
|
|||
}
|
||||
|
||||
// IsNodeGroupResourceExceeded returns nil if node group resource limits are not exceeded, otherwise a reason is provided.
|
||||
func (o *ScaleUpOrchestrator) IsNodeGroupResourceExceeded(resourcesLeft resource.Limits, nodeGroup cloudprovider.NodeGroup, nodeInfo *schedulerframework.NodeInfo, numNodes int) status.Reasons {
|
||||
func (o *ScaleUpOrchestrator) IsNodeGroupResourceExceeded(resourcesLeft resource.Limits, nodeGroup cloudprovider.NodeGroup, nodeInfo *framework.NodeInfo, numNodes int) status.Reasons {
|
||||
resourcesDelta, err := o.resourceManager.DeltaForNode(o.autoscalingContext, nodeInfo, nodeGroup)
|
||||
if err != nil {
|
||||
klog.Errorf("Skipping node group %s; error getting node group resources: %v", nodeGroup.Id(), err)
|
||||
|
|
@ -682,7 +681,7 @@ func (o *ScaleUpOrchestrator) balanceScaleUps(
|
|||
now time.Time,
|
||||
nodeGroup cloudprovider.NodeGroup,
|
||||
newNodes int,
|
||||
nodeInfos map[string]*schedulerframework.NodeInfo,
|
||||
nodeInfos map[string]*framework.NodeInfo,
|
||||
schedulablePodGroups map[string][]estimator.PodEquivalenceGroup,
|
||||
) ([]nodegroupset.ScaleUpInfo, errors.AutoscalerError) {
|
||||
// Recompute similar node groups in case they need to be updated
|
||||
|
|
@ -718,7 +717,7 @@ func (o *ScaleUpOrchestrator) balanceScaleUps(
|
|||
// set of pods as the main node group.
|
||||
func (o *ScaleUpOrchestrator) ComputeSimilarNodeGroups(
|
||||
nodeGroup cloudprovider.NodeGroup,
|
||||
nodeInfos map[string]*schedulerframework.NodeInfo,
|
||||
nodeInfos map[string]*framework.NodeInfo,
|
||||
schedulablePodGroups map[string][]estimator.PodEquivalenceGroup,
|
||||
now time.Time,
|
||||
) []cloudprovider.NodeGroup {
|
||||
|
|
|
|||
|
|
@ -28,6 +28,7 @@ import (
|
|||
|
||||
"k8s.io/autoscaler/cluster-autoscaler/processors/nodegroupconfig"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/processors/nodegroups/asyncnodegroups"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||
kube_record "k8s.io/client-go/tools/record"
|
||||
"k8s.io/component-base/metrics/legacyregistry"
|
||||
schedulermetrics "k8s.io/kubernetes/pkg/scheduler/metrics"
|
||||
|
|
@ -57,7 +58,6 @@ import (
|
|||
apiv1 "k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
|
@ -146,8 +146,7 @@ func TestZeroOrMaxNodeScaling(t *testing.T) {
|
|||
|
||||
n := BuildTestNode("n", 1000, 1000)
|
||||
SetNodeReadyState(n, true, time.Time{})
|
||||
nodeInfo := schedulerframework.NewNodeInfo()
|
||||
nodeInfo.SetNode(n)
|
||||
nodeInfo := framework.NewTestNodeInfo(n)
|
||||
|
||||
cases := map[string]struct {
|
||||
testConfig *ScaleUpTestConfig
|
||||
|
|
@ -835,8 +834,7 @@ func TestNoCreateNodeGroupMaxCoresLimitHit(t *testing.T) {
|
|||
|
||||
largeNode := BuildTestNode("n", 8000, 8000)
|
||||
SetNodeReadyState(largeNode, true, time.Time{})
|
||||
largeNodeInfo := schedulerframework.NewNodeInfo()
|
||||
largeNodeInfo.SetNode(largeNode)
|
||||
largeNodeInfo := framework.NewTestNodeInfo(largeNode)
|
||||
|
||||
config := &ScaleUpTestConfig{
|
||||
EnableAutoprovisioning: true,
|
||||
|
|
@ -1004,7 +1002,7 @@ func runSimpleScaleUpTest(t *testing.T, config *ScaleUpTestConfig) *ScaleUpTestR
|
|||
}
|
||||
if len(config.NodeTemplateConfigs) > 0 {
|
||||
machineTypes := []string{}
|
||||
machineTemplates := map[string]*schedulerframework.NodeInfo{}
|
||||
machineTemplates := map[string]*framework.NodeInfo{}
|
||||
for _, ntc := range config.NodeTemplateConfigs {
|
||||
machineTypes = append(machineTypes, ntc.MachineType)
|
||||
machineTemplates[ntc.NodeGroupName] = ntc.NodeInfo
|
||||
|
|
@ -1285,7 +1283,7 @@ type constNodeGroupSetProcessor struct {
|
|||
similarNodeGroups []cloudprovider.NodeGroup
|
||||
}
|
||||
|
||||
func (p *constNodeGroupSetProcessor) FindSimilarNodeGroups(_ *context.AutoscalingContext, _ cloudprovider.NodeGroup, _ map[string]*schedulerframework.NodeInfo) ([]cloudprovider.NodeGroup, errors.AutoscalerError) {
|
||||
func (p *constNodeGroupSetProcessor) FindSimilarNodeGroups(_ *context.AutoscalingContext, _ cloudprovider.NodeGroup, _ map[string]*framework.NodeInfo) ([]cloudprovider.NodeGroup, errors.AutoscalerError) {
|
||||
return p.similarNodeGroups, nil
|
||||
}
|
||||
|
||||
|
|
@ -1516,8 +1514,7 @@ func TestScaleUpAutoprovisionedNodeGroup(t *testing.T) {
|
|||
|
||||
t1 := BuildTestNode("t1", 4000, 1000000)
|
||||
SetNodeReadyState(t1, true, time.Time{})
|
||||
ti1 := schedulerframework.NewNodeInfo()
|
||||
ti1.SetNode(t1)
|
||||
ti1 := framework.NewTestNodeInfo(t1)
|
||||
|
||||
provider := testprovider.NewTestAutoprovisioningCloudProvider(
|
||||
func(nodeGroup string, increase int) error {
|
||||
|
|
@ -1526,7 +1523,7 @@ func TestScaleUpAutoprovisionedNodeGroup(t *testing.T) {
|
|||
}, nil, func(nodeGroup string) error {
|
||||
createdGroups <- nodeGroup
|
||||
return nil
|
||||
}, nil, []string{"T1"}, map[string]*schedulerframework.NodeInfo{"T1": ti1})
|
||||
}, nil, []string{"T1"}, map[string]*framework.NodeInfo{"T1": ti1})
|
||||
|
||||
options := config.AutoscalingOptions{
|
||||
EstimatorName: estimator.BinpackingEstimatorName,
@ -1570,8 +1567,7 @@ func TestScaleUpBalanceAutoprovisionedNodeGroups(t *testing.T) {
|
|||
|
||||
t1 := BuildTestNode("t1", 100, 1000000)
|
||||
SetNodeReadyState(t1, true, time.Time{})
|
||||
ti1 := schedulerframework.NewNodeInfo()
|
||||
ti1.SetNode(t1)
|
||||
ti1 := framework.NewTestNodeInfo(t1)
|
||||
|
||||
provider := testprovider.NewTestAutoprovisioningCloudProvider(
|
||||
func(nodeGroup string, increase int) error {
|
||||
|
|
@ -1580,7 +1576,7 @@ func TestScaleUpBalanceAutoprovisionedNodeGroups(t *testing.T) {
|
|||
}, nil, func(nodeGroup string) error {
|
||||
createdGroups <- nodeGroup
|
||||
return nil
|
||||
}, nil, []string{"T1"}, map[string]*schedulerframework.NodeInfo{"T1": ti1})
|
||||
}, nil, []string{"T1"}, map[string]*framework.NodeInfo{"T1": ti1})
|
||||
|
||||
options := config.AutoscalingOptions{
|
||||
BalanceSimilarNodeGroups: true,
|
||||
|
|
@ -1672,20 +1668,18 @@ func TestScaleUpToMeetNodeGroupMinSize(t *testing.T) {
|
|||
func TestScaleupAsyncNodeGroupsEnabled(t *testing.T) {
|
||||
t1 := BuildTestNode("t1", 100, 0)
|
||||
SetNodeReadyState(t1, true, time.Time{})
|
||||
ti1 := schedulerframework.NewNodeInfo()
|
||||
ti1.SetNode(t1)
|
||||
ti1 := framework.NewTestNodeInfo(t1)
|
||||
|
||||
t2 := BuildTestNode("t2", 0, 100)
|
||||
SetNodeReadyState(t2, true, time.Time{})
|
||||
ti2 := schedulerframework.NewNodeInfo()
|
||||
ti2.SetNode(t2)
|
||||
ti2 := framework.NewTestNodeInfo(t2)
|
||||
|
||||
testCases := []struct {
|
||||
upcomingNodeGroupsNames []string
|
||||
podsToAdd []*v1.Pod
|
||||
isUpcomingMockMap map[string]bool
|
||||
machineTypes []string
|
||||
machineTemplates map[string]*schedulerframework.NodeInfo
|
||||
machineTemplates map[string]*framework.NodeInfo
|
||||
expectedCreatedGroups map[string]bool
|
||||
expectedExpandedGroups map[string]int
|
||||
}{
|
||||
|
|
@ -1694,7 +1688,7 @@ func TestScaleupAsyncNodeGroupsEnabled(t *testing.T) {
|
|||
podsToAdd: []*v1.Pod{BuildTestPod("p1", 80, 0), BuildTestPod("p2", 80, 0)},
|
||||
isUpcomingMockMap: map[string]bool{"autoprovisioned-T1": true},
|
||||
machineTypes: []string{"T1"},
|
||||
machineTemplates: map[string]*schedulerframework.NodeInfo{"T1": ti1},
|
||||
machineTemplates: map[string]*framework.NodeInfo{"T1": ti1},
|
||||
expectedCreatedGroups: map[string]bool{},
|
||||
expectedExpandedGroups: map[string]int{"autoprovisioned-T1": 2},
|
||||
},
|
||||
|
|
@ -1703,7 +1697,7 @@ func TestScaleupAsyncNodeGroupsEnabled(t *testing.T) {
|
|||
podsToAdd: []*v1.Pod{BuildTestPod("p1", 80, 0)},
|
||||
isUpcomingMockMap: map[string]bool{},
|
||||
machineTypes: []string{"T1"},
|
||||
machineTemplates: map[string]*schedulerframework.NodeInfo{"T1": ti1},
|
||||
machineTemplates: map[string]*framework.NodeInfo{"T1": ti1},
|
||||
expectedCreatedGroups: map[string]bool{"autoprovisioned-T1": true},
|
||||
expectedExpandedGroups: map[string]int{"autoprovisioned-T1": 1},
|
||||
},
|
||||
|
|
@ -1712,7 +1706,7 @@ func TestScaleupAsyncNodeGroupsEnabled(t *testing.T) {
|
|||
podsToAdd: []*v1.Pod{BuildTestPod("p3", 0, 100), BuildTestPod("p2", 0, 100)},
|
||||
isUpcomingMockMap: map[string]bool{"autoprovisioned-T1": true},
|
||||
machineTypes: []string{"T1", "T2"},
|
||||
machineTemplates: map[string]*schedulerframework.NodeInfo{"T1": ti1, "T2": ti2},
|
||||
machineTemplates: map[string]*framework.NodeInfo{"T1": ti1, "T2": ti2},
|
||||
expectedCreatedGroups: map[string]bool{"autoprovisioned-T2": true},
|
||||
expectedExpandedGroups: map[string]int{"autoprovisioned-T2": 2},
|
||||
},
@ -26,9 +26,9 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/context"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/core/utils"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/processors/customresources"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||
"k8s.io/klog/v2"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
||||
)
|
||||
|
||||
// LimitUnknown is used as a value in ResourcesLimits if actual limit could not be obtained due to errors talking to cloud provider.
|
||||
|
|
@ -59,7 +59,7 @@ func NewManager(crp customresources.CustomResourcesProcessor) *Manager {
|
|||
}
|
||||
|
||||
// DeltaForNode calculates the amount of resources that will be used from the cluster when creating a node.
|
||||
func (m *Manager) DeltaForNode(ctx *context.AutoscalingContext, nodeInfo *schedulerframework.NodeInfo, nodeGroup cloudprovider.NodeGroup) (Delta, errors.AutoscalerError) {
|
||||
func (m *Manager) DeltaForNode(ctx *context.AutoscalingContext, nodeInfo *framework.NodeInfo, nodeGroup cloudprovider.NodeGroup) (Delta, errors.AutoscalerError) {
|
||||
resultScaleUpDelta := make(Delta)
|
||||
nodeCPU, nodeMemory := utils.GetNodeCoresAndMemory(nodeInfo.Node())
|
||||
resultScaleUpDelta[cloudprovider.ResourceNameCores] = nodeCPU
|
||||
|
|
@ -85,7 +85,7 @@ func (m *Manager) DeltaForNode(ctx *context.AutoscalingContext, nodeInfo *schedu
|
|||
}
|
||||
|
||||
// ResourcesLeft calculates the amount of resources left in the cluster.
|
||||
func (m *Manager) ResourcesLeft(ctx *context.AutoscalingContext, nodeInfos map[string]*schedulerframework.NodeInfo, nodes []*corev1.Node) (Limits, errors.AutoscalerError) {
|
||||
func (m *Manager) ResourcesLeft(ctx *context.AutoscalingContext, nodeInfos map[string]*framework.NodeInfo, nodes []*corev1.Node) (Limits, errors.AutoscalerError) {
|
||||
nodesFromNotAutoscaledGroups, err := utils.FilterOutNodesFromNotAutoscaledGroups(nodes, ctx.CloudProvider)
|
||||
if err != nil {
|
||||
return nil, err.AddPrefix("failed to filter out nodes which are from not autoscaled groups: ")
|
||||
|
|
@ -143,7 +143,7 @@ func (m *Manager) ResourcesLeft(ctx *context.AutoscalingContext, nodeInfos map[s
|
|||
}
|
||||
|
||||
// ApplyLimits calculates the new node count by applying the left resource limits of the cluster.
|
||||
func (m *Manager) ApplyLimits(ctx *context.AutoscalingContext, newCount int, resourceLeft Limits, nodeInfo *schedulerframework.NodeInfo, nodeGroup cloudprovider.NodeGroup) (int, errors.AutoscalerError) {
|
||||
func (m *Manager) ApplyLimits(ctx *context.AutoscalingContext, newCount int, resourceLeft Limits, nodeInfo *framework.NodeInfo, nodeGroup cloudprovider.NodeGroup) (int, errors.AutoscalerError) {
|
||||
delta, err := m.DeltaForNode(ctx, nodeInfo, nodeGroup)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
|
|
@ -203,7 +203,7 @@ func LimitsNotExceeded() LimitsCheckResult {
|
|||
return LimitsCheckResult{false, []string{}}
|
||||
}
|
||||
|
||||
func (m *Manager) coresMemoryTotal(ctx *context.AutoscalingContext, nodeInfos map[string]*schedulerframework.NodeInfo, nodesFromNotAutoscaledGroups []*corev1.Node) (int64, int64, errors.AutoscalerError) {
|
||||
func (m *Manager) coresMemoryTotal(ctx *context.AutoscalingContext, nodeInfos map[string]*framework.NodeInfo, nodesFromNotAutoscaledGroups []*corev1.Node) (int64, int64, errors.AutoscalerError) {
|
||||
var coresTotal int64
|
||||
var memoryTotal int64
|
||||
for _, nodeGroup := range ctx.CloudProvider.NodeGroups() {
|
||||
|
|
@ -233,7 +233,7 @@ func (m *Manager) coresMemoryTotal(ctx *context.AutoscalingContext, nodeInfos ma
|
|||
return coresTotal, memoryTotal, nil
|
||||
}
|
||||
|
||||
func (m *Manager) customResourcesTotal(ctx *context.AutoscalingContext, nodeInfos map[string]*schedulerframework.NodeInfo, nodesFromNotAutoscaledGroups []*corev1.Node) (map[string]int64, errors.AutoscalerError) {
|
||||
func (m *Manager) customResourcesTotal(ctx *context.AutoscalingContext, nodeInfos map[string]*framework.NodeInfo, nodesFromNotAutoscaledGroups []*corev1.Node) (map[string]int64, errors.AutoscalerError) {
|
||||
result := make(map[string]int64)
|
||||
for _, nodeGroup := range ctx.CloudProvider.NodeGroups() {
|
||||
currentSize, err := nodeGroup.TargetSize()
|
||||
|
|
|
|||
|
|
@ -24,9 +24,9 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/estimator"
|
||||
ca_processors "k8s.io/autoscaler/cluster-autoscaler/processors"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/processors/status"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/taints"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
||||
)
|
||||
|
||||
// Orchestrator is a component that picks the node group to resize and triggers
|
||||
|
|
@ -47,7 +47,7 @@ type Orchestrator interface {
|
|||
unschedulablePods []*apiv1.Pod,
|
||||
nodes []*apiv1.Node,
|
||||
daemonSets []*appsv1.DaemonSet,
|
||||
nodeInfos map[string]*schedulerframework.NodeInfo,
|
||||
nodeInfos map[string]*framework.NodeInfo,
|
||||
allOrNothing bool,
|
||||
) (*status.ScaleUpStatus, errors.AutoscalerError)
|
||||
// ScaleUpToNodeGroupMinSize tries to scale up node groups that have less nodes
|
||||
|
|
@ -56,6 +56,6 @@ type Orchestrator interface {
|
|||
// appropriate status or error if an unexpected error occurred.
|
||||
ScaleUpToNodeGroupMinSize(
|
||||
nodes []*apiv1.Node,
|
||||
nodeInfos map[string]*schedulerframework.NodeInfo,
|
||||
nodeInfos map[string]*framework.NodeInfo,
|
||||
) (*status.ScaleUpStatus, errors.AutoscalerError)
|
||||
}
|
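Every nodeInfos map flowing through these interfaces is now keyed by node group id and holds wrapper NodeInfos. A hedged sketch of assembling such a map straight from the cloud provider; the real autoscaler builds it through its node-info processors, and some providers return cloudprovider.ErrNotImplemented from TemplateNodeInfo.

package example

import (
	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

// templateNodeInfos collects one template NodeInfo per node group.
func templateNodeInfos(provider cloudprovider.CloudProvider) (map[string]*framework.NodeInfo, error) {
	result := make(map[string]*framework.NodeInfo)
	for _, nodeGroup := range provider.NodeGroups() {
		nodeInfo, err := nodeGroup.TemplateNodeInfo()
		if err == cloudprovider.ErrNotImplemented {
			continue // this group does not provide a template
		}
		if err != nil {
			return nil, err
		}
		result[nodeGroup.Id()] = nodeInfo
	}
	return result, nil
}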
@ -46,6 +46,7 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/simulator"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability/rules"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/options"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/backoff"
|
||||
|
|
@ -58,7 +59,6 @@ import (
|
|||
apiv1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
klog "k8s.io/klog/v2"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
||||
)
|
||||
|
||||
const (
|
||||
|
|
@ -496,8 +496,7 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) caerrors.AutoscalerErr
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
l, err := a.ClusterSnapshot.NodeInfos().List()
|
||||
l, err := a.ClusterSnapshot.ListNodeInfos()
|
||||
if err != nil {
|
||||
klog.Errorf("Unable to fetch ClusterNode List for Debugging Snapshot, %v", err)
|
||||
} else {
|
||||
|
|
@ -679,7 +678,7 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) caerrors.AutoscalerErr
|
|||
return nil
|
||||
}
|
||||
|
||||
func (a *StaticAutoscaler) addUpcomingNodesToClusterSnapshot(upcomingCounts map[string]int, nodeInfosForGroups map[string]*schedulerframework.NodeInfo) error {
|
||||
func (a *StaticAutoscaler) addUpcomingNodesToClusterSnapshot(upcomingCounts map[string]int, nodeInfosForGroups map[string]*framework.NodeInfo) error {
|
||||
nodeGroups := a.nodeGroupsById()
|
||||
upcomingNodeGroups := make(map[string]int)
|
||||
upcomingNodesFromUpcomingNodeGroups := 0
|
||||
|
|
@ -691,7 +690,7 @@ func (a *StaticAutoscaler) addUpcomingNodesToClusterSnapshot(upcomingCounts map[
|
|||
isUpcomingNodeGroup := a.processors.AsyncNodeGroupStateChecker.IsUpcoming(nodeGroup)
|
||||
for _, upcomingNode := range upcomingNodes {
|
||||
var pods []*apiv1.Pod
|
||||
for _, podInfo := range upcomingNode.Pods {
|
||||
for _, podInfo := range upcomingNode.Pods() {
|
||||
pods = append(pods, podInfo.Pod)
|
||||
}
|
||||
err := a.ClusterSnapshot.AddNodeWithPods(upcomingNode.Node(), pods)
|
||||
|
|
@ -989,7 +988,7 @@ func filterNodesFromSelectedGroups(cp cloudprovider.CloudProvider, nodes ...*api
|
|||
return filtered
|
||||
}
|
||||
|
||||
func (a *StaticAutoscaler) updateClusterState(allNodes []*apiv1.Node, nodeInfosForGroups map[string]*schedulerframework.NodeInfo, currentTime time.Time) caerrors.AutoscalerError {
|
||||
func (a *StaticAutoscaler) updateClusterState(allNodes []*apiv1.Node, nodeInfosForGroups map[string]*framework.NodeInfo, currentTime time.Time) caerrors.AutoscalerError {
|
||||
err := a.clusterStateRegistry.UpdateNodes(allNodes, nodeInfosForGroups, currentTime)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to update node registry: %v", err)
|
||||
|
|
@ -1016,8 +1015,8 @@ func allPodsAreNew(pods []*apiv1.Pod, currentTime time.Time) bool {
|
|||
return found && oldest.Add(unschedulablePodWithGpuTimeBuffer).After(currentTime)
|
||||
}
|
||||
|
||||
func getUpcomingNodeInfos(upcomingCounts map[string]int, nodeInfos map[string]*schedulerframework.NodeInfo) map[string][]*schedulerframework.NodeInfo {
|
||||
upcomingNodes := make(map[string][]*schedulerframework.NodeInfo)
|
||||
func getUpcomingNodeInfos(upcomingCounts map[string]int, nodeInfos map[string]*framework.NodeInfo) map[string][]*framework.NodeInfo {
|
||||
upcomingNodes := make(map[string][]*framework.NodeInfo)
|
||||
for nodeGroup, numberOfNodes := range upcomingCounts {
|
||||
nodeTemplate, found := nodeInfos[nodeGroup]
|
||||
if !found {
|
||||
|
|
@ -1030,7 +1029,7 @@ func getUpcomingNodeInfos(upcomingCounts map[string]int, nodeInfos map[string]*s
|
|||
}
|
||||
nodeTemplate.Node().Annotations[NodeUpcomingAnnotation] = "true"
|
||||
|
||||
var nodes []*schedulerframework.NodeInfo
|
||||
var nodes []*framework.NodeInfo
|
||||
for i := 0; i < numberOfNodes; i++ {
|
||||
// Ensure new nodes have different names because nodeName
|
||||
// will be used as a map key. Also deep copy pods (daemonsets &
|
||||
|
|
|
|||
|
|
@ -53,6 +53,7 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/simulator"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability/rules"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/options"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/utilization"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/drain"
|
||||
|
|
@ -72,7 +73,6 @@ import (
|
|||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
v1appslister "k8s.io/client-go/listers/apps/v1"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/google/go-cmp/cmp/cmpopts"
|
||||
|
|
@ -336,8 +336,7 @@ func TestStaticAutoscalerRunOnce(t *testing.T) {
|
|||
p2 := BuildTestPod("p2", 600, 100, MarkUnschedulable())
|
||||
|
||||
tn := BuildTestNode("tn", 1000, 1000)
|
||||
tni := schedulerframework.NewNodeInfo()
|
||||
tni.SetNode(tn)
|
||||
tni := framework.NewTestNodeInfo(tn)
|
||||
|
||||
provider := testprovider.NewTestAutoprovisioningCloudProvider(
|
||||
func(id string, delta int) error {
|
||||
|
|
@ -348,7 +347,7 @@ func TestStaticAutoscalerRunOnce(t *testing.T) {
|
|||
return ret
|
||||
},
|
||||
nil, nil,
|
||||
nil, map[string]*schedulerframework.NodeInfo{"ng1": tni, "ng2": tni, "ng3": tni})
|
||||
nil, map[string]*framework.NodeInfo{"ng1": tni, "ng2": tni, "ng3": tni})
|
||||
provider.AddNodeGroup("ng1", 1, 10, 1)
|
||||
provider.AddNode("ng1", n1)
|
||||
ng1 := reflect.ValueOf(provider.GetNodeGroup("ng1")).Interface().(*testprovider.TestNodeGroup)
|
||||
|
|
@ -514,8 +513,7 @@ func TestStaticAutoscalerRunOnceWithScaleDownDelayPerNG(t *testing.T) {
|
|||
SetNodeReadyState(n2, true, time.Now())
|
||||
|
||||
tn := BuildTestNode("tn", 1000, 1000)
|
||||
tni := schedulerframework.NewNodeInfo()
|
||||
tni.SetNode(tn)
|
||||
tni := framework.NewTestNodeInfo(tn)
|
||||
|
||||
provider := testprovider.NewTestAutoprovisioningCloudProvider(
|
||||
func(id string, delta int) error {
|
||||
|
|
@ -526,7 +524,7 @@ func TestStaticAutoscalerRunOnceWithScaleDownDelayPerNG(t *testing.T) {
|
|||
return ret
|
||||
},
|
||||
nil, nil,
|
||||
nil, map[string]*schedulerframework.NodeInfo{"ng1": tni, "ng2": tni})
|
||||
nil, map[string]*framework.NodeInfo{"ng1": tni, "ng2": tni})
|
||||
assert.NotNil(t, provider)
|
||||
|
||||
provider.AddNodeGroup("ng1", 0, 10, 1)
|
||||
|
|
@ -744,16 +742,13 @@ func TestStaticAutoscalerRunOnceWithAutoprovisionedEnabled(t *testing.T) {
|
|||
|
||||
tn1 := BuildTestNode("tn1", 100, 1000)
|
||||
SetNodeReadyState(tn1, true, time.Now())
|
||||
tni1 := schedulerframework.NewNodeInfo()
|
||||
tni1.SetNode(tn1)
|
||||
tni1 := framework.NewTestNodeInfo(tn1)
|
||||
tn2 := BuildTestNode("tn2", 1000, 1000)
|
||||
SetNodeReadyState(tn2, true, time.Now())
|
||||
tni2 := schedulerframework.NewNodeInfo()
|
||||
tni2.SetNode(tn2)
|
||||
tni2 := framework.NewTestNodeInfo(tn2)
|
||||
tn3 := BuildTestNode("tn3", 100, 1000)
|
||||
SetNodeReadyState(tn2, true, time.Now())
|
||||
tni3 := schedulerframework.NewNodeInfo()
|
||||
tni3.SetNode(tn3)
|
||||
tni3 := framework.NewTestNodeInfo(tn3)
|
||||
|
||||
provider := testprovider.NewTestAutoprovisioningCloudProvider(
|
||||
func(id string, delta int) error {
|
||||
|
|
@ -767,7 +762,7 @@ func TestStaticAutoscalerRunOnceWithAutoprovisionedEnabled(t *testing.T) {
|
|||
}, func(id string) error {
|
||||
return onNodeGroupDeleteMock.Delete(id)
|
||||
},
|
||||
[]string{"TN1", "TN2"}, map[string]*schedulerframework.NodeInfo{"TN1": tni1, "TN2": tni2, "ng1": tni3})
|
||||
[]string{"TN1", "TN2"}, map[string]*framework.NodeInfo{"TN1": tni1, "TN2": tni2, "ng1": tni3})
|
||||
provider.AddNodeGroup("ng1", 1, 10, 1)
|
||||
provider.AddAutoprovisionedNodeGroup("autoprovisioned-TN1", 0, 10, 0, "TN1")
|
||||
autoprovisionedTN1 := reflect.ValueOf(provider.GetNodeGroup("autoprovisioned-TN1")).Interface().(*testprovider.TestNodeGroup)
|
||||
|
|
@ -2005,13 +2000,13 @@ func (f *candidateTrackingFakePlanner) NodeUtilizationMap() map[string]utilizati
|
|||
}
|
||||
|
||||
func assertSnapshotNodeCount(t *testing.T, snapshot clustersnapshot.ClusterSnapshot, wantCount int) {
|
||||
nodeInfos, err := snapshot.NodeInfos().List()
|
||||
nodeInfos, err := snapshot.ListNodeInfos()
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, nodeInfos, wantCount)
|
||||
}
|
||||
|
||||
func assertNodesNotInSnapshot(t *testing.T, snapshot clustersnapshot.ClusterSnapshot, nodeNames map[string]bool) {
|
||||
nodeInfos, err := snapshot.NodeInfos().List()
|
||||
nodeInfos, err := snapshot.ListNodeInfos()
|
||||
assert.NoError(t, err)
|
||||
for _, nodeInfo := range nodeInfos {
|
||||
assert.NotContains(t, nodeNames, nodeInfo.Node().Name)
|
||||
|
|
@ -2019,7 +2014,7 @@ func assertNodesNotInSnapshot(t *testing.T, snapshot clustersnapshot.ClusterSnap
|
|||
}
|
||||
|
||||
func assertNodesInSnapshot(t *testing.T, snapshot clustersnapshot.ClusterSnapshot, nodeNames map[string]bool) {
|
||||
nodeInfos, err := snapshot.NodeInfos().List()
|
||||
nodeInfos, err := snapshot.ListNodeInfos()
|
||||
assert.NoError(t, err)
|
||||
snapshotNodeNames := map[string]bool{}
|
||||
for _, nodeInfo := range nodeInfos {
|
||||
|
|
|
|||
|
|
@ -37,6 +37,7 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/processors/nodegroups"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/processors/status"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/backoff"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||
|
|
@ -48,7 +49,6 @@ import (
|
|||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
kube_client "k8s.io/client-go/kubernetes"
|
||||
kube_record "k8s.io/client-go/tools/record"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
||||
)
|
||||
|
||||
// NodeConfig is a node config used in tests
|
||||
|
|
@ -100,7 +100,7 @@ type NodeGroupConfig struct {
|
|||
// NodeTemplateConfig is a structure to provide node info in tests
|
||||
type NodeTemplateConfig struct {
|
||||
MachineType string
|
||||
NodeInfo *schedulerframework.NodeInfo
|
||||
NodeInfo *framework.NodeInfo
|
||||
NodeGroupName string
|
||||
}
|
||||
|
||||
|
|
@ -284,9 +284,9 @@ type MockAutoprovisioningNodeGroupListProcessor struct {
|
|||
}
|
||||
|
||||
// Process extends the list of node groups
|
||||
func (p *MockAutoprovisioningNodeGroupListProcessor) Process(context *context.AutoscalingContext, nodeGroups []cloudprovider.NodeGroup, nodeInfos map[string]*schedulerframework.NodeInfo,
|
||||
func (p *MockAutoprovisioningNodeGroupListProcessor) Process(context *context.AutoscalingContext, nodeGroups []cloudprovider.NodeGroup, nodeInfos map[string]*framework.NodeInfo,
|
||||
unschedulablePods []*apiv1.Pod,
|
||||
) ([]cloudprovider.NodeGroup, map[string]*schedulerframework.NodeInfo, error) {
|
||||
) ([]cloudprovider.NodeGroup, map[string]*framework.NodeInfo, error) {
|
||||
machines, err := context.CloudProvider.GetAvailableMachineTypes()
|
||||
assert.NoError(p.T, err)
|
||||
|
||||
|
|
@ -368,7 +368,7 @@ func (r *MockReportingStrategy) LastInputOptions() []GroupSizeChange {
|
|||
// BestOption satisfies the Strategy interface. Picks the best option from those passed as an argument.
|
||||
// When parameter optionToChoose is defined, it's picked as the best one.
|
||||
// Otherwise, random option is used.
|
||||
func (r *MockReportingStrategy) BestOption(options []expander.Option, nodeInfo map[string]*schedulerframework.NodeInfo) *expander.Option {
|
||||
func (r *MockReportingStrategy) BestOption(options []expander.Option, nodeInfo map[string]*framework.NodeInfo) *expander.Option {
|
||||
r.results.inputOptions = expanderOptionsToGroupSizeChanges(options)
|
||||
if r.optionToChoose == nil {
|
||||
return r.defaultStrategy.BestOption(options, nodeInfo)
|
||||
|
|
|
|||
|
|
@ -27,16 +27,16 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/clusterstate"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/metrics"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/daemonset"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/labels"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/taints"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
||||
)
|
||||
|
||||
// GetNodeInfoFromTemplate returns NodeInfo object built base on TemplateNodeInfo returned by NodeGroup.TemplateNodeInfo().
|
||||
func GetNodeInfoFromTemplate(nodeGroup cloudprovider.NodeGroup, daemonsets []*appsv1.DaemonSet, taintConfig taints.TaintConfig) (*schedulerframework.NodeInfo, errors.AutoscalerError) {
|
||||
func GetNodeInfoFromTemplate(nodeGroup cloudprovider.NodeGroup, daemonsets []*appsv1.DaemonSet, taintConfig taints.TaintConfig) (*framework.NodeInfo, errors.AutoscalerError) {
|
||||
id := nodeGroup.Id()
|
||||
baseNodeInfo, err := nodeGroup.TemplateNodeInfo()
|
||||
if err != nil {
|
||||
|
|
@ -55,12 +55,11 @@ func GetNodeInfoFromTemplate(nodeGroup cloudprovider.NodeGroup, daemonsets []*ap
|
|||
if err != nil {
|
||||
return nil, errors.ToAutoscalerError(errors.InternalError, err)
|
||||
}
|
||||
for _, podInfo := range baseNodeInfo.Pods {
|
||||
pods = append(pods, podInfo.Pod)
|
||||
for _, podInfo := range baseNodeInfo.Pods() {
|
||||
pods = append(pods, &framework.PodInfo{Pod: podInfo.Pod})
|
||||
}
|
||||
|
||||
sanitizedNodeInfo := schedulerframework.NewNodeInfo(SanitizePods(pods, sanitizedNode)...)
|
||||
sanitizedNodeInfo.SetNode(sanitizedNode)
|
||||
sanitizedNodeInfo := framework.NewNodeInfo(sanitizedNode, nil, SanitizePods(pods, sanitizedNode)...)
|
||||
return sanitizedNodeInfo, nil
|
||||
}
|
||||
|
||||
|
|
@ -91,15 +90,14 @@ func FilterOutNodesFromNotAutoscaledGroups(nodes []*apiv1.Node, cloudProvider cl
|
|||
}
|
||||
|
||||
// DeepCopyNodeInfo clones the provided nodeInfo
|
||||
func DeepCopyNodeInfo(nodeInfo *schedulerframework.NodeInfo) *schedulerframework.NodeInfo {
|
||||
newPods := make([]*apiv1.Pod, 0)
|
||||
for _, podInfo := range nodeInfo.Pods {
|
||||
newPods = append(newPods, podInfo.Pod.DeepCopy())
|
||||
func DeepCopyNodeInfo(nodeInfo *framework.NodeInfo) *framework.NodeInfo {
|
||||
newPods := make([]*framework.PodInfo, 0)
|
||||
for _, podInfo := range nodeInfo.Pods() {
|
||||
newPods = append(newPods, &framework.PodInfo{Pod: podInfo.Pod.DeepCopy()})
|
||||
}
|
||||
|
||||
// Build a new node info.
|
||||
newNodeInfo := schedulerframework.NewNodeInfo(newPods...)
|
||||
newNodeInfo.SetNode(nodeInfo.Node().DeepCopy())
|
||||
newNodeInfo := framework.NewNodeInfo(nodeInfo.Node().DeepCopy(), nil, newPods...)
|
||||
return newNodeInfo
|
||||
}
|
||||
|
||||
|
|
@ -121,13 +119,13 @@ func SanitizeNode(node *apiv1.Node, nodeGroup string, taintConfig taints.TaintCo
|
|||
}
|
||||
|
||||
// SanitizePods cleans up pods used for node group templates
|
||||
func SanitizePods(pods []*apiv1.Pod, sanitizedNode *apiv1.Node) []*apiv1.Pod {
|
||||
func SanitizePods(pods []*framework.PodInfo, sanitizedNode *apiv1.Node) []*framework.PodInfo {
|
||||
// Update node name in pods.
|
||||
sanitizedPods := make([]*apiv1.Pod, 0)
|
||||
sanitizedPods := make([]*framework.PodInfo, 0)
|
||||
for _, pod := range pods {
|
||||
sanitizedPod := pod.DeepCopy()
|
||||
sanitizedPod := pod.Pod.DeepCopy()
|
||||
sanitizedPod.Spec.NodeName = sanitizedNode.Name
|
||||
sanitizedPods = append(sanitizedPods, sanitizedPod)
|
||||
sanitizedPods = append(sanitizedPods, &framework.PodInfo{Pod: sanitizedPod})
|
||||
}
|
||||
|
||||
return sanitizedPods
|
||||
|
|
|
|||
|
|
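A minimal before/after sketch of the constructor and Pods accessor change applied in the hunks above. It assumes only the internal framework API as it appears in this diff; the function and variable names are illustrative, not part of the commit.

// Sketch only: identifiers mirror the hunks above; the wrapper package is
// cluster-autoscaler/simulator/framework, the old one is the scheduler's.
func buildWrappedNodeInfo(node *apiv1.Node, pod *apiv1.Pod) *framework.NodeInfo {
	// Old pattern (scheduler framework): construct, then attach the node.
	//   ni := schedulerframework.NewNodeInfo(pod)
	//   ni.SetNode(node)

	// New pattern: the node and wrapped pods go into one constructor call.
	// The second argument is passed as nil here, matching the call sites above.
	ni := framework.NewNodeInfo(node, nil, &framework.PodInfo{Pod: pod})

	// Pods are read back through the Pods() method; each entry embeds *apiv1.Pod.
	for _, podInfo := range ni.Pods() {
		_ = podInfo.Pod
	}
	return ni
}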
@ -20,6 +20,7 @@ import (
"testing"
"time"

"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
"k8s.io/autoscaler/cluster-autoscaler/utils/taints"
. "k8s.io/autoscaler/cluster-autoscaler/utils/test"

@ -31,7 +32,7 @@ import (
func TestSanitizePods(t *testing.T) {
pod := BuildTestPod("p1", 80, 0)
pod.Spec.NodeName = "n1"
pods := []*apiv1.Pod{pod}
pods := []*framework.PodInfo{{Pod: pod}}

node := BuildTestNode("node", 1000, 1000)

@ -21,8 +21,8 @@ import (
"time"

v1 "k8s.io/api/core/v1"
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/scheduler/framework"
)

// ClusterNode captures a single entity of nodeInfo. i.e. Node specs and all the pods on that node.
@ -98,7 +98,7 @@ func GetClusterNodeCopy(template *framework.NodeInfo) *ClusterNode {
cNode := &ClusterNode{}
cNode.Node = template.Node().DeepCopy()
var pods []*v1.Pod
for _, p := range template.Pods {
for _, p := range template.Pods() {
pods = append(pods, p.Pod.DeepCopy())
}
cNode.Pods = pods

@ -24,21 +24,17 @@ import (
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

func TestBasicSetterWorkflow(t *testing.T) {
snapshot := &DebuggingSnapshotImpl{}
pod := []*framework.PodInfo{
{
Pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "Pod1",
},
Spec: v1.PodSpec{
NodeName: "testNode",
},
},
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "Pod1",
},
Spec: v1.PodSpec{
NodeName: "testNode",
},
}
node := &v1.Node{
@ -46,18 +42,10 @@ func TestBasicSetterWorkflow(t *testing.T) {
Name: "testNode",
},
}

nodeInfo := &framework.NodeInfo{
Pods: pod,
Requested: &framework.Resource{},
NonZeroRequested: &framework.Resource{},
Allocatable: &framework.Resource{},
Generation: 0,
}
nodeInfo := framework.NewTestNodeInfo(node, pod)

var nodeGroups []*framework.NodeInfo
nodeGroups = append(nodeGroups, nodeInfo)
nodeGroups[0].SetNode(node)
timestamp := time.Now().In(time.UTC)
snapshot.SetClusterNodes(nodeGroups)
snapshot.SetEndTimestamp(timestamp)

@ -23,8 +23,8 @@ import (
"time"

v1 "k8s.io/api/core/v1"
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/scheduler/framework"
)

// DebuggingSnapshotterState is the type for the debugging snapshot State machine

@ -25,7 +25,7 @@ import (
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

func TestBasicSnapshotRequest(t *testing.T) {
@ -33,16 +33,12 @@ func TestBasicSnapshotRequest(t *testing.T) {
wg.Add(1)
snapshotter := NewDebuggingSnapshotter(true)

pod := []*framework.PodInfo{
{
Pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "Pod1",
},
Spec: v1.PodSpec{
NodeName: "testNode",
},
},
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "Pod1",
},
Spec: v1.PodSpec{
NodeName: "testNode",
},
}
node := &v1.Node{
@ -50,18 +46,10 @@ func TestBasicSnapshotRequest(t *testing.T) {
Name: "testNode",
},
}

nodeInfo := &framework.NodeInfo{
Pods: pod,
Requested: &framework.Resource{},
NonZeroRequested: &framework.Resource{},
Allocatable: &framework.Resource{},
Generation: 0,
}
nodeInfo := framework.NewTestNodeInfo(node, pod)

var nodeGroups []*framework.NodeInfo
nodeGroups = append(nodeGroups, nodeInfo)
nodeGroups[0].SetNode(node)

req := httptest.NewRequest(http.MethodGet, "/", nil)
w := httptest.NewRecorder()
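The test hunks follow the same replacement through the NewTestNodeInfo helper. A short sketch of that usage, assuming the repo's BuildTestNode/BuildTestPod test helpers and the helper signature exactly as shown in the hunks; the variable names are illustrative.

// Sketch only: helpers and signatures as used in the test hunks above.
tn := BuildTestNode("tn", 1000, 1000) // test node with CPU and memory capacity
p1 := BuildTestPod("p1", 80, 0)       // test pod requesting 80m CPU

// Old two-step setup:
//   tni := schedulerframework.NewNodeInfo()
//   tni.SetNode(tn)
// New single helper call; pods are optional variadic arguments:
tni := framework.NewTestNodeInfo(tn, p1)
_ = tni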
@ -22,10 +22,10 @@ import (
apiv1 "k8s.io/api/core/v1"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
"k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker"
"k8s.io/autoscaler/cluster-autoscaler/utils/scheduler"
klog "k8s.io/klog/v2"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
)

// BinpackingNodeEstimator estimates the number of needed nodes to handle the given amount of pods.
@ -89,7 +89,7 @@ func newEstimationState() *estimationState {
// Returns the number of nodes needed to accommodate all pods from the list.
func (e *BinpackingNodeEstimator) Estimate(
podsEquivalenceGroups []PodEquivalenceGroup,
nodeTemplate *schedulerframework.NodeInfo,
nodeTemplate *framework.NodeInfo,
nodeGroup cloudprovider.NodeGroup,
) (int, []*apiv1.Pod) {

@ -136,7 +136,7 @@ func (e *BinpackingNodeEstimator) tryToScheduleOnExistingNodes(
pod := pods[index]

// Check schedulability on all nodes created during simulation
nodeName, err := e.predicateChecker.FitsAnyNodeMatching(e.clusterSnapshot, pod, func(nodeInfo *schedulerframework.NodeInfo) bool {
nodeName, err := e.predicateChecker.FitsAnyNodeMatching(e.clusterSnapshot, pod, func(nodeInfo *framework.NodeInfo) bool {
return estimationState.newNodeNames[nodeInfo.Node().Name]
})
if err != nil {
@ -152,7 +152,7 @@ func (e *BinpackingNodeEstimator) tryToScheduleOnExistingNodes(

func (e *BinpackingNodeEstimator) tryToScheduleOnNewNodes(
estimationState *estimationState,
nodeTemplate *schedulerframework.NodeInfo,
nodeTemplate *framework.NodeInfo,
pods []*apiv1.Pod,
) error {
for _, pod := range pods {
@ -208,11 +208,11 @@ func (e *BinpackingNodeEstimator) tryToScheduleOnNewNodes(

func (e *BinpackingNodeEstimator) addNewNodeToSnapshot(
estimationState *estimationState,
template *schedulerframework.NodeInfo,
template *framework.NodeInfo,
) error {
newNodeInfo := scheduler.DeepCopyTemplateNode(template, fmt.Sprintf("e-%d", estimationState.newNodeNameIndex))
var pods []*apiv1.Pod
for _, podInfo := range newNodeInfo.Pods {
for _, podInfo := range newNodeInfo.Pods() {
pods = append(pods, podInfo.Pod)
}
if err := e.clusterSnapshot.AddNodeWithPods(newNodeInfo.Node(), pods); err != nil {

@ -24,10 +24,10 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
"k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker"
. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
"k8s.io/autoscaler/cluster-autoscaler/utils/units"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
schedulermetrics "k8s.io/kubernetes/pkg/scheduler/metrics"

"github.com/stretchr/testify/assert"
@ -222,8 +222,7 @@ func TestBinpackingEstimate(t *testing.T) {
processor := NewDecreasingPodOrderer()
estimator := NewBinpackingNodeEstimator(predicateChecker, clusterSnapshot, limiter, processor, nil /* EstimationContext */, nil /* EstimationAnalyserFunc */)
node := makeNode(tc.millicores, tc.memory, 10, "template", "zone-mars")
nodeInfo := schedulerframework.NewNodeInfo()
nodeInfo.SetNode(node)
nodeInfo := framework.NewTestNodeInfo(node)

estimatedNodes, estimatedPods := estimator.Estimate(tc.podsEquivalenceGroup, nodeInfo, nil)
assert.Equal(t, tc.expectNodeCount, estimatedNodes)
@ -277,8 +276,7 @@ func BenchmarkBinpackingEstimate(b *testing.B) {
processor := NewDecreasingPodOrderer()
estimator := NewBinpackingNodeEstimator(predicateChecker, clusterSnapshot, limiter, processor, nil /* EstimationContext */, nil /* EstimationAnalyserFunc */)
node := makeNode(millicores, memory, podsPerNode, "template", "zone-mars")
nodeInfo := schedulerframework.NewNodeInfo()
nodeInfo.SetNode(node)
nodeInfo := framework.NewTestNodeInfo(node)

estimatedNodes, estimatedPods := estimator.Estimate(podsEquivalenceGroup, nodeInfo, nil)
assert.Equal(b, expectNodeCount, estimatedNodes)

@ -22,7 +22,7 @@ import (
apiv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

// podScoreInfo contains Pod and score that corresponds to how important it is to handle the pod first.

@ -21,8 +21,8 @@ import (

"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
"k8s.io/autoscaler/cluster-autoscaler/utils/test"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
)

func TestPodPriorityProcessor(t *testing.T) {
@ -57,8 +57,7 @@ func TestPodPriorityProcessor(t *testing.T) {
tc := tc
t.Parallel()
processor := NewDecreasingPodOrderer()
nodeInfo := schedulerframework.NewNodeInfo()
nodeInfo.SetNode(node)
nodeInfo := framework.NewTestNodeInfo(node)
actual := processor.Order(tc.inputPodsEquivalentGroup, nodeInfo, nil)
assert.Equal(t, tc.expectedPodsEquivalentGroup, actual)
})

@ -22,9 +22,8 @@ import (
apiv1 "k8s.io/api/core/v1"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
"k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker"
"k8s.io/kubernetes/pkg/scheduler/framework"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
)

const (
@ -54,7 +53,7 @@ func (p *PodEquivalenceGroup) Exemplar() *apiv1.Pod {
// to schedule on those nodes.
type Estimator interface {
// Estimate estimates how many nodes are needed to provision pods coming from the given equivalence groups.
Estimate([]PodEquivalenceGroup, *schedulerframework.NodeInfo, cloudprovider.NodeGroup) (int, []*apiv1.Pod)
Estimate([]PodEquivalenceGroup, *framework.NodeInfo, cloudprovider.NodeGroup) (int, []*apiv1.Pod)
}

// EstimatorBuilder creates a new estimator object.
@ -19,7 +19,7 @@ package expander
import (
apiv1 "k8s.io/api/core/v1"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

var (
@ -53,10 +53,10 @@ type Option struct {

// Strategy describes an interface for selecting the best option when scaling up
type Strategy interface {
BestOption(options []Option, nodeInfo map[string]*schedulerframework.NodeInfo) *Option
BestOption(options []Option, nodeInfo map[string]*framework.NodeInfo) *Option
}

// Filter describes an interface for filtering to equally good options according to some criteria
type Filter interface {
BestOptions(options []Option, nodeInfo map[string]*schedulerframework.NodeInfo) []Option
BestOptions(options []Option, nodeInfo map[string]*framework.NodeInfo) []Option
}

@ -18,8 +18,7 @@ package factory

import (
"k8s.io/autoscaler/cluster-autoscaler/expander"

schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

type chainStrategy struct {
@ -34,7 +33,7 @@ func newChainStrategy(filters []expander.Filter, fallback expander.Strategy) exp
}
}

func (c *chainStrategy) BestOption(options []expander.Option, nodeInfo map[string]*schedulerframework.NodeInfo) *expander.Option {
func (c *chainStrategy) BestOption(options []expander.Option, nodeInfo map[string]*framework.NodeInfo) *expander.Option {
filteredOptions := options
for _, filter := range c.filters {
filteredOptions = filter.BestOptions(filteredOptions, nodeInfo)

@ -17,12 +17,12 @@ limitations under the License.
package factory

import (
"k8s.io/autoscaler/cluster-autoscaler/expander"
"strings"
"testing"

"github.com/stretchr/testify/assert"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/autoscaler/cluster-autoscaler/expander"
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

type substringTestFilterStrategy struct {
@ -35,7 +35,7 @@ func newSubstringTestFilterStrategy(substring string) *substringTestFilterStrate
}
}

func (s *substringTestFilterStrategy) BestOptions(expansionOptions []expander.Option, nodeInfo map[string]*schedulerframework.NodeInfo) []expander.Option {
func (s *substringTestFilterStrategy) BestOptions(expansionOptions []expander.Option, nodeInfo map[string]*framework.NodeInfo) []expander.Option {
var ret []expander.Option
for _, option := range expansionOptions {
if strings.Contains(option.Debug, s.substring) {
@ -46,7 +46,7 @@ func (s *substringTestFilterStrategy) BestOptions(expansionOptions []expander.Op

}

func (s *substringTestFilterStrategy) BestOption(expansionOptions []expander.Option, nodeInfo map[string]*schedulerframework.NodeInfo) *expander.Option {
func (s *substringTestFilterStrategy) BestOption(expansionOptions []expander.Option, nodeInfo map[string]*framework.NodeInfo) *expander.Option {
ret := s.BestOptions(expansionOptions, nodeInfo)
if len(ret) == 0 {
return nil

@ -24,8 +24,8 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/autoscaler/cluster-autoscaler/expander"
"k8s.io/autoscaler/cluster-autoscaler/expander/grpcplugin/protos"
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
"k8s.io/klog/v2"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"

"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
@ -72,7 +72,7 @@ func createGRPCClient(expanderCert string, expanderUrl string) protos.ExpanderCl
return protos.NewExpanderClient(conn)
}

func (g *grpcclientstrategy) BestOptions(expansionOptions []expander.Option, nodeInfo map[string]*schedulerframework.NodeInfo) []expander.Option {
func (g *grpcclientstrategy) BestOptions(expansionOptions []expander.Option, nodeInfo map[string]*framework.NodeInfo) []expander.Option {
if g.grpcClient == nil {
klog.Errorf("Incorrect gRPC client config, filtering no options")
return expansionOptions
@ -117,7 +117,7 @@ func populateOptionsForGRPC(expansionOptions []expander.Option) ([]*protos.Optio
}

// populateNodeInfoForGRPC looks at the corresponding v1.Node object per NodeInfo object, and populates the grpcNodeInfoMap with these to pass over grpc
func populateNodeInfoForGRPC(nodeInfos map[string]*schedulerframework.NodeInfo) map[string]*v1.Node {
func populateNodeInfoForGRPC(nodeInfos map[string]*framework.NodeInfo) map[string]*v1.Node {
grpcNodeInfoMap := make(map[string]*v1.Node)
for nodeId, nodeInfo := range nodeInfos {
grpcNodeInfoMap[nodeId] = nodeInfo.Node()

@ -25,8 +25,8 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/autoscaler/cluster-autoscaler/expander/grpcplugin/protos"
"k8s.io/autoscaler/cluster-autoscaler/expander/mocks"
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"

"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test"
"k8s.io/autoscaler/cluster-autoscaler/expander"
@ -124,11 +124,10 @@ func TestPopulateOptionsForGrpc(t *testing.T) {
}
}

func makeFakeNodeInfos() map[string]*schedulerframework.NodeInfo {
nodeInfos := make(map[string]*schedulerframework.NodeInfo)
func makeFakeNodeInfos() map[string]*framework.NodeInfo {
nodeInfos := make(map[string]*framework.NodeInfo)
for i, opt := range options {
nodeInfo := schedulerframework.NewNodeInfo()
nodeInfo.SetNode(nodes[i])
nodeInfo := framework.NewTestNodeInfo(nodes[i])
nodeInfos[opt.NodeGroup.Id()] = nodeInfo
}
return nodeInfos
@ -251,7 +250,7 @@ func TestBestOptionsErrors(t *testing.T) {
testCases := []struct {
desc string
client grpcclientstrategy
nodeInfo map[string]*schedulerframework.NodeInfo
nodeInfo map[string]*framework.NodeInfo
mockResponse protos.BestOptionsResponse
errResponse error
}{
@ -20,7 +20,7 @@ import (
"math"

"k8s.io/autoscaler/cluster-autoscaler/expander"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

type leastnodes struct {
@ -32,7 +32,7 @@ func NewFilter() expander.Filter {
}

// BestOptions selects the expansion option that uses the least number of nodes
func (m *leastnodes) BestOptions(expansionOptions []expander.Option, nodeInfo map[string]*schedulerframework.NodeInfo) []expander.Option {
func (m *leastnodes) BestOptions(expansionOptions []expander.Option, nodeInfo map[string]*framework.NodeInfo) []expander.Option {
leastNodes := math.MaxInt
var leastOptions []expander.Option

@ -18,7 +18,7 @@ package mostpods

import (
"k8s.io/autoscaler/cluster-autoscaler/expander"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

type mostpods struct {
@ -30,7 +30,7 @@ func NewFilter() expander.Filter {
}

// BestOptions selects the expansion option that schedules the most pods
func (m *mostpods) BestOptions(expansionOptions []expander.Option, nodeInfo map[string]*schedulerframework.NodeInfo) []expander.Option {
func (m *mostpods) BestOptions(expansionOptions []expander.Option, nodeInfo map[string]*framework.NodeInfo) []expander.Option {
var maxPods int
var maxOptions []expander.Option

@ -26,9 +26,9 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
"k8s.io/autoscaler/cluster-autoscaler/expander"
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
"k8s.io/autoscaler/cluster-autoscaler/utils/units"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"

klog "k8s.io/klog/v2"
)
@ -87,7 +87,7 @@ func NewFilter(cloudProvider cloudprovider.CloudProvider,
}

// BestOption selects option based on cost and preferred node type.
func (p *priceBased) BestOptions(expansionOptions []expander.Option, nodeInfos map[string]*schedulerframework.NodeInfo) []expander.Option {
func (p *priceBased) BestOptions(expansionOptions []expander.Option, nodeInfos map[string]*framework.NodeInfo) []expander.Option {
var bestOptions []expander.Option
bestOptionScore := 0.0
now := time.Now()

@ -28,8 +28,8 @@ import (
apiv1 "k8s.io/api/core/v1"
cloudprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test"
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"

"github.com/stretchr/testify/assert"
)
@ -90,13 +90,10 @@ func TestPriceExpander(t *testing.T) {
ng2, _ := provider.NodeGroupForNode(n2)
ng3, _ := provider.NewNodeGroup("MT1", nil, nil, nil, nil)

ni1 := schedulerframework.NewNodeInfo()
ni1.SetNode(n1)
ni2 := schedulerframework.NewNodeInfo()
ni2.SetNode(n2)
ni3 := schedulerframework.NewNodeInfo()
ni3.SetNode(n3)
nodeInfosForGroups := map[string]*schedulerframework.NodeInfo{
ni1 := framework.NewTestNodeInfo(n1)
ni2 := framework.NewTestNodeInfo(n2)
ni3 := framework.NewTestNodeInfo(n3)
nodeInfosForGroups := map[string]*framework.NodeInfo{
"ng1": ni1, "ng2": ni2,
}
var pricingModel cloudprovider.PricingModel

@ -26,10 +26,10 @@ import (
"k8s.io/autoscaler/cluster-autoscaler/expander"

apiv1 "k8s.io/api/core/v1"
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
v1lister "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/record"
klog "k8s.io/klog/v2"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
)

const (
@ -116,7 +116,7 @@ func (p *priority) parsePrioritiesYAMLString(prioritiesYAML string) (priorities,
return newPriorities, nil
}

func (p *priority) BestOptions(expansionOptions []expander.Option, nodeInfo map[string]*schedulerframework.NodeInfo) []expander.Option {
func (p *priority) BestOptions(expansionOptions []expander.Option, nodeInfo map[string]*framework.NodeInfo) []expander.Option {
if len(expansionOptions) <= 0 {
return nil
}

@ -20,7 +20,7 @@ import (
"math/rand"

"k8s.io/autoscaler/cluster-autoscaler/expander"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

type random struct {
@ -37,7 +37,7 @@ func NewStrategy() expander.Strategy {
}

// BestOptions selects from the expansion options at random
func (r *random) BestOptions(expansionOptions []expander.Option, nodeInfo map[string]*schedulerframework.NodeInfo) []expander.Option {
func (r *random) BestOptions(expansionOptions []expander.Option, nodeInfo map[string]*framework.NodeInfo) []expander.Option {
best := r.BestOption(expansionOptions, nodeInfo)
if best == nil {
return nil
@ -46,7 +46,7 @@ func (r *random) BestOptions(expansionOptions []expander.Option, nodeInfo map[st
}

// BestOption selects from the expansion options at random
func (r *random) BestOption(expansionOptions []expander.Option, nodeInfo map[string]*schedulerframework.NodeInfo) *expander.Option {
func (r *random) BestOption(expansionOptions []expander.Option, nodeInfo map[string]*framework.NodeInfo) *expander.Option {
if len(expansionOptions) <= 0 {
return nil
}

@ -20,8 +20,8 @@ import (
apiv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/autoscaler/cluster-autoscaler/expander"
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
klog "k8s.io/klog/v2"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
)

type leastwaste struct {
@ -33,7 +33,7 @@ func NewFilter() expander.Filter {
}

// BestOption Finds the option that wastes the least fraction of CPU and Memory
func (l *leastwaste) BestOptions(expansionOptions []expander.Option, nodeInfo map[string]*schedulerframework.NodeInfo) []expander.Option {
func (l *leastwaste) BestOptions(expansionOptions []expander.Option, nodeInfo map[string]*framework.NodeInfo) []expander.Option {
var leastWastedScore float64
var leastWastedOptions []expander.Option
@ -28,7 +28,7 @@ import (
apiv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/autoscaler/cluster-autoscaler/expander"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

type FakeNodeGroup struct {
@ -47,7 +47,7 @@ func (f *FakeNodeGroup) Debug() string { return f.id }
func (f *FakeNodeGroup) Nodes() ([]cloudprovider.Instance, error) {
return []cloudprovider.Instance{}, nil
}
func (f *FakeNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
func (f *FakeNodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
return nil, cloudprovider.ErrNotImplemented
}
func (f *FakeNodeGroup) Exist() bool { return true }
@ -60,7 +60,7 @@ func (f *FakeNodeGroup) GetOptions(defaults config.NodeGroupAutoscalingOptions)
return nil, cloudprovider.ErrNotImplemented
}

func makeNodeInfo(cpu int64, memory int64, pods int64) *schedulerframework.NodeInfo {
func makeNodeInfo(cpu int64, memory int64, pods int64) *framework.NodeInfo {
node := &apiv1.Node{
Status: apiv1.NodeStatus{
Capacity: apiv1.ResourceList{
@ -73,8 +73,7 @@ func makeNodeInfo(cpu int64, memory int64, pods int64) *schedulerframework.NodeI
node.Status.Allocatable = node.Status.Capacity
SetNodeReadyState(node, true, time.Time{})

nodeInfo := schedulerframework.NewNodeInfo()
nodeInfo.SetNode(node)
nodeInfo := framework.NewTestNodeInfo(node)

return nodeInfo
}
@ -84,7 +83,7 @@ func TestLeastWaste(t *testing.T) {
memoryPerPod := int64(1000 * 1024 * 1024)
e := NewFilter()
balancedNodeInfo := makeNodeInfo(16*cpuPerPod, 16*memoryPerPod, 100)
nodeMap := map[string]*schedulerframework.NodeInfo{"balanced": balancedNodeInfo}
nodeMap := map[string]*framework.NodeInfo{"balanced": balancedNodeInfo}
balancedOption := expander.Option{NodeGroup: &FakeNodeGroup{"balanced"}, NodeCount: 1}

// Test without any pods, one node info

@ -20,14 +20,14 @@ import (
apiv1 "k8s.io/api/core/v1"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
"k8s.io/autoscaler/cluster-autoscaler/context"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

// NodeGroupListProcessor processes lists of NodeGroups considered in scale-up.
type NodeGroupListProcessor interface {
Process(context *context.AutoscalingContext, nodeGroups []cloudprovider.NodeGroup,
nodeInfos map[string]*schedulerframework.NodeInfo,
unschedulablePods []*apiv1.Pod) ([]cloudprovider.NodeGroup, map[string]*schedulerframework.NodeInfo, error)
nodeInfos map[string]*framework.NodeInfo,
unschedulablePods []*apiv1.Pod) ([]cloudprovider.NodeGroup, map[string]*framework.NodeInfo, error)
CleanUp()
}

@ -41,8 +41,8 @@ func NewDefaultNodeGroupListProcessor() NodeGroupListProcessor {
}

// Process processes lists of unschedulable and scheduled pods before scaling of the cluster.
func (p *NoOpNodeGroupListProcessor) Process(context *context.AutoscalingContext, nodeGroups []cloudprovider.NodeGroup, nodeInfos map[string]*schedulerframework.NodeInfo,
unschedulablePods []*apiv1.Pod) ([]cloudprovider.NodeGroup, map[string]*schedulerframework.NodeInfo, error) {
func (p *NoOpNodeGroupListProcessor) Process(context *context.AutoscalingContext, nodeGroups []cloudprovider.NodeGroup, nodeInfos map[string]*framework.NodeInfo,
unschedulablePods []*apiv1.Pod) ([]cloudprovider.NodeGroup, map[string]*framework.NodeInfo, error) {
return nodeGroups, nodeInfos, nil
}

@ -18,7 +18,7 @@ package nodegroupset

import (
"k8s.io/autoscaler/cluster-autoscaler/config"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

// CreateAwsNodeInfoComparator returns a comparator that checks if two nodes should be considered
@ -42,7 +42,7 @@ func CreateAwsNodeInfoComparator(extraIgnoredLabels []string, ratioOpts config.N
awsIgnoredLabels[k] = true
}

return func(n1, n2 *schedulerframework.NodeInfo) bool {
return func(n1, n2 *framework.NodeInfo) bool {
return IsCloudProviderNodeInfoSimilar(n1, n2, awsIgnoredLabels, ratioOpts)
}
}

@ -18,7 +18,7 @@ package nodegroupset

import (
"k8s.io/autoscaler/cluster-autoscaler/config"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

// AzureNodepoolLegacyLabel is a label specifying which Azure node pool a particular node belongs to.
@ -40,13 +40,13 @@ const aksConsolidatedAdditionalProperties = "kubernetes.azure.com/consolidated-a
// AKS node image version
const aksNodeImageVersion = "kubernetes.azure.com/node-image-version"

func nodesFromSameAzureNodePool(n1, n2 *schedulerframework.NodeInfo) bool {
func nodesFromSameAzureNodePool(n1, n2 *framework.NodeInfo) bool {
n1AzureNodePool := n1.Node().Labels[AzureNodepoolLabel]
n2AzureNodePool := n2.Node().Labels[AzureNodepoolLabel]
return (n1AzureNodePool != "" && n1AzureNodePool == n2AzureNodePool) || nodesFromSameAzureNodePoolLegacy(n1, n2)
}

func nodesFromSameAzureNodePoolLegacy(n1, n2 *schedulerframework.NodeInfo) bool {
func nodesFromSameAzureNodePoolLegacy(n1, n2 *framework.NodeInfo) bool {
n1AzureNodePool := n1.Node().Labels[AzureNodepoolLegacyLabel]
n2AzureNodePool := n2.Node().Labels[AzureNodepoolLegacyLabel]
return n1AzureNodePool != "" && n1AzureNodePool == n2AzureNodePool
@ -74,7 +74,7 @@ func CreateAzureNodeInfoComparator(extraIgnoredLabels []string, ratioOpts config
azureIgnoredLabels[k] = true
}

return func(n1, n2 *schedulerframework.NodeInfo) bool {
return func(n1, n2 *framework.NodeInfo) bool {
if nodesFromSameAzureNodePool(n1, n2) {
return true
}

@ -23,8 +23,8 @@ import (
testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test"
"k8s.io/autoscaler/cluster-autoscaler/config"
"k8s.io/autoscaler/cluster-autoscaler/context"
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"

"github.com/stretchr/testify/assert"
)
@ -110,12 +110,10 @@ func TestFindSimilarNodeGroupsAzureByLabel(t *testing.T) {
provider.AddNode("ng1", n1)
provider.AddNode("ng2", n2)

ni1 := schedulerframework.NewNodeInfo()
ni1.SetNode(n1)
ni2 := schedulerframework.NewNodeInfo()
ni2.SetNode(n2)
ni1 := framework.NewTestNodeInfo(n1)
ni2 := framework.NewTestNodeInfo(n2)

nodeInfosForGroups := map[string]*schedulerframework.NodeInfo{
nodeInfosForGroups := map[string]*framework.NodeInfo{
"ng1": ni1, "ng2": ni2,
}

@ -141,8 +139,7 @@ func TestFindSimilarNodeGroupsAzureByLabel(t *testing.T) {
n3 := BuildTestNode("n1", 1000, 1000)
provider.AddNodeGroup("ng3", 1, 10, 1)
provider.AddNode("ng3", n3)
ni3 := schedulerframework.NewNodeInfo()
ni3.SetNode(n3)
ni3 := framework.NewTestNodeInfo(n3)
nodeInfosForGroups["ng3"] = ni3
ng3, _ := provider.NodeGroupForNode(n3)

@ -21,8 +21,8 @@ import (

"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
"k8s.io/autoscaler/cluster-autoscaler/context"
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"

klog "k8s.io/klog/v2"
)
@ -35,7 +35,7 @@ type BalancingNodeGroupSetProcessor struct {
// FindSimilarNodeGroups returns a list of NodeGroups similar to the given one using the
// BalancingNodeGroupSetProcessor's comparator function.
func (b *BalancingNodeGroupSetProcessor) FindSimilarNodeGroups(context *context.AutoscalingContext, nodeGroup cloudprovider.NodeGroup,
nodeInfosForGroups map[string]*schedulerframework.NodeInfo) ([]cloudprovider.NodeGroup, errors.AutoscalerError) {
nodeInfosForGroups map[string]*framework.NodeInfo) ([]cloudprovider.NodeGroup, errors.AutoscalerError) {

result := []cloudprovider.NodeGroup{}
nodeGroupId := nodeGroup.Id()