DRA: migrate all of CA to use the new internal NodeInfo/PodInfo
The new wrapper types should behave like the direct schedulerframework types for most purposes, so most of the migration is just changing the imported package. Constructors look a bit different, so they have to be adapted - mostly in test code. Accesses to the Pods field have to be changed to a method call. After this, the schedulerframework types are only used in the new wrappers, and in the parts of simulator/ that directly interact with the scheduler framework. The rest of CA codebase operates on the new wrapper types.
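A minimal sketch of the call-site change, using only names that appear in the hunks below (framework.NewNodeInfo, framework.PodInfo, the Pods() accessor, and ToScheduler() where the raw scheduler type is still needed); the helper function itself is hypothetical and only illustrates the pattern:

package example

import (
	apiv1 "k8s.io/api/core/v1"

	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

// buildTemplateNodeInfo mirrors the post-migration pattern used by the cloud
// providers in this commit. Previously the call sites did:
//   nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(id))
//   nodeInfo.SetNode(node)
// The wrapper constructor now takes the Node and its template pods up front
// (the nil second argument follows the call sites in the diff), and SetNode
// disappears.
func buildTemplateNodeInfo(node *apiv1.Node, id string) *framework.NodeInfo {
	nodeInfo := framework.NewNodeInfo(node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(id)})
	for _, podInfo := range nodeInfo.Pods() { // Pods is now a method, not a field
		_ = podInfo.Pod
	}
	// Code that still talks to the scheduler framework converts explicitly,
	// e.g. nodeInfo.ToScheduler().Allocatable in the brightbox test below.
	return nodeInfo
}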
This commit is contained in:
parent a329ac6601
commit 879c6a84a4
@@ -22,8 +22,8 @@ import (
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 	"k8s.io/autoscaler/cluster-autoscaler/config"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	klog "k8s.io/klog/v2"
-	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )
 
 // Asg implements NodeGroup interface.
@@ -179,7 +179,7 @@ func (asg *Asg) Nodes() ([]cloudprovider.Instance, error) {
 }
 
 // TemplateNodeInfo returns a node template for this node group.
-func (asg *Asg) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (asg *Asg) TemplateNodeInfo() (*framework.NodeInfo, error) {
 	template, err := asg.manager.getAsgTemplate(asg.id)
 	if err != nil {
 		return nil, err
@@ -191,8 +191,7 @@ func (asg *Asg) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
 		return nil, err
 	}
 
-	nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(asg.id))
-	nodeInfo.SetNode(node)
+	nodeInfo := framework.NewNodeInfo(node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(asg.id)})
 	return nodeInfo, nil
 }
 
@@ -27,10 +27,10 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 	"k8s.io/autoscaler/cluster-autoscaler/config"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
 	klog "k8s.io/klog/v2"
-	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )
 
 const (
@@ -392,7 +392,7 @@ func (ng *AwsNodeGroup) Nodes() ([]cloudprovider.Instance, error) {
 }
 
 // TemplateNodeInfo returns a node template for this node group.
-func (ng *AwsNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (ng *AwsNodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
 	template, err := ng.awsManager.getAsgTemplate(ng.asg)
 	if err != nil {
 		return nil, err
@@ -403,8 +403,7 @@ func (ng *AwsNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error)
 		return nil, err
 	}
 
-	nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(ng.asg.Name))
-	nodeInfo.SetNode(node)
+	nodeInfo := framework.NewNodeInfo(node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(ng.asg.Name)})
 	return nodeInfo, nil
 }
 

@@ -34,8 +34,8 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 	"k8s.io/autoscaler/cluster-autoscaler/config"
 	"k8s.io/autoscaler/cluster-autoscaler/config/dynamic"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	klog "k8s.io/klog/v2"
-	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )
 
 const (
@@ -477,7 +477,7 @@ func (as *AgentPool) Debug() string {
 }
 
 // TemplateNodeInfo returns a node template for this agent pool.
-func (as *AgentPool) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (as *AgentPool) TemplateNodeInfo() (*framework.NodeInfo, error) {
 	return nil, cloudprovider.ErrNotImplemented
 }
 
@@ -27,8 +27,8 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 	"k8s.io/autoscaler/cluster-autoscaler/config"
 	"k8s.io/autoscaler/cluster-autoscaler/config/dynamic"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	klog "k8s.io/klog/v2"
-	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 	"sigs.k8s.io/cloud-provider-azure/pkg/retry"
 
 	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute"
@@ -627,7 +627,7 @@ func (scaleSet *ScaleSet) Debug() string {
 }
 
 // TemplateNodeInfo returns a node template for this scale set.
-func (scaleSet *ScaleSet) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (scaleSet *ScaleSet) TemplateNodeInfo() (*framework.NodeInfo, error) {
 	template, err := scaleSet.getVMSSFromCache()
 	if err != nil {
 		return nil, err
@@ -641,8 +641,7 @@ func (scaleSet *ScaleSet) TemplateNodeInfo() (*schedulerframework.NodeInfo, erro
 		return nil, err
 	}
 
-	nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(scaleSet.Name))
-	nodeInfo.SetNode(node)
+	nodeInfo := framework.NewNodeInfo(node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(scaleSet.Name)})
 	return nodeInfo, nil
 }
 
@@ -1120,7 +1120,7 @@ func TestTemplateNodeInfo(t *testing.T) {
 		nodeInfo, err := asg.TemplateNodeInfo()
 		assert.NoError(t, err)
 		assert.NotNil(t, nodeInfo)
-		assert.NotEmpty(t, nodeInfo.Pods)
+		assert.NotEmpty(t, nodeInfo.Pods())
 	})
 
 	// Properly testing dynamic SKU list through skewer is not possible,
@@ -1143,7 +1143,7 @@ func TestTemplateNodeInfo(t *testing.T) {
 		assert.Equal(t, *nodeInfo.Node().Status.Capacity.Memory(), *resource.NewQuantity(3*1024*1024, resource.DecimalSI))
 		assert.NoError(t, err)
 		assert.NotNil(t, nodeInfo)
-		assert.NotEmpty(t, nodeInfo.Pods)
+		assert.NotEmpty(t, nodeInfo.Pods())
 	})
 
 	t.Run("Checking static workflow if dynamic fails", func(t *testing.T) {
@@ -1164,7 +1164,7 @@ func TestTemplateNodeInfo(t *testing.T) {
 		assert.Equal(t, *nodeInfo.Node().Status.Capacity.Memory(), *resource.NewQuantity(3*1024*1024, resource.DecimalSI))
 		assert.NoError(t, err)
 		assert.NotNil(t, nodeInfo)
-		assert.NotEmpty(t, nodeInfo.Pods)
+		assert.NotEmpty(t, nodeInfo.Pods())
 	})
 
 	t.Run("Fails to find vmss instance information using static and dynamic workflow, instance not supported", func(t *testing.T) {
@@ -1198,7 +1198,7 @@ func TestTemplateNodeInfo(t *testing.T) {
 		assert.Equal(t, *nodeInfo.Node().Status.Capacity.Memory(), *resource.NewQuantity(3*1024*1024, resource.DecimalSI))
 		assert.NoError(t, err)
 		assert.NotNil(t, nodeInfo)
-		assert.NotEmpty(t, nodeInfo.Pods)
+		assert.NotEmpty(t, nodeInfo.Pods())
 	})
 
 	t.Run("Checking static-only workflow with built-in SKU list", func(t *testing.T) {
@@ -1207,7 +1207,7 @@ func TestTemplateNodeInfo(t *testing.T) {
 		nodeInfo, err := asg.TemplateNodeInfo()
 		assert.NoError(t, err)
 		assert.NotNil(t, nodeInfo)
-		assert.NotEmpty(t, nodeInfo.Pods)
+		assert.NotEmpty(t, nodeInfo.Pods())
 	})
 
 }
@@ -24,7 +24,7 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 	"k8s.io/autoscaler/cluster-autoscaler/config"
 	"k8s.io/autoscaler/cluster-autoscaler/config/dynamic"
-	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 )
 
 // VMsPool is single instance VM pool
@@ -169,7 +169,7 @@ func (agentPool *VMsPool) Nodes() ([]cloudprovider.Instance, error) {
 }
 
 // TemplateNodeInfo is not implemented.
-func (agentPool *VMsPool) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (agentPool *VMsPool) TemplateNodeInfo() (*framework.NodeInfo, error) {
 	return nil, cloudprovider.ErrNotImplemented
 }
 

@@ -27,10 +27,10 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 	"k8s.io/autoscaler/cluster-autoscaler/config"
 	"k8s.io/autoscaler/cluster-autoscaler/config/dynamic"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
 	klog "k8s.io/klog/v2"
-	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )
 
 const (
@@ -365,13 +365,13 @@ func (asg *Asg) Nodes() ([]cloudprovider.Instance, error) {
 	return instances, nil
 }
 
-// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty
+// TemplateNodeInfo returns a framework.NodeInfo structure of an empty
 // (as if just started) node. This will be used in scale-up simulations to
 // predict what would a new node look like if a node group was expanded. The returned
 // NodeInfo is expected to have a fully populated Node object, with all of the labels,
 // capacity and allocatable information as well as all pods that are started on
 // the node by default, using manifest (most likely only kube-proxy). Implementation optional.
-func (asg *Asg) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (asg *Asg) TemplateNodeInfo() (*framework.NodeInfo, error) {
 	template, err := asg.baiducloudManager.getAsgTemplate(asg.Name)
 	if err != nil {
 		return nil, err
@@ -380,8 +380,7 @@ func (asg *Asg) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
 	if err != nil {
 		return nil, err
 	}
-	nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(asg.Name))
-	nodeInfo.SetNode(node)
+	nodeInfo := framework.NewNodeInfo(node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(asg.Name)})
 	return nodeInfo, nil
 }
 
@@ -26,7 +26,7 @@ import (
 
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 	"k8s.io/autoscaler/cluster-autoscaler/config"
-	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 )
 
 const (
@@ -183,14 +183,14 @@ func (n *NodeGroup) Nodes() ([]cloudprovider.Instance, error) {
 	return toInstances(n.nodePool.Nodes), nil
 }
 
-// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty
+// TemplateNodeInfo returns a framework.NodeInfo structure of an empty
 // (as if just started) node. This will be used in scale-up simulations to
 // predict what would a new node look like if a node group was expanded. The
 // returned NodeInfo is expected to have a fully populated Node object, with
 // all of the labels, capacity and allocatable information as well as all pods
 // that are started on the node by default, using manifest (most likely only
 // kube-proxy). Implementation optional.
-func (n *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (n *NodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
 	return nil, cloudprovider.ErrNotImplemented
 }
 

@@ -31,6 +31,7 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/status"
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/k8ssdk"
 	"k8s.io/autoscaler/cluster-autoscaler/config"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	klog "k8s.io/klog/v2"
 	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
 	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
@@ -239,13 +240,13 @@ func (ng *brightboxNodeGroup) Exist() bool {
 	return err == nil
 }
 
-// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty
+// TemplateNodeInfo returns a framework.NodeInfo structure of an empty
 // (as if just started) node. This will be used in scale-up simulations to
 // predict what would a new node look like if a node group was expanded. The returned
 // NodeInfo is expected to have a fully populated Node object, with all of the labels,
 // capacity and allocatable information as well as all pods that are started on
 // the node by default, using manifest (most likely only kube-proxy). Implementation optional.
-func (ng *brightboxNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (ng *brightboxNodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
 	klog.V(4).Info("TemplateNodeInfo")
 	klog.V(4).Infof("Looking for server type %q", ng.serverOptions.ServerType)
 	serverType, err := ng.findServerType()
@@ -268,8 +269,7 @@ func (ng *brightboxNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo,
 			Conditions: cloudprovider.BuildReadyConditions(),
 		},
 	}
-	nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(ng.Id()))
-	nodeInfo.SetNode(&node)
+	nodeInfo := framework.NewNodeInfo(&node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(ng.Id())})
 	return nodeInfo, nil
 }
 
@@ -316,7 +316,7 @@ func TestTemplateNodeInfo(t *testing.T) {
 		Return(fakeServerTypezx45f(), nil)
 	obj, err := makeFakeNodeGroup(t, testclient).TemplateNodeInfo()
 	require.NoError(t, err)
-	assert.Equal(t, fakeResource(), obj.Allocatable)
+	assert.Equal(t, fakeResource(), obj.ToScheduler().Allocatable)
 }
 
 func TestNodeGroupErrors(t *testing.T) {

@@ -23,7 +23,7 @@ import (
 
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 	"k8s.io/autoscaler/cluster-autoscaler/config"
-	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 )
 
 const (
@@ -45,7 +45,7 @@ type cherryManager interface {
 	getNodes(nodegroup string) ([]string, error)
 	getNodeNames(nodegroup string) ([]string, error)
 	deleteNodes(nodegroup string, nodes []NodeRef, updatedNodeCount int) error
-	templateNodeInfo(nodegroup string) (*schedulerframework.NodeInfo, error)
+	templateNodeInfo(nodegroup string) (*framework.NodeInfo, error)
 	NodeGroupForNode(labels map[string]string, nodeId string) (string, error)
 }
 
@@ -42,10 +42,10 @@ import (
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 	"k8s.io/autoscaler/cluster-autoscaler/config"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
 	"k8s.io/autoscaler/cluster-autoscaler/version"
 	klog "k8s.io/klog/v2"
-	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )
 
 const (
@@ -618,7 +618,7 @@ func BuildGenericLabels(nodegroup string, plan *Plan) map[string]string {
 
 // templateNodeInfo returns a NodeInfo with a node template based on the Cherry Servers plan
 // that is used to create nodes in a given node group.
-func (mgr *cherryManagerRest) templateNodeInfo(nodegroup string) (*schedulerframework.NodeInfo, error) {
+func (mgr *cherryManagerRest) templateNodeInfo(nodegroup string) (*framework.NodeInfo, error) {
 	node := apiv1.Node{}
 	nodeName := fmt.Sprintf("%s-asg-%d", nodegroup, rand.Int63())
 	node.ObjectMeta = metav1.ObjectMeta{
@@ -664,8 +664,7 @@ func (mgr *cherryManagerRest) templateNodeInfo(nodegroup string) (*schedulerfram
 	// GenericLabels
 	node.Labels = cloudprovider.JoinStringMaps(node.Labels, BuildGenericLabels(nodegroup, cherryPlan))
 
-	nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(nodegroup))
-	nodeInfo.SetNode(&node)
+	nodeInfo := framework.NewNodeInfo(&node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(nodegroup)})
 	return nodeInfo, nil
 }
 

@@ -25,8 +25,8 @@ import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 	"k8s.io/autoscaler/cluster-autoscaler/config"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	klog "k8s.io/klog/v2"
-	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )
 
 const (
@@ -269,7 +269,7 @@ func (ng *cherryNodeGroup) Nodes() ([]cloudprovider.Instance, error) {
 }
 
 // TemplateNodeInfo returns a node template for this node group.
-func (ng *cherryNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (ng *cherryNodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
 	return ng.cherryManager.templateNodeInfo(ng.id)
 }
 
@@ -28,9 +28,9 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 	autoscaler "k8s.io/autoscaler/cluster-autoscaler/config"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
 	"k8s.io/klog/v2"
-	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )
 
 // NodeGroup implements cloudprovider.NodeGroup interface. NodeGroup contains
@@ -208,15 +208,13 @@ func (n *NodeGroup) Nodes() ([]cloudprovider.Instance, error) {
 // all of the labels, capacity and allocatable information as well as all pods
 // that are started on the node by default, using manifest (most likely only
 // kube-proxy). Implementation optional.
-func (n *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (n *NodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
 	node, err := n.buildNodeFromTemplate(n.Id(), n.nodeTemplate)
 	if err != nil {
 		return nil, fmt.Errorf("failed to build node from template")
 	}
 
-	nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(n.Id()))
-	nodeInfo.SetNode(node)
+	nodeInfo := framework.NewNodeInfo(node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(n.Id())})
 
 	return nodeInfo, nil
 }
 

@@ -540,7 +540,7 @@ func TestNodeGroup_TemplateNodeInfo(t *testing.T) {
 
 	nodeInfo, err := ng.TemplateNodeInfo()
 	assert.NoError(t, err)
-	assert.Equal(t, len(nodeInfo.Pods), 1, "should have one template pod")
+	assert.Equal(t, len(nodeInfo.Pods()), 1, "should have one template pod")
 	assert.Equal(t, nodeInfo.Node().Status.Capacity.Cpu().ToDec().Value(), int64(1000), "should match cpu capacity ")
 	assert.Equal(t, nodeInfo.Node().Status.Capacity.Memory().ToDec().Value(), int64(1073741824), "should match memory capacity")
 	assert.Equal(t, nodeInfo.Node().Status.Capacity.StorageEphemeral().ToDec().Value(), int64(21474836480), "should match epheral storage capacity")
@@ -23,8 +23,8 @@ import (
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/autoscaler/cluster-autoscaler/config"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
-	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )
 
 const (
@@ -214,13 +214,13 @@ type NodeGroup interface {
 	// This list should include also instances that might have not become a kubernetes node yet.
 	Nodes() ([]Instance, error)
 
-	// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty
+	// TemplateNodeInfo returns a framework.NodeInfo structure of an empty
 	// (as if just started) node. This will be used in scale-up simulations to
 	// predict what would a new node look like if a node group was expanded. The returned
 	// NodeInfo is expected to have a fully populated Node object, with all of the labels,
 	// capacity and allocatable information as well as all pods that are started on
 	// the node by default, using manifest (most likely only kube-proxy). Implementation optional.
-	TemplateNodeInfo() (*schedulerframework.NodeInfo, error)
+	TemplateNodeInfo() (*framework.NodeInfo, error)
 
 	// Exist checks if the node group really exists on the cloud provider side. Allows to tell the
 	// theoretical node group from the real one. Implementation required.
@@ -26,8 +26,8 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
 
 	apiv1 "k8s.io/api/core/v1"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	klog "k8s.io/klog/v2"
-	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )
 
 // asg implements NodeGroup interface.
@@ -168,7 +168,7 @@ func (asg *asg) Delete() error {
 }
 
 // TemplateNodeInfo returns a node template for this node group.
-func (asg *asg) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (asg *asg) TemplateNodeInfo() (*framework.NodeInfo, error) {
 	return nil, cloudprovider.ErrNotImplemented
 }
 
@@ -29,7 +29,7 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
-	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 	"k8s.io/autoscaler/cluster-autoscaler/config"
@@ -250,7 +250,7 @@ func (ng *nodegroup) Nodes() ([]cloudprovider.Instance, error) {
 // allocatable information as well as all pods that are started on the
 // node by default, using manifest (most likely only kube-proxy).
 // Implementation optional.
-func (ng *nodegroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (ng *nodegroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
 	if !ng.scalableResource.CanScaleFromZero() {
 		return nil, cloudprovider.ErrNotImplemented
 	}
@@ -278,9 +278,7 @@ func (ng *nodegroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
 		return nil, err
 	}
 
-	nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(ng.scalableResource.Name()))
-	nodeInfo.SetNode(&node)
+	nodeInfo := framework.NewNodeInfo(&node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(ng.scalableResource.Name())})
 
 	return nodeInfo, nil
 }
 
@@ -26,7 +26,7 @@ import (
 
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 	"k8s.io/autoscaler/cluster-autoscaler/config"
-	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 )
 
 const (
@@ -200,14 +200,14 @@ func (n *NodeGroup) Nodes() ([]cloudprovider.Instance, error) {
 	return toInstances(n.nodePool.Nodes), nil
 }
 
-// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty
+// TemplateNodeInfo returns a framework.NodeInfo structure of an empty
 // (as if just started) node. This will be used in scale-up simulations to
 // predict what would a new node look like if a node group was expanded. The
 // returned NodeInfo is expected to have a fully populated Node object, with
 // all of the labels, capacity and allocatable information as well as all pods
 // that are started on the node by default, using manifest (most likely only
 // kube-proxy). Implementation optional.
-func (n *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (n *NodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
 	return nil, cloudprovider.ErrNotImplemented
 }
 

@@ -23,7 +23,7 @@ import (
 
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 	"k8s.io/autoscaler/cluster-autoscaler/config"
-	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 )
 
 const (
@@ -45,7 +45,7 @@ type equinixMetalManager interface {
 	getNodes(nodegroup string) ([]string, error)
 	getNodeNames(nodegroup string) ([]string, error)
 	deleteNodes(nodegroup string, nodes []NodeRef, updatedNodeCount int) error
-	templateNodeInfo(nodegroup string) (*schedulerframework.NodeInfo, error)
+	templateNodeInfo(nodegroup string) (*framework.NodeInfo, error)
 	NodeGroupForNode(labels map[string]string, nodeId string) (string, error)
 }
 
@@ -38,10 +38,10 @@ import (
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 	"k8s.io/autoscaler/cluster-autoscaler/config"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
 	"k8s.io/autoscaler/cluster-autoscaler/version"
 	klog "k8s.io/klog/v2"
-	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )
 
 const (
@@ -689,7 +689,7 @@ func BuildGenericLabels(nodegroup string, instanceType string) map[string]string
 
 // templateNodeInfo returns a NodeInfo with a node template based on the equinix metal plan
 // that is used to create nodes in a given node group.
-func (mgr *equinixMetalManagerRest) templateNodeInfo(nodegroup string) (*schedulerframework.NodeInfo, error) {
+func (mgr *equinixMetalManagerRest) templateNodeInfo(nodegroup string) (*framework.NodeInfo, error) {
 	node := apiv1.Node{}
 	nodeName := fmt.Sprintf("%s-asg-%d", nodegroup, rand.Int63())
 	node.ObjectMeta = metav1.ObjectMeta{
@@ -716,8 +716,7 @@ func (mgr *equinixMetalManagerRest) templateNodeInfo(nodegroup string) (*schedul
 	// GenericLabels
 	node.Labels = cloudprovider.JoinStringMaps(node.Labels, BuildGenericLabels(nodegroup, mgr.getNodePoolDefinition(nodegroup).plan))
 
-	nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(nodegroup))
-	nodeInfo.SetNode(&node)
+	nodeInfo := framework.NewNodeInfo(&node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(nodegroup)})
 	return nodeInfo, nil
 }
 

@@ -24,8 +24,8 @@ import (
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 	"k8s.io/autoscaler/cluster-autoscaler/config"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	klog "k8s.io/klog/v2"
-	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )
 
 // equinixMetalNodeGroup implements NodeGroup interface from cluster-autoscaler/cloudprovider.
@@ -260,7 +260,7 @@ func (ng *equinixMetalNodeGroup) Nodes() ([]cloudprovider.Instance, error) {
 }
 
 // TemplateNodeInfo returns a node template for this node group.
-func (ng *equinixMetalNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (ng *equinixMetalNodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
 	return ng.equinixMetalManager.templateNodeInfo(ng.id)
 }
 
@@ -26,7 +26,7 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 	egoscale "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/exoscale/internal/github.com/exoscale/egoscale/v2"
 	"k8s.io/autoscaler/cluster-autoscaler/config"
-	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 )
 
 // instancePoolNodeGroup implements cloudprovider.NodeGroup interface for Exoscale Instance Pools.
@@ -170,13 +170,13 @@ func (n *instancePoolNodeGroup) Nodes() ([]cloudprovider.Instance, error) {
 	return nodes, nil
 }
 
-// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty
+// TemplateNodeInfo returns a framework.NodeInfo structure of an empty
 // (as if just started) node. This will be used in scale-up simulations to
 // predict what would a new node look like if a node group was expanded. The returned
 // NodeInfo is expected to have a fully populated Node object, with all of the labels,
 // capacity and allocatable information as well as all pods that are started on
 // the node by default, using manifest (most likely only kube-proxy). Implementation optional.
-func (n *instancePoolNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (n *instancePoolNodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
 	return nil, cloudprovider.ErrNotImplemented
 }
 

@@ -25,7 +25,7 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 	egoscale "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/exoscale/internal/github.com/exoscale/egoscale/v2"
 	"k8s.io/autoscaler/cluster-autoscaler/config"
-	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 )
 
 const (
@@ -187,13 +187,13 @@ func (n *sksNodepoolNodeGroup) Nodes() ([]cloudprovider.Instance, error) {
 	return nodes, nil
 }
 
-// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty
+// TemplateNodeInfo returns a framework.NodeInfo structure of an empty
 // (as if just started) node. This will be used in scale-up simulations to
 // predict what would a new node look like if a node group was expanded. The returned
 // NodeInfo is expected to have a fully populated Node object, with all of the labels,
 // capacity and allocatable information as well as all pods that are started on
 // the node by default, using manifest (most likely only kube-proxy). Implementation optional.
-func (n *sksNodepoolNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (n *sksNodepoolNodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
 	return nil, cloudprovider.ErrNotImplemented
 }
 
@@ -28,8 +28,8 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/externalgrpc/protos"
 	"k8s.io/autoscaler/cluster-autoscaler/config"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	klog "k8s.io/klog/v2"
-	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )
 
 // NodeGroup implements cloudprovider.NodeGroup interface. NodeGroup contains
@@ -44,7 +44,7 @@ type NodeGroup struct {
 	grpcTimeout time.Duration
 
 	mutex sync.Mutex
-	nodeInfo **schedulerframework.NodeInfo // used to cache NodeGroupTemplateNodeInfo() grpc calls
+	nodeInfo **framework.NodeInfo // used to cache NodeGroupTemplateNodeInfo() grpc calls
 }
 
 // MaxSize returns maximum size of the node group.
@@ -188,7 +188,7 @@ func (n *NodeGroup) Nodes() ([]cloudprovider.Instance, error) {
 	return instances, nil
 }
 
-// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty
+// TemplateNodeInfo returns a framework.NodeInfo structure of an empty
 // (as if just started) node. This will be used in scale-up simulations to
 // predict what would a new node look like if a node group was expanded. The
 // returned NodeInfo is expected to have a fully populated Node object, with
@@ -200,7 +200,7 @@ func (n *NodeGroup) Nodes() ([]cloudprovider.Instance, error) {
 // complex approach and does not cover all the scenarios. For the sake of simplicity,
 // the `nodeInfo` is defined as a Kubernetes `k8s.io.api.core.v1.Node` type
 // where the system could still extract certain info about the node.
-func (n *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (n *NodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
 	n.mutex.Lock()
 	defer n.mutex.Unlock()
 
@@ -224,11 +224,10 @@ func (n *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
 	}
 	pbNodeInfo := res.GetNodeInfo()
 	if pbNodeInfo == nil {
-		n.nodeInfo = new(*schedulerframework.NodeInfo)
+		n.nodeInfo = new(*framework.NodeInfo)
 		return nil, nil
 	}
-	nodeInfo := schedulerframework.NewNodeInfo()
-	nodeInfo.SetNode(pbNodeInfo)
+	nodeInfo := framework.NewNodeInfo(pbNodeInfo, nil)
 	n.nodeInfo = &nodeInfo
 	return nodeInfo, nil
 }
@@ -26,10 +26,10 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 	"k8s.io/autoscaler/cluster-autoscaler/config"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
 	klog "k8s.io/klog/v2"
-	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )
 
 const (
@@ -361,13 +361,12 @@ func (mig *gceMig) GetOptions(defaults config.NodeGroupAutoscalingOptions) (*con
 }
 
 // TemplateNodeInfo returns a node template for this node group.
-func (mig *gceMig) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (mig *gceMig) TemplateNodeInfo() (*framework.NodeInfo, error) {
 	node, err := mig.gceManager.GetMigTemplateNode(mig)
 	if err != nil {
 		return nil, err
 	}
-	nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(mig.Id()))
-	nodeInfo.SetNode(node)
+	nodeInfo := framework.NewNodeInfo(node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(mig.Id())})
 	return nodeInfo, nil
 }
 
@@ -31,8 +31,8 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/hetzner/hcloud-go/hcloud"
 	"k8s.io/autoscaler/cluster-autoscaler/config"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	"k8s.io/klog/v2"
-	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )
 
 // hetznerNodeGroup implements cloudprovider.NodeGroup interface. hetznerNodeGroup contains
@@ -251,14 +251,14 @@ func (n *hetznerNodeGroup) Nodes() ([]cloudprovider.Instance, error) {
 	return instances, nil
 }
 
-// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty
+// TemplateNodeInfo returns a framework.NodeInfo structure of an empty
 // (as if just started) node. This will be used in scale-up simulations to
 // predict what would a new node look like if a node group was expanded. The
 // returned NodeInfo is expected to have a fully populated Node object, with
 // all of the labels, capacity and allocatable information as well as all pods
 // that are started on the node by default, using manifest (most likely only
 // kube-proxy). Implementation optional.
-func (n *hetznerNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (n *hetznerNodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
 	resourceList, err := getMachineTypeResourceList(n.manager, n.instanceType)
 	if err != nil {
 		return nil, fmt.Errorf("failed to create resource list for node group %s error: %v", n.id, err)
@@ -297,9 +297,7 @@ func (n *hetznerNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, err
 		}
 	}
 
-	nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(n.id))
-	nodeInfo.SetNode(&node)
+	nodeInfo := framework.NewNodeInfo(&node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(n.id)})
 
 	return nodeInfo, nil
 }
 
@@ -26,10 +26,10 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 	huaweicloudsdkasmodel "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/huaweicloud/huaweicloud-sdk-go-v3/services/as/v1/model"
 	"k8s.io/autoscaler/cluster-autoscaler/config"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/tools/clientcmd"
 	"k8s.io/klog/v2"
-	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )
 
 // AutoScalingGroup represents a HuaweiCloud's 'Auto Scaling Group' which also can be treated as a node group.
@@ -180,13 +180,13 @@ func (asg *AutoScalingGroup) Nodes() ([]cloudprovider.Instance, error) {
 	return instances, nil
 }
 
-// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty
+// TemplateNodeInfo returns a framework.NodeInfo structure of an empty
 // (as if just started) node. This will be used in scale-up simulations to
 // predict what would a new node look like if a node group was expanded. The returned
 // NodeInfo is expected to have a fully populated Node object, with all of the labels,
 // capacity and allocatable information as well as all pods that are started on
 // the node by default, using manifest (most likely only kube-proxy). Implementation optional.
-func (asg *AutoScalingGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (asg *AutoScalingGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
 	template, err := asg.cloudServiceManager.getAsgTemplate(asg.groupID)
 	if err != nil {
 		return nil, err
@@ -195,8 +195,7 @@ func (asg *AutoScalingGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, e
 	if err != nil {
 		return nil, err
 	}
-	nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(asg.groupName))
-	nodeInfo.SetNode(node)
+	nodeInfo := framework.NewNodeInfo(node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(asg.groupName)})
 	return nodeInfo, nil
 }
 
@ -24,10 +24,10 @@ import (
|
||||||
"k8s.io/apimachinery/pkg/api/resource"
|
"k8s.io/apimachinery/pkg/api/resource"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||||
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||||
caerrors "k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
caerrors "k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
||||||
"k8s.io/klog/v2"
|
"k8s.io/klog/v2"
|
||||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
|
@ -144,14 +144,14 @@ func (n *nodePool) Nodes() ([]cloudprovider.Instance, error) {
|
||||||
return n.manager.GetInstancesForNodeGroup(n)
|
return n.manager.GetInstancesForNodeGroup(n)
|
||||||
}
|
}
|
||||||
|
|
||||||
// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty
|
// TemplateNodeInfo returns a framework.NodeInfo structure of an empty
|
||||||
// (as if just started) node. This will be used in scale-up simulations to
|
// (as if just started) node. This will be used in scale-up simulations to
|
||||||
// predict what would a new node look like if a node group was expanded. The
|
// predict what would a new node look like if a node group was expanded. The
|
||||||
// returned NodeInfo is expected to have a fully populated Node object, with
|
// returned NodeInfo is expected to have a fully populated Node object, with
|
||||||
// all of the labels, capacity and allocatable information as well as all pods
|
// all of the labels, capacity and allocatable information as well as all pods
|
||||||
// that are started on the node by default, using manifest (most likely only
|
// that are started on the node by default, using manifest (most likely only
|
||||||
// kube-proxy). Implementation optional.
|
// kube-proxy). Implementation optional.
|
||||||
func (n *nodePool) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
|
func (n *nodePool) TemplateNodeInfo() (*framework.NodeInfo, error) {
|
||||||
return nil, cloudprovider.ErrNotImplemented
|
return nil, cloudprovider.ErrNotImplemented
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -19,16 +19,17 @@ package kamatera
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
apiv1 "k8s.io/api/core/v1"
|
apiv1 "k8s.io/api/core/v1"
|
||||||
"k8s.io/apimachinery/pkg/api/resource"
|
"k8s.io/apimachinery/pkg/api/resource"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||||
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||||
"k8s.io/client-go/kubernetes"
|
"k8s.io/client-go/kubernetes"
|
||||||
"k8s.io/klog/v2"
|
"k8s.io/klog/v2"
|
||||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// NodeGroup implements cloudprovider.NodeGroup interface. NodeGroup contains
|
// NodeGroup implements cloudprovider.NodeGroup interface. NodeGroup contains
|
||||||
|
|
@ -147,13 +148,13 @@ func (n *NodeGroup) Nodes() ([]cloudprovider.Instance, error) {
|
||||||
return instances, nil
|
return instances, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty
|
// TemplateNodeInfo returns a framework.NodeInfo structure of an empty
|
||||||
// (as if just started) node. This will be used in scale-up simulations to
|
// (as if just started) node. This will be used in scale-up simulations to
|
||||||
// predict what would a new node look like if a node group was expanded. The returned
|
// predict what would a new node look like if a node group was expanded. The returned
|
||||||
// NodeInfo is expected to have a fully populated Node object, with all of the labels,
|
// NodeInfo is expected to have a fully populated Node object, with all of the labels,
|
||||||
// capacity and allocatable information as well as all pods that are started on
|
// capacity and allocatable information as well as all pods that are started on
|
||||||
// the node by default, using manifest (most likely only kube-proxy). Implementation optional.
|
// the node by default, using manifest (most likely only kube-proxy). Implementation optional.
|
||||||
func (n *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
|
func (n *NodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
|
||||||
resourceList, err := n.getResourceList()
|
resourceList, err := n.getResourceList()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to create resource list for node group %s error: %v", n.id, err)
|
return nil, fmt.Errorf("failed to create resource list for node group %s error: %v", n.id, err)
|
||||||
|
|
@ -171,9 +172,7 @@ func (n *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
|
||||||
node.Status.Allocatable = node.Status.Capacity
|
node.Status.Allocatable = node.Status.Capacity
|
||||||
node.Status.Conditions = cloudprovider.BuildReadyConditions()
|
node.Status.Conditions = cloudprovider.BuildReadyConditions()
|
||||||
|
|
||||||
nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(n.id))
|
nodeInfo := framework.NewNodeInfo(&node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(n.id)})
|
||||||
nodeInfo.SetNode(&node)
|
|
||||||
|
|
||||||
return nodeInfo, nil
|
return nodeInfo, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -32,6 +32,7 @@ import (
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/config/dynamic"
|
"k8s.io/autoscaler/cluster-autoscaler/config/dynamic"
|
||||||
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
||||||
"k8s.io/client-go/informers"
|
"k8s.io/client-go/informers"
|
||||||
|
|
@ -39,7 +40,6 @@ import (
|
||||||
"k8s.io/client-go/rest"
|
"k8s.io/client-go/rest"
|
||||||
"k8s.io/client-go/tools/clientcmd"
|
"k8s.io/client-go/tools/clientcmd"
|
||||||
"k8s.io/kubernetes/pkg/kubemark"
|
"k8s.io/kubernetes/pkg/kubemark"
|
||||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
|
||||||
|
|
||||||
klog "k8s.io/klog/v2"
|
klog "k8s.io/klog/v2"
|
||||||
)
|
)
|
||||||
|
|
@ -290,7 +290,7 @@ func (nodeGroup *NodeGroup) DecreaseTargetSize(delta int) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
// TemplateNodeInfo returns a node template for this node group.
|
// TemplateNodeInfo returns a node template for this node group.
|
||||||
func (nodeGroup *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
|
func (nodeGroup *NodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
|
||||||
return nil, cloudprovider.ErrNotImplemented
|
return nil, cloudprovider.ErrNotImplemented
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -25,8 +25,8 @@ import (
|
||||||
"k8s.io/apimachinery/pkg/util/rand"
|
"k8s.io/apimachinery/pkg/util/rand"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||||
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||||
klog "k8s.io/klog/v2"
|
klog "k8s.io/klog/v2"
|
||||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
|
@ -186,10 +186,8 @@ func (nodeGroup *NodeGroup) Nodes() ([]cloudprovider.Instance, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// TemplateNodeInfo returns a node template for this node group.
|
// TemplateNodeInfo returns a node template for this node group.
|
||||||
func (nodeGroup *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
|
func (nodeGroup *NodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
|
||||||
nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(nodeGroup.Id()))
|
nodeInfo := framework.NewNodeInfo(nodeGroup.nodeTemplate, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(nodeGroup.Id())})
|
||||||
nodeInfo.SetNode(nodeGroup.nodeTemplate)
|
|
||||||
|
|
||||||
return nodeInfo, nil
|
return nodeInfo, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -305,8 +305,8 @@ func TestTemplateNodeInfo(t *testing.T) {
|
||||||
ti, err := ng.TemplateNodeInfo()
|
ti, err := ng.TemplateNodeInfo()
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
assert.NotNil(t, ti)
|
assert.NotNil(t, ti)
|
||||||
assert.Len(t, ti.Pods, 1)
|
assert.Len(t, ti.Pods(), 1)
|
||||||
assert.Contains(t, ti.Pods[0].Pod.Name, fmt.Sprintf("kube-proxy-%s", ng.name))
|
assert.Contains(t, ti.Pods()[0].Pod.Name, fmt.Sprintf("kube-proxy-%s", ng.name))
|
||||||
assert.Equal(t, ng.nodeTemplate, ti.Node())
|
assert.Equal(t, ng.nodeTemplate, ti.Node())
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
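In test code the change shows up as a method call: Pods is no longer an exported field but an accessor returning the wrapper's pod list, with each entry still exposing the underlying pod via .Pod. A short sketch of the adapted assertions; the helper name and wiring are illustrative, the accessors come from the hunk above.

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

// assertSingleTemplatePod checks the template's default pod through the new
// Pods() accessor, which replaces the old Pods field.
func assertSingleTemplatePod(t *testing.T, nodeInfo *framework.NodeInfo, wantName string) {
	pods := nodeInfo.Pods()
	assert.Len(t, pods, 1)
	assert.Contains(t, pods[0].Pod.Name, wantName)
}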
@ -26,8 +26,8 @@ import (
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/linode/linodego"
|
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/linode/linodego"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||||
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||||
klog "k8s.io/klog/v2"
|
klog "k8s.io/klog/v2"
|
||||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
|
@ -186,14 +186,14 @@ func (n *NodeGroup) Nodes() ([]cloudprovider.Instance, error) {
|
||||||
return nodes, nil
|
return nodes, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty
|
// TemplateNodeInfo returns a framework.NodeInfo structure of an empty
|
||||||
// (as if just started) node. This will be used in scale-up simulations to
|
// (as if just started) node. This will be used in scale-up simulations to
|
||||||
// predict what would a new node look like if a node group was expanded. The
|
// predict what would a new node look like if a node group was expanded. The
|
||||||
// returned NodeInfo is expected to have a fully populated Node object, with
|
// returned NodeInfo is expected to have a fully populated Node object, with
|
||||||
// all of the labels, capacity and allocatable information as well as all pods
|
// all of the labels, capacity and allocatable information as well as all pods
|
||||||
// that are started on the node by default, using manifest (most likely only
|
// that are started on the node by default, using manifest (most likely only
|
||||||
// kube-proxy). Implementation optional.
|
// kube-proxy). Implementation optional.
|
||||||
func (n *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
|
func (n *NodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
|
||||||
return nil, cloudprovider.ErrNotImplemented
|
return nil, cloudprovider.ErrNotImplemented
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -24,8 +24,8 @@ import (
|
||||||
apiv1 "k8s.io/api/core/v1"
|
apiv1 "k8s.io/api/core/v1"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||||
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||||
klog "k8s.io/klog/v2"
|
klog "k8s.io/klog/v2"
|
||||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// How long to sleep after deleting nodes, to ensure that multiple requests arrive in order.
|
// How long to sleep after deleting nodes, to ensure that multiple requests arrive in order.
|
||||||
|
|
@ -206,7 +206,7 @@ func (ng *magnumNodeGroup) Nodes() ([]cloudprovider.Instance, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// TemplateNodeInfo returns a node template for this node group.
|
// TemplateNodeInfo returns a node template for this node group.
|
||||||
func (ng *magnumNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
|
func (ng *magnumNodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
|
||||||
return nil, cloudprovider.ErrNotImplemented
|
return nil, cloudprovider.ErrNotImplemented
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -20,7 +20,7 @@ import (
|
||||||
cloudprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
cloudprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||||
config "k8s.io/autoscaler/cluster-autoscaler/config"
|
config "k8s.io/autoscaler/cluster-autoscaler/config"
|
||||||
|
|
||||||
framework "k8s.io/kubernetes/pkg/scheduler/framework"
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||||
|
|
||||||
mock "github.com/stretchr/testify/mock"
|
mock "github.com/stretchr/testify/mock"
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -6,15 +6,16 @@ package instancepools
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
apiv1 "k8s.io/api/core/v1"
|
apiv1 "k8s.io/api/core/v1"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/oci/common"
|
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/oci/common"
|
||||||
ocicommon "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/oci/common"
|
ocicommon "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/oci/common"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||||
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||||
"k8s.io/client-go/kubernetes"
|
"k8s.io/client-go/kubernetes"
|
||||||
"k8s.io/klog/v2"
|
"k8s.io/klog/v2"
|
||||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// InstancePoolNodeGroup implements the NodeGroup interface using OCI instance pools.
|
// InstancePoolNodeGroup implements the NodeGroup interface using OCI instance pools.
|
||||||
|
|
@ -172,23 +173,23 @@ func (ip *InstancePoolNodeGroup) Nodes() ([]cloudprovider.Instance, error) {
|
||||||
return ip.manager.GetInstancePoolNodes(*ip)
|
return ip.manager.GetInstancePoolNodes(*ip)
|
||||||
}
|
}
|
||||||
|
|
||||||
// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty
|
// TemplateNodeInfo returns a framework.NodeInfo structure of an empty
|
||||||
// (as if just started) node. This will be used in scale-up simulations to
|
// (as if just started) node. This will be used in scale-up simulations to
|
||||||
// predict what would a new node look like if an instance-pool was expanded. The returned
|
// predict what would a new node look like if an instance-pool was expanded. The returned
|
||||||
// NodeInfo is expected to have a fully populated Node object, with all of the labels,
|
// NodeInfo is expected to have a fully populated Node object, with all of the labels,
|
||||||
// capacity and allocatable information as well as all pods that are started on
|
// capacity and allocatable information as well as all pods that are started on
|
||||||
// the node by default, using manifest (most likely only kube-proxy). Implementation optional.
|
// the node by default, using manifest (most likely only kube-proxy). Implementation optional.
|
||||||
func (ip *InstancePoolNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
|
func (ip *InstancePoolNodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
|
||||||
node, err := ip.manager.GetInstancePoolTemplateNode(*ip)
|
node, err := ip.manager.GetInstancePoolTemplateNode(*ip)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "unable to build node info template")
|
return nil, errors.Wrap(err, "unable to build node info template")
|
||||||
}
|
}
|
||||||
|
|
||||||
nodeInfo := schedulerframework.NewNodeInfo(
|
nodeInfo := framework.NewNodeInfo(
|
||||||
cloudprovider.BuildKubeProxy(ip.id),
|
node, nil,
|
||||||
ocicommon.BuildCSINodePod(),
|
&framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(ip.id)},
|
||||||
|
&framework.PodInfo{Pod: ocicommon.BuildCSINodePod()},
|
||||||
)
|
)
|
||||||
nodeInfo.SetNode(node)
|
|
||||||
return nodeInfo, nil
|
return nodeInfo, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
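The OCI groups above pass several default pods to the constructor, which suggests the pod arguments are variadic; under that assumption a provider can also expand a prepared slice. A hedged sketch (the slice-building helper is hypothetical, not from the diff):

package example

import (
	apiv1 "k8s.io/api/core/v1"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

// templateWithDefaultPods wraps each default pod in a framework.PodInfo and
// expands the slice into the variadic tail of NewNodeInfo.
func templateWithDefaultPods(node *apiv1.Node, defaultPods []*apiv1.Pod) *framework.NodeInfo {
	podInfos := make([]*framework.PodInfo, 0, len(defaultPods))
	for _, pod := range defaultPods {
		podInfos = append(podInfos, &framework.PodInfo{Pod: pod})
	}
	return framework.NewNodeInfo(node, nil, podInfos...)
}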
@ -18,9 +18,9 @@ import (
|
||||||
"k8s.io/apimachinery/pkg/util/wait"
|
"k8s.io/apimachinery/pkg/util/wait"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||||
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||||
"k8s.io/client-go/kubernetes"
|
"k8s.io/client-go/kubernetes"
|
||||||
klog "k8s.io/klog/v2"
|
klog "k8s.io/klog/v2"
|
||||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
|
||||||
|
|
||||||
ocicommon "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/oci/common"
|
ocicommon "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/oci/common"
|
||||||
)
|
)
|
||||||
|
|
@ -273,24 +273,24 @@ func (np *nodePool) Nodes() ([]cloudprovider.Instance, error) {
|
||||||
return np.manager.GetNodePoolNodes(np)
|
return np.manager.GetNodePoolNodes(np)
|
||||||
}
|
}
|
||||||
|
|
||||||
// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty
|
// TemplateNodeInfo returns a framework.NodeInfo structure of an empty
|
||||||
// (as if just started) node. This will be used in scale-up simulations to
|
// (as if just started) node. This will be used in scale-up simulations to
|
||||||
// predict what would a new node look like if a node group was expanded. The returned
|
// predict what would a new node look like if a node group was expanded. The returned
|
||||||
// NodeInfo is expected to have a fully populated Node object, with all of the labels,
|
// NodeInfo is expected to have a fully populated Node object, with all of the labels,
|
||||||
// capacity and allocatable information as well as all pods that are started on
|
// capacity and allocatable information as well as all pods that are started on
|
||||||
// the node by default, using manifest (most likely only kube-proxy). Implementation optional.
|
// the node by default, using manifest (most likely only kube-proxy). Implementation optional.
|
||||||
func (np *nodePool) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
|
func (np *nodePool) TemplateNodeInfo() (*framework.NodeInfo, error) {
|
||||||
node, err := np.manager.GetNodePoolTemplateNode(np)
|
node, err := np.manager.GetNodePoolTemplateNode(np)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "unable to build node pool template")
|
return nil, errors.Wrap(err, "unable to build node pool template")
|
||||||
}
|
}
|
||||||
|
|
||||||
nodeInfo := schedulerframework.NewNodeInfo(
|
nodeInfo := framework.NewNodeInfo(
|
||||||
cloudprovider.BuildKubeProxy(np.id),
|
node, nil,
|
||||||
ocicommon.BuildFlannelPod(),
|
&framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(np.id)},
|
||||||
ocicommon.BuildProxymuxClientPod(),
|
&framework.PodInfo{Pod: ocicommon.BuildFlannelPod()},
|
||||||
|
&framework.PodInfo{Pod: ocicommon.BuildProxymuxClientPod()},
|
||||||
)
|
)
|
||||||
nodeInfo.SetNode(node)
|
|
||||||
return nodeInfo, nil
|
return nodeInfo, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -28,8 +28,8 @@ import (
|
||||||
apiv1 "k8s.io/api/core/v1"
|
apiv1 "k8s.io/api/core/v1"
|
||||||
"k8s.io/apimachinery/pkg/api/resource"
|
"k8s.io/apimachinery/pkg/api/resource"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||||
"k8s.io/klog/v2"
|
"k8s.io/klog/v2"
|
||||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
|
||||||
|
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/ovhcloud/sdk"
|
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/ovhcloud/sdk"
|
||||||
|
|
@ -215,7 +215,7 @@ func (ng *NodeGroup) Nodes() ([]cloudprovider.Instance, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// TemplateNodeInfo returns a node template for this node group.
|
// TemplateNodeInfo returns a node template for this node group.
|
||||||
func (ng *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
|
func (ng *NodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
|
||||||
// Forge node template in a node group
|
// Forge node template in a node group
|
||||||
node := &apiv1.Node{
|
node := &apiv1.Node{
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
|
@ -252,9 +252,7 @@ func (ng *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
|
||||||
node.Status.Allocatable = node.Status.Capacity
|
node.Status.Allocatable = node.Status.Capacity
|
||||||
|
|
||||||
// Setup node info template
|
// Setup node info template
|
||||||
nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(ng.Id()))
|
nodeInfo := framework.NewNodeInfo(node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(ng.Id())})
|
||||||
nodeInfo.SetNode(node)
|
|
||||||
|
|
||||||
return nodeInfo, nil
|
return nodeInfo, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -31,8 +31,8 @@ import (
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||||
provisioningv1 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/rancher/provisioning.cattle.io/v1"
|
provisioningv1 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/rancher/provisioning.cattle.io/v1"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||||
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||||
klog "k8s.io/klog/v2"
|
klog "k8s.io/klog/v2"
|
||||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
|
||||||
"k8s.io/utils/pointer"
|
"k8s.io/utils/pointer"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
@ -196,7 +196,7 @@ func (ng *nodeGroup) DecreaseTargetSize(delta int) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
// TemplateNodeInfo returns a node template for this node group.
|
// TemplateNodeInfo returns a node template for this node group.
|
||||||
func (ng *nodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
|
func (ng *nodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
|
||||||
node := &corev1.Node{
|
node := &corev1.Node{
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
Name: fmt.Sprintf("%s-%s-%d", ng.provider.config.ClusterName, ng.Id(), rand.Int63()),
|
Name: fmt.Sprintf("%s-%s-%d", ng.provider.config.ClusterName, ng.Id(), rand.Int63()),
|
||||||
|
|
@ -216,9 +216,7 @@ func (ng *nodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
|
||||||
node.Status.Allocatable = node.Status.Capacity
|
node.Status.Allocatable = node.Status.Capacity
|
||||||
|
|
||||||
// Setup node info template
|
// Setup node info template
|
||||||
nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(ng.Id()))
|
nodeInfo := framework.NewNodeInfo(node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(ng.Id())})
|
||||||
nodeInfo.SetNode(node)
|
|
||||||
|
|
||||||
return nodeInfo, nil
|
return nodeInfo, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -396,19 +396,19 @@ func TestTemplateNodeInfo(t *testing.T) {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if nodeInfo.Allocatable.MilliCPU != ng.resources.Cpu().MilliValue() {
|
if nodeInfo.ToScheduler().Allocatable.MilliCPU != ng.resources.Cpu().MilliValue() {
|
||||||
t.Fatalf("expected nodeInfo to have %v MilliCPU, got %v",
|
t.Fatalf("expected nodeInfo to have %v MilliCPU, got %v",
|
||||||
ng.resources.Cpu().MilliValue(), nodeInfo.Allocatable.MilliCPU)
|
ng.resources.Cpu().MilliValue(), nodeInfo.ToScheduler().Allocatable.MilliCPU)
|
||||||
}
|
}
|
||||||
|
|
||||||
if nodeInfo.Allocatable.Memory != ng.resources.Memory().Value() {
|
if nodeInfo.ToScheduler().Allocatable.Memory != ng.resources.Memory().Value() {
|
||||||
t.Fatalf("expected nodeInfo to have %v Memory, got %v",
|
t.Fatalf("expected nodeInfo to have %v Memory, got %v",
|
||||||
ng.resources.Memory().Value(), nodeInfo.Allocatable.Memory)
|
ng.resources.Memory().Value(), nodeInfo.ToScheduler().Allocatable.Memory)
|
||||||
}
|
}
|
||||||
|
|
||||||
if nodeInfo.Allocatable.EphemeralStorage != ng.resources.StorageEphemeral().Value() {
|
if nodeInfo.ToScheduler().Allocatable.EphemeralStorage != ng.resources.StorageEphemeral().Value() {
|
||||||
t.Fatalf("expected nodeInfo to have %v ephemeral storage, got %v",
|
t.Fatalf("expected nodeInfo to have %v ephemeral storage, got %v",
|
||||||
ng.resources.StorageEphemeral().Value(), nodeInfo.Allocatable.EphemeralStorage)
|
ng.resources.StorageEphemeral().Value(), nodeInfo.ToScheduler().Allocatable.EphemeralStorage)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
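Scheduler-level fields such as Allocatable are reached through ToScheduler() in the updated test above. A minimal sketch of that access pattern; the function name is illustrative.

package example

import "k8s.io/autoscaler/cluster-autoscaler/simulator/framework"

// allocatableMilliCPU reads a scheduler-side resource field through the
// wrapper: ToScheduler() returns the underlying scheduler NodeInfo, so the
// existing Allocatable checks only gain one extra hop.
func allocatableMilliCPU(nodeInfo *framework.NodeInfo) int64 {
	return nodeInfo.ToScheduler().Allocatable.MilliCPU
}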
@ -20,16 +20,17 @@ import (
|
||||||
"context"
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
apiv1 "k8s.io/api/core/v1"
|
apiv1 "k8s.io/api/core/v1"
|
||||||
"k8s.io/apimachinery/pkg/api/resource"
|
"k8s.io/apimachinery/pkg/api/resource"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/scaleway/scalewaygo"
|
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/scaleway/scalewaygo"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||||
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
||||||
"k8s.io/klog/v2"
|
"k8s.io/klog/v2"
|
||||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
|
||||||
"strings"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// NodeGroup implements cloudprovider.NodeGroup interface.
|
// NodeGroup implements cloudprovider.NodeGroup interface.
|
||||||
|
|
@ -198,13 +199,13 @@ func (ng *NodeGroup) Nodes() ([]cloudprovider.Instance, error) {
|
||||||
return nodes, nil
|
return nodes, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty
|
// TemplateNodeInfo returns a framework.NodeInfo structure of an empty
|
||||||
// (as if just started) node. This will be used in scale-up simulations to
|
// (as if just started) node. This will be used in scale-up simulations to
|
||||||
// predict what would a new node look like if a node group was expanded. The returned
|
// predict what would a new node look like if a node group was expanded. The returned
|
||||||
// NodeInfo is expected to have a fully populated Node object, with all of the labels,
|
// NodeInfo is expected to have a fully populated Node object, with all of the labels,
|
||||||
// capacity and allocatable information as well as all pods that are started on
|
// capacity and allocatable information as well as all pods that are started on
|
||||||
// the node by default, using manifest (most likely only kube-proxy).
|
// the node by default, using manifest (most likely only kube-proxy).
|
||||||
func (ng *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
|
func (ng *NodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
|
||||||
klog.V(4).Infof("TemplateNodeInfo,PoolID=%s", ng.p.ID)
|
klog.V(4).Infof("TemplateNodeInfo,PoolID=%s", ng.p.ID)
|
||||||
node := apiv1.Node{
|
node := apiv1.Node{
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
|
@ -235,8 +236,7 @@ func (ng *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
|
||||||
node.Status.Conditions = cloudprovider.BuildReadyConditions()
|
node.Status.Conditions = cloudprovider.BuildReadyConditions()
|
||||||
node.Spec.Taints = parseTaints(ng.specs.Taints)
|
node.Spec.Taints = parseTaints(ng.specs.Taints)
|
||||||
|
|
||||||
nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(ng.p.Name))
|
nodeInfo := framework.NewNodeInfo(&node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(ng.p.Name)})
|
||||||
nodeInfo.SetNode(&node)
|
|
||||||
return nodeInfo, nil
|
return nodeInfo, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -26,7 +26,7 @@ import (
|
||||||
|
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TcRef contains a reference to some entity in Tencentcloud/TKE world.
|
// TcRef contains a reference to some entity in Tencentcloud/TKE world.
|
||||||
|
|
@ -247,15 +247,14 @@ func (asg *tcAsg) Nodes() ([]cloudprovider.Instance, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// TemplateNodeInfo returns a node template for this node group.
|
// TemplateNodeInfo returns a node template for this node group.
|
||||||
func (asg *tcAsg) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
|
func (asg *tcAsg) TemplateNodeInfo() (*framework.NodeInfo, error) {
|
||||||
node, err := asg.tencentcloudManager.GetAsgTemplateNode(asg)
|
node, err := asg.tencentcloudManager.GetAsgTemplateNode(asg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
klog.V(4).Infof("Generate tencentcloud template: labels=%v taints=%v allocatable=%v", node.Labels, node.Spec.Taints, node.Status.Allocatable)
|
klog.V(4).Infof("Generate tencentcloud template: labels=%v taints=%v allocatable=%v", node.Labels, node.Spec.Taints, node.Status.Allocatable)
|
||||||
|
|
||||||
nodeInfo := schedulerframework.NewNodeInfo()
|
nodeInfo := framework.NewNodeInfo(node, nil)
|
||||||
nodeInfo.SetNode(node)
|
|
||||||
return nodeInfo, nil
|
return nodeInfo, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
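When a group injects no default pods, as in the tencentcloud hunk above, the pod arguments are simply omitted and only the node plus the nil DRA argument remain. A minimal sketch under that assumption:

package example

import (
	apiv1 "k8s.io/api/core/v1"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

// bareTemplate builds a NodeInfo with no default pods attached.
func bareTemplate(node *apiv1.Node) *framework.NodeInfo {
	return framework.NewNodeInfo(node, nil)
}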
@ -24,9 +24,9 @@ import (
|
||||||
"k8s.io/apimachinery/pkg/api/resource"
|
"k8s.io/apimachinery/pkg/api/resource"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||||
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
||||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// OnScaleUpFunc is a function called on node group increase in TestCloudProvider.
|
// OnScaleUpFunc is a function called on node group increase in TestCloudProvider.
|
||||||
|
|
@ -56,7 +56,7 @@ type TestCloudProvider struct {
|
||||||
onNodeGroupDelete func(string) error
|
onNodeGroupDelete func(string) error
|
||||||
hasInstance func(string) (bool, error)
|
hasInstance func(string) (bool, error)
|
||||||
machineTypes []string
|
machineTypes []string
|
||||||
machineTemplates map[string]*schedulerframework.NodeInfo
|
machineTemplates map[string]*framework.NodeInfo
|
||||||
priceModel cloudprovider.PricingModel
|
priceModel cloudprovider.PricingModel
|
||||||
resourceLimiter *cloudprovider.ResourceLimiter
|
resourceLimiter *cloudprovider.ResourceLimiter
|
||||||
}
|
}
|
||||||
|
|
@ -75,7 +75,7 @@ func NewTestCloudProvider(onScaleUp OnScaleUpFunc, onScaleDown OnScaleDownFunc)
|
||||||
// NewTestAutoprovisioningCloudProvider builds new TestCloudProvider with autoprovisioning support
|
// NewTestAutoprovisioningCloudProvider builds new TestCloudProvider with autoprovisioning support
|
||||||
func NewTestAutoprovisioningCloudProvider(onScaleUp OnScaleUpFunc, onScaleDown OnScaleDownFunc,
|
func NewTestAutoprovisioningCloudProvider(onScaleUp OnScaleUpFunc, onScaleDown OnScaleDownFunc,
|
||||||
onNodeGroupCreate OnNodeGroupCreateFunc, onNodeGroupDelete OnNodeGroupDeleteFunc,
|
onNodeGroupCreate OnNodeGroupCreateFunc, onNodeGroupDelete OnNodeGroupDeleteFunc,
|
||||||
machineTypes []string, machineTemplates map[string]*schedulerframework.NodeInfo) *TestCloudProvider {
|
machineTypes []string, machineTemplates map[string]*framework.NodeInfo) *TestCloudProvider {
|
||||||
return &TestCloudProvider{
|
return &TestCloudProvider{
|
||||||
nodes: make(map[string]string),
|
nodes: make(map[string]string),
|
||||||
groups: make(map[string]cloudprovider.NodeGroup),
|
groups: make(map[string]cloudprovider.NodeGroup),
|
||||||
|
|
@ -494,7 +494,7 @@ func (tng *TestNodeGroup) Autoprovisioned() bool {
|
||||||
}
|
}
|
||||||
|
|
||||||
// TemplateNodeInfo returns a node template for this node group.
|
// TemplateNodeInfo returns a node template for this node group.
|
||||||
func (tng *TestNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
|
func (tng *TestNodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
|
||||||
if tng.cloudProvider.machineTemplates == nil {
|
if tng.cloudProvider.machineTemplates == nil {
|
||||||
return nil, cloudprovider.ErrNotImplemented
|
return nil, cloudprovider.ErrNotImplemented
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -22,8 +22,8 @@ import (
|
||||||
apiv1 "k8s.io/api/core/v1"
|
apiv1 "k8s.io/api/core/v1"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||||
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||||
"k8s.io/klog/v2"
|
"k8s.io/klog/v2"
|
||||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// AutoScalingGroup represents a Volcengine 'Auto Scaling Group' which also can be treated as a node group.
|
// AutoScalingGroup represents a Volcengine 'Auto Scaling Group' which also can be treated as a node group.
|
||||||
|
|
@ -169,13 +169,13 @@ func (asg *AutoScalingGroup) Nodes() ([]cloudprovider.Instance, error) {
|
||||||
return nodes, nil
|
return nodes, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty
|
// TemplateNodeInfo returns a framework.NodeInfo structure of an empty
|
||||||
// (as if just started) node. This will be used in scale-up simulations to
|
// (as if just started) node. This will be used in scale-up simulations to
|
||||||
// predict what would a new node look like if a node group was expanded. The returned
|
// predict what would a new node look like if a node group was expanded. The returned
|
||||||
// NodeInfo is expected to have a fully populated Node object, with all of the labels,
|
// NodeInfo is expected to have a fully populated Node object, with all of the labels,
|
||||||
// capacity and allocatable information as well as all pods that are started on
|
// capacity and allocatable information as well as all pods that are started on
|
||||||
// the node by default, using manifest (most likely only kube-proxy). Implementation optional.
|
// the node by default, using manifest (most likely only kube-proxy). Implementation optional.
|
||||||
func (asg *AutoScalingGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
|
func (asg *AutoScalingGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
|
||||||
template, err := asg.manager.getAsgTemplate(asg.asgId)
|
template, err := asg.manager.getAsgTemplate(asg.asgId)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
|
@ -184,8 +184,7 @@ func (asg *AutoScalingGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, e
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(asg.asgId))
|
nodeInfo := framework.NewNodeInfo(node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(asg.asgId)})
|
||||||
nodeInfo.SetNode(node)
|
|
||||||
return nodeInfo, nil
|
return nodeInfo, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -25,7 +25,7 @@ import (
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/vultr/govultr"
|
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/vultr/govultr"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
|
@ -193,14 +193,14 @@ func (n *NodeGroup) Nodes() ([]cloudprovider.Instance, error) {
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty
|
// TemplateNodeInfo returns a framework.NodeInfo structure of an empty
|
||||||
// (as if just started) node. This will be used in scale-up simulations to
|
// (as if just started) node. This will be used in scale-up simulations to
|
||||||
// predict what would a new node look like if a node group was expanded. The
|
// predict what would a new node look like if a node group was expanded. The
|
||||||
// returned NodeInfo is expected to have a fully populated Node object, with
|
// returned NodeInfo is expected to have a fully populated Node object, with
|
||||||
// all of the labels, capacity and allocatable information as well as all pods
|
// all of the labels, capacity and allocatable information as well as all pods
|
||||||
// that are started on the node by default, using manifest (most likely only
|
// that are started on the node by default, using manifest (most likely only
|
||||||
// kube-proxy). Implementation optional.
|
// kube-proxy). Implementation optional.
|
||||||
func (n *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
|
func (n *NodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
|
||||||
return nil, cloudprovider.ErrNotImplemented
|
return nil, cloudprovider.ErrNotImplemented
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -30,6 +30,7 @@ import (
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/metrics"
|
"k8s.io/autoscaler/cluster-autoscaler/metrics"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/processors/nodegroupconfig"
|
"k8s.io/autoscaler/cluster-autoscaler/processors/nodegroupconfig"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/processors/nodegroups/asyncnodegroups"
|
"k8s.io/autoscaler/cluster-autoscaler/processors/nodegroups/asyncnodegroups"
|
||||||
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/utils/backoff"
|
"k8s.io/autoscaler/cluster-autoscaler/utils/backoff"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
||||||
kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
|
kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
|
||||||
|
|
@ -38,7 +39,6 @@ import (
|
||||||
apiv1 "k8s.io/api/core/v1"
|
apiv1 "k8s.io/api/core/v1"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/util/sets"
|
"k8s.io/apimachinery/pkg/util/sets"
|
||||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
|
||||||
|
|
||||||
klog "k8s.io/klog/v2"
|
klog "k8s.io/klog/v2"
|
||||||
)
|
)
|
||||||
|
|
@ -124,7 +124,7 @@ type ClusterStateRegistry struct {
|
||||||
scaleUpRequests map[string]*ScaleUpRequest // nodeGroupName -> ScaleUpRequest
|
scaleUpRequests map[string]*ScaleUpRequest // nodeGroupName -> ScaleUpRequest
|
||||||
scaleDownRequests []*ScaleDownRequest
|
scaleDownRequests []*ScaleDownRequest
|
||||||
nodes []*apiv1.Node
|
nodes []*apiv1.Node
|
||||||
nodeInfosForGroups map[string]*schedulerframework.NodeInfo
|
nodeInfosForGroups map[string]*framework.NodeInfo
|
||||||
cloudProvider cloudprovider.CloudProvider
|
cloudProvider cloudprovider.CloudProvider
|
||||||
perNodeGroupReadiness map[string]Readiness
|
perNodeGroupReadiness map[string]Readiness
|
||||||
totalReadiness Readiness
|
totalReadiness Readiness
|
||||||
|
|
@ -338,7 +338,7 @@ func (csr *ClusterStateRegistry) registerFailedScaleUpNoLock(nodeGroup cloudprov
|
||||||
}
|
}
|
||||||
|
|
||||||
// UpdateNodes updates the state of the nodes in the ClusterStateRegistry and recalculates the stats
|
// UpdateNodes updates the state of the nodes in the ClusterStateRegistry and recalculates the stats
|
||||||
func (csr *ClusterStateRegistry) UpdateNodes(nodes []*apiv1.Node, nodeInfosForGroups map[string]*schedulerframework.NodeInfo, currentTime time.Time) error {
|
func (csr *ClusterStateRegistry) UpdateNodes(nodes []*apiv1.Node, nodeInfosForGroups map[string]*framework.NodeInfo, currentTime time.Time) error {
|
||||||
csr.updateNodeGroupMetrics()
|
csr.updateNodeGroupMetrics()
|
||||||
targetSizes, err := getTargetSizes(csr.cloudProvider)
|
targetSizes, err := getTargetSizes(csr.cloudProvider)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
|
||||||
|
|
@ -45,12 +45,12 @@ func currentlyDrainedPods(context *context.AutoscalingContext) []*apiv1.Pod {
|
||||||
var pods []*apiv1.Pod
|
var pods []*apiv1.Pod
|
||||||
_, nodeNames := context.ScaleDownActuator.CheckStatus().DeletionsInProgress()
|
_, nodeNames := context.ScaleDownActuator.CheckStatus().DeletionsInProgress()
|
||||||
for _, nodeName := range nodeNames {
|
for _, nodeName := range nodeNames {
|
||||||
nodeInfo, err := context.ClusterSnapshot.NodeInfos().Get(nodeName)
|
nodeInfo, err := context.ClusterSnapshot.GetNodeInfo(nodeName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
klog.Warningf("Couldn't get node %v info, assuming the node got deleted already: %v", nodeName, err)
|
klog.Warningf("Couldn't get node %v info, assuming the node got deleted already: %v", nodeName, err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
for _, podInfo := range nodeInfo.Pods {
|
for _, podInfo := range nodeInfo.Pods() {
|
||||||
// Filter out pods that have a deletion timestamp set
|
// Filter out pods that have a deletion timestamp set
|
||||||
if podInfo.Pod.DeletionTimestamp != nil {
|
if podInfo.Pod.DeletionTimestamp != nil {
|
||||||
klog.Infof("Pod %v has deletion timestamp set, skipping injection to unschedulable pods list", podInfo.Pod.Name)
|
klog.Infof("Pod %v has deletion timestamp set, skipping injection to unschedulable pods list", podInfo.Pod.Name)
|
||||||
|
|
|
||||||
|
|
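Call sites that previously went through ClusterSnapshot.NodeInfos().Get(name) now use GetNodeInfo directly and read pods via the Pods() accessor, as in currentlyDrainedPods above. A hedged sketch of that lookup; the ClusterSnapshot interface name is assumed from the imports in these hunks.

package example

import (
	apiv1 "k8s.io/api/core/v1"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
)

// podsOnNode fetches a node's NodeInfo from the snapshot and collects its pods.
func podsOnNode(snapshot clustersnapshot.ClusterSnapshot, nodeName string) ([]*apiv1.Pod, error) {
	nodeInfo, err := snapshot.GetNodeInfo(nodeName) // replaces snapshot.NodeInfos().Get(nodeName)
	if err != nil {
		return nil, err
	}
	var pods []*apiv1.Pod
	for _, podInfo := range nodeInfo.Pods() {
		pods = append(pods, podInfo.Pod)
	}
	return pods, nil
}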
@ -125,13 +125,12 @@ func TestFilterOutExpendable(t *testing.T) {
|
||||||
assert.ElementsMatch(t, tc.wantPods, pods)
|
assert.ElementsMatch(t, tc.wantPods, pods)
|
||||||
|
|
||||||
var podsInSnapshot []*apiv1.Pod
|
var podsInSnapshot []*apiv1.Pod
|
||||||
nodeInfoLister := snapshot.NodeInfos()
|
|
||||||
// Get pods in snapshot
|
// Get pods in snapshot
|
||||||
for _, n := range tc.nodes {
|
for _, n := range tc.nodes {
|
||||||
nodeInfo, err := nodeInfoLister.Get(n.Name)
|
nodeInfo, err := snapshot.GetNodeInfo(n.Name)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.NotEqual(t, nodeInfo.Pods, nil)
|
assert.NotEqual(t, nodeInfo.Pods(), nil)
|
||||||
for _, podInfo := range nodeInfo.Pods {
|
for _, podInfo := range nodeInfo.Pods() {
|
||||||
podsInSnapshot = append(podsInSnapshot, podInfo.Pod)
|
podsInSnapshot = append(podsInSnapshot, podInfo.Pod)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -25,20 +25,20 @@ import (
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/context"
|
"k8s.io/autoscaler/cluster-autoscaler/context"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/metrics"
|
"k8s.io/autoscaler/cluster-autoscaler/metrics"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
|
||||||
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker"
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/scheduling"
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/scheduling"
|
||||||
corev1helpers "k8s.io/component-helpers/scheduling/corev1"
|
corev1helpers "k8s.io/component-helpers/scheduling/corev1"
|
||||||
klog "k8s.io/klog/v2"
|
klog "k8s.io/klog/v2"
|
||||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type filterOutSchedulablePodListProcessor struct {
|
type filterOutSchedulablePodListProcessor struct {
|
||||||
schedulingSimulator *scheduling.HintingSimulator
|
schedulingSimulator *scheduling.HintingSimulator
|
||||||
nodeFilter func(*schedulerframework.NodeInfo) bool
|
nodeFilter func(*framework.NodeInfo) bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewFilterOutSchedulablePodListProcessor creates a PodListProcessor filtering out schedulable pods
|
// NewFilterOutSchedulablePodListProcessor creates a PodListProcessor filtering out schedulable pods
|
||||||
func NewFilterOutSchedulablePodListProcessor(predicateChecker predicatechecker.PredicateChecker, nodeFilter func(*schedulerframework.NodeInfo) bool) *filterOutSchedulablePodListProcessor {
|
func NewFilterOutSchedulablePodListProcessor(predicateChecker predicatechecker.PredicateChecker, nodeFilter func(*framework.NodeInfo) bool) *filterOutSchedulablePodListProcessor {
|
||||||
return &filterOutSchedulablePodListProcessor{
|
return &filterOutSchedulablePodListProcessor{
|
||||||
schedulingSimulator: scheduling.NewHintingSimulator(predicateChecker),
|
schedulingSimulator: scheduling.NewHintingSimulator(predicateChecker),
|
||||||
nodeFilter: nodeFilter,
|
nodeFilter: nodeFilter,
|
||||||
|
|
|
||||||
|
|
@ -24,10 +24,10 @@ import (
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
apiv1 "k8s.io/api/core/v1"
|
apiv1 "k8s.io/api/core/v1"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
|
||||||
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker"
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/scheduling"
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/scheduling"
|
||||||
. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
|
. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
|
||||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
|
||||||
schedulermetrics "k8s.io/kubernetes/pkg/scheduler/metrics"
|
schedulermetrics "k8s.io/kubernetes/pkg/scheduler/metrics"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
@ -35,15 +35,15 @@ func TestFilterOutSchedulable(t *testing.T) {
|
||||||
schedulermetrics.Register()
|
schedulermetrics.Register()
|
||||||
|
|
||||||
node := buildReadyTestNode("node", 2000, 100)
|
node := buildReadyTestNode("node", 2000, 100)
|
||||||
matchesAllNodes := func(*schedulerframework.NodeInfo) bool { return true }
|
matchesAllNodes := func(*framework.NodeInfo) bool { return true }
|
||||||
matchesNoNodes := func(*schedulerframework.NodeInfo) bool { return false }
|
matchesNoNodes := func(*framework.NodeInfo) bool { return false }
|
||||||
|
|
||||||
testCases := map[string]struct {
|
testCases := map[string]struct {
|
||||||
nodesWithPods map[*apiv1.Node][]*apiv1.Pod
|
nodesWithPods map[*apiv1.Node][]*apiv1.Pod
|
||||||
unschedulableCandidates []*apiv1.Pod
|
unschedulableCandidates []*apiv1.Pod
|
||||||
expectedScheduledPods []*apiv1.Pod
|
expectedScheduledPods []*apiv1.Pod
|
||||||
expectedUnscheduledPods []*apiv1.Pod
|
expectedUnscheduledPods []*apiv1.Pod
|
||||||
nodeFilter func(*schedulerframework.NodeInfo) bool
|
nodeFilter func(*framework.NodeInfo) bool
|
||||||
}{
|
}{
|
||||||
"single empty node, no pods": {
|
"single empty node, no pods": {
|
||||||
nodesWithPods: map[*apiv1.Node][]*apiv1.Pod{node: {}},
|
nodesWithPods: map[*apiv1.Node][]*apiv1.Pod{node: {}},
|
||||||
|
|
@ -203,11 +203,11 @@ func TestFilterOutSchedulable(t *testing.T) {
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.ElementsMatch(t, unschedulablePods, tc.expectedUnscheduledPods, "unschedulable pods differ")
|
assert.ElementsMatch(t, unschedulablePods, tc.expectedUnscheduledPods, "unschedulable pods differ")
|
||||||
|
|
||||||
nodeInfos, err := clusterSnapshot.NodeInfos().List()
|
nodeInfos, err := clusterSnapshot.ListNodeInfos()
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
var scheduledPods []*apiv1.Pod
|
var scheduledPods []*apiv1.Pod
|
||||||
for _, nodeInfo := range nodeInfos {
|
for _, nodeInfo := range nodeInfos {
|
||||||
for _, podInfo := range nodeInfo.Pods {
|
for _, podInfo := range nodeInfo.Pods() {
|
||||||
scheduledPods = append(scheduledPods, podInfo.Pod)
|
scheduledPods = append(scheduledPods, podInfo.Pod)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
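The node filter passed to the pod list processors now takes the wrapper type. The test above uses trivial match-all and match-none filters; a slightly more concrete, purely illustrative filter could inspect the node through the wrapper's Node() accessor:

package example

import "k8s.io/autoscaler/cluster-autoscaler/simulator/framework"

// matchesAllNodes mirrors the helper used in the test above.
func matchesAllNodes(*framework.NodeInfo) bool { return true }

// onlySchedulableNodes is a hypothetical filter: it reaches the apiv1.Node via
// the wrapper's Node() accessor and skips cordoned nodes.
func onlySchedulableNodes(nodeInfo *framework.NodeInfo) bool {
	node := nodeInfo.Node()
	return node != nil && !node.Spec.Unschedulable
}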
@ -18,13 +18,13 @@ package podlistprocessor
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/processors/pods"
|
"k8s.io/autoscaler/cluster-autoscaler/processors/pods"
|
||||||
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker"
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker"
|
||||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// NewDefaultPodListProcessor returns a default implementation of the pod list
|
// NewDefaultPodListProcessor returns a default implementation of the pod list
|
||||||
// processor, which wraps and sequentially runs other sub-processors.
|
// processor, which wraps and sequentially runs other sub-processors.
|
||||||
func NewDefaultPodListProcessor(predicateChecker predicatechecker.PredicateChecker, nodeFilter func(*schedulerframework.NodeInfo) bool) *pods.CombinedPodListProcessor {
|
func NewDefaultPodListProcessor(predicateChecker predicatechecker.PredicateChecker, nodeFilter func(*framework.NodeInfo) bool) *pods.CombinedPodListProcessor {
|
||||||
return pods.NewCombinedPodListProcessor([]pods.PodListProcessor{
|
return pods.NewCombinedPodListProcessor([]pods.PodListProcessor{
|
||||||
NewClearTPURequestsPodListProcessor(),
|
NewClearTPURequestsPodListProcessor(),
|
||||||
NewFilterOutExpendablePodListProcessor(),
|
NewFilterOutExpendablePodListProcessor(),
|
||||||
|
|
|
||||||
|
|
@ -285,7 +285,7 @@ func (a *Actuator) deleteNodesAsync(nodes []*apiv1.Node, nodeGroup cloudprovider
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, node := range nodes {
|
for _, node := range nodes {
|
||||||
nodeInfo, err := clusterSnapshot.NodeInfos().Get(node.Name)
|
nodeInfo, err := clusterSnapshot.GetNodeInfo(node.Name)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
klog.Errorf("Scale-down: can't retrieve node %q from snapshot, err: %v", node.Name, err)
|
klog.Errorf("Scale-down: can't retrieve node %q from snapshot, err: %v", node.Name, err)
|
||||||
nodeDeleteResult := status.NodeDeleteResult{ResultType: status.NodeDeleteErrorInternal, Err: errors.NewAutoscalerError(errors.InternalError, "nodeInfos.Get for %q returned error: %v", node.Name, err)}
|
nodeDeleteResult := status.NodeDeleteResult{ResultType: status.NodeDeleteErrorInternal, Err: errors.NewAutoscalerError(errors.InternalError, "nodeInfos.Get for %q returned error: %v", node.Name, err)}
|
||||||
|
|
@ -317,7 +317,7 @@ func (a *Actuator) scaleDownNodeToReport(node *apiv1.Node, drain bool) (*status.
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
nodeInfo, err := a.ctx.ClusterSnapshot.NodeInfos().Get(node.Name)
|
nodeInfo, err := a.ctx.ClusterSnapshot.GetNodeInfo(node.Name)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@@ -27,6 +27,7 @@ import (
 	kube_errors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/autoscaler/cluster-autoscaler/metrics"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	"k8s.io/klog/v2"
 	kubelet_config "k8s.io/kubernetes/pkg/kubelet/apis/config"

@@ -35,7 +36,6 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/utils/daemonset"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
 	pod_util "k8s.io/autoscaler/cluster-autoscaler/utils/pod"
-	"k8s.io/kubernetes/pkg/scheduler/framework"
 )

 const (

@@ -251,7 +251,7 @@ func (e Evictor) evictPod(ctx *acontext.AutoscalingContext, podToEvict *apiv1.Po
 }

 func podsToEvict(nodeInfo *framework.NodeInfo, evictDsByDefault bool) (dsPods, nonDsPods []*apiv1.Pod) {
-	for _, podInfo := range nodeInfo.Pods {
+	for _, podInfo := range nodeInfo.Pods() {
 		if pod_util.IsMirrorPod(podInfo.Pod) {
 			continue
 		} else if pod_util.IsDaemonSetPod(podInfo.Pod) {
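After the migration, pods attached to a NodeInfo are reached through the Pods() accessor rather than a struct field, and each element carries the API pod in its Pod field. A small hedged sketch of the unwrap pattern used by podsToEvict above (the helper name is illustrative):

package example

import (
	apiv1 "k8s.io/api/core/v1"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

// podsOf collects the raw API pods held by a wrapper NodeInfo. The Pods() accessor
// and the PodInfo.Pod field are the ones used by podsToEvict above; the helper name
// is illustrative.
func podsOf(nodeInfo *framework.NodeInfo) []*apiv1.Pod {
	var pods []*apiv1.Pod
	for _, podInfo := range nodeInfo.Pods() {
		pods = append(pods, podInfo.Pod)
	}
	return pods
}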
@@ -146,7 +146,7 @@ func TestDaemonSetEvictionForEmptyNodes(t *testing.T) {
 		EvictionRetryTime: waitBetweenRetries,
 		shutdownGracePeriodByPodPriority: drainConfig,
 	}
-	nodeInfo, err := context.ClusterSnapshot.NodeInfos().Get(n1.Name)
+	nodeInfo, err := context.ClusterSnapshot.GetNodeInfo(n1.Name)
 	assert.NoError(t, err)
 	_, err = evictor.EvictDaemonSetPods(&context, nodeInfo)
 	if scenario.err != nil {

@@ -213,7 +213,7 @@ func TestDrainNodeWithPods(t *testing.T) {
 		shutdownGracePeriodByPodPriority: legacyFlagDrainConfig,
 	}
 	clustersnapshot.InitializeClusterSnapshotOrDie(t, ctx.ClusterSnapshot, []*apiv1.Node{n1}, []*apiv1.Pod{p1, p2, d1})
-	nodeInfo, err := ctx.ClusterSnapshot.NodeInfos().Get(n1.Name)
+	nodeInfo, err := ctx.ClusterSnapshot.GetNodeInfo(n1.Name)
 	assert.NoError(t, err)
 	_, err = evictor.DrainNode(&ctx, nodeInfo)
 	assert.NoError(t, err)

@@ -277,7 +277,7 @@ func TestDrainNodeWithPodsWithRescheduled(t *testing.T) {
 		shutdownGracePeriodByPodPriority: legacyFlagDrainConfig,
 	}
 	clustersnapshot.InitializeClusterSnapshotOrDie(t, ctx.ClusterSnapshot, []*apiv1.Node{n1}, []*apiv1.Pod{p1, p2})
-	nodeInfo, err := ctx.ClusterSnapshot.NodeInfos().Get(n1.Name)
+	nodeInfo, err := ctx.ClusterSnapshot.GetNodeInfo(n1.Name)
 	assert.NoError(t, err)
 	_, err = evictor.DrainNode(&ctx, nodeInfo)
 	assert.NoError(t, err)

@@ -346,7 +346,7 @@ func TestDrainNodeWithPodsWithRetries(t *testing.T) {
 		shutdownGracePeriodByPodPriority: legacyFlagDrainConfig,
 	}
 	clustersnapshot.InitializeClusterSnapshotOrDie(t, ctx.ClusterSnapshot, []*apiv1.Node{n1}, []*apiv1.Pod{p1, p2, p3, d1})
-	nodeInfo, err := ctx.ClusterSnapshot.NodeInfos().Get(n1.Name)
+	nodeInfo, err := ctx.ClusterSnapshot.GetNodeInfo(n1.Name)
 	assert.NoError(t, err)
 	_, err = evictor.DrainNode(&ctx, nodeInfo)
 	assert.NoError(t, err)

@@ -409,7 +409,7 @@ func TestDrainNodeWithPodsDaemonSetEvictionFailure(t *testing.T) {
 		shutdownGracePeriodByPodPriority: legacyFlagDrainConfig,
 	}
 	clustersnapshot.InitializeClusterSnapshotOrDie(t, ctx.ClusterSnapshot, []*apiv1.Node{n1}, []*apiv1.Pod{p1, p2, d1, d2})
-	nodeInfo, err := ctx.ClusterSnapshot.NodeInfos().Get(n1.Name)
+	nodeInfo, err := ctx.ClusterSnapshot.GetNodeInfo(n1.Name)
 	assert.NoError(t, err)
 	evictionResults, err := evictor.DrainNode(&ctx, nodeInfo)
 	assert.NoError(t, err)

@@ -470,7 +470,7 @@ func TestDrainNodeWithPodsEvictionFailure(t *testing.T) {
 		shutdownGracePeriodByPodPriority: legacyFlagDrainConfig,
 	}
 	clustersnapshot.InitializeClusterSnapshotOrDie(t, ctx.ClusterSnapshot, []*apiv1.Node{n1}, []*apiv1.Pod{p1, p2, p3, p4})
-	nodeInfo, err := ctx.ClusterSnapshot.NodeInfos().Get(n1.Name)
+	nodeInfo, err := ctx.ClusterSnapshot.GetNodeInfo(n1.Name)
 	assert.NoError(t, err)
 	evictionResults, err := evictor.DrainNode(&ctx, nodeInfo)
 	assert.Error(t, err)

@@ -536,7 +536,7 @@ func TestDrainWithPodsNodeDisappearanceFailure(t *testing.T) {
 		shutdownGracePeriodByPodPriority: legacyFlagDrainConfig,
 	}
 	clustersnapshot.InitializeClusterSnapshotOrDie(t, ctx.ClusterSnapshot, []*apiv1.Node{n1}, []*apiv1.Pod{p1, p2, p3, p4})
-	nodeInfo, err := ctx.ClusterSnapshot.NodeInfos().Get(n1.Name)
+	nodeInfo, err := ctx.ClusterSnapshot.GetNodeInfo(n1.Name)
 	assert.NoError(t, err)
 	evictionResults, err := evictor.DrainNode(&ctx, nodeInfo)
 	assert.Error(t, err)

@@ -626,9 +626,9 @@ func TestPodsToEvict(t *testing.T) {
 			if tc.nodeNameOverwrite != "" {
 				nodeName = tc.nodeNameOverwrite
 			}
-			nodeInfo, err := snapshot.NodeInfos().Get(nodeName)
+			nodeInfo, err := snapshot.GetNodeInfo(nodeName)
 			if err != nil {
-				t.Fatalf("NodeInfos().Get() unexpected error: %v", err)
+				t.Fatalf("GetNodeInfo() unexpected error: %v", err)
 			}
 			gotDsPods, gotNonDsPods := podsToEvict(nodeInfo, ctx.DaemonSetEvictionForOccupiedNodes)
 			if diff := cmp.Diff(tc.wantDsPods, gotDsPods, cmpopts.EquateEmpty()); diff != "" {
@@ -20,8 +20,8 @@ import (
 	"sync"

 	apiv1 "k8s.io/api/core/v1"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	"k8s.io/klog/v2"
-	"k8s.io/kubernetes/pkg/scheduler/framework"

 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 	"k8s.io/autoscaler/cluster-autoscaler/config"

@@ -33,10 +33,9 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/core/scaledown/deletiontracker"
 	"k8s.io/autoscaler/cluster-autoscaler/core/scaledown/status"
 	. "k8s.io/autoscaler/cluster-autoscaler/core/test"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
 	"k8s.io/client-go/kubernetes/fake"
-	"k8s.io/kubernetes/pkg/scheduler/framework"
-	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )

 type testIteration struct {
@@ -215,18 +214,12 @@ func scheduleAll(toSchedule []*budgets.NodeGroupView, scheduler *GroupDeletionSc
 			return fmt.Errorf("failed to get target size for node group %q: %s", bucket.Group.Id(), err)
 		}
 		for _, node := range bucket.Nodes {
-			scheduler.ScheduleDeletion(infoForNode(node), bucket.Group, bucketSize, false)
+			scheduler.ScheduleDeletion(framework.NewTestNodeInfo(node), bucket.Group, bucketSize, false)
 		}
 	}
 	return nil
 }

-func infoForNode(n *apiv1.Node) *framework.NodeInfo {
-	info := schedulerframework.NewNodeInfo()
-	info.SetNode(n)
-	return info
-}
-
 func mergeLists(lists ...[]*budgets.NodeGroupView) []*budgets.NodeGroupView {
 	merged := []*budgets.NodeGroupView{}
 	for _, l := range lists {
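Test code that previously built a scheduler NodeInfo in two steps (NewNodeInfo followed by SetNode) now passes the node straight to the wrapper's NewTestNodeInfo constructor, as the ScheduleDeletion call above shows. A sketch under that assumption (the node fixture here is arbitrary):

package example

import (
	apiv1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

// newNodeInfoForTest builds a wrapper NodeInfo the way the migrated tests do: the
// node goes straight into NewTestNodeInfo instead of NewNodeInfo() + SetNode().
// The node object itself is a throwaway fixture.
func newNodeInfoForTest(name string) *framework.NodeInfo {
	node := &apiv1.Node{ObjectMeta: metav1.ObjectMeta{Name: name}}
	return framework.NewTestNodeInfo(node)
}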
@@ -96,7 +96,7 @@ func TestPriorityEvictor(t *testing.T) {
 		fullDsEviction: true,
 	}
 	clustersnapshot.InitializeClusterSnapshotOrDie(t, ctx.ClusterSnapshot, []*apiv1.Node{n1}, []*apiv1.Pod{p1, p2, p3})
-	nodeInfo, err := ctx.ClusterSnapshot.NodeInfos().Get(n1.Name)
+	nodeInfo, err := ctx.ClusterSnapshot.GetNodeInfo(n1.Name)
 	assert.NoError(t, err)
 	_, err = evictor.DrainNode(&ctx, nodeInfo)
 	assert.NoError(t, err)
@@ -25,13 +25,13 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/core/scaledown/actuation"
 	"k8s.io/autoscaler/cluster-autoscaler/core/scaledown/unremovable"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/utilization"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/klogx"

 	apiv1 "k8s.io/api/core/v1"
 	kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
 	klog "k8s.io/klog/v2"
-	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )

 const (

@@ -73,7 +73,7 @@ func (c *Checker) FilterOutUnremovable(context *context.AutoscalingContext, scal
 	utilLogsQuota := klogx.NewLoggingQuota(20)

 	for _, node := range scaleDownCandidates {
-		nodeInfo, err := context.ClusterSnapshot.NodeInfos().Get(node.Name)
+		nodeInfo, err := context.ClusterSnapshot.GetNodeInfo(node.Name)
 		if err != nil {
 			klog.Errorf("Can't retrieve scale-down candidate %s from snapshot, err: %v", node.Name, err)
 			ineligible = append(ineligible, &simulator.UnremovableNode{Node: node, Reason: simulator.UnexpectedError})

@@ -106,7 +106,7 @@ func (c *Checker) FilterOutUnremovable(context *context.AutoscalingContext, scal
 	return currentlyUnneededNodeNames, utilizationMap, ineligible
 }

-func (c *Checker) unremovableReasonAndNodeUtilization(context *context.AutoscalingContext, timestamp time.Time, nodeInfo *schedulerframework.NodeInfo, utilLogsQuota *klogx.Quota) (simulator.UnremovableReason, *utilization.Info) {
+func (c *Checker) unremovableReasonAndNodeUtilization(context *context.AutoscalingContext, timestamp time.Time, nodeInfo *framework.NodeInfo, utilLogsQuota *klogx.Quota) (simulator.UnremovableReason, *utilization.Info) {
 	node := nodeInfo.Node()

 	if actuation.IsNodeBeingDeleted(node, timestamp) {
@@ -176,7 +176,7 @@ func (p *Planner) addUnremovableNodes(unremovableNodes []simulator.UnremovableNo
 }

 func allNodes(s clustersnapshot.ClusterSnapshot) ([]*apiv1.Node, error) {
-	nodeInfos, err := s.NodeInfos().List()
+	nodeInfos, err := s.ListNodeInfos()
 	if err != nil {
 		// This should never happen, List() returns err only because scheduler interface requires it.
 		return nil, err

@@ -264,7 +264,7 @@ func (p *Planner) categorizeNodes(podDestinations map[string]bool, scaleDownCand
 	unremovableCount := 0
 	var removableList []simulator.NodeToBeRemoved
 	atomicScaleDownNodesCount := 0
-	p.unremovableNodes.Update(p.context.ClusterSnapshot.NodeInfos(), p.latestUpdate)
+	p.unremovableNodes.Update(p.context.ClusterSnapshot, p.latestUpdate)
 	currentlyUnneededNodeNames, utilizationMap, ineligible := p.eligibilityChecker.FilterOutUnremovable(p.context, scaleDownCandidates, p.latestUpdate, p.unremovableNodes)
 	for _, n := range ineligible {
 		p.unremovableNodes.Add(n)
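Listing node infos moves onto the snapshot as well: NodeInfos().List() becomes ListNodeInfos(), as in allNodes above. A sketch of the listing pattern, assuming ListNodeInfos returns the wrapper NodeInfo slice plus an error as the old lister did (the narrow interface below is hypothetical):

package example

import (
	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

// nodeInfoLister is a hypothetical narrow view of ClusterSnapshot, limited to the
// listing method used by allNodes above.
type nodeInfoLister interface {
	ListNodeInfos() ([]*framework.NodeInfo, error)
}

// nodeNames lists the snapshot and unwraps the node names, mirroring allNodes.
func nodeNames(s nodeInfoLister) ([]string, error) {
	nodeInfos, err := s.ListNodeInfos()
	if err != nil {
		return nil, err
	}
	names := make([]string, 0, len(nodeInfos))
	for _, nodeInfo := range nodeInfos {
		names = append(names, nodeInfo.Node().Name)
	}
	return names, nil
}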
@@ -20,10 +20,10 @@ import (
 	"time"

 	"k8s.io/autoscaler/cluster-autoscaler/simulator"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"

 	apiv1 "k8s.io/api/core/v1"
 	klog "k8s.io/klog/v2"
-	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )

 // Nodes tracks the state of cluster nodes that cannot be removed.

@@ -40,21 +40,21 @@ func NewNodes() *Nodes {
 	}
 }

-// NodeInfoGetter is anything that can return NodeInfo object by name.
-type NodeInfoGetter interface {
-	Get(name string) (*schedulerframework.NodeInfo, error)
+// nodeInfoGetter is anything that can return NodeInfo object by name.
+type nodeInfoGetter interface {
+	GetNodeInfo(name string) (*framework.NodeInfo, error)
 }

 // Update updates the internal structure according to current state of the
 // cluster. Removes the nodes that are no longer in the nodes list.
-func (n *Nodes) Update(nodeInfos NodeInfoGetter, timestamp time.Time) {
+func (n *Nodes) Update(nodeInfos nodeInfoGetter, timestamp time.Time) {
 	n.reasons = make(map[string]*simulator.UnremovableNode)
 	if len(n.ttls) <= 0 {
 		return
 	}
 	newTTLs := make(map[string]time.Time, len(n.ttls))
 	for name, ttl := range n.ttls {
-		if _, err := nodeInfos.Get(name); err != nil {
+		if _, err := nodeInfos.GetNodeInfo(name); err != nil {
 			// Not logging on error level as most likely cause is that node is no longer in the cluster.
 			klog.Infof("Can't retrieve node %s from snapshot, removing from unremovable nodes, err: %v", name, err)
 			continue
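With the interface reduced to a single GetNodeInfo method, any type with that method satisfies nodeInfoGetter implicitly, which is why the planner can now pass the ClusterSnapshot itself to Update. An illustrative sketch of a fake that satisfies the interface structurally (type names here are assumptions, mirroring the test fake later in this diff):

package example

import (
	"fmt"

	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

// nodeInfoGetter repeats the unexported interface from the hunk above so the
// example is self-contained.
type nodeInfoGetter interface {
	GetNodeInfo(name string) (*framework.NodeInfo, error)
}

// fakeSnapshot is an illustrative stand-in: anything with a matching GetNodeInfo
// method (the real ClusterSnapshot, or a test fake) satisfies the interface implicitly.
type fakeSnapshot struct {
	infos map[string]*framework.NodeInfo
}

func (f *fakeSnapshot) GetNodeInfo(name string) (*framework.NodeInfo, error) {
	info, ok := f.infos[name]
	if !ok {
		return nil, fmt.Errorf("node %q not found", name)
	}
	return info, nil
}

var _ nodeInfoGetter = &fakeSnapshot{}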
@@ -22,11 +22,11 @@ import (
 	"time"

 	"k8s.io/autoscaler/cluster-autoscaler/simulator"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"

 	apiv1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
-	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )

 var (

@@ -107,7 +107,7 @@ type fakeNodeInfoGetter struct {
 	names map[string]bool
 }

-func (f *fakeNodeInfoGetter) Get(name string) (*schedulerframework.NodeInfo, error) {
+func (f *fakeNodeInfoGetter) GetNodeInfo(name string) (*framework.NodeInfo, error) {
 	// We don't actually care about the node info object itself, just its presence.
 	_, found := f.names[name]
 	if found {
@@ -22,7 +22,6 @@ import (

 	appsv1 "k8s.io/api/apps/v1"
 	"k8s.io/klog/v2"
-	"k8s.io/kubernetes/pkg/scheduler/framework"

 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 	"k8s.io/autoscaler/cluster-autoscaler/context"

@@ -30,6 +29,7 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/processors/nodegroups"
 	"k8s.io/autoscaler/cluster-autoscaler/processors/nodegroupset"
 	"k8s.io/autoscaler/cluster-autoscaler/processors/status"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/taints"
 )
@@ -24,8 +24,8 @@ import (
 	"time"

 	apiv1 "k8s.io/api/core/v1"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	"k8s.io/klog/v2"
-	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"

 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 	"k8s.io/autoscaler/cluster-autoscaler/context"

@@ -63,7 +63,7 @@ func newScaleUpExecutor(
 // If there were multiple concurrent errors one combined error is returned.
 func (e *scaleUpExecutor) ExecuteScaleUps(
 	scaleUpInfos []nodegroupset.ScaleUpInfo,
-	nodeInfos map[string]*schedulerframework.NodeInfo,
+	nodeInfos map[string]*framework.NodeInfo,
 	now time.Time,
 	atomic bool,
 ) (errors.AutoscalerError, []cloudprovider.NodeGroup) {

@@ -76,7 +76,7 @@ func (e *scaleUpExecutor) ExecuteScaleUps(

 func (e *scaleUpExecutor) executeScaleUpsSync(
 	scaleUpInfos []nodegroupset.ScaleUpInfo,
-	nodeInfos map[string]*schedulerframework.NodeInfo,
+	nodeInfos map[string]*framework.NodeInfo,
 	now time.Time,
 	atomic bool,
 ) (errors.AutoscalerError, []cloudprovider.NodeGroup) {

@@ -96,7 +96,7 @@ func (e *scaleUpExecutor) executeScaleUpsSync(

 func (e *scaleUpExecutor) executeScaleUpsParallel(
 	scaleUpInfos []nodegroupset.ScaleUpInfo,
-	nodeInfos map[string]*schedulerframework.NodeInfo,
+	nodeInfos map[string]*framework.NodeInfo,
 	now time.Time,
 	atomic bool,
 ) (errors.AutoscalerError, []cloudprovider.NodeGroup) {

@@ -156,7 +156,7 @@ func (e *scaleUpExecutor) increaseSize(nodeGroup cloudprovider.NodeGroup, increa

 func (e *scaleUpExecutor) executeScaleUp(
 	info nodegroupset.ScaleUpInfo,
-	nodeInfo *schedulerframework.NodeInfo,
+	nodeInfo *framework.NodeInfo,
 	availableGPUTypes map[string]struct{},
 	now time.Time,
 	atomic bool,
@@ -22,25 +22,24 @@ import (

 	appsv1 "k8s.io/api/apps/v1"
 	apiv1 "k8s.io/api/core/v1"
-	"k8s.io/autoscaler/cluster-autoscaler/estimator"
-	"k8s.io/klog/v2"
-	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"

 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 	"k8s.io/autoscaler/cluster-autoscaler/clusterstate"
 	"k8s.io/autoscaler/cluster-autoscaler/context"
 	"k8s.io/autoscaler/cluster-autoscaler/core/scaleup/equivalence"
 	"k8s.io/autoscaler/cluster-autoscaler/core/scaleup/resource"
 	"k8s.io/autoscaler/cluster-autoscaler/core/utils"
+	"k8s.io/autoscaler/cluster-autoscaler/estimator"
 	"k8s.io/autoscaler/cluster-autoscaler/expander"
 	"k8s.io/autoscaler/cluster-autoscaler/metrics"
 	ca_processors "k8s.io/autoscaler/cluster-autoscaler/processors"
 	"k8s.io/autoscaler/cluster-autoscaler/processors/nodegroups"
 	"k8s.io/autoscaler/cluster-autoscaler/processors/nodegroupset"
 	"k8s.io/autoscaler/cluster-autoscaler/processors/status"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/klogx"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/taints"
+	"k8s.io/klog/v2"
 )

 // ScaleUpOrchestrator implements scaleup.Orchestrator interface.
@@ -87,7 +86,7 @@ func (o *ScaleUpOrchestrator) ScaleUp(
 	unschedulablePods []*apiv1.Pod,
 	nodes []*apiv1.Node,
 	daemonSets []*appsv1.DaemonSet,
-	nodeInfos map[string]*schedulerframework.NodeInfo,
+	nodeInfos map[string]*framework.NodeInfo,
 	allOrNothing bool, // Either request enough capacity for all unschedulablePods, or don't request it at all.
 ) (*status.ScaleUpStatus, errors.AutoscalerError) {
 	if !o.initialized {

@@ -277,7 +276,7 @@ func (o *ScaleUpOrchestrator) ScaleUp(
 	}, nil
 }

-func (o *ScaleUpOrchestrator) applyLimits(newNodes int, resourcesLeft resource.Limits, nodeGroup cloudprovider.NodeGroup, nodeInfos map[string]*schedulerframework.NodeInfo) (int, errors.AutoscalerError) {
+func (o *ScaleUpOrchestrator) applyLimits(newNodes int, resourcesLeft resource.Limits, nodeGroup cloudprovider.NodeGroup, nodeInfos map[string]*framework.NodeInfo) (int, errors.AutoscalerError) {
 	nodeInfo, found := nodeInfos[nodeGroup.Id()]
 	if !found {
 		// This should never happen, as we already should have retrieved nodeInfo for any considered nodegroup.

@@ -293,7 +292,7 @@ func (o *ScaleUpOrchestrator) applyLimits(newNodes int, resourcesLeft resource.L
 // appropriate status or error if an unexpected error occurred.
 func (o *ScaleUpOrchestrator) ScaleUpToNodeGroupMinSize(
 	nodes []*apiv1.Node,
-	nodeInfos map[string]*schedulerframework.NodeInfo,
+	nodeInfos map[string]*framework.NodeInfo,
 ) (*status.ScaleUpStatus, errors.AutoscalerError) {
 	if !o.initialized {
 		return status.UpdateScaleUpError(&status.ScaleUpStatus{}, errors.NewAutoscalerError(errors.InternalError, "ScaleUpOrchestrator is not initialized"))

@@ -390,7 +389,7 @@ func (o *ScaleUpOrchestrator) ScaleUpToNodeGroupMinSize(
 // filterValidScaleUpNodeGroups filters the node groups that are valid for scale-up
 func (o *ScaleUpOrchestrator) filterValidScaleUpNodeGroups(
 	nodeGroups []cloudprovider.NodeGroup,
-	nodeInfos map[string]*schedulerframework.NodeInfo,
+	nodeInfos map[string]*framework.NodeInfo,
 	resourcesLeft resource.Limits,
 	currentNodeCount int,
 	now time.Time,

@@ -449,7 +448,7 @@ func (o *ScaleUpOrchestrator) filterValidScaleUpNodeGroups(
 func (o *ScaleUpOrchestrator) ComputeExpansionOption(
 	nodeGroup cloudprovider.NodeGroup,
 	schedulablePodGroups map[string][]estimator.PodEquivalenceGroup,
-	nodeInfos map[string]*schedulerframework.NodeInfo,
+	nodeInfos map[string]*framework.NodeInfo,
 	currentNodeCount int,
 	now time.Time,
 	allOrNothing bool,

@@ -499,7 +498,7 @@ func (o *ScaleUpOrchestrator) ComputeExpansionOption(
 // CreateNodeGroup will try to create a new node group based on the initialOption.
 func (o *ScaleUpOrchestrator) CreateNodeGroup(
 	initialOption *expander.Option,
-	nodeInfos map[string]*schedulerframework.NodeInfo,
+	nodeInfos map[string]*framework.NodeInfo,
 	schedulablePodGroups map[string][]estimator.PodEquivalenceGroup,
 	podEquivalenceGroups []*equivalence.PodGroup,
 	daemonSets []*appsv1.DaemonSet,

@@ -564,14 +563,14 @@ func (o *ScaleUpOrchestrator) CreateNodeGroup(
 func (o *ScaleUpOrchestrator) SchedulablePodGroups(
 	podEquivalenceGroups []*equivalence.PodGroup,
 	nodeGroup cloudprovider.NodeGroup,
-	nodeInfo *schedulerframework.NodeInfo,
+	nodeInfo *framework.NodeInfo,
 ) []estimator.PodEquivalenceGroup {
 	o.autoscalingContext.ClusterSnapshot.Fork()
 	defer o.autoscalingContext.ClusterSnapshot.Revert()

 	// Add test node to snapshot.
 	var allPods []*apiv1.Pod
-	for _, podInfo := range nodeInfo.Pods {
+	for _, podInfo := range nodeInfo.Pods() {
 		allPods = append(allPods, podInfo.Pod)
 	}
 	if err := o.autoscalingContext.ClusterSnapshot.AddNodeWithPods(nodeInfo.Node(), allPods); err != nil {
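The SchedulablePodGroups hunk above also shows the trial-scheduling pattern: fork the snapshot, add the node group's template node together with the pods carried by its wrapper NodeInfo, and revert when done. A condensed sketch of that flow, with the narrow snapshot interface below reconstructed only from the calls visible here (it is an assumption, not the real ClusterSnapshot interface):

package example

import (
	apiv1 "k8s.io/api/core/v1"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

// trialSnapshot is a hypothetical subset of ClusterSnapshot, reconstructed from the
// calls made in SchedulablePodGroups above.
type trialSnapshot interface {
	Fork()
	Revert()
	AddNodeWithPods(node *apiv1.Node, pods []*apiv1.Pod) error
}

// withTemplateNode runs fn against a snapshot that temporarily contains the template
// node (and the pods carried by its NodeInfo), then reverts the fork.
func withTemplateNode(s trialSnapshot, nodeInfo *framework.NodeInfo, fn func() error) error {
	s.Fork()
	defer s.Revert()
	var pods []*apiv1.Pod
	for _, podInfo := range nodeInfo.Pods() {
		pods = append(pods, podInfo.Pod)
	}
	if err := s.AddNodeWithPods(nodeInfo.Node(), pods); err != nil {
		return err
	}
	return fn()
}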
@@ -603,9 +602,9 @@ func (o *ScaleUpOrchestrator) SchedulablePodGroups(
 }

 // UpcomingNodes returns a list of nodes that are not ready but should be.
-func (o *ScaleUpOrchestrator) UpcomingNodes(nodeInfos map[string]*schedulerframework.NodeInfo) ([]*schedulerframework.NodeInfo, errors.AutoscalerError) {
+func (o *ScaleUpOrchestrator) UpcomingNodes(nodeInfos map[string]*framework.NodeInfo) ([]*framework.NodeInfo, errors.AutoscalerError) {
 	upcomingCounts, _ := o.clusterStateRegistry.GetUpcomingNodes()
-	upcomingNodes := make([]*schedulerframework.NodeInfo, 0)
+	upcomingNodes := make([]*framework.NodeInfo, 0)
 	for nodeGroup, numberOfNodes := range upcomingCounts {
 		nodeTemplate, found := nodeInfos[nodeGroup]
 		if !found {

@@ -636,7 +635,7 @@ func (o *ScaleUpOrchestrator) IsNodeGroupReadyToScaleUp(nodeGroup cloudprovider.
 }

 // IsNodeGroupResourceExceeded returns nil if node group resource limits are not exceeded, otherwise a reason is provided.
-func (o *ScaleUpOrchestrator) IsNodeGroupResourceExceeded(resourcesLeft resource.Limits, nodeGroup cloudprovider.NodeGroup, nodeInfo *schedulerframework.NodeInfo, numNodes int) status.Reasons {
+func (o *ScaleUpOrchestrator) IsNodeGroupResourceExceeded(resourcesLeft resource.Limits, nodeGroup cloudprovider.NodeGroup, nodeInfo *framework.NodeInfo, numNodes int) status.Reasons {
 	resourcesDelta, err := o.resourceManager.DeltaForNode(o.autoscalingContext, nodeInfo, nodeGroup)
 	if err != nil {
 		klog.Errorf("Skipping node group %s; error getting node group resources: %v", nodeGroup.Id(), err)

@@ -682,7 +681,7 @@ func (o *ScaleUpOrchestrator) balanceScaleUps(
 	now time.Time,
 	nodeGroup cloudprovider.NodeGroup,
 	newNodes int,
-	nodeInfos map[string]*schedulerframework.NodeInfo,
+	nodeInfos map[string]*framework.NodeInfo,
 	schedulablePodGroups map[string][]estimator.PodEquivalenceGroup,
 ) ([]nodegroupset.ScaleUpInfo, errors.AutoscalerError) {
 	// Recompute similar node groups in case they need to be updated

@@ -718,7 +717,7 @@ func (o *ScaleUpOrchestrator) balanceScaleUps(
 // set of pods as the main node group.
 func (o *ScaleUpOrchestrator) ComputeSimilarNodeGroups(
 	nodeGroup cloudprovider.NodeGroup,
-	nodeInfos map[string]*schedulerframework.NodeInfo,
+	nodeInfos map[string]*framework.NodeInfo,
 	schedulablePodGroups map[string][]estimator.PodEquivalenceGroup,
 	now time.Time,
 ) []cloudprovider.NodeGroup {
@@ -28,6 +28,7 @@ import (

 	"k8s.io/autoscaler/cluster-autoscaler/processors/nodegroupconfig"
 	"k8s.io/autoscaler/cluster-autoscaler/processors/nodegroups/asyncnodegroups"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	kube_record "k8s.io/client-go/tools/record"
 	"k8s.io/component-base/metrics/legacyregistry"
 	schedulermetrics "k8s.io/kubernetes/pkg/scheduler/metrics"

@@ -57,7 +58,6 @@ import (
 	apiv1 "k8s.io/api/core/v1"
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/client-go/kubernetes/fake"
-	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"

 	"github.com/stretchr/testify/assert"
 )
@@ -146,8 +146,7 @@ func TestZeroOrMaxNodeScaling(t *testing.T) {

 	n := BuildTestNode("n", 1000, 1000)
 	SetNodeReadyState(n, true, time.Time{})
-	nodeInfo := schedulerframework.NewNodeInfo()
-	nodeInfo.SetNode(n)
+	nodeInfo := framework.NewTestNodeInfo(n)

 	cases := map[string]struct {
 		testConfig *ScaleUpTestConfig

@@ -835,8 +834,7 @@ func TestNoCreateNodeGroupMaxCoresLimitHit(t *testing.T) {

 	largeNode := BuildTestNode("n", 8000, 8000)
 	SetNodeReadyState(largeNode, true, time.Time{})
-	largeNodeInfo := schedulerframework.NewNodeInfo()
-	largeNodeInfo.SetNode(largeNode)
+	largeNodeInfo := framework.NewTestNodeInfo(largeNode)

 	config := &ScaleUpTestConfig{
 		EnableAutoprovisioning: true,

@@ -1004,7 +1002,7 @@ func runSimpleScaleUpTest(t *testing.T, config *ScaleUpTestConfig) *ScaleUpTestR
 	}
 	if len(config.NodeTemplateConfigs) > 0 {
 		machineTypes := []string{}
-		machineTemplates := map[string]*schedulerframework.NodeInfo{}
+		machineTemplates := map[string]*framework.NodeInfo{}
 		for _, ntc := range config.NodeTemplateConfigs {
 			machineTypes = append(machineTypes, ntc.MachineType)
 			machineTemplates[ntc.NodeGroupName] = ntc.NodeInfo
@@ -1285,7 +1283,7 @@ type constNodeGroupSetProcessor struct {
 	similarNodeGroups []cloudprovider.NodeGroup
 }

-func (p *constNodeGroupSetProcessor) FindSimilarNodeGroups(_ *context.AutoscalingContext, _ cloudprovider.NodeGroup, _ map[string]*schedulerframework.NodeInfo) ([]cloudprovider.NodeGroup, errors.AutoscalerError) {
+func (p *constNodeGroupSetProcessor) FindSimilarNodeGroups(_ *context.AutoscalingContext, _ cloudprovider.NodeGroup, _ map[string]*framework.NodeInfo) ([]cloudprovider.NodeGroup, errors.AutoscalerError) {
 	return p.similarNodeGroups, nil
 }

@@ -1516,8 +1514,7 @@ func TestScaleUpAutoprovisionedNodeGroup(t *testing.T) {

 	t1 := BuildTestNode("t1", 4000, 1000000)
 	SetNodeReadyState(t1, true, time.Time{})
-	ti1 := schedulerframework.NewNodeInfo()
-	ti1.SetNode(t1)
+	ti1 := framework.NewTestNodeInfo(t1)

 	provider := testprovider.NewTestAutoprovisioningCloudProvider(
 		func(nodeGroup string, increase int) error {

@@ -1526,7 +1523,7 @@
 		}, nil, func(nodeGroup string) error {
 			createdGroups <- nodeGroup
 			return nil
-		}, nil, []string{"T1"}, map[string]*schedulerframework.NodeInfo{"T1": ti1})
+		}, nil, []string{"T1"}, map[string]*framework.NodeInfo{"T1": ti1})

 	options := config.AutoscalingOptions{
 		EstimatorName: estimator.BinpackingEstimatorName,

@@ -1570,8 +1567,7 @@ func TestScaleUpBalanceAutoprovisionedNodeGroups(t *testing.T) {

 	t1 := BuildTestNode("t1", 100, 1000000)
 	SetNodeReadyState(t1, true, time.Time{})
-	ti1 := schedulerframework.NewNodeInfo()
-	ti1.SetNode(t1)
+	ti1 := framework.NewTestNodeInfo(t1)

 	provider := testprovider.NewTestAutoprovisioningCloudProvider(
 		func(nodeGroup string, increase int) error {

@@ -1580,7 +1576,7 @@
 		}, nil, func(nodeGroup string) error {
 			createdGroups <- nodeGroup
 			return nil
-		}, nil, []string{"T1"}, map[string]*schedulerframework.NodeInfo{"T1": ti1})
+		}, nil, []string{"T1"}, map[string]*framework.NodeInfo{"T1": ti1})

 	options := config.AutoscalingOptions{
 		BalanceSimilarNodeGroups: true,
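The autoprovisioning tests above now feed the test cloud provider a map[string]*framework.NodeInfo of machine templates built with NewTestNodeInfo. A hedged sketch of assembling such a map; BuildTestNode is the fixture helper these tests already use, though its import path here is assumed, and the machine type names are arbitrary:

package example

import (
	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
	. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
)

// machineTemplatesForTest assembles the machine-template map handed to the test
// autoprovisioning cloud provider after the migration: one wrapper NodeInfo per
// machine type. Sizes and names are arbitrary fixtures.
func machineTemplatesForTest() map[string]*framework.NodeInfo {
	t1 := BuildTestNode("t1", 4000, 1000000)
	t2 := BuildTestNode("t2", 100, 1000000)
	return map[string]*framework.NodeInfo{
		"T1": framework.NewTestNodeInfo(t1),
		"T2": framework.NewTestNodeInfo(t2),
	}
}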
@@ -1672,20 +1668,18 @@ func TestScaleUpToMeetNodeGroupMinSize(t *testing.T) {
 func TestScaleupAsyncNodeGroupsEnabled(t *testing.T) {
 	t1 := BuildTestNode("t1", 100, 0)
 	SetNodeReadyState(t1, true, time.Time{})
-	ti1 := schedulerframework.NewNodeInfo()
-	ti1.SetNode(t1)
+	ti1 := framework.NewTestNodeInfo(t1)

 	t2 := BuildTestNode("t2", 0, 100)
 	SetNodeReadyState(t2, true, time.Time{})
-	ti2 := schedulerframework.NewNodeInfo()
-	ti2.SetNode(t2)
+	ti2 := framework.NewTestNodeInfo(t2)

 	testCases := []struct {
 		upcomingNodeGroupsNames []string
 		podsToAdd []*v1.Pod
 		isUpcomingMockMap map[string]bool
 		machineTypes []string
-		machineTemplates map[string]*schedulerframework.NodeInfo
+		machineTemplates map[string]*framework.NodeInfo
 		expectedCreatedGroups map[string]bool
 		expectedExpandedGroups map[string]int
 	}{

@@ -1694,7 +1688,7 @@ func TestScaleupAsyncNodeGroupsEnabled(t *testing.T) {
 			podsToAdd: []*v1.Pod{BuildTestPod("p1", 80, 0), BuildTestPod("p2", 80, 0)},
 			isUpcomingMockMap: map[string]bool{"autoprovisioned-T1": true},
 			machineTypes: []string{"T1"},
-			machineTemplates: map[string]*schedulerframework.NodeInfo{"T1": ti1},
+			machineTemplates: map[string]*framework.NodeInfo{"T1": ti1},
 			expectedCreatedGroups: map[string]bool{},
 			expectedExpandedGroups: map[string]int{"autoprovisioned-T1": 2},
 		},

@@ -1703,7 +1697,7 @@ func TestScaleupAsyncNodeGroupsEnabled(t *testing.T) {
 			podsToAdd: []*v1.Pod{BuildTestPod("p1", 80, 0)},
 			isUpcomingMockMap: map[string]bool{},
 			machineTypes: []string{"T1"},
-			machineTemplates: map[string]*schedulerframework.NodeInfo{"T1": ti1},
+			machineTemplates: map[string]*framework.NodeInfo{"T1": ti1},
 			expectedCreatedGroups: map[string]bool{"autoprovisioned-T1": true},
 			expectedExpandedGroups: map[string]int{"autoprovisioned-T1": 1},
 		},

@@ -1712,7 +1706,7 @@ func TestScaleupAsyncNodeGroupsEnabled(t *testing.T) {
 			podsToAdd: []*v1.Pod{BuildTestPod("p3", 0, 100), BuildTestPod("p2", 0, 100)},
 			isUpcomingMockMap: map[string]bool{"autoprovisioned-T1": true},
 			machineTypes: []string{"T1", "T2"},
-			machineTemplates: map[string]*schedulerframework.NodeInfo{"T1": ti1, "T2": ti2},
+			machineTemplates: map[string]*framework.NodeInfo{"T1": ti1, "T2": ti2},
 			expectedCreatedGroups: map[string]bool{"autoprovisioned-T2": true},
 			expectedExpandedGroups: map[string]int{"autoprovisioned-T2": 2},
 		},
@@ -26,9 +26,9 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/context"
 	"k8s.io/autoscaler/cluster-autoscaler/core/utils"
 	"k8s.io/autoscaler/cluster-autoscaler/processors/customresources"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
 	"k8s.io/klog/v2"
-	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )

 // LimitUnknown is used as a value in ResourcesLimits if actual limit could not be obtained due to errors talking to cloud provider.

@@ -59,7 +59,7 @@ func NewManager(crp customresources.CustomResourcesProcessor) *Manager {
 }

 // DeltaForNode calculates the amount of resources that will be used from the cluster when creating a node.
-func (m *Manager) DeltaForNode(ctx *context.AutoscalingContext, nodeInfo *schedulerframework.NodeInfo, nodeGroup cloudprovider.NodeGroup) (Delta, errors.AutoscalerError) {
+func (m *Manager) DeltaForNode(ctx *context.AutoscalingContext, nodeInfo *framework.NodeInfo, nodeGroup cloudprovider.NodeGroup) (Delta, errors.AutoscalerError) {
 	resultScaleUpDelta := make(Delta)
 	nodeCPU, nodeMemory := utils.GetNodeCoresAndMemory(nodeInfo.Node())
 	resultScaleUpDelta[cloudprovider.ResourceNameCores] = nodeCPU

@@ -85,7 +85,7 @@ func (m *Manager) DeltaForNode(ctx *context.AutoscalingContext, nodeInfo *schedu
 }

 // ResourcesLeft calculates the amount of resources left in the cluster.
-func (m *Manager) ResourcesLeft(ctx *context.AutoscalingContext, nodeInfos map[string]*schedulerframework.NodeInfo, nodes []*corev1.Node) (Limits, errors.AutoscalerError) {
+func (m *Manager) ResourcesLeft(ctx *context.AutoscalingContext, nodeInfos map[string]*framework.NodeInfo, nodes []*corev1.Node) (Limits, errors.AutoscalerError) {
 	nodesFromNotAutoscaledGroups, err := utils.FilterOutNodesFromNotAutoscaledGroups(nodes, ctx.CloudProvider)
 	if err != nil {
 		return nil, err.AddPrefix("failed to filter out nodes which are from not autoscaled groups: ")

@@ -143,7 +143,7 @@ func (m *Manager) ResourcesLeft(ctx *context.AutoscalingContext, nodeInfos map[s
 }

 // ApplyLimits calculates the new node count by applying the left resource limits of the cluster.
-func (m *Manager) ApplyLimits(ctx *context.AutoscalingContext, newCount int, resourceLeft Limits, nodeInfo *schedulerframework.NodeInfo, nodeGroup cloudprovider.NodeGroup) (int, errors.AutoscalerError) {
+func (m *Manager) ApplyLimits(ctx *context.AutoscalingContext, newCount int, resourceLeft Limits, nodeInfo *framework.NodeInfo, nodeGroup cloudprovider.NodeGroup) (int, errors.AutoscalerError) {
 	delta, err := m.DeltaForNode(ctx, nodeInfo, nodeGroup)
 	if err != nil {
 		return 0, err

@@ -203,7 +203,7 @@ func LimitsNotExceeded() LimitsCheckResult {
 	return LimitsCheckResult{false, []string{}}
 }

-func (m *Manager) coresMemoryTotal(ctx *context.AutoscalingContext, nodeInfos map[string]*schedulerframework.NodeInfo, nodesFromNotAutoscaledGroups []*corev1.Node) (int64, int64, errors.AutoscalerError) {
+func (m *Manager) coresMemoryTotal(ctx *context.AutoscalingContext, nodeInfos map[string]*framework.NodeInfo, nodesFromNotAutoscaledGroups []*corev1.Node) (int64, int64, errors.AutoscalerError) {
 	var coresTotal int64
 	var memoryTotal int64
 	for _, nodeGroup := range ctx.CloudProvider.NodeGroups() {

@@ -233,7 +233,7 @@ func (m *Manager) coresMemoryTotal(ctx *context.AutoscalingContext, nodeInfos ma
 	return coresTotal, memoryTotal, nil
 }

-func (m *Manager) customResourcesTotal(ctx *context.AutoscalingContext, nodeInfos map[string]*schedulerframework.NodeInfo, nodesFromNotAutoscaledGroups []*corev1.Node) (map[string]int64, errors.AutoscalerError) {
+func (m *Manager) customResourcesTotal(ctx *context.AutoscalingContext, nodeInfos map[string]*framework.NodeInfo, nodesFromNotAutoscaledGroups []*corev1.Node) (map[string]int64, errors.AutoscalerError) {
 	result := make(map[string]int64)
 	for _, nodeGroup := range ctx.CloudProvider.NodeGroups() {
 		currentSize, err := nodeGroup.TargetSize()
@@ -24,9 +24,9 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/estimator"
 	ca_processors "k8s.io/autoscaler/cluster-autoscaler/processors"
 	"k8s.io/autoscaler/cluster-autoscaler/processors/status"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/taints"
-	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )

 // Orchestrator is a component that picks the node group to resize and triggers

@@ -47,7 +47,7 @@ type Orchestrator interface {
 		unschedulablePods []*apiv1.Pod,
 		nodes []*apiv1.Node,
 		daemonSets []*appsv1.DaemonSet,
-		nodeInfos map[string]*schedulerframework.NodeInfo,
+		nodeInfos map[string]*framework.NodeInfo,
 		allOrNothing bool,
 	) (*status.ScaleUpStatus, errors.AutoscalerError)
 	// ScaleUpToNodeGroupMinSize tries to scale up node groups that have less nodes

@@ -56,6 +56,6 @@ type Orchestrator interface {
 	// appropriate status or error if an unexpected error occurred.
 	ScaleUpToNodeGroupMinSize(
 		nodes []*apiv1.Node,
-		nodeInfos map[string]*schedulerframework.NodeInfo,
+		nodeInfos map[string]*framework.NodeInfo,
 	) (*status.ScaleUpStatus, errors.AutoscalerError)
 }
@@ -46,6 +46,7 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/simulator"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability/rules"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/options"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/backoff"

@@ -58,7 +59,6 @@ import (
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	klog "k8s.io/klog/v2"
-	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )

 const (

@@ -496,8 +496,7 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) caerrors.AutoscalerErr
 			}
 		}
 	}
-	l, err := a.ClusterSnapshot.NodeInfos().List()
+	l, err := a.ClusterSnapshot.ListNodeInfos()
 	if err != nil {
 		klog.Errorf("Unable to fetch ClusterNode List for Debugging Snapshot, %v", err)
 	} else {
@ -679,7 +678,7 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) caerrors.AutoscalerErr
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *StaticAutoscaler) addUpcomingNodesToClusterSnapshot(upcomingCounts map[string]int, nodeInfosForGroups map[string]*schedulerframework.NodeInfo) error {
|
func (a *StaticAutoscaler) addUpcomingNodesToClusterSnapshot(upcomingCounts map[string]int, nodeInfosForGroups map[string]*framework.NodeInfo) error {
|
||||||
nodeGroups := a.nodeGroupsById()
|
nodeGroups := a.nodeGroupsById()
|
||||||
upcomingNodeGroups := make(map[string]int)
|
upcomingNodeGroups := make(map[string]int)
|
||||||
upcomingNodesFromUpcomingNodeGroups := 0
|
upcomingNodesFromUpcomingNodeGroups := 0
|
||||||
|
|
@ -691,7 +690,7 @@ func (a *StaticAutoscaler) addUpcomingNodesToClusterSnapshot(upcomingCounts map[
|
||||||
isUpcomingNodeGroup := a.processors.AsyncNodeGroupStateChecker.IsUpcoming(nodeGroup)
|
isUpcomingNodeGroup := a.processors.AsyncNodeGroupStateChecker.IsUpcoming(nodeGroup)
|
||||||
for _, upcomingNode := range upcomingNodes {
|
for _, upcomingNode := range upcomingNodes {
|
||||||
var pods []*apiv1.Pod
|
var pods []*apiv1.Pod
|
||||||
for _, podInfo := range upcomingNode.Pods {
|
for _, podInfo := range upcomingNode.Pods() {
|
||||||
pods = append(pods, podInfo.Pod)
|
pods = append(pods, podInfo.Pod)
|
||||||
}
|
}
|
||||||
err := a.ClusterSnapshot.AddNodeWithPods(upcomingNode.Node(), pods)
|
err := a.ClusterSnapshot.AddNodeWithPods(upcomingNode.Node(), pods)
|
||||||
|
|
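Pods was a field on the scheduler NodeInfo and is a method on the wrapper, so call sites that flatten a NodeInfo back into raw pods, like the upcoming-nodes loop above, change only the accessor. A sketch of that pattern; the rawPods helper name is illustrative.

package example

import (
	apiv1 "k8s.io/api/core/v1"

	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

// rawPods extracts the plain Pod objects from a wrapper NodeInfo. Pods() is a
// method on the new type, and each element wraps the pod in a framework.PodInfo.
func rawPods(nodeInfo *framework.NodeInfo) []*apiv1.Pod {
	var pods []*apiv1.Pod
	for _, podInfo := range nodeInfo.Pods() {
		pods = append(pods, podInfo.Pod)
	}
	return pods
}
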
@ -989,7 +988,7 @@ func filterNodesFromSelectedGroups(cp cloudprovider.CloudProvider, nodes ...*api
|
||||||
return filtered
|
return filtered
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *StaticAutoscaler) updateClusterState(allNodes []*apiv1.Node, nodeInfosForGroups map[string]*schedulerframework.NodeInfo, currentTime time.Time) caerrors.AutoscalerError {
|
func (a *StaticAutoscaler) updateClusterState(allNodes []*apiv1.Node, nodeInfosForGroups map[string]*framework.NodeInfo, currentTime time.Time) caerrors.AutoscalerError {
|
||||||
err := a.clusterStateRegistry.UpdateNodes(allNodes, nodeInfosForGroups, currentTime)
|
err := a.clusterStateRegistry.UpdateNodes(allNodes, nodeInfosForGroups, currentTime)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
klog.Errorf("Failed to update node registry: %v", err)
|
klog.Errorf("Failed to update node registry: %v", err)
|
||||||
|
|
@ -1016,8 +1015,8 @@ func allPodsAreNew(pods []*apiv1.Pod, currentTime time.Time) bool {
|
||||||
return found && oldest.Add(unschedulablePodWithGpuTimeBuffer).After(currentTime)
|
return found && oldest.Add(unschedulablePodWithGpuTimeBuffer).After(currentTime)
|
||||||
}
|
}
|
||||||
|
|
||||||
func getUpcomingNodeInfos(upcomingCounts map[string]int, nodeInfos map[string]*schedulerframework.NodeInfo) map[string][]*schedulerframework.NodeInfo {
|
func getUpcomingNodeInfos(upcomingCounts map[string]int, nodeInfos map[string]*framework.NodeInfo) map[string][]*framework.NodeInfo {
|
||||||
upcomingNodes := make(map[string][]*schedulerframework.NodeInfo)
|
upcomingNodes := make(map[string][]*framework.NodeInfo)
|
||||||
for nodeGroup, numberOfNodes := range upcomingCounts {
|
for nodeGroup, numberOfNodes := range upcomingCounts {
|
||||||
nodeTemplate, found := nodeInfos[nodeGroup]
|
nodeTemplate, found := nodeInfos[nodeGroup]
|
||||||
if !found {
|
if !found {
|
||||||
|
|
@ -1030,7 +1029,7 @@ func getUpcomingNodeInfos(upcomingCounts map[string]int, nodeInfos map[string]*s
|
||||||
}
|
}
|
||||||
nodeTemplate.Node().Annotations[NodeUpcomingAnnotation] = "true"
|
nodeTemplate.Node().Annotations[NodeUpcomingAnnotation] = "true"
|
||||||
|
|
||||||
var nodes []*schedulerframework.NodeInfo
|
var nodes []*framework.NodeInfo
|
||||||
for i := 0; i < numberOfNodes; i++ {
|
for i := 0; i < numberOfNodes; i++ {
|
||||||
// Ensure new nodes have different names because nodeName
|
// Ensure new nodes have different names because nodeName
|
||||||
// will be used as a map key. Also deep copy pods (daemonsets &
|
// will be used as a map key. Also deep copy pods (daemonsets &
|
||||||
|
|
|
||||||
|
|
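getUpcomingNodeInfos expands a per-group template into one NodeInfo per upcoming node, and each copy needs a distinct node name because the copies are later keyed by name. A sketch of that step using scheduler.DeepCopyTemplateNode, the helper the binpacking estimator uses later in this patch, assuming it returns the wrapper type after the migration; the expandTemplate name and the name format are illustrative.

package example

import (
	"fmt"

	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
	"k8s.io/autoscaler/cluster-autoscaler/utils/scheduler"
)

// expandTemplate returns count copies of a template NodeInfo, each with a
// unique name suffix so the copies can be used as map keys.
func expandTemplate(template *framework.NodeInfo, group string, count int) []*framework.NodeInfo {
	var nodes []*framework.NodeInfo
	for i := 0; i < count; i++ {
		nodes = append(nodes, scheduler.DeepCopyTemplateNode(template, fmt.Sprintf("upcoming-%s-%d", group, i)))
	}
	return nodes
}
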
@ -53,6 +53,7 @@ import (
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/simulator"
|
"k8s.io/autoscaler/cluster-autoscaler/simulator"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability/rules"
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability/rules"
|
||||||
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/options"
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/options"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/utilization"
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/utilization"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/utils/drain"
|
"k8s.io/autoscaler/cluster-autoscaler/utils/drain"
|
||||||
|
|
@ -72,7 +73,6 @@ import (
|
||||||
"k8s.io/apimachinery/pkg/labels"
|
"k8s.io/apimachinery/pkg/labels"
|
||||||
"k8s.io/client-go/kubernetes/fake"
|
"k8s.io/client-go/kubernetes/fake"
|
||||||
v1appslister "k8s.io/client-go/listers/apps/v1"
|
v1appslister "k8s.io/client-go/listers/apps/v1"
|
||||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
|
||||||
|
|
||||||
"github.com/google/go-cmp/cmp"
|
"github.com/google/go-cmp/cmp"
|
||||||
"github.com/google/go-cmp/cmp/cmpopts"
|
"github.com/google/go-cmp/cmp/cmpopts"
|
||||||
|
|
@ -336,8 +336,7 @@ func TestStaticAutoscalerRunOnce(t *testing.T) {
|
||||||
p2 := BuildTestPod("p2", 600, 100, MarkUnschedulable())
|
p2 := BuildTestPod("p2", 600, 100, MarkUnschedulable())
|
||||||
|
|
||||||
tn := BuildTestNode("tn", 1000, 1000)
|
tn := BuildTestNode("tn", 1000, 1000)
|
||||||
tni := schedulerframework.NewNodeInfo()
|
tni := framework.NewTestNodeInfo(tn)
|
||||||
tni.SetNode(tn)
|
|
||||||
|
|
||||||
provider := testprovider.NewTestAutoprovisioningCloudProvider(
|
provider := testprovider.NewTestAutoprovisioningCloudProvider(
|
||||||
func(id string, delta int) error {
|
func(id string, delta int) error {
|
||||||
|
|
@ -348,7 +347,7 @@ func TestStaticAutoscalerRunOnce(t *testing.T) {
|
||||||
return ret
|
return ret
|
||||||
},
|
},
|
||||||
nil, nil,
|
nil, nil,
|
||||||
nil, map[string]*schedulerframework.NodeInfo{"ng1": tni, "ng2": tni, "ng3": tni})
|
nil, map[string]*framework.NodeInfo{"ng1": tni, "ng2": tni, "ng3": tni})
|
||||||
provider.AddNodeGroup("ng1", 1, 10, 1)
|
provider.AddNodeGroup("ng1", 1, 10, 1)
|
||||||
provider.AddNode("ng1", n1)
|
provider.AddNode("ng1", n1)
|
||||||
ng1 := reflect.ValueOf(provider.GetNodeGroup("ng1")).Interface().(*testprovider.TestNodeGroup)
|
ng1 := reflect.ValueOf(provider.GetNodeGroup("ng1")).Interface().(*testprovider.TestNodeGroup)
|
||||||
|
|
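In test code the two-step construction (NewNodeInfo() followed by SetNode()) collapses into a single NewTestNodeInfo call that takes the node and, optionally, pods. A sketch of the template map handed to the test cloud provider above; the templateNodeInfos helper and the group names are illustrative.

package example

import (
	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
	. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
)

// templateNodeInfos builds the per-group NodeInfo templates for the test cloud
// provider; NewTestNodeInfo replaces the old NewNodeInfo()+SetNode() pair.
func templateNodeInfos() map[string]*framework.NodeInfo {
	tn := BuildTestNode("tn", 1000, 1000)
	tni := framework.NewTestNodeInfo(tn)
	return map[string]*framework.NodeInfo{"ng1": tni, "ng2": tni, "ng3": tni}
}
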
@ -514,8 +513,7 @@ func TestStaticAutoscalerRunOnceWithScaleDownDelayPerNG(t *testing.T) {
|
||||||
SetNodeReadyState(n2, true, time.Now())
|
SetNodeReadyState(n2, true, time.Now())
|
||||||
|
|
||||||
tn := BuildTestNode("tn", 1000, 1000)
|
tn := BuildTestNode("tn", 1000, 1000)
|
||||||
tni := schedulerframework.NewNodeInfo()
|
tni := framework.NewTestNodeInfo(tn)
|
||||||
tni.SetNode(tn)
|
|
||||||
|
|
||||||
provider := testprovider.NewTestAutoprovisioningCloudProvider(
|
provider := testprovider.NewTestAutoprovisioningCloudProvider(
|
||||||
func(id string, delta int) error {
|
func(id string, delta int) error {
|
||||||
|
|
@ -526,7 +524,7 @@ func TestStaticAutoscalerRunOnceWithScaleDownDelayPerNG(t *testing.T) {
|
||||||
return ret
|
return ret
|
||||||
},
|
},
|
||||||
nil, nil,
|
nil, nil,
|
||||||
nil, map[string]*schedulerframework.NodeInfo{"ng1": tni, "ng2": tni})
|
nil, map[string]*framework.NodeInfo{"ng1": tni, "ng2": tni})
|
||||||
assert.NotNil(t, provider)
|
assert.NotNil(t, provider)
|
||||||
|
|
||||||
provider.AddNodeGroup("ng1", 0, 10, 1)
|
provider.AddNodeGroup("ng1", 0, 10, 1)
|
||||||
|
|
@ -744,16 +742,13 @@ func TestStaticAutoscalerRunOnceWithAutoprovisionedEnabled(t *testing.T) {
|
||||||
|
|
||||||
tn1 := BuildTestNode("tn1", 100, 1000)
|
tn1 := BuildTestNode("tn1", 100, 1000)
|
||||||
SetNodeReadyState(tn1, true, time.Now())
|
SetNodeReadyState(tn1, true, time.Now())
|
||||||
tni1 := schedulerframework.NewNodeInfo()
|
tni1 := framework.NewTestNodeInfo(tn1)
|
||||||
tni1.SetNode(tn1)
|
|
||||||
tn2 := BuildTestNode("tn2", 1000, 1000)
|
tn2 := BuildTestNode("tn2", 1000, 1000)
|
||||||
SetNodeReadyState(tn2, true, time.Now())
|
SetNodeReadyState(tn2, true, time.Now())
|
||||||
tni2 := schedulerframework.NewNodeInfo()
|
tni2 := framework.NewTestNodeInfo(tn2)
|
||||||
tni2.SetNode(tn2)
|
|
||||||
tn3 := BuildTestNode("tn3", 100, 1000)
|
tn3 := BuildTestNode("tn3", 100, 1000)
|
||||||
SetNodeReadyState(tn2, true, time.Now())
|
SetNodeReadyState(tn2, true, time.Now())
|
||||||
tni3 := schedulerframework.NewNodeInfo()
|
tni3 := framework.NewTestNodeInfo(tn3)
|
||||||
tni3.SetNode(tn3)
|
|
||||||
|
|
||||||
provider := testprovider.NewTestAutoprovisioningCloudProvider(
|
provider := testprovider.NewTestAutoprovisioningCloudProvider(
|
||||||
func(id string, delta int) error {
|
func(id string, delta int) error {
|
||||||
|
|
@ -767,7 +762,7 @@ func TestStaticAutoscalerRunOnceWithAutoprovisionedEnabled(t *testing.T) {
|
||||||
}, func(id string) error {
|
}, func(id string) error {
|
||||||
return onNodeGroupDeleteMock.Delete(id)
|
return onNodeGroupDeleteMock.Delete(id)
|
||||||
},
|
},
|
||||||
[]string{"TN1", "TN2"}, map[string]*schedulerframework.NodeInfo{"TN1": tni1, "TN2": tni2, "ng1": tni3})
|
[]string{"TN1", "TN2"}, map[string]*framework.NodeInfo{"TN1": tni1, "TN2": tni2, "ng1": tni3})
|
||||||
provider.AddNodeGroup("ng1", 1, 10, 1)
|
provider.AddNodeGroup("ng1", 1, 10, 1)
|
||||||
provider.AddAutoprovisionedNodeGroup("autoprovisioned-TN1", 0, 10, 0, "TN1")
|
provider.AddAutoprovisionedNodeGroup("autoprovisioned-TN1", 0, 10, 0, "TN1")
|
||||||
autoprovisionedTN1 := reflect.ValueOf(provider.GetNodeGroup("autoprovisioned-TN1")).Interface().(*testprovider.TestNodeGroup)
|
autoprovisionedTN1 := reflect.ValueOf(provider.GetNodeGroup("autoprovisioned-TN1")).Interface().(*testprovider.TestNodeGroup)
|
||||||
|
|
@ -2005,13 +2000,13 @@ func (f *candidateTrackingFakePlanner) NodeUtilizationMap() map[string]utilizati
|
||||||
}
|
}
|
||||||
|
|
||||||
func assertSnapshotNodeCount(t *testing.T, snapshot clustersnapshot.ClusterSnapshot, wantCount int) {
|
func assertSnapshotNodeCount(t *testing.T, snapshot clustersnapshot.ClusterSnapshot, wantCount int) {
|
||||||
nodeInfos, err := snapshot.NodeInfos().List()
|
nodeInfos, err := snapshot.ListNodeInfos()
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.Len(t, nodeInfos, wantCount)
|
assert.Len(t, nodeInfos, wantCount)
|
||||||
}
|
}
|
||||||
|
|
||||||
func assertNodesNotInSnapshot(t *testing.T, snapshot clustersnapshot.ClusterSnapshot, nodeNames map[string]bool) {
|
func assertNodesNotInSnapshot(t *testing.T, snapshot clustersnapshot.ClusterSnapshot, nodeNames map[string]bool) {
|
||||||
nodeInfos, err := snapshot.NodeInfos().List()
|
nodeInfos, err := snapshot.ListNodeInfos()
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
for _, nodeInfo := range nodeInfos {
|
for _, nodeInfo := range nodeInfos {
|
||||||
assert.NotContains(t, nodeNames, nodeInfo.Node().Name)
|
assert.NotContains(t, nodeNames, nodeInfo.Node().Name)
|
||||||
|
|
@ -2019,7 +2014,7 @@ func assertNodesNotInSnapshot(t *testing.T, snapshot clustersnapshot.ClusterSnap
|
||||||
}
|
}
|
||||||
|
|
||||||
func assertNodesInSnapshot(t *testing.T, snapshot clustersnapshot.ClusterSnapshot, nodeNames map[string]bool) {
|
func assertNodesInSnapshot(t *testing.T, snapshot clustersnapshot.ClusterSnapshot, nodeNames map[string]bool) {
|
||||||
nodeInfos, err := snapshot.NodeInfos().List()
|
nodeInfos, err := snapshot.ListNodeInfos()
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
snapshotNodeNames := map[string]bool{}
|
snapshotNodeNames := map[string]bool{}
|
||||||
for _, nodeInfo := range nodeInfos {
|
for _, nodeInfo := range nodeInfos {
|
||||||
|
|
|
||||||
|
|
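Snapshot listing also gets flatter: snapshot.NodeInfos().List() becomes snapshot.ListNodeInfos(), which returns the wrapper type directly. A sketch of the counting step behind the assertions above; the snapshotNodeCount helper name is illustrative.

package example

import (
	"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
)

// snapshotNodeCount lists all NodeInfos in the cluster snapshot and returns
// how many there are.
func snapshotNodeCount(snapshot clustersnapshot.ClusterSnapshot) (int, error) {
	nodeInfos, err := snapshot.ListNodeInfos()
	if err != nil {
		return 0, err
	}
	return len(nodeInfos), nil
}
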
@ -37,6 +37,7 @@ import (
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/processors/nodegroups"
|
"k8s.io/autoscaler/cluster-autoscaler/processors/nodegroups"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/processors/status"
|
"k8s.io/autoscaler/cluster-autoscaler/processors/status"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
|
||||||
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker"
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/utils/backoff"
|
"k8s.io/autoscaler/cluster-autoscaler/utils/backoff"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||||
|
|
@ -48,7 +49,6 @@ import (
|
||||||
"k8s.io/apimachinery/pkg/api/resource"
|
"k8s.io/apimachinery/pkg/api/resource"
|
||||||
kube_client "k8s.io/client-go/kubernetes"
|
kube_client "k8s.io/client-go/kubernetes"
|
||||||
kube_record "k8s.io/client-go/tools/record"
|
kube_record "k8s.io/client-go/tools/record"
|
||||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// NodeConfig is a node config used in tests
|
// NodeConfig is a node config used in tests
|
||||||
|
|
@ -100,7 +100,7 @@ type NodeGroupConfig struct {
|
||||||
// NodeTemplateConfig is a structure to provide node info in tests
|
// NodeTemplateConfig is a structure to provide node info in tests
|
||||||
type NodeTemplateConfig struct {
|
type NodeTemplateConfig struct {
|
||||||
MachineType string
|
MachineType string
|
||||||
NodeInfo *schedulerframework.NodeInfo
|
NodeInfo *framework.NodeInfo
|
||||||
NodeGroupName string
|
NodeGroupName string
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -284,9 +284,9 @@ type MockAutoprovisioningNodeGroupListProcessor struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Process extends the list of node groups
|
// Process extends the list of node groups
|
||||||
func (p *MockAutoprovisioningNodeGroupListProcessor) Process(context *context.AutoscalingContext, nodeGroups []cloudprovider.NodeGroup, nodeInfos map[string]*schedulerframework.NodeInfo,
|
func (p *MockAutoprovisioningNodeGroupListProcessor) Process(context *context.AutoscalingContext, nodeGroups []cloudprovider.NodeGroup, nodeInfos map[string]*framework.NodeInfo,
|
||||||
unschedulablePods []*apiv1.Pod,
|
unschedulablePods []*apiv1.Pod,
|
||||||
) ([]cloudprovider.NodeGroup, map[string]*schedulerframework.NodeInfo, error) {
|
) ([]cloudprovider.NodeGroup, map[string]*framework.NodeInfo, error) {
|
||||||
machines, err := context.CloudProvider.GetAvailableMachineTypes()
|
machines, err := context.CloudProvider.GetAvailableMachineTypes()
|
||||||
assert.NoError(p.T, err)
|
assert.NoError(p.T, err)
|
||||||
|
|
||||||
|
|
@ -368,7 +368,7 @@ func (r *MockReportingStrategy) LastInputOptions() []GroupSizeChange {
|
||||||
// BestOption satisfies the Strategy interface. Picks the best option from those passed as an argument.
|
// BestOption satisfies the Strategy interface. Picks the best option from those passed as an argument.
|
||||||
// When parameter optionToChoose is defined, it's picked as the best one.
|
// When parameter optionToChoose is defined, it's picked as the best one.
|
||||||
// Otherwise, random option is used.
|
// Otherwise, random option is used.
|
||||||
func (r *MockReportingStrategy) BestOption(options []expander.Option, nodeInfo map[string]*schedulerframework.NodeInfo) *expander.Option {
|
func (r *MockReportingStrategy) BestOption(options []expander.Option, nodeInfo map[string]*framework.NodeInfo) *expander.Option {
|
||||||
r.results.inputOptions = expanderOptionsToGroupSizeChanges(options)
|
r.results.inputOptions = expanderOptionsToGroupSizeChanges(options)
|
||||||
if r.optionToChoose == nil {
|
if r.optionToChoose == nil {
|
||||||
return r.defaultStrategy.BestOption(options, nodeInfo)
|
return r.defaultStrategy.BestOption(options, nodeInfo)
|
||||||
|
|
|
||||||
|
|
@ -27,16 +27,16 @@ import (
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/clusterstate"
|
"k8s.io/autoscaler/cluster-autoscaler/clusterstate"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/metrics"
|
"k8s.io/autoscaler/cluster-autoscaler/metrics"
|
||||||
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/utils/daemonset"
|
"k8s.io/autoscaler/cluster-autoscaler/utils/daemonset"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/utils/labels"
|
"k8s.io/autoscaler/cluster-autoscaler/utils/labels"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/utils/taints"
|
"k8s.io/autoscaler/cluster-autoscaler/utils/taints"
|
||||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// GetNodeInfoFromTemplate returns a NodeInfo object built based on the TemplateNodeInfo returned by NodeGroup.TemplateNodeInfo().
|
// GetNodeInfoFromTemplate returns a NodeInfo object built based on the TemplateNodeInfo returned by NodeGroup.TemplateNodeInfo().
|
||||||
func GetNodeInfoFromTemplate(nodeGroup cloudprovider.NodeGroup, daemonsets []*appsv1.DaemonSet, taintConfig taints.TaintConfig) (*schedulerframework.NodeInfo, errors.AutoscalerError) {
|
func GetNodeInfoFromTemplate(nodeGroup cloudprovider.NodeGroup, daemonsets []*appsv1.DaemonSet, taintConfig taints.TaintConfig) (*framework.NodeInfo, errors.AutoscalerError) {
|
||||||
id := nodeGroup.Id()
|
id := nodeGroup.Id()
|
||||||
baseNodeInfo, err := nodeGroup.TemplateNodeInfo()
|
baseNodeInfo, err := nodeGroup.TemplateNodeInfo()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
@ -55,12 +55,11 @@ func GetNodeInfoFromTemplate(nodeGroup cloudprovider.NodeGroup, daemonsets []*ap
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.ToAutoscalerError(errors.InternalError, err)
|
return nil, errors.ToAutoscalerError(errors.InternalError, err)
|
||||||
}
|
}
|
||||||
for _, podInfo := range baseNodeInfo.Pods {
|
for _, podInfo := range baseNodeInfo.Pods() {
|
||||||
pods = append(pods, podInfo.Pod)
|
pods = append(pods, &framework.PodInfo{Pod: podInfo.Pod})
|
||||||
}
|
}
|
||||||
|
|
||||||
sanitizedNodeInfo := schedulerframework.NewNodeInfo(SanitizePods(pods, sanitizedNode)...)
|
sanitizedNodeInfo := framework.NewNodeInfo(sanitizedNode, nil, SanitizePods(pods, sanitizedNode)...)
|
||||||
sanitizedNodeInfo.SetNode(sanitizedNode)
|
|
||||||
return sanitizedNodeInfo, nil
|
return sanitizedNodeInfo, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -91,15 +90,14 @@ func FilterOutNodesFromNotAutoscaledGroups(nodes []*apiv1.Node, cloudProvider cl
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeepCopyNodeInfo clones the provided nodeInfo
|
// DeepCopyNodeInfo clones the provided nodeInfo
|
||||||
func DeepCopyNodeInfo(nodeInfo *schedulerframework.NodeInfo) *schedulerframework.NodeInfo {
|
func DeepCopyNodeInfo(nodeInfo *framework.NodeInfo) *framework.NodeInfo {
|
||||||
newPods := make([]*apiv1.Pod, 0)
|
newPods := make([]*framework.PodInfo, 0)
|
||||||
for _, podInfo := range nodeInfo.Pods {
|
for _, podInfo := range nodeInfo.Pods() {
|
||||||
newPods = append(newPods, podInfo.Pod.DeepCopy())
|
newPods = append(newPods, &framework.PodInfo{Pod: podInfo.Pod.DeepCopy()})
|
||||||
}
|
}
|
||||||
|
|
||||||
// Build a new node info.
|
// Build a new node info.
|
||||||
newNodeInfo := schedulerframework.NewNodeInfo(newPods...)
|
newNodeInfo := framework.NewNodeInfo(nodeInfo.Node().DeepCopy(), nil, newPods...)
|
||||||
newNodeInfo.SetNode(nodeInfo.Node().DeepCopy())
|
|
||||||
return newNodeInfo
|
return newNodeInfo
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -121,13 +119,13 @@ func SanitizeNode(node *apiv1.Node, nodeGroup string, taintConfig taints.TaintCo
|
||||||
}
|
}
|
||||||
|
|
||||||
// SanitizePods cleans up pods used for node group templates
|
// SanitizePods cleans up pods used for node group templates
|
||||||
func SanitizePods(pods []*apiv1.Pod, sanitizedNode *apiv1.Node) []*apiv1.Pod {
|
func SanitizePods(pods []*framework.PodInfo, sanitizedNode *apiv1.Node) []*framework.PodInfo {
|
||||||
// Update node name in pods.
|
// Update node name in pods.
|
||||||
sanitizedPods := make([]*apiv1.Pod, 0)
|
sanitizedPods := make([]*framework.PodInfo, 0)
|
||||||
for _, pod := range pods {
|
for _, pod := range pods {
|
||||||
sanitizedPod := pod.DeepCopy()
|
sanitizedPod := pod.Pod.DeepCopy()
|
||||||
sanitizedPod.Spec.NodeName = sanitizedNode.Name
|
sanitizedPod.Spec.NodeName = sanitizedNode.Name
|
||||||
sanitizedPods = append(sanitizedPods, sanitizedPod)
|
sanitizedPods = append(sanitizedPods, &framework.PodInfo{Pod: sanitizedPod})
|
||||||
}
|
}
|
||||||
|
|
||||||
return sanitizedPods
|
return sanitizedPods
|
||||||
|
|
|
||||||
|
|
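GetNodeInfoFromTemplate, DeepCopyNodeInfo and SanitizePods above all follow the same construction pattern: pods are wrapped in framework.PodInfo values and passed to framework.NewNodeInfo together with the node, instead of calling SetNode on an empty NodeInfo afterwards. The second constructor argument is nil everywhere in this patch, so the sketch below does the same; the sanitizedNodeInfo helper name is illustrative.

package example

import (
	apiv1 "k8s.io/api/core/v1"

	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

// sanitizedNodeInfo deep-copies the pods, points them at the given node and
// builds the wrapper NodeInfo in a single constructor call.
func sanitizedNodeInfo(node *apiv1.Node, pods []*apiv1.Pod) *framework.NodeInfo {
	podInfos := make([]*framework.PodInfo, 0, len(pods))
	for _, pod := range pods {
		sanitized := pod.DeepCopy()
		sanitized.Spec.NodeName = node.Name
		podInfos = append(podInfos, &framework.PodInfo{Pod: sanitized})
	}
	return framework.NewNodeInfo(node, nil, podInfos...)
}
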
@ -20,6 +20,7 @@ import (
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/utils/taints"
|
"k8s.io/autoscaler/cluster-autoscaler/utils/taints"
|
||||||
. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
|
. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
|
||||||
|
|
||||||
|
|
@ -31,7 +32,7 @@ import (
|
||||||
func TestSanitizePods(t *testing.T) {
|
func TestSanitizePods(t *testing.T) {
|
||||||
pod := BuildTestPod("p1", 80, 0)
|
pod := BuildTestPod("p1", 80, 0)
|
||||||
pod.Spec.NodeName = "n1"
|
pod.Spec.NodeName = "n1"
|
||||||
pods := []*apiv1.Pod{pod}
|
pods := []*framework.PodInfo{{Pod: pod}}
|
||||||
|
|
||||||
node := BuildTestNode("node", 1000, 1000)
|
node := BuildTestNode("node", 1000, 1000)
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -21,8 +21,8 @@ import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
v1 "k8s.io/api/core/v1"
|
v1 "k8s.io/api/core/v1"
|
||||||
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||||
"k8s.io/klog/v2"
|
"k8s.io/klog/v2"
|
||||||
"k8s.io/kubernetes/pkg/scheduler/framework"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// ClusterNode captures a single entity of nodeInfo. i.e. Node specs and all the pods on that node.
|
// ClusterNode captures a single entity of nodeInfo. i.e. Node specs and all the pods on that node.
|
||||||
|
|
@ -98,7 +98,7 @@ func GetClusterNodeCopy(template *framework.NodeInfo) *ClusterNode {
|
||||||
cNode := &ClusterNode{}
|
cNode := &ClusterNode{}
|
||||||
cNode.Node = template.Node().DeepCopy()
|
cNode.Node = template.Node().DeepCopy()
|
||||||
var pods []*v1.Pod
|
var pods []*v1.Pod
|
||||||
for _, p := range template.Pods {
|
for _, p := range template.Pods() {
|
||||||
pods = append(pods, p.Pod.DeepCopy())
|
pods = append(pods, p.Pod.DeepCopy())
|
||||||
}
|
}
|
||||||
cNode.Pods = pods
|
cNode.Pods = pods
|
||||||
|
|
|
||||||
|
|
@ -24,21 +24,17 @@ import (
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
v1 "k8s.io/api/core/v1"
|
v1 "k8s.io/api/core/v1"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/kubernetes/pkg/scheduler/framework"
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestBasicSetterWorkflow(t *testing.T) {
|
func TestBasicSetterWorkflow(t *testing.T) {
|
||||||
snapshot := &DebuggingSnapshotImpl{}
|
snapshot := &DebuggingSnapshotImpl{}
|
||||||
pod := []*framework.PodInfo{
|
pod := &v1.Pod{
|
||||||
{
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
Pod: &v1.Pod{
|
Name: "Pod1",
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
},
|
||||||
Name: "Pod1",
|
Spec: v1.PodSpec{
|
||||||
},
|
NodeName: "testNode",
|
||||||
Spec: v1.PodSpec{
|
|
||||||
NodeName: "testNode",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
node := &v1.Node{
|
node := &v1.Node{
|
||||||
|
|
@ -46,18 +42,10 @@ func TestBasicSetterWorkflow(t *testing.T) {
|
||||||
Name: "testNode",
|
Name: "testNode",
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
nodeInfo := framework.NewTestNodeInfo(node, pod)
|
||||||
nodeInfo := &framework.NodeInfo{
|
|
||||||
Pods: pod,
|
|
||||||
Requested: &framework.Resource{},
|
|
||||||
NonZeroRequested: &framework.Resource{},
|
|
||||||
Allocatable: &framework.Resource{},
|
|
||||||
Generation: 0,
|
|
||||||
}
|
|
||||||
|
|
||||||
var nodeGroups []*framework.NodeInfo
|
var nodeGroups []*framework.NodeInfo
|
||||||
nodeGroups = append(nodeGroups, nodeInfo)
|
nodeGroups = append(nodeGroups, nodeInfo)
|
||||||
nodeGroups[0].SetNode(node)
|
|
||||||
timestamp := time.Now().In(time.UTC)
|
timestamp := time.Now().In(time.UTC)
|
||||||
snapshot.SetClusterNodes(nodeGroups)
|
snapshot.SetClusterNodes(nodeGroups)
|
||||||
snapshot.SetEndTimestamp(timestamp)
|
snapshot.SetEndTimestamp(timestamp)
|
||||||
|
|
|
||||||
|
|
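The debugging snapshot tests no longer assemble a NodeInfo as a struct literal with scheduler-internal fields (Pods, Requested, NonZeroRequested, Allocatable, Generation); the wrapper keeps those private and the tests go through NewTestNodeInfo instead. A condensed sketch of the new construction; the testNodeInfo helper name is illustrative.

package example

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

// testNodeInfo builds a NodeInfo for one node and one pod scheduled on it,
// mirroring the setup in TestBasicSetterWorkflow after the migration.
func testNodeInfo() *framework.NodeInfo {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "Pod1"},
		Spec:       v1.PodSpec{NodeName: "testNode"},
	}
	node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "testNode"}}
	return framework.NewTestNodeInfo(node, pod)
}
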
@ -23,8 +23,8 @@ import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
v1 "k8s.io/api/core/v1"
|
v1 "k8s.io/api/core/v1"
|
||||||
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||||
"k8s.io/klog/v2"
|
"k8s.io/klog/v2"
|
||||||
"k8s.io/kubernetes/pkg/scheduler/framework"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// DebuggingSnapshotterState is the type for the debugging snapshot State machine
|
// DebuggingSnapshotterState is the type for the debugging snapshot State machine
|
||||||
|
|
|
||||||
|
|
@ -25,7 +25,7 @@ import (
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
v1 "k8s.io/api/core/v1"
|
v1 "k8s.io/api/core/v1"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/kubernetes/pkg/scheduler/framework"
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestBasicSnapshotRequest(t *testing.T) {
|
func TestBasicSnapshotRequest(t *testing.T) {
|
||||||
|
|
@ -33,16 +33,12 @@ func TestBasicSnapshotRequest(t *testing.T) {
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
snapshotter := NewDebuggingSnapshotter(true)
|
snapshotter := NewDebuggingSnapshotter(true)
|
||||||
|
|
||||||
pod := []*framework.PodInfo{
|
pod := &v1.Pod{
|
||||||
{
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
Pod: &v1.Pod{
|
Name: "Pod1",
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
},
|
||||||
Name: "Pod1",
|
Spec: v1.PodSpec{
|
||||||
},
|
NodeName: "testNode",
|
||||||
Spec: v1.PodSpec{
|
|
||||||
NodeName: "testNode",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
node := &v1.Node{
|
node := &v1.Node{
|
||||||
|
|
@ -50,18 +46,10 @@ func TestBasicSnapshotRequest(t *testing.T) {
|
||||||
Name: "testNode",
|
Name: "testNode",
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
nodeInfo := framework.NewTestNodeInfo(node, pod)
|
||||||
nodeInfo := &framework.NodeInfo{
|
|
||||||
Pods: pod,
|
|
||||||
Requested: &framework.Resource{},
|
|
||||||
NonZeroRequested: &framework.Resource{},
|
|
||||||
Allocatable: &framework.Resource{},
|
|
||||||
Generation: 0,
|
|
||||||
}
|
|
||||||
|
|
||||||
var nodeGroups []*framework.NodeInfo
|
var nodeGroups []*framework.NodeInfo
|
||||||
nodeGroups = append(nodeGroups, nodeInfo)
|
nodeGroups = append(nodeGroups, nodeInfo)
|
||||||
nodeGroups[0].SetNode(node)
|
|
||||||
|
|
||||||
req := httptest.NewRequest(http.MethodGet, "/", nil)
|
req := httptest.NewRequest(http.MethodGet, "/", nil)
|
||||||
w := httptest.NewRecorder()
|
w := httptest.NewRecorder()
|
||||||
|
|
|
||||||
|
|
@ -22,10 +22,10 @@ import (
|
||||||
apiv1 "k8s.io/api/core/v1"
|
apiv1 "k8s.io/api/core/v1"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
|
||||||
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker"
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/utils/scheduler"
|
"k8s.io/autoscaler/cluster-autoscaler/utils/scheduler"
|
||||||
klog "k8s.io/klog/v2"
|
klog "k8s.io/klog/v2"
|
||||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// BinpackingNodeEstimator estimates the number of needed nodes to handle the given amount of pods.
|
// BinpackingNodeEstimator estimates the number of needed nodes to handle the given amount of pods.
|
||||||
|
|
@ -89,7 +89,7 @@ func newEstimationState() *estimationState {
|
||||||
// Returns the number of nodes needed to accommodate all pods from the list.
|
// Returns the number of nodes needed to accommodate all pods from the list.
|
||||||
func (e *BinpackingNodeEstimator) Estimate(
|
func (e *BinpackingNodeEstimator) Estimate(
|
||||||
podsEquivalenceGroups []PodEquivalenceGroup,
|
podsEquivalenceGroups []PodEquivalenceGroup,
|
||||||
nodeTemplate *schedulerframework.NodeInfo,
|
nodeTemplate *framework.NodeInfo,
|
||||||
nodeGroup cloudprovider.NodeGroup,
|
nodeGroup cloudprovider.NodeGroup,
|
||||||
) (int, []*apiv1.Pod) {
|
) (int, []*apiv1.Pod) {
|
||||||
|
|
||||||
|
|
@ -136,7 +136,7 @@ func (e *BinpackingNodeEstimator) tryToScheduleOnExistingNodes(
|
||||||
pod := pods[index]
|
pod := pods[index]
|
||||||
|
|
||||||
// Check schedulability on all nodes created during simulation
|
// Check schedulability on all nodes created during simulation
|
||||||
nodeName, err := e.predicateChecker.FitsAnyNodeMatching(e.clusterSnapshot, pod, func(nodeInfo *schedulerframework.NodeInfo) bool {
|
nodeName, err := e.predicateChecker.FitsAnyNodeMatching(e.clusterSnapshot, pod, func(nodeInfo *framework.NodeInfo) bool {
|
||||||
return estimationState.newNodeNames[nodeInfo.Node().Name]
|
return estimationState.newNodeNames[nodeInfo.Node().Name]
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
@ -152,7 +152,7 @@ func (e *BinpackingNodeEstimator) tryToScheduleOnExistingNodes(
|
||||||
|
|
||||||
func (e *BinpackingNodeEstimator) tryToScheduleOnNewNodes(
|
func (e *BinpackingNodeEstimator) tryToScheduleOnNewNodes(
|
||||||
estimationState *estimationState,
|
estimationState *estimationState,
|
||||||
nodeTemplate *schedulerframework.NodeInfo,
|
nodeTemplate *framework.NodeInfo,
|
||||||
pods []*apiv1.Pod,
|
pods []*apiv1.Pod,
|
||||||
) error {
|
) error {
|
||||||
for _, pod := range pods {
|
for _, pod := range pods {
|
||||||
|
|
@ -208,11 +208,11 @@ func (e *BinpackingNodeEstimator) tryToScheduleOnNewNodes(
|
||||||
|
|
||||||
func (e *BinpackingNodeEstimator) addNewNodeToSnapshot(
|
func (e *BinpackingNodeEstimator) addNewNodeToSnapshot(
|
||||||
estimationState *estimationState,
|
estimationState *estimationState,
|
||||||
template *schedulerframework.NodeInfo,
|
template *framework.NodeInfo,
|
||||||
) error {
|
) error {
|
||||||
newNodeInfo := scheduler.DeepCopyTemplateNode(template, fmt.Sprintf("e-%d", estimationState.newNodeNameIndex))
|
newNodeInfo := scheduler.DeepCopyTemplateNode(template, fmt.Sprintf("e-%d", estimationState.newNodeNameIndex))
|
||||||
var pods []*apiv1.Pod
|
var pods []*apiv1.Pod
|
||||||
for _, podInfo := range newNodeInfo.Pods {
|
for _, podInfo := range newNodeInfo.Pods() {
|
||||||
pods = append(pods, podInfo.Pod)
|
pods = append(pods, podInfo.Pod)
|
||||||
}
|
}
|
||||||
if err := e.clusterSnapshot.AddNodeWithPods(newNodeInfo.Node(), pods); err != nil {
|
if err := e.clusterSnapshot.AddNodeWithPods(newNodeInfo.Node(), pods); err != nil {
|
||||||
|
|
|
||||||
|
|
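In the estimator only the parameter type of the FitsAnyNodeMatching callback changes, since Node() behaves the same on the wrapper as on the scheduler type. A sketch of that call; the fitsOnSimulatedNode helper name is illustrative, the PredicateChecker interface name is assumed from the predicatechecker package, and the (node name, error) return shape is taken from the call site above.

package example

import (
	apiv1 "k8s.io/api/core/v1"

	"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker"
)

// fitsOnSimulatedNode checks whether pod fits on any of the nodes created
// during the simulation, identified by name in newNodeNames.
func fitsOnSimulatedNode(checker predicatechecker.PredicateChecker, snapshot clustersnapshot.ClusterSnapshot,
	pod *apiv1.Pod, newNodeNames map[string]bool) (string, error) {
	return checker.FitsAnyNodeMatching(snapshot, pod, func(nodeInfo *framework.NodeInfo) bool {
		return newNodeNames[nodeInfo.Node().Name]
	})
}
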
@ -24,10 +24,10 @@ import (
|
||||||
"k8s.io/apimachinery/pkg/api/resource"
|
"k8s.io/apimachinery/pkg/api/resource"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
|
||||||
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker"
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker"
|
||||||
. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
|
. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/utils/units"
|
"k8s.io/autoscaler/cluster-autoscaler/utils/units"
|
||||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
|
||||||
schedulermetrics "k8s.io/kubernetes/pkg/scheduler/metrics"
|
schedulermetrics "k8s.io/kubernetes/pkg/scheduler/metrics"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
|
|
@ -222,8 +222,7 @@ func TestBinpackingEstimate(t *testing.T) {
|
||||||
processor := NewDecreasingPodOrderer()
|
processor := NewDecreasingPodOrderer()
|
||||||
estimator := NewBinpackingNodeEstimator(predicateChecker, clusterSnapshot, limiter, processor, nil /* EstimationContext */, nil /* EstimationAnalyserFunc */)
|
estimator := NewBinpackingNodeEstimator(predicateChecker, clusterSnapshot, limiter, processor, nil /* EstimationContext */, nil /* EstimationAnalyserFunc */)
|
||||||
node := makeNode(tc.millicores, tc.memory, 10, "template", "zone-mars")
|
node := makeNode(tc.millicores, tc.memory, 10, "template", "zone-mars")
|
||||||
nodeInfo := schedulerframework.NewNodeInfo()
|
nodeInfo := framework.NewTestNodeInfo(node)
|
||||||
nodeInfo.SetNode(node)
|
|
||||||
|
|
||||||
estimatedNodes, estimatedPods := estimator.Estimate(tc.podsEquivalenceGroup, nodeInfo, nil)
|
estimatedNodes, estimatedPods := estimator.Estimate(tc.podsEquivalenceGroup, nodeInfo, nil)
|
||||||
assert.Equal(t, tc.expectNodeCount, estimatedNodes)
|
assert.Equal(t, tc.expectNodeCount, estimatedNodes)
|
||||||
|
|
@ -277,8 +276,7 @@ func BenchmarkBinpackingEstimate(b *testing.B) {
|
||||||
processor := NewDecreasingPodOrderer()
|
processor := NewDecreasingPodOrderer()
|
||||||
estimator := NewBinpackingNodeEstimator(predicateChecker, clusterSnapshot, limiter, processor, nil /* EstimationContext */, nil /* EstimationAnalyserFunc */)
|
estimator := NewBinpackingNodeEstimator(predicateChecker, clusterSnapshot, limiter, processor, nil /* EstimationContext */, nil /* EstimationAnalyserFunc */)
|
||||||
node := makeNode(millicores, memory, podsPerNode, "template", "zone-mars")
|
node := makeNode(millicores, memory, podsPerNode, "template", "zone-mars")
|
||||||
nodeInfo := schedulerframework.NewNodeInfo()
|
nodeInfo := framework.NewTestNodeInfo(node)
|
||||||
nodeInfo.SetNode(node)
|
|
||||||
|
|
||||||
estimatedNodes, estimatedPods := estimator.Estimate(podsEquivalenceGroup, nodeInfo, nil)
|
estimatedNodes, estimatedPods := estimator.Estimate(podsEquivalenceGroup, nodeInfo, nil)
|
||||||
assert.Equal(b, expectNodeCount, estimatedNodes)
|
assert.Equal(b, expectNodeCount, estimatedNodes)
|
||||||
|
|
|
||||||
|
|
@ -22,7 +22,7 @@ import (
|
||||||
apiv1 "k8s.io/api/core/v1"
|
apiv1 "k8s.io/api/core/v1"
|
||||||
"k8s.io/apimachinery/pkg/api/resource"
|
"k8s.io/apimachinery/pkg/api/resource"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||||
"k8s.io/kubernetes/pkg/scheduler/framework"
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||||
)
|
)
|
||||||
|
|
||||||
// podScoreInfo contains Pod and score that corresponds to how important it is to handle the pod first.
|
// podScoreInfo contains Pod and score that corresponds to how important it is to handle the pod first.
|
||||||
|
|
|
||||||
|
|
@ -21,8 +21,8 @@ import (
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
v1 "k8s.io/api/core/v1"
|
v1 "k8s.io/api/core/v1"
|
||||||
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/utils/test"
|
"k8s.io/autoscaler/cluster-autoscaler/utils/test"
|
||||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestPodPriorityProcessor(t *testing.T) {
|
func TestPodPriorityProcessor(t *testing.T) {
|
||||||
|
|
@ -57,8 +57,7 @@ func TestPodPriorityProcessor(t *testing.T) {
|
||||||
tc := tc
|
tc := tc
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
processor := NewDecreasingPodOrderer()
|
processor := NewDecreasingPodOrderer()
|
||||||
nodeInfo := schedulerframework.NewNodeInfo()
|
nodeInfo := framework.NewTestNodeInfo(node)
|
||||||
nodeInfo.SetNode(node)
|
|
||||||
actual := processor.Order(tc.inputPodsEquivalentGroup, nodeInfo, nil)
|
actual := processor.Order(tc.inputPodsEquivalentGroup, nodeInfo, nil)
|
||||||
assert.Equal(t, tc.expectedPodsEquivalentGroup, actual)
|
assert.Equal(t, tc.expectedPodsEquivalentGroup, actual)
|
||||||
})
|
})
|
||||||
|
|
|
||||||
|
|
@ -22,9 +22,8 @@ import (
|
||||||
apiv1 "k8s.io/api/core/v1"
|
apiv1 "k8s.io/api/core/v1"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
|
||||||
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker"
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker"
|
||||||
"k8s.io/kubernetes/pkg/scheduler/framework"
|
|
||||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
|
@ -54,7 +53,7 @@ func (p *PodEquivalenceGroup) Exemplar() *apiv1.Pod {
|
||||||
// to schedule on those nodes.
|
// to schedule on those nodes.
|
||||||
type Estimator interface {
|
type Estimator interface {
|
||||||
// Estimate estimates how many nodes are needed to provision pods coming from the given equivalence groups.
|
// Estimate estimates how many nodes are needed to provision pods coming from the given equivalence groups.
|
||||||
Estimate([]PodEquivalenceGroup, *schedulerframework.NodeInfo, cloudprovider.NodeGroup) (int, []*apiv1.Pod)
|
Estimate([]PodEquivalenceGroup, *framework.NodeInfo, cloudprovider.NodeGroup) (int, []*apiv1.Pod)
|
||||||
}
|
}
|
||||||
|
|
||||||
// EstimatorBuilder creates a new estimator object.
|
// EstimatorBuilder creates a new estimator object.
|
||||||
|
|
|
||||||
|
|
@ -19,7 +19,7 @@ package expander
|
||||||
import (
|
import (
|
||||||
apiv1 "k8s.io/api/core/v1"
|
apiv1 "k8s.io/api/core/v1"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
|
@ -53,10 +53,10 @@ type Option struct {
|
||||||
|
|
||||||
// Strategy describes an interface for selecting the best option when scaling up
|
// Strategy describes an interface for selecting the best option when scaling up
|
||||||
type Strategy interface {
|
type Strategy interface {
|
||||||
BestOption(options []Option, nodeInfo map[string]*schedulerframework.NodeInfo) *Option
|
BestOption(options []Option, nodeInfo map[string]*framework.NodeInfo) *Option
|
||||||
}
|
}
|
||||||
|
|
||||||
// Filter describes an interface for filtering to equally good options according to some criteria
|
// Filter describes an interface for filtering to equally good options according to some criteria
|
||||||
type Filter interface {
|
type Filter interface {
|
||||||
BestOptions(options []Option, nodeInfo map[string]*schedulerframework.NodeInfo) []Option
|
BestOptions(options []Option, nodeInfo map[string]*framework.NodeInfo) []Option
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
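Every expander (random, mostpods, priority, price, least-waste, grpc) implements these two interfaces, so they all pick up the new map element type without further changes. A hypothetical Strategy that simply returns the first option, to show the updated signature; firstOption is not a real expander.

package example

import (
	"k8s.io/autoscaler/cluster-autoscaler/expander"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

// firstOption is an illustrative expander.Strategy; it ignores the NodeInfo
// map and returns the first option, or nil if there are none.
type firstOption struct{}

func (f *firstOption) BestOption(options []expander.Option, nodeInfo map[string]*framework.NodeInfo) *expander.Option {
	if len(options) == 0 {
		return nil
	}
	return &options[0]
}
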
@ -18,8 +18,7 @@ package factory
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/expander"
|
"k8s.io/autoscaler/cluster-autoscaler/expander"
|
||||||
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type chainStrategy struct {
|
type chainStrategy struct {
|
||||||
|
|
@ -34,7 +33,7 @@ func newChainStrategy(filters []expander.Filter, fallback expander.Strategy) exp
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *chainStrategy) BestOption(options []expander.Option, nodeInfo map[string]*schedulerframework.NodeInfo) *expander.Option {
|
func (c *chainStrategy) BestOption(options []expander.Option, nodeInfo map[string]*framework.NodeInfo) *expander.Option {
|
||||||
filteredOptions := options
|
filteredOptions := options
|
||||||
for _, filter := range c.filters {
|
for _, filter := range c.filters {
|
||||||
filteredOptions = filter.BestOptions(filteredOptions, nodeInfo)
|
filteredOptions = filter.BestOptions(filteredOptions, nodeInfo)
|
||||||
|
|
|
||||||
|
|
@ -17,12 +17,12 @@ limitations under the License.
|
||||||
package factory
|
package factory
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/expander"
|
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
"k8s.io/autoscaler/cluster-autoscaler/expander"
|
||||||
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||||
)
|
)
|
||||||
|
|
||||||
type substringTestFilterStrategy struct {
|
type substringTestFilterStrategy struct {
|
||||||
|
|
@ -35,7 +35,7 @@ func newSubstringTestFilterStrategy(substring string) *substringTestFilterStrate
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *substringTestFilterStrategy) BestOptions(expansionOptions []expander.Option, nodeInfo map[string]*schedulerframework.NodeInfo) []expander.Option {
|
func (s *substringTestFilterStrategy) BestOptions(expansionOptions []expander.Option, nodeInfo map[string]*framework.NodeInfo) []expander.Option {
|
||||||
var ret []expander.Option
|
var ret []expander.Option
|
||||||
for _, option := range expansionOptions {
|
for _, option := range expansionOptions {
|
||||||
if strings.Contains(option.Debug, s.substring) {
|
if strings.Contains(option.Debug, s.substring) {
|
||||||
|
|
@ -46,7 +46,7 @@ func (s *substringTestFilterStrategy) BestOptions(expansionOptions []expander.Op
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *substringTestFilterStrategy) BestOption(expansionOptions []expander.Option, nodeInfo map[string]*schedulerframework.NodeInfo) *expander.Option {
|
func (s *substringTestFilterStrategy) BestOption(expansionOptions []expander.Option, nodeInfo map[string]*framework.NodeInfo) *expander.Option {
|
||||||
ret := s.BestOptions(expansionOptions, nodeInfo)
|
ret := s.BestOptions(expansionOptions, nodeInfo)
|
||||||
if len(ret) == 0 {
|
if len(ret) == 0 {
|
||||||
return nil
|
return nil
|
||||||
|
|
|
||||||
|
|
@ -24,8 +24,8 @@ import (
|
||||||
v1 "k8s.io/api/core/v1"
|
v1 "k8s.io/api/core/v1"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/expander"
|
"k8s.io/autoscaler/cluster-autoscaler/expander"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/expander/grpcplugin/protos"
|
"k8s.io/autoscaler/cluster-autoscaler/expander/grpcplugin/protos"
|
||||||
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||||
"k8s.io/klog/v2"
|
"k8s.io/klog/v2"
|
||||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
|
||||||
|
|
||||||
"google.golang.org/grpc"
|
"google.golang.org/grpc"
|
||||||
"google.golang.org/grpc/credentials"
|
"google.golang.org/grpc/credentials"
|
||||||
|
|
@ -72,7 +72,7 @@ func createGRPCClient(expanderCert string, expanderUrl string) protos.ExpanderCl
|
||||||
return protos.NewExpanderClient(conn)
|
return protos.NewExpanderClient(conn)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (g *grpcclientstrategy) BestOptions(expansionOptions []expander.Option, nodeInfo map[string]*schedulerframework.NodeInfo) []expander.Option {
|
func (g *grpcclientstrategy) BestOptions(expansionOptions []expander.Option, nodeInfo map[string]*framework.NodeInfo) []expander.Option {
|
||||||
if g.grpcClient == nil {
|
if g.grpcClient == nil {
|
||||||
klog.Errorf("Incorrect gRPC client config, filtering no options")
|
klog.Errorf("Incorrect gRPC client config, filtering no options")
|
||||||
return expansionOptions
|
return expansionOptions
|
||||||
|
|
@ -117,7 +117,7 @@ func populateOptionsForGRPC(expansionOptions []expander.Option) ([]*protos.Optio
|
||||||
}
|
}
|
||||||
|
|
||||||
// populateNodeInfoForGRPC looks at the corresponding v1.Node object per NodeInfo object, and populates the grpcNodeInfoMap with these to pass over grpc
|
// populateNodeInfoForGRPC looks at the corresponding v1.Node object per NodeInfo object, and populates the grpcNodeInfoMap with these to pass over grpc
|
||||||
func populateNodeInfoForGRPC(nodeInfos map[string]*schedulerframework.NodeInfo) map[string]*v1.Node {
|
func populateNodeInfoForGRPC(nodeInfos map[string]*framework.NodeInfo) map[string]*v1.Node {
|
||||||
grpcNodeInfoMap := make(map[string]*v1.Node)
|
grpcNodeInfoMap := make(map[string]*v1.Node)
|
||||||
for nodeId, nodeInfo := range nodeInfos {
|
for nodeId, nodeInfo := range nodeInfos {
|
||||||
grpcNodeInfoMap[nodeId] = nodeInfo.Node()
|
grpcNodeInfoMap[nodeId] = nodeInfo.Node()
|
||||||
|
|
|
||||||
|
|
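Only raw *v1.Node objects cross the gRPC boundary, so the loop in populateNodeInfoForGRPC keeps its body and merely changes the map's element type. A standalone sketch of that conversion; the nodesForGRPC helper name is illustrative.

package example

import (
	v1 "k8s.io/api/core/v1"

	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

// nodesForGRPC flattens the NodeInfo map into the plain Node map sent to the
// external expander service.
func nodesForGRPC(nodeInfos map[string]*framework.NodeInfo) map[string]*v1.Node {
	grpcNodeInfoMap := make(map[string]*v1.Node, len(nodeInfos))
	for nodeID, nodeInfo := range nodeInfos {
		grpcNodeInfoMap[nodeID] = nodeInfo.Node()
	}
	return grpcNodeInfoMap
}
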
@ -25,8 +25,8 @@ import (
|
||||||
v1 "k8s.io/api/core/v1"
|
v1 "k8s.io/api/core/v1"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/expander/grpcplugin/protos"
|
"k8s.io/autoscaler/cluster-autoscaler/expander/grpcplugin/protos"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/expander/mocks"
|
"k8s.io/autoscaler/cluster-autoscaler/expander/mocks"
|
||||||
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||||
. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
|
. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
|
||||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
|
||||||
|
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test"
|
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/expander"
|
"k8s.io/autoscaler/cluster-autoscaler/expander"
|
||||||
|
|
@ -124,11 +124,10 @@ func TestPopulateOptionsForGrpc(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func makeFakeNodeInfos() map[string]*schedulerframework.NodeInfo {
|
func makeFakeNodeInfos() map[string]*framework.NodeInfo {
|
||||||
nodeInfos := make(map[string]*schedulerframework.NodeInfo)
|
nodeInfos := make(map[string]*framework.NodeInfo)
|
||||||
for i, opt := range options {
|
for i, opt := range options {
|
||||||
nodeInfo := schedulerframework.NewNodeInfo()
|
nodeInfo := framework.NewTestNodeInfo(nodes[i])
|
||||||
nodeInfo.SetNode(nodes[i])
|
|
||||||
nodeInfos[opt.NodeGroup.Id()] = nodeInfo
|
nodeInfos[opt.NodeGroup.Id()] = nodeInfo
|
||||||
}
|
}
|
||||||
return nodeInfos
|
return nodeInfos
|
||||||
|
|
@ -251,7 +250,7 @@ func TestBestOptionsErrors(t *testing.T) {
|
||||||
testCases := []struct {
|
testCases := []struct {
|
||||||
desc string
|
desc string
|
||||||
client grpcclientstrategy
|
client grpcclientstrategy
|
||||||
nodeInfo map[string]*schedulerframework.NodeInfo
|
nodeInfo map[string]*framework.NodeInfo
|
||||||
mockResponse protos.BestOptionsResponse
|
mockResponse protos.BestOptionsResponse
|
||||||
errResponse error
|
errResponse error
|
||||||
}{
|
}{
|
||||||
|
|
|
||||||
|
|
@ -20,7 +20,7 @@ import (
|
||||||
"math"
|
"math"
|
||||||
|
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/expander"
|
"k8s.io/autoscaler/cluster-autoscaler/expander"
|
||||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||||
)
|
)
|
||||||
|
|
||||||
type leastnodes struct {
|
type leastnodes struct {
|
||||||
|
|
@ -32,7 +32,7 @@ func NewFilter() expander.Filter {
|
||||||
}
|
}
|
||||||
|
|
||||||
// BestOptions selects the expansion option that uses the least number of nodes
|
// BestOptions selects the expansion option that uses the least number of nodes
|
||||||
func (m *leastnodes) BestOptions(expansionOptions []expander.Option, nodeInfo map[string]*schedulerframework.NodeInfo) []expander.Option {
|
func (m *leastnodes) BestOptions(expansionOptions []expander.Option, nodeInfo map[string]*framework.NodeInfo) []expander.Option {
|
||||||
leastNodes := math.MaxInt
|
leastNodes := math.MaxInt
|
||||||
var leastOptions []expander.Option
|
var leastOptions []expander.Option
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -18,7 +18,7 @@ package mostpods
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/expander"
|
"k8s.io/autoscaler/cluster-autoscaler/expander"
|
||||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||||
)
|
)
|
||||||
|
|
||||||
type mostpods struct {
|
type mostpods struct {
|
||||||
|
|
@ -30,7 +30,7 @@ func NewFilter() expander.Filter {
|
||||||
}
|
}
|
||||||
|
|
||||||
// BestOptions selects the expansion option that schedules the most pods
|
// BestOptions selects the expansion option that schedules the most pods
|
||||||
func (m *mostpods) BestOptions(expansionOptions []expander.Option, nodeInfo map[string]*schedulerframework.NodeInfo) []expander.Option {
|
func (m *mostpods) BestOptions(expansionOptions []expander.Option, nodeInfo map[string]*framework.NodeInfo) []expander.Option {
|
||||||
var maxPods int
|
var maxPods int
|
||||||
var maxOptions []expander.Option
|
var maxOptions []expander.Option
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -26,9 +26,9 @@ import (
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/expander"
|
"k8s.io/autoscaler/cluster-autoscaler/expander"
|
||||||
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/utils/units"
|
"k8s.io/autoscaler/cluster-autoscaler/utils/units"
|
||||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
|
||||||
|
|
||||||
klog "k8s.io/klog/v2"
|
klog "k8s.io/klog/v2"
|
||||||
)
|
)
|
||||||
|
|
@ -87,7 +87,7 @@ func NewFilter(cloudProvider cloudprovider.CloudProvider,
|
||||||
}
|
}
|
||||||
|
|
||||||
// BestOptions selects options based on cost and preferred node type.
|
// BestOptions selects options based on cost and preferred node type.
|
||||||
func (p *priceBased) BestOptions(expansionOptions []expander.Option, nodeInfos map[string]*schedulerframework.NodeInfo) []expander.Option {
|
func (p *priceBased) BestOptions(expansionOptions []expander.Option, nodeInfos map[string]*framework.NodeInfo) []expander.Option {
|
||||||
var bestOptions []expander.Option
|
var bestOptions []expander.Option
|
||||||
bestOptionScore := 0.0
|
bestOptionScore := 0.0
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
|
|
|
||||||
|
|
@ -28,8 +28,8 @@ import (
|
||||||
apiv1 "k8s.io/api/core/v1"
|
apiv1 "k8s.io/api/core/v1"
|
||||||
cloudprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
cloudprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||||
testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test"
|
testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test"
|
||||||
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||||
. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
|
. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
|
||||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
)
|
)
|
||||||
|
|
@ -90,13 +90,10 @@ func TestPriceExpander(t *testing.T) {
|
||||||
ng2, _ := provider.NodeGroupForNode(n2)
|
ng2, _ := provider.NodeGroupForNode(n2)
|
||||||
ng3, _ := provider.NewNodeGroup("MT1", nil, nil, nil, nil)
|
ng3, _ := provider.NewNodeGroup("MT1", nil, nil, nil, nil)
|
||||||
|
|
||||||
ni1 := schedulerframework.NewNodeInfo()
|
ni1 := framework.NewTestNodeInfo(n1)
|
||||||
ni1.SetNode(n1)
|
ni2 := framework.NewTestNodeInfo(n2)
|
||||||
ni2 := schedulerframework.NewNodeInfo()
|
ni3 := framework.NewTestNodeInfo(n3)
|
||||||
ni2.SetNode(n2)
|
nodeInfosForGroups := map[string]*framework.NodeInfo{
|
||||||
ni3 := schedulerframework.NewNodeInfo()
|
|
||||||
ni3.SetNode(n3)
|
|
||||||
nodeInfosForGroups := map[string]*schedulerframework.NodeInfo{
|
|
||||||
"ng1": ni1, "ng2": ni2,
|
"ng1": ni1, "ng2": ni2,
|
||||||
}
|
}
|
||||||
var pricingModel cloudprovider.PricingModel
|
var pricingModel cloudprovider.PricingModel
|
||||||
|
|
|
||||||
|
|
@ -26,10 +26,10 @@ import (
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/expander"
|
"k8s.io/autoscaler/cluster-autoscaler/expander"
|
||||||
|
|
||||||
apiv1 "k8s.io/api/core/v1"
|
apiv1 "k8s.io/api/core/v1"
|
||||||
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||||
v1lister "k8s.io/client-go/listers/core/v1"
|
v1lister "k8s.io/client-go/listers/core/v1"
|
||||||
"k8s.io/client-go/tools/record"
|
"k8s.io/client-go/tools/record"
|
||||||
klog "k8s.io/klog/v2"
|
klog "k8s.io/klog/v2"
|
||||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
|
@ -116,7 +116,7 @@ func (p *priority) parsePrioritiesYAMLString(prioritiesYAML string) (priorities,
|
||||||
return newPriorities, nil
|
return newPriorities, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *priority) BestOptions(expansionOptions []expander.Option, nodeInfo map[string]*schedulerframework.NodeInfo) []expander.Option {
|
func (p *priority) BestOptions(expansionOptions []expander.Option, nodeInfo map[string]*framework.NodeInfo) []expander.Option {
|
||||||
if len(expansionOptions) <= 0 {
|
if len(expansionOptions) <= 0 {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -20,7 +20,7 @@ import (
|
||||||
"math/rand"
|
"math/rand"
|
||||||
|
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/expander"
|
"k8s.io/autoscaler/cluster-autoscaler/expander"
|
||||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||||
)
|
)
|
||||||
|
|
||||||
type random struct {
|
type random struct {
|
||||||
|
|
@ -37,7 +37,7 @@ func NewStrategy() expander.Strategy {
|
||||||
}
|
}
|
||||||
|
|
||||||
// BestOptions selects from the expansion options at random
|
// BestOptions selects from the expansion options at random
|
||||||
func (r *random) BestOptions(expansionOptions []expander.Option, nodeInfo map[string]*schedulerframework.NodeInfo) []expander.Option {
|
func (r *random) BestOptions(expansionOptions []expander.Option, nodeInfo map[string]*framework.NodeInfo) []expander.Option {
|
||||||
best := r.BestOption(expansionOptions, nodeInfo)
|
best := r.BestOption(expansionOptions, nodeInfo)
|
||||||
if best == nil {
|
if best == nil {
|
||||||
return nil
|
return nil
|
||||||
|
|
@ -46,7 +46,7 @@ func (r *random) BestOptions(expansionOptions []expander.Option, nodeInfo map[st
|
||||||
}
|
}
|
||||||
|
|
||||||
// BestOption selects from the expansion options at random
|
// BestOption selects from the expansion options at random
|
||||||
func (r *random) BestOption(expansionOptions []expander.Option, nodeInfo map[string]*schedulerframework.NodeInfo) *expander.Option {
|
func (r *random) BestOption(expansionOptions []expander.Option, nodeInfo map[string]*framework.NodeInfo) *expander.Option {
|
||||||
if len(expansionOptions) <= 0 {
|
if len(expansionOptions) <= 0 {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -20,8 +20,8 @@ import (
|
||||||
apiv1 "k8s.io/api/core/v1"
|
apiv1 "k8s.io/api/core/v1"
|
||||||
"k8s.io/apimachinery/pkg/api/resource"
|
"k8s.io/apimachinery/pkg/api/resource"
|
||||||
"k8s.io/autoscaler/cluster-autoscaler/expander"
|
"k8s.io/autoscaler/cluster-autoscaler/expander"
|
||||||
|
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||||
klog "k8s.io/klog/v2"
|
klog "k8s.io/klog/v2"
|
||||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type leastwaste struct {
|
type leastwaste struct {
|
||||||
|
|
@ -33,7 +33,7 @@ func NewFilter() expander.Filter {
|
||||||
}
|
}
|
||||||
|
|
||||||
// BestOptions finds the options that waste the least fraction of CPU and Memory.
|
// BestOptions finds the options that waste the least fraction of CPU and Memory.
|
||||||
func (l *leastwaste) BestOptions(expansionOptions []expander.Option, nodeInfo map[string]*schedulerframework.NodeInfo) []expander.Option {
|
func (l *leastwaste) BestOptions(expansionOptions []expander.Option, nodeInfo map[string]*framework.NodeInfo) []expander.Option {
|
||||||
var leastWastedScore float64
|
var leastWastedScore float64
|
||||||
var leastWastedOptions []expander.Option
|
var leastWastedOptions []expander.Option
|
||||||
|
|
||||||
|
|
|
||||||
|
|
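The least-waste filter ranks options by the fraction of CPU and memory an expansion would leave unused; under the new wrapper the node's resources are still reached through Node(). The following is a rough illustration of that calculation, not the package's actual scoring code; the package clause and the exact resource bookkeeping are assumptions.

package waste // package name assumed; the hunks above do not show it

import (
    apiv1 "k8s.io/api/core/v1"
    "k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

// wastedFraction is an illustrative helper: given requested CPU (millicores)
// and memory (bytes), it reports how much of the wrapped node's allocatable
// CPU and memory would be left over.
func wastedFraction(nodeInfo *framework.NodeInfo, requestedCPUMilli, requestedMemBytes int64) (cpuWaste, memWaste float64) {
    alloc := nodeInfo.Node().Status.Allocatable
    cpuQuantity := alloc[apiv1.ResourceCPU]
    memQuantity := alloc[apiv1.ResourceMemory]
    cpuAvail := cpuQuantity.MilliValue()
    memAvail := memQuantity.Value()
    if cpuAvail > 0 {
        cpuWaste = float64(cpuAvail-requestedCPUMilli) / float64(cpuAvail)
    }
    if memAvail > 0 {
        memWaste = float64(memAvail-requestedMemBytes) / float64(memAvail)
    }
    return cpuWaste, memWaste
}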
@@ -28,7 +28,7 @@ import (
     apiv1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/resource"
     "k8s.io/autoscaler/cluster-autoscaler/expander"
-    schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
+    "k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 )

 type FakeNodeGroup struct {
@@ -47,7 +47,7 @@ func (f *FakeNodeGroup) Debug() string { return f.id }
 func (f *FakeNodeGroup) Nodes() ([]cloudprovider.Instance, error) {
     return []cloudprovider.Instance{}, nil
 }
-func (f *FakeNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+func (f *FakeNodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) {
     return nil, cloudprovider.ErrNotImplemented
 }
 func (f *FakeNodeGroup) Exist() bool { return true }
@@ -60,7 +60,7 @@ func (f *FakeNodeGroup) GetOptions(defaults config.NodeGroupAutoscalingOptions)
     return nil, cloudprovider.ErrNotImplemented
 }

-func makeNodeInfo(cpu int64, memory int64, pods int64) *schedulerframework.NodeInfo {
+func makeNodeInfo(cpu int64, memory int64, pods int64) *framework.NodeInfo {
     node := &apiv1.Node{
         Status: apiv1.NodeStatus{
             Capacity: apiv1.ResourceList{
@@ -73,8 +73,7 @@ func makeNodeInfo(cpu int64, memory int64, pods int64) *schedulerframework.NodeI
     node.Status.Allocatable = node.Status.Capacity
     SetNodeReadyState(node, true, time.Time{})

-    nodeInfo := schedulerframework.NewNodeInfo()
-    nodeInfo.SetNode(node)
+    nodeInfo := framework.NewTestNodeInfo(node)

     return nodeInfo
 }
@@ -84,7 +83,7 @@ func TestLeastWaste(t *testing.T) {
     memoryPerPod := int64(1000 * 1024 * 1024)
     e := NewFilter()
     balancedNodeInfo := makeNodeInfo(16*cpuPerPod, 16*memoryPerPod, 100)
-    nodeMap := map[string]*schedulerframework.NodeInfo{"balanced": balancedNodeInfo}
+    nodeMap := map[string]*framework.NodeInfo{"balanced": balancedNodeInfo}
     balancedOption := expander.Option{NodeGroup: &FakeNodeGroup{"balanced"}, NodeCount: 1}

     // Test without any pods, one node info
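Read end to end, the migrated helper above collapses the old two-step construction (NewNodeInfo followed by SetNode) into a single NewTestNodeInfo call. Below is a reassembled sketch of how such a helper might look after the change; the ResourceList entries are assumptions, since the hunk elides them, and SetNodeReadyState is assumed to come from the dot-imported utils/test helpers used elsewhere in this diff.

package waste // package name assumed; the hunks above do not show it

import (
    "time"

    apiv1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    "k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
    . "k8s.io/autoscaler/cluster-autoscaler/utils/test"
)

// makeNodeInfoSketch mirrors the migrated test helper: build a node, mark it
// ready, and wrap it with one constructor call instead of NewNodeInfo()
// followed by SetNode(node).
func makeNodeInfoSketch(cpu int64, memory int64, pods int64) *framework.NodeInfo {
    node := &apiv1.Node{
        Status: apiv1.NodeStatus{
            Capacity: apiv1.ResourceList{
                // Assumed mapping of the parameters onto resources; the real
                // helper's entries are not shown in the hunk above.
                apiv1.ResourceCPU:    *resource.NewMilliQuantity(cpu, resource.DecimalSI),
                apiv1.ResourceMemory: *resource.NewQuantity(memory, resource.DecimalSI),
                apiv1.ResourcePods:   *resource.NewQuantity(pods, resource.DecimalSI),
            },
        },
    }
    node.Status.Allocatable = node.Status.Capacity
    SetNodeReadyState(node, true, time.Time{})
    return framework.NewTestNodeInfo(node)
}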
@@ -20,14 +20,14 @@ import (
     apiv1 "k8s.io/api/core/v1"
     "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
     "k8s.io/autoscaler/cluster-autoscaler/context"
-    schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
+    "k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 )

 // NodeGroupListProcessor processes lists of NodeGroups considered in scale-up.
 type NodeGroupListProcessor interface {
     Process(context *context.AutoscalingContext, nodeGroups []cloudprovider.NodeGroup,
-        nodeInfos map[string]*schedulerframework.NodeInfo,
-        unschedulablePods []*apiv1.Pod) ([]cloudprovider.NodeGroup, map[string]*schedulerframework.NodeInfo, error)
+        nodeInfos map[string]*framework.NodeInfo,
+        unschedulablePods []*apiv1.Pod) ([]cloudprovider.NodeGroup, map[string]*framework.NodeInfo, error)
     CleanUp()
 }

@@ -41,8 +41,8 @@ func NewDefaultNodeGroupListProcessor() NodeGroupListProcessor {
 }

 // Process processes lists of unschedulable and scheduled pods before scaling of the cluster.
-func (p *NoOpNodeGroupListProcessor) Process(context *context.AutoscalingContext, nodeGroups []cloudprovider.NodeGroup, nodeInfos map[string]*schedulerframework.NodeInfo,
-    unschedulablePods []*apiv1.Pod) ([]cloudprovider.NodeGroup, map[string]*schedulerframework.NodeInfo, error) {
+func (p *NoOpNodeGroupListProcessor) Process(context *context.AutoscalingContext, nodeGroups []cloudprovider.NodeGroup, nodeInfos map[string]*framework.NodeInfo,
+    unschedulablePods []*apiv1.Pod) ([]cloudprovider.NodeGroup, map[string]*framework.NodeInfo, error) {
     return nodeGroups, nodeInfos, nil
 }
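Implementations of the interface above only have to swap the map's element type; the wrapper is otherwise opaque to them. Below is a hedged sketch of a custom processor (an illustrative policy, not part of this commit) that keeps only node groups for which a template NodeInfo exists; the package name is assumed to match the interface's.

package nodegroups // assumed package name for the interface shown above

import (
    apiv1 "k8s.io/api/core/v1"
    "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
    "k8s.io/autoscaler/cluster-autoscaler/context"
    "k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

// filteringNodeGroupListProcessor keeps only node groups that have a NodeInfo template.
type filteringNodeGroupListProcessor struct{}

// Process implements NodeGroupListProcessor with the migrated signature.
func (p *filteringNodeGroupListProcessor) Process(ctx *context.AutoscalingContext, nodeGroups []cloudprovider.NodeGroup,
    nodeInfos map[string]*framework.NodeInfo,
    unschedulablePods []*apiv1.Pod) ([]cloudprovider.NodeGroup, map[string]*framework.NodeInfo, error) {
    filtered := make([]cloudprovider.NodeGroup, 0, len(nodeGroups))
    for _, ng := range nodeGroups {
        if _, ok := nodeInfos[ng.Id()]; ok {
            filtered = append(filtered, ng)
        }
    }
    return filtered, nodeInfos, nil
}

// CleanUp implements NodeGroupListProcessor; nothing to release here.
func (p *filteringNodeGroupListProcessor) CleanUp() {}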
@@ -18,7 +18,7 @@ package nodegroupset

 import (
     "k8s.io/autoscaler/cluster-autoscaler/config"
-    schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
+    "k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 )

 // CreateAwsNodeInfoComparator returns a comparator that checks if two nodes should be considered
@@ -42,7 +42,7 @@ func CreateAwsNodeInfoComparator(extraIgnoredLabels []string, ratioOpts config.N
         awsIgnoredLabels[k] = true
     }

-    return func(n1, n2 *schedulerframework.NodeInfo) bool {
+    return func(n1, n2 *framework.NodeInfo) bool {
         return IsCloudProviderNodeInfoSimilar(n1, n2, awsIgnoredLabels, ratioOpts)
     }
 }
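The comparator returned above is a plain function over the wrapper type, so provider-specific comparators keep their shape and only change the parameter type. Here is a minimal sketch of a custom comparator built the same way, keyed on a hypothetical label; IsCloudProviderNodeInfoSimilar is left out because its ratio options type is not shown in these hunks.

package nodegroupset

import "k8s.io/autoscaler/cluster-autoscaler/simulator/framework"

// hypotheticalZoneLabel is an illustrative label key, not one used by the autoscaler.
const hypotheticalZoneLabel = "example.com/zone"

// CreateSameZoneComparator returns a comparator in the same shape as the AWS
// one above: a func(n1, n2 *framework.NodeInfo) bool.
func CreateSameZoneComparator() func(n1, n2 *framework.NodeInfo) bool {
    return func(n1, n2 *framework.NodeInfo) bool {
        z1 := n1.Node().Labels[hypotheticalZoneLabel]
        z2 := n2.Node().Labels[hypotheticalZoneLabel]
        return z1 != "" && z1 == z2
    }
}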
@@ -18,7 +18,7 @@ package nodegroupset

 import (
     "k8s.io/autoscaler/cluster-autoscaler/config"
-    schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
+    "k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 )

 // AzureNodepoolLegacyLabel is a label specifying which Azure node pool a particular node belongs to.
@@ -40,13 +40,13 @@ const aksConsolidatedAdditionalProperties = "kubernetes.azure.com/consolidated-a
 // AKS node image version
 const aksNodeImageVersion = "kubernetes.azure.com/node-image-version"

-func nodesFromSameAzureNodePool(n1, n2 *schedulerframework.NodeInfo) bool {
+func nodesFromSameAzureNodePool(n1, n2 *framework.NodeInfo) bool {
     n1AzureNodePool := n1.Node().Labels[AzureNodepoolLabel]
     n2AzureNodePool := n2.Node().Labels[AzureNodepoolLabel]
     return (n1AzureNodePool != "" && n1AzureNodePool == n2AzureNodePool) || nodesFromSameAzureNodePoolLegacy(n1, n2)
 }

-func nodesFromSameAzureNodePoolLegacy(n1, n2 *schedulerframework.NodeInfo) bool {
+func nodesFromSameAzureNodePoolLegacy(n1, n2 *framework.NodeInfo) bool {
     n1AzureNodePool := n1.Node().Labels[AzureNodepoolLegacyLabel]
     n2AzureNodePool := n2.Node().Labels[AzureNodepoolLegacyLabel]
     return n1AzureNodePool != "" && n1AzureNodePool == n2AzureNodePool
@@ -74,7 +74,7 @@ func CreateAzureNodeInfoComparator(extraIgnoredLabels []string, ratioOpts config
         azureIgnoredLabels[k] = true
     }

-    return func(n1, n2 *schedulerframework.NodeInfo) bool {
+    return func(n1, n2 *framework.NodeInfo) bool {
         if nodesFromSameAzureNodePool(n1, n2) {
             return true
         }
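The pool-matching helpers above read labels through Node(), so exercising them with the new wrapper takes one constructor call per node. A sketch of an in-package test, assuming BuildTestNode from utils/test and an arbitrary pool name:

package nodegroupset

import (
    "testing"

    "k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
    . "k8s.io/autoscaler/cluster-autoscaler/utils/test"
)

// Sketch: two nodes carrying the same AzureNodepoolLabel value should be
// treated as coming from the same pool, regardless of other differences.
func TestNodesFromSameAzureNodePoolSketch(t *testing.T) {
    n1 := BuildTestNode("n1", 1000, 1000)
    n2 := BuildTestNode("n2", 2000, 2000)
    n1.Labels = map[string]string{AzureNodepoolLabel: "pool-a"}
    n2.Labels = map[string]string{AzureNodepoolLabel: "pool-a"}

    if !nodesFromSameAzureNodePool(framework.NewTestNodeInfo(n1), framework.NewTestNodeInfo(n2)) {
        t.Error("expected nodes labelled with the same pool to match")
    }
}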
@@ -23,8 +23,8 @@ import (
     testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test"
     "k8s.io/autoscaler/cluster-autoscaler/config"
     "k8s.io/autoscaler/cluster-autoscaler/context"
+    "k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
     . "k8s.io/autoscaler/cluster-autoscaler/utils/test"
-    schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"

     "github.com/stretchr/testify/assert"
 )
@@ -110,12 +110,10 @@ func TestFindSimilarNodeGroupsAzureByLabel(t *testing.T) {
     provider.AddNode("ng1", n1)
     provider.AddNode("ng2", n2)

-    ni1 := schedulerframework.NewNodeInfo()
-    ni1.SetNode(n1)
-    ni2 := schedulerframework.NewNodeInfo()
-    ni2.SetNode(n2)
+    ni1 := framework.NewTestNodeInfo(n1)
+    ni2 := framework.NewTestNodeInfo(n2)

-    nodeInfosForGroups := map[string]*schedulerframework.NodeInfo{
+    nodeInfosForGroups := map[string]*framework.NodeInfo{
         "ng1": ni1, "ng2": ni2,
     }

@@ -141,8 +139,7 @@ func TestFindSimilarNodeGroupsAzureByLabel(t *testing.T) {
     n3 := BuildTestNode("n1", 1000, 1000)
     provider.AddNodeGroup("ng3", 1, 10, 1)
     provider.AddNode("ng3", n3)
-    ni3 := schedulerframework.NewNodeInfo()
-    ni3.SetNode(n3)
+    ni3 := framework.NewTestNodeInfo(n3)
     nodeInfosForGroups["ng3"] = ni3
     ng3, _ := provider.NodeGroupForNode(n3)
@@ -21,8 +21,8 @@ import (

     "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
     "k8s.io/autoscaler/cluster-autoscaler/context"
+    "k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
     "k8s.io/autoscaler/cluster-autoscaler/utils/errors"
-    schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"

     klog "k8s.io/klog/v2"
 )
@@ -35,7 +35,7 @@ type BalancingNodeGroupSetProcessor struct {
 // FindSimilarNodeGroups returns a list of NodeGroups similar to the given one using the
 // BalancingNodeGroupSetProcessor's comparator function.
 func (b *BalancingNodeGroupSetProcessor) FindSimilarNodeGroups(context *context.AutoscalingContext, nodeGroup cloudprovider.NodeGroup,
-    nodeInfosForGroups map[string]*schedulerframework.NodeInfo) ([]cloudprovider.NodeGroup, errors.AutoscalerError) {
+    nodeInfosForGroups map[string]*framework.NodeInfo) ([]cloudprovider.NodeGroup, errors.AutoscalerError) {

     result := []cloudprovider.NodeGroup{}
     nodeGroupId := nodeGroup.Id()
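Call sites of the balancing processor see the same single-type change in the map they pass in. Below is a sketch of such a call, assuming an AutoscalingContext initialized elsewhere and the nodegroupset package name, which these hunks do not show.

package nodegroupset // assumed to be the processor's package

import (
    "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
    "k8s.io/autoscaler/cluster-autoscaler/context"
    "k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
    klog "k8s.io/klog/v2"
)

// pickSimilarNodeGroups is an illustrative wrapper around the migrated
// FindSimilarNodeGroups signature; autoscalingCtx is assumed to be set up elsewhere.
func pickSimilarNodeGroups(b *BalancingNodeGroupSetProcessor, autoscalingCtx *context.AutoscalingContext,
    ng cloudprovider.NodeGroup, nodeInfos map[string]*framework.NodeInfo) []cloudprovider.NodeGroup {
    similar, err := b.FindSimilarNodeGroups(autoscalingCtx, ng, nodeInfos)
    if err != nil {
        klog.Warningf("could not find node groups similar to %s: %v", ng.Id(), err)
        return nil
    }
    return similar
}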
Some files were not shown because too many files have changed in this diff.