Address recent breaking changes in scheduler
The following things changed in scheduler and needed to be fixed:
* NodeInfo was moved to schedulerframework
* Some fields on NodeInfo are now exposed directly instead of via getters
* NodeInfo.Pods is now a list of *schedulerframework.PodInfo, not *apiv1.Pod
* SharedLister and NodeInfoLister were moved to schedulerframework
* PodLister was removed
parent a804d1bac4
commit 73a5cdf928
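The hunks below apply the same mechanical change across every cloud provider and the core packages. As an orientation, here is a minimal before/after sketch of the API changes listed in the commit message; it assumes only the two import paths shown in the diff, and the helper name buildTemplateNodeInfo is illustrative rather than a function from the repository:

```go
package example

import (
	apiv1 "k8s.io/api/core/v1"
	// Old package, removed throughout this commit:
	//   schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// buildTemplateNodeInfo sketches the new NodeInfo usage pattern.
func buildTemplateNodeInfo(node *apiv1.Node, pods ...*apiv1.Pod) *schedulerframework.NodeInfo {
	// NewNodeInfo and SetNode keep their shape; only the package changes.
	nodeInfo := schedulerframework.NewNodeInfo(pods...)
	nodeInfo.SetNode(node)

	// Pods is now an exported field of []*schedulerframework.PodInfo instead of a
	// Pods() getter returning []*apiv1.Pod, so callers unwrap podInfo.Pod.
	for _, podInfo := range nodeInfo.Pods {
		_ = podInfo.Pod // *apiv1.Pod
	}
	return nodeInfo
}
```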
@@ -22,7 +22,7 @@ import (
apiv1 "k8s.io/api/core/v1"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
"k8s.io/klog"
- schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+ schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// Asg implements NodeGroup interface.
@@ -173,7 +173,7 @@ func (asg *Asg) Nodes() ([]cloudprovider.Instance, error) {
}

// TemplateNodeInfo returns a node template for this node group.
- func (asg *Asg) TemplateNodeInfo() (*schedulernodeinfo.NodeInfo, error) {
+ func (asg *Asg) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
template, err := asg.manager.getAsgTemplate(asg.id)
if err != nil {
return nil, err
@@ -185,7 +185,7 @@ func (asg *Asg) TemplateNodeInfo() (*schedulernodeinfo.NodeInfo, error) {
return nil, err
}

- nodeInfo := schedulernodeinfo.NewNodeInfo(cloudprovider.BuildKubeProxy(asg.id))
+ nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(asg.id))
nodeInfo.SetNode(node)
return nodeInfo, nil
}
@@ -29,7 +29,7 @@ import (
"k8s.io/autoscaler/cluster-autoscaler/config"
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
"k8s.io/klog"
- schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+ schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

const (
@@ -318,7 +318,7 @@ func (ng *AwsNodeGroup) Nodes() ([]cloudprovider.Instance, error) {
}

// TemplateNodeInfo returns a node template for this node group.
- func (ng *AwsNodeGroup) TemplateNodeInfo() (*schedulernodeinfo.NodeInfo, error) {
+ func (ng *AwsNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
template, err := ng.awsManager.getAsgTemplate(ng.asg)
if err != nil {
return nil, err
@@ -329,7 +329,7 @@ func (ng *AwsNodeGroup) TemplateNodeInfo() (*schedulernodeinfo.NodeInfo, error)
return nil, err
}

- nodeInfo := schedulernodeinfo.NewNodeInfo(cloudprovider.BuildKubeProxy(ng.asg.Name))
+ nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(ng.asg.Name))
nodeInfo.SetNode(node)
return nodeInfo, nil
}
@@ -34,7 +34,7 @@ import (
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
"k8s.io/autoscaler/cluster-autoscaler/config/dynamic"
"k8s.io/klog"
- schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+ schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
"k8s.io/legacy-cloud-providers/azure/retry"
)

@@ -524,7 +524,7 @@ func (as *AgentPool) Debug() string {
}

// TemplateNodeInfo returns a node template for this agent pool.
- func (as *AgentPool) TemplateNodeInfo() (*schedulernodeinfo.NodeInfo, error) {
+ func (as *AgentPool) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
return nil, cloudprovider.ErrNotImplemented
}
@@ -28,7 +28,7 @@ import (
apiv1 "k8s.io/api/core/v1"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
"k8s.io/autoscaler/cluster-autoscaler/config/dynamic"
- schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+ schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

//AKSAgentPool implements NodeGroup interface for agent pool deployed in AKS
@@ -412,7 +412,7 @@ func (agentPool *AKSAgentPool) Nodes() ([]cloudprovider.Instance, error) {
}

//TemplateNodeInfo is not implemented.
- func (agentPool *AKSAgentPool) TemplateNodeInfo() (*schedulernodeinfo.NodeInfo, error) {
+ func (agentPool *AKSAgentPool) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
return nil, cloudprovider.ErrNotImplemented
}
@@ -34,7 +34,7 @@ import (
cloudvolume "k8s.io/cloud-provider/volume"
"k8s.io/klog"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
- schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+ schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
"k8s.io/legacy-cloud-providers/azure/retry"

"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute"
@@ -636,7 +636,7 @@ func extractTaintsFromScaleSet(tags map[string]*string) []apiv1.Taint {
}

// TemplateNodeInfo returns a node template for this scale set.
- func (scaleSet *ScaleSet) TemplateNodeInfo() (*schedulernodeinfo.NodeInfo, error) {
+ func (scaleSet *ScaleSet) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
template, rerr := scaleSet.getVMSSInfo()
if rerr != nil {
return nil, rerr.Error()
@@ -647,7 +647,7 @@ func (scaleSet *ScaleSet) TemplateNodeInfo() (*schedulernodeinfo.NodeInfo, error
return nil, err
}

- nodeInfo := schedulernodeinfo.NewNodeInfo(cloudprovider.BuildKubeProxy(scaleSet.Name))
+ nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(scaleSet.Name))
nodeInfo.SetNode(node)
return nodeInfo, nil
}
@@ -294,7 +294,7 @@ func TestTemplateNodeInfo(t *testing.T) {
nodeInfo, err := asg.TemplateNodeInfo()
assert.NoError(t, err)
assert.NotNil(t, nodeInfo)
- assert.NotEmpty(t, nodeInfo.Pods())
+ assert.NotEmpty(t, nodeInfo.Pods)
}
func TestExtractLabelsFromScaleSet(t *testing.T) {
expectedNodeLabelKey := "zip"
@@ -29,7 +29,7 @@ import (
"k8s.io/autoscaler/cluster-autoscaler/config/dynamic"
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
"k8s.io/klog"
- schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+ schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

const (
@@ -348,13 +348,13 @@ func (asg *Asg) Nodes() ([]cloudprovider.Instance, error) {
return instances, nil
}

- // TemplateNodeInfo returns a schedulernodeinfo.NodeInfo structure of an empty
+ // TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty
// (as if just started) node. This will be used in scale-up simulations to
// predict what would a new node look like if a node group was expanded. The returned
// NodeInfo is expected to have a fully populated Node object, with all of the labels,
// capacity and allocatable information as well as all pods that are started on
// the node by default, using manifest (most likely only kube-proxy). Implementation optional.
- func (asg *Asg) TemplateNodeInfo() (*schedulernodeinfo.NodeInfo, error) {
+ func (asg *Asg) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
template, err := asg.baiducloudManager.getAsgTemplate(asg.Name)
if err != nil {
return nil, err
@@ -363,7 +363,7 @@ func (asg *Asg) TemplateNodeInfo() (*schedulernodeinfo.NodeInfo, error) {
if err != nil {
return nil, err
}
- nodeInfo := schedulernodeinfo.NewNodeInfo(cloudprovider.BuildKubeProxy(asg.Name))
+ nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(asg.Name))
nodeInfo.SetNode(node)
return nodeInfo, nil
}
@@ -23,7 +23,7 @@ import (
apiv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
- schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+ schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

const (
@@ -144,13 +144,13 @@ type NodeGroup interface {
// This list should include also instances that might have not become a kubernetes node yet.
Nodes() ([]Instance, error)

- // TemplateNodeInfo returns a schedulernodeinfo.NodeInfo structure of an empty
+ // TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty
// (as if just started) node. This will be used in scale-up simulations to
// predict what would a new node look like if a node group was expanded. The returned
// NodeInfo is expected to have a fully populated Node object, with all of the labels,
// capacity and allocatable information as well as all pods that are started on
// the node by default, using manifest (most likely only kube-proxy). Implementation optional.
- TemplateNodeInfo() (*schedulernodeinfo.NodeInfo, error)
+ TemplateNodeInfo() (*schedulerframework.NodeInfo, error)

// Exist checks if the node group really exists on the cloud provider side. Allows to tell the
// theoretical node group from the real one. Implementation required.
@@ -21,7 +21,7 @@ import (

corev1 "k8s.io/api/core/v1"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
- schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+ schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

const (
@@ -213,7 +213,7 @@ func (ng *nodegroup) Nodes() ([]cloudprovider.Instance, error) {
// allocatable information as well as all pods that are started on the
// node by default, using manifest (most likely only kube-proxy).
// Implementation optional.
- func (ng *nodegroup) TemplateNodeInfo() (*schedulernodeinfo.NodeInfo, error) {
+ func (ng *nodegroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
return nil, cloudprovider.ErrNotImplemented
}
@@ -25,7 +25,7 @@ import (
apiv1 "k8s.io/api/core/v1"

"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
- schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+ schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

const (
@@ -194,14 +194,14 @@ func (n *NodeGroup) Nodes() ([]cloudprovider.Instance, error) {
return toInstances(n.nodePool.Nodes), nil
}

- // TemplateNodeInfo returns a schedulernodeinfo.NodeInfo structure of an empty
+ // TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty
// (as if just started) node. This will be used in scale-up simulations to
// predict what would a new node look like if a node group was expanded. The
// returned NodeInfo is expected to have a fully populated Node object, with
// all of the labels, capacity and allocatable information as well as all pods
// that are started on the node by default, using manifest (most likely only
// kube-proxy). Implementation optional.
- func (n *NodeGroup) TemplateNodeInfo() (*schedulernodeinfo.NodeInfo, error) {
+ func (n *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
return nil, cloudprovider.ErrNotImplemented
}
@@ -28,7 +28,7 @@ import (
"k8s.io/autoscaler/cluster-autoscaler/config"
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
"k8s.io/klog"
- schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+ schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

const (
@@ -328,12 +328,12 @@ func (mig *gceMig) Autoprovisioned() bool {
}

// TemplateNodeInfo returns a node template for this node group.
- func (mig *gceMig) TemplateNodeInfo() (*schedulernodeinfo.NodeInfo, error) {
+ func (mig *gceMig) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
node, err := mig.gceManager.GetMigTemplateNode(mig)
if err != nil {
return nil, err
}
- nodeInfo := schedulernodeinfo.NewNodeInfo(cloudprovider.BuildKubeProxy(mig.Id()))
+ nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(mig.Id()))
nodeInfo.SetNode(node)
return nodeInfo, nil
}
@@ -35,7 +35,7 @@ import (
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/kubernetes/pkg/kubemark"
- schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+ schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"

"k8s.io/klog"
)
@@ -266,7 +266,7 @@ func (nodeGroup *NodeGroup) DecreaseTargetSize(delta int) error {
}

// TemplateNodeInfo returns a node template for this node group.
- func (nodeGroup *NodeGroup) TemplateNodeInfo() (*schedulernodeinfo.NodeInfo, error) {
+ func (nodeGroup *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
return nil, cloudprovider.ErrNotImplemented
}
@@ -23,7 +23,7 @@ import (

"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
"k8s.io/autoscaler/cluster-autoscaler/config"
- schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+ schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

const (
@@ -38,7 +38,7 @@ type magnumManager interface {
deleteNodes(nodegroup string, nodes []NodeRef, updatedNodeCount int) error
getClusterStatus() (string, error)
canUpdate() (bool, string, error)
- templateNodeInfo(nodegroup string) (*schedulernodeinfo.NodeInfo, error)
+ templateNodeInfo(nodegroup string) (*schedulerframework.NodeInfo, error)
}

// createMagnumManager creates the desired implementation of magnumManager.
@@ -38,7 +38,7 @@ import (
"k8s.io/autoscaler/cluster-autoscaler/version"
certutil "k8s.io/client-go/util/cert"
"k8s.io/klog"
- schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+ schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

const (
@@ -282,7 +282,7 @@ func (mgr *magnumManagerHeat) getStackStatus() (string, error) {

// templateNodeInfo returns a NodeInfo with a node template based on the VM flavor
// that is used to created minions in a given node group.
- func (mgr *magnumManagerHeat) templateNodeInfo(nodegroup string) (*schedulernodeinfo.NodeInfo, error) {
+ func (mgr *magnumManagerHeat) templateNodeInfo(nodegroup string) (*schedulerframework.NodeInfo, error) {
// TODO: create a node template by getting the minion flavor from the heat stack.
return nil, cloudprovider.ErrNotImplemented
}
@@ -24,7 +24,7 @@ import (
apiv1 "k8s.io/api/core/v1"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
"k8s.io/klog"
- schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+ schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// magnumNodeGroup implements NodeGroup interface from cluster-autoscaler/cloudprovider.
@@ -281,7 +281,7 @@ func (ng *magnumNodeGroup) Nodes() ([]cloudprovider.Instance, error) {
}

// TemplateNodeInfo returns a node template for this node group.
- func (ng *magnumNodeGroup) TemplateNodeInfo() (*schedulernodeinfo.NodeInfo, error) {
+ func (ng *magnumNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
return ng.magnumManager.templateNodeInfo(ng.id)
}
@@ -27,7 +27,7 @@ import (
"github.com/stretchr/testify/mock"
apiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+ schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

type magnumManagerMock struct {
@@ -64,8 +64,8 @@ func (m *magnumManagerMock) canUpdate() (bool, string, error) {
return args.Bool(0), args.String(1), args.Error(2)
}

- func (m *magnumManagerMock) templateNodeInfo(nodegroup string) (*schedulernodeinfo.NodeInfo, error) {
- return &schedulernodeinfo.NodeInfo{}, nil
+ func (m *magnumManagerMock) templateNodeInfo(nodegroup string) (*schedulerframework.NodeInfo, error) {
+ return &schedulerframework.NodeInfo{}, nil
}

func createTestNodeGroup(manager magnumManager) *magnumNodeGroup {
@@ -16,7 +16,7 @@ limitations under the License.

package mocks

- import cache "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+ import schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
import cloudprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
import mock "github.com/stretchr/testify/mock"
import v1 "k8s.io/api/core/v1"
@@ -234,15 +234,15 @@ func (_m *NodeGroup) TargetSize() (int, error) {
}

// TemplateNodeInfo provides a mock function with given fields:
- func (_m *NodeGroup) TemplateNodeInfo() (*cache.NodeInfo, error) {
+ func (_m *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
ret := _m.Called()

- var r0 *cache.NodeInfo
- if rf, ok := ret.Get(0).(func() *cache.NodeInfo); ok {
+ var r0 *schedulerframework.NodeInfo
+ if rf, ok := ret.Get(0).(func() *schedulerframework.NodeInfo); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
- r0 = ret.Get(0).(*cache.NodeInfo)
+ r0 = ret.Get(0).(*schedulerframework.NodeInfo)
}
}
@@ -23,7 +23,7 @@ import (

"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
"k8s.io/autoscaler/cluster-autoscaler/config"
- schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+ schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

const (
@@ -45,7 +45,7 @@ type packetManager interface {
getNodes(nodegroup string) ([]string, error)
getNodeNames(nodegroup string) ([]string, error)
deleteNodes(nodegroup string, nodes []NodeRef, updatedNodeCount int) error
- templateNodeInfo(nodegroup string) (*schedulernodeinfo.NodeInfo, error)
+ templateNodeInfo(nodegroup string) (*schedulerframework.NodeInfo, error)
}

// createPacketManager creates the desired implementation of packetManager.
@@ -34,7 +34,7 @@ import (
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
"k8s.io/autoscaler/cluster-autoscaler/config"
"k8s.io/klog"
- schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+ schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

type packetManagerRest struct {
@@ -394,7 +394,7 @@ func (mgr *packetManagerRest) deleteNodes(nodegroup string, nodes []NodeRef, upd

// templateNodeInfo returns a NodeInfo with a node template based on the VM flavor
// that is used to created minions in a given node group.
- func (mgr *packetManagerRest) templateNodeInfo(nodegroup string) (*schedulernodeinfo.NodeInfo, error) {
+ func (mgr *packetManagerRest) templateNodeInfo(nodegroup string) (*schedulerframework.NodeInfo, error) {
return nil, cloudprovider.ErrNotImplemented
}
@@ -24,7 +24,7 @@ import (
apiv1 "k8s.io/api/core/v1"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
"k8s.io/klog"
- schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+ schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// packetNodeGroup implements NodeGroup interface from cluster-autoscaler/cloudprovider.
@@ -254,7 +254,7 @@ func (ng *packetNodeGroup) Nodes() ([]cloudprovider.Instance, error) {
}

// TemplateNodeInfo returns a node template for this node group.
- func (ng *packetNodeGroup) TemplateNodeInfo() (*schedulernodeinfo.NodeInfo, error) {
+ func (ng *packetNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
return ng.packetManager.templateNodeInfo(ng.id)
}
@@ -24,7 +24,7 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
- schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+ schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// OnScaleUpFunc is a function called on node group increase in TestCloudProvider.
@@ -50,7 +50,7 @@ type TestCloudProvider struct {
onNodeGroupCreate func(string) error
onNodeGroupDelete func(string) error
machineTypes []string
- machineTemplates map[string]*schedulernodeinfo.NodeInfo
+ machineTemplates map[string]*schedulerframework.NodeInfo
priceModel cloudprovider.PricingModel
resourceLimiter *cloudprovider.ResourceLimiter
}
@@ -69,7 +69,7 @@ func NewTestCloudProvider(onScaleUp OnScaleUpFunc, onScaleDown OnScaleDownFunc)
// NewTestAutoprovisioningCloudProvider builds new TestCloudProvider with autoprovisioning support
func NewTestAutoprovisioningCloudProvider(onScaleUp OnScaleUpFunc, onScaleDown OnScaleDownFunc,
onNodeGroupCreate OnNodeGroupCreateFunc, onNodeGroupDelete OnNodeGroupDeleteFunc,
- machineTypes []string, machineTemplates map[string]*schedulernodeinfo.NodeInfo) *TestCloudProvider {
+ machineTypes []string, machineTemplates map[string]*schedulerframework.NodeInfo) *TestCloudProvider {
return &TestCloudProvider{
nodes: make(map[string]string),
groups: make(map[string]cloudprovider.NodeGroup),
@@ -433,7 +433,7 @@ func (tng *TestNodeGroup) Autoprovisioned() bool {
}

// TemplateNodeInfo returns a node template for this node group.
- func (tng *TestNodeGroup) TemplateNodeInfo() (*schedulernodeinfo.NodeInfo, error) {
+ func (tng *TestNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
if tng.cloudProvider.machineTemplates == nil {
return nil, cloudprovider.ErrNotImplemented
}
@@ -34,7 +34,7 @@ import (
apiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
- schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+ schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"

"k8s.io/klog"
)
@@ -127,7 +127,7 @@ type ClusterStateRegistry struct {
scaleUpRequests map[string]*ScaleUpRequest // nodeGroupName -> ScaleUpRequest
scaleDownRequests []*ScaleDownRequest
nodes []*apiv1.Node
- nodeInfosForGroups map[string]*schedulernodeinfo.NodeInfo
+ nodeInfosForGroups map[string]*schedulerframework.NodeInfo
cloudProvider cloudprovider.CloudProvider
perNodeGroupReadiness map[string]Readiness
totalReadiness Readiness
@@ -297,7 +297,7 @@ func (csr *ClusterStateRegistry) registerFailedScaleUpNoLock(nodeGroup cloudprov
}

// UpdateNodes updates the state of the nodes in the ClusterStateRegistry and recalculates the stats
- func (csr *ClusterStateRegistry) UpdateNodes(nodes []*apiv1.Node, nodeInfosForGroups map[string]*schedulernodeinfo.NodeInfo, currentTime time.Time) error {
+ func (csr *ClusterStateRegistry) UpdateNodes(nodes []*apiv1.Node, nodeInfosForGroups map[string]*schedulerframework.NodeInfo, currentTime time.Time) error {
csr.updateNodeGroupMetrics()
targetSizes, err := getTargetSizes(csr.cloudProvider)
if err != nil {
@@ -21,7 +21,6 @@ import (
"testing"
"time"

- "k8s.io/apimachinery/pkg/labels"
"k8s.io/autoscaler/cluster-autoscaler/simulator"
. "k8s.io/autoscaler/cluster-autoscaler/utils/test"

@@ -142,8 +141,14 @@ func TestFilterOutSchedulableByPacking(t *testing.T) {
assert.ElementsMatch(t, stillPendingPods, expectedPendingPods, "pending pods differ")

// Check if snapshot was correctly modified
- podsInSnapshot, err := clusterSnapshot.Pods().List(labels.Everything())
+ nodeInfos, err := clusterSnapshot.NodeInfos().List()
assert.NoError(t, err)
+ var podsInSnapshot []*apiv1.Pod
+ for _, nodeInfo := range nodeInfos {
+ for _, podInfo := range nodeInfo.Pods {
+ podsInSnapshot = append(podsInSnapshot, podInfo.Pod)
+ }
+ }
assert.ElementsMatch(t, podsInSnapshot, expectedPodsInSnapshot, "pods in snapshot differ")

// Verify hints map; it is very whitebox but better than nothing
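The test change above follows from the removal of PodLister: pods can no longer be listed straight off the cluster snapshot, so they are collected by walking the NodeInfoLister and unwrapping each PodInfo. A minimal sketch of that pattern, assuming the simulator.ClusterSnapshot interface these tests exercise (the helper name is illustrative):

```go
package example

import (
	apiv1 "k8s.io/api/core/v1"
	"k8s.io/autoscaler/cluster-autoscaler/simulator"
)

// listPodsFromSnapshot mirrors the test above: with PodLister gone, pods are
// gathered by iterating every NodeInfo in the snapshot and taking PodInfo.Pod.
func listPodsFromSnapshot(snapshot simulator.ClusterSnapshot) ([]*apiv1.Pod, error) {
	nodeInfos, err := snapshot.NodeInfos().List()
	if err != nil {
		return nil, err
	}
	var pods []*apiv1.Pod
	for _, nodeInfo := range nodeInfos {
		for _, podInfo := range nodeInfo.Pods {
			pods = append(pods, podInfo.Pod)
		}
	}
	return pods, nil
}
```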
@@ -36,7 +36,7 @@ import (
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
"k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
- schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+ schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"

apiv1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1beta1"
@@ -394,7 +394,7 @@ func (sd *ScaleDown) CleanUpUnneededNodes() {
sd.unneededNodes = make(map[string]time.Time)
}

- func (sd *ScaleDown) checkNodeUtilization(timestamp time.Time, node *apiv1.Node, nodeInfo *schedulernodeinfo.NodeInfo) (simulator.UnremovableReason, *simulator.UtilizationInfo) {
+ func (sd *ScaleDown) checkNodeUtilization(timestamp time.Time, node *apiv1.Node, nodeInfo *schedulerframework.NodeInfo) (simulator.UnremovableReason, *simulator.UtilizationInfo) {
// Skip nodes that were recently checked.
if _, found := sd.unremovableNodes[node.Name]; found {
return simulator.RecentlyUnremovable, nil
@@ -1332,16 +1332,16 @@ const (
apiServerLabelValue = "kube-apiserver"
)

- func isMasterNode(nodeInfo *schedulernodeinfo.NodeInfo) bool {
- for _, pod := range nodeInfo.Pods() {
- if pod.Namespace == metav1.NamespaceSystem && pod.Labels[apiServerLabelKey] == apiServerLabelValue {
+ func isMasterNode(nodeInfo *schedulerframework.NodeInfo) bool {
+ for _, podInfo := range nodeInfo.Pods {
+ if podInfo.Pod.Namespace == metav1.NamespaceSystem && podInfo.Pod.Labels[apiServerLabelKey] == apiServerLabelValue {
return true
}
}
return false
}

- func filterOutMasters(nodeInfos []*schedulernodeinfo.NodeInfo) []*apiv1.Node {
+ func filterOutMasters(nodeInfos []*schedulerframework.NodeInfo) []*apiv1.Node {
result := make([]*apiv1.Node, 0, len(nodeInfos))
for _, nodeInfo := range nodeInfos {
if !isMasterNode(nodeInfo) {
@@ -25,7 +25,7 @@ import (

"k8s.io/autoscaler/cluster-autoscaler/simulator"
autoscaler_errors "k8s.io/autoscaler/cluster-autoscaler/utils/errors"
- schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+ schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"

batchv1 "k8s.io/api/batch/v1"
apiv1 "k8s.io/api/core/v1"
@@ -1487,12 +1487,12 @@ func TestFilterOutMasters(t *testing.T) {
{"n6", 2000, 8000, 0, true, ""}, // same machine type, no node group, no api server
{"n7", 2000, 8000, 0, true, ""}, // real master
}
- nodes := make([]*schedulernodeinfo.NodeInfo, len(nodeConfigs))
- nodeMap := make(map[string]*schedulernodeinfo.NodeInfo, len(nodeConfigs))
+ nodes := make([]*schedulerframework.NodeInfo, len(nodeConfigs))
+ nodeMap := make(map[string]*schedulerframework.NodeInfo, len(nodeConfigs))
for i, n := range nodeConfigs {
node := BuildTestNode(n.name, n.cpu, n.memory)
SetNodeReadyState(node, n.ready, time.Now())
- nodeInfo := schedulernodeinfo.NewNodeInfo()
+ nodeInfo := schedulerframework.NewNodeInfo()
err := nodeInfo.SetNode(node)
assert.NoError(t, err)
nodes[i] = nodeInfo
@@ -48,7 +48,7 @@ import (
"k8s.io/autoscaler/cluster-autoscaler/utils/backoff"
kube_client "k8s.io/client-go/kubernetes"
kube_record "k8s.io/client-go/tools/record"
- schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+ schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

type nodeConfig struct {
@@ -254,8 +254,8 @@ type mockAutoprovisioningNodeGroupListProcessor struct {
t *testing.T
}

- func (p *mockAutoprovisioningNodeGroupListProcessor) Process(context *context.AutoscalingContext, nodeGroups []cloudprovider.NodeGroup, nodeInfos map[string]*schedulernodeinfo.NodeInfo,
- unschedulablePods []*apiv1.Pod) ([]cloudprovider.NodeGroup, map[string]*schedulernodeinfo.NodeInfo, error) {
+ func (p *mockAutoprovisioningNodeGroupListProcessor) Process(context *context.AutoscalingContext, nodeGroups []cloudprovider.NodeGroup, nodeInfos map[string]*schedulerframework.NodeInfo,
+ unschedulablePods []*apiv1.Pod) ([]cloudprovider.NodeGroup, map[string]*schedulerframework.NodeInfo, error) {

machines, err := context.CloudProvider.GetAvailableMachineTypes()
assert.NoError(p.t, err)
@@ -41,7 +41,7 @@ import (
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
"k8s.io/autoscaler/cluster-autoscaler/utils/glogx"
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
- schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+ schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"

"k8s.io/klog"
)
@@ -55,7 +55,7 @@ const scaleUpLimitUnknown = math.MaxInt64
func computeScaleUpResourcesLeftLimits(
cp cloudprovider.CloudProvider,
nodeGroups []cloudprovider.NodeGroup,
- nodeInfos map[string]*schedulernodeinfo.NodeInfo,
+ nodeInfos map[string]*schedulerframework.NodeInfo,
nodesFromNotAutoscaledGroups []*apiv1.Node,
resourceLimiter *cloudprovider.ResourceLimiter) (scaleUpResourcesLimits, errors.AutoscalerError) {
totalCores, totalMem, errCoresMem := calculateScaleUpCoresMemoryTotal(nodeGroups, nodeInfos, nodesFromNotAutoscaledGroups)
@@ -109,7 +109,7 @@ func computeScaleUpResourcesLeftLimits(

func calculateScaleUpCoresMemoryTotal(
nodeGroups []cloudprovider.NodeGroup,
- nodeInfos map[string]*schedulernodeinfo.NodeInfo,
+ nodeInfos map[string]*schedulerframework.NodeInfo,
nodesFromNotAutoscaledGroups []*apiv1.Node) (int64, int64, errors.AutoscalerError) {
var coresTotal int64
var memoryTotal int64
@@ -142,7 +142,7 @@ func calculateScaleUpCoresMemoryTotal(
func calculateScaleUpGpusTotal(
GPULabel string,
nodeGroups []cloudprovider.NodeGroup,
- nodeInfos map[string]*schedulernodeinfo.NodeInfo,
+ nodeInfos map[string]*schedulerframework.NodeInfo,
nodesFromNotAutoscaledGroups []*apiv1.Node) (map[string]int64, errors.AutoscalerError) {

result := make(map[string]int64)
@@ -185,7 +185,7 @@ func computeBelowMax(total int64, max int64) int64 {
return 0
}

- func computeScaleUpResourcesDelta(cp cloudprovider.CloudProvider, nodeInfo *schedulernodeinfo.NodeInfo, nodeGroup cloudprovider.NodeGroup, resourceLimiter *cloudprovider.ResourceLimiter) (scaleUpResourcesDelta, errors.AutoscalerError) {
+ func computeScaleUpResourcesDelta(cp cloudprovider.CloudProvider, nodeInfo *schedulerframework.NodeInfo, nodeGroup cloudprovider.NodeGroup, resourceLimiter *cloudprovider.ResourceLimiter) (scaleUpResourcesDelta, errors.AutoscalerError) {
resultScaleUpDelta := make(scaleUpResourcesDelta)

nodeCPU, nodeMemory := getNodeInfoCoresAndMemory(nodeInfo)
@@ -229,7 +229,7 @@ func (limits *scaleUpResourcesLimits) checkScaleUpDeltaWithinLimits(delta scaleU
return scaleUpLimitsNotExceeded()
}

- func getNodeInfoCoresAndMemory(nodeInfo *schedulernodeinfo.NodeInfo) (int64, int64) {
+ func getNodeInfoCoresAndMemory(nodeInfo *schedulerframework.NodeInfo) (int64, int64) {
return utils.GetNodeCoresAndMemory(nodeInfo.Node())
}

@@ -251,7 +251,7 @@ func maxResourceLimitReached(resources []string) *skippedReasons {
return &skippedReasons{[]string{fmt.Sprintf("max cluster %s limit reached", strings.Join(resources, ", "))}}
}

- func computeExpansionOption(context *context.AutoscalingContext, podEquivalenceGroups []*podEquivalenceGroup, nodeGroup cloudprovider.NodeGroup, nodeInfo *schedulernodeinfo.NodeInfo, upcomingNodes []*schedulernodeinfo.NodeInfo) (expander.Option, error) {
+ func computeExpansionOption(context *context.AutoscalingContext, podEquivalenceGroups []*podEquivalenceGroup, nodeGroup cloudprovider.NodeGroup, nodeInfo *schedulerframework.NodeInfo, upcomingNodes []*schedulerframework.NodeInfo) (expander.Option, error) {
option := expander.Option{
NodeGroup: nodeGroup,
Pods: make([]*apiv1.Pod, 0),
@@ -263,7 +263,11 @@ func computeExpansionOption(context *context.AutoscalingContext, podEquivalenceG
}

// add test node to snapshot
- if err := context.ClusterSnapshot.AddNodeWithPods(nodeInfo.Node(), nodeInfo.Pods()); err != nil {
+ var pods []*apiv1.Pod
+ for _, podInfo := range nodeInfo.Pods {
+ pods = append(pods, podInfo.Pod)
+ }
+ if err := context.ClusterSnapshot.AddNodeWithPods(nodeInfo.Node(), pods); err != nil {
klog.Errorf("Error while adding test Node; %v", err)
if err := context.ClusterSnapshot.Revert(); err != nil {
klog.Fatalf("Error while calling ClusterSnapshot.Revert; %v", err)
@@ -306,7 +310,7 @@ func computeExpansionOption(context *context.AutoscalingContext, podEquivalenceG
// false if it didn't and error if an error occurred. Assumes that all nodes in the cluster are
// ready and in sync with instance groups.
func ScaleUp(context *context.AutoscalingContext, processors *ca_processors.AutoscalingProcessors, clusterStateRegistry *clusterstate.ClusterStateRegistry, unschedulablePods []*apiv1.Pod,
- nodes []*apiv1.Node, daemonSets []*appsv1.DaemonSet, nodeInfos map[string]*schedulernodeinfo.NodeInfo, ignoredTaints taints.TaintKeySet) (*status.ScaleUpStatus, errors.AutoscalerError) {
+ nodes []*apiv1.Node, daemonSets []*appsv1.DaemonSet, nodeInfos map[string]*schedulerframework.NodeInfo, ignoredTaints taints.TaintKeySet) (*status.ScaleUpStatus, errors.AutoscalerError) {
// From now on we only care about unschedulable pods that were marked after the newest
// node became available for the scheduler.
if len(unschedulablePods) == 0 {
@@ -344,7 +348,7 @@ func ScaleUp(context *context.AutoscalingContext, processors *ca_processors.Auto
return &status.ScaleUpStatus{Result: status.ScaleUpError}, errLimits.AddPrefix("Could not compute total resources: ")
}

- upcomingNodes := make([]*schedulernodeinfo.NodeInfo, 0)
+ upcomingNodes := make([]*schedulerframework.NodeInfo, 0)
for nodeGroup, numberOfNodes := range clusterStateRegistry.GetUpcomingNodes() {
nodeTemplate, found := nodeInfos[nodeGroup]
if !found {
@@ -679,7 +683,7 @@ func applyScaleUpResourcesLimits(
cp cloudprovider.CloudProvider,
newNodes int,
scaleUpResourcesLeft scaleUpResourcesLimits,
- nodeInfo *schedulernodeinfo.NodeInfo,
+ nodeInfo *schedulerframework.NodeInfo,
nodeGroup cloudprovider.NodeGroup,
resourceLimiter *cloudprovider.ResourceLimiter) (int, errors.AutoscalerError) {
@@ -38,7 +38,7 @@ import (
appsv1 "k8s.io/api/apps/v1"
apiv1 "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes/fake"
- schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+ schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"

"github.com/stretchr/testify/assert"
"k8s.io/autoscaler/cluster-autoscaler/expander"
@@ -437,7 +437,7 @@ type reportingStrategy struct {
t *testing.T
}

- func (r reportingStrategy) BestOption(options []expander.Option, nodeInfo map[string]*schedulernodeinfo.NodeInfo) *expander.Option {
+ func (r reportingStrategy) BestOption(options []expander.Option, nodeInfo map[string]*schedulerframework.NodeInfo) *expander.Option {
r.results.inputOptions = expanderOptionsToGroupSizeChanges(options)
for _, option := range options {
groupSizeChange := expanderOptionToGroupSizeChange(option)
@@ -823,7 +823,7 @@ func TestScaleUpAutoprovisionedNodeGroup(t *testing.T) {

t1 := BuildTestNode("t1", 4000, 1000000)
SetNodeReadyState(t1, true, time.Time{})
- ti1 := schedulernodeinfo.NewNodeInfo()
+ ti1 := schedulerframework.NewNodeInfo()
ti1.SetNode(t1)

provider := testprovider.NewTestAutoprovisioningCloudProvider(
@@ -833,7 +833,7 @@ func TestScaleUpAutoprovisionedNodeGroup(t *testing.T) {
}, nil, func(nodeGroup string) error {
createdGroups <- nodeGroup
return nil
- }, nil, []string{"T1"}, map[string]*schedulernodeinfo.NodeInfo{"T1": ti1})
+ }, nil, []string{"T1"}, map[string]*schedulerframework.NodeInfo{"T1": ti1})

options := config.AutoscalingOptions{
EstimatorName: estimator.BinpackingEstimatorName,
@@ -875,7 +875,7 @@ func TestScaleUpBalanceAutoprovisionedNodeGroups(t *testing.T) {

t1 := BuildTestNode("t1", 100, 1000000)
SetNodeReadyState(t1, true, time.Time{})
- ti1 := schedulernodeinfo.NewNodeInfo()
+ ti1 := schedulerframework.NewNodeInfo()
ti1.SetNode(t1)

provider := testprovider.NewTestAutoprovisioningCloudProvider(
@@ -885,7 +885,7 @@ func TestScaleUpBalanceAutoprovisionedNodeGroups(t *testing.T) {
}, nil, func(nodeGroup string) error {
createdGroups <- nodeGroup
return nil
- }, nil, []string{"T1"}, map[string]*schedulernodeinfo.NodeInfo{"T1": ti1})
+ }, nil, []string{"T1"}, map[string]*schedulerframework.NodeInfo{"T1": ti1})

options := config.AutoscalingOptions{
BalanceSimilarNodeGroups: true,
@@ -24,7 +24,7 @@ import (
apiv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/uuid"
- schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+ schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"

"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
"k8s.io/autoscaler/cluster-autoscaler/clusterstate"
@@ -74,7 +74,7 @@ type StaticAutoscaler struct {
processorCallbacks *staticAutoscalerProcessorCallbacks
initialized bool
// Caches nodeInfo computed for previously seen nodes
- nodeInfoCache map[string]*schedulernodeinfo.NodeInfo
+ nodeInfoCache map[string]*schedulerframework.NodeInfo
ignoredTaints taints.TaintKeySet
}

@@ -156,7 +156,7 @@ func NewStaticAutoscaler(
processors: processors,
processorCallbacks: processorCallbacks,
clusterStateRegistry: clusterStateRegistry,
- nodeInfoCache: make(map[string]*schedulernodeinfo.NodeInfo),
+ nodeInfoCache: make(map[string]*schedulerframework.NodeInfo),
ignoredTaints: ignoredTaints,
}
}
@@ -378,7 +378,11 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) errors.AutoscalerError
// add upcoming nodes to ClusterSnapshot
upcomingNodes := getUpcomingNodeInfos(a.clusterStateRegistry, nodeInfosForGroups)
for _, upcomingNode := range upcomingNodes {
- err = a.ClusterSnapshot.AddNodeWithPods(upcomingNode.Node(), upcomingNode.Pods())
+ var pods []*apiv1.Pod
+ for _, podInfo := range upcomingNode.Pods {
+ pods = append(pods, podInfo.Pod)
+ }
+ err = a.ClusterSnapshot.AddNodeWithPods(upcomingNode.Node(), pods)
if err != nil {
klog.Errorf("Failed to add upcoming node %s to cluster snapshot: %v", upcomingNode.Node().Name, err)
return errors.ToAutoscalerError(errors.InternalError, err)
@@ -734,7 +738,7 @@ func (a *StaticAutoscaler) actOnEmptyCluster(allNodes, readyNodes []*apiv1.Node,
return false
}

- func (a *StaticAutoscaler) updateClusterState(allNodes []*apiv1.Node, nodeInfosForGroups map[string]*schedulernodeinfo.NodeInfo, currentTime time.Time) errors.AutoscalerError {
+ func (a *StaticAutoscaler) updateClusterState(allNodes []*apiv1.Node, nodeInfosForGroups map[string]*schedulerframework.NodeInfo, currentTime time.Time) errors.AutoscalerError {
err := a.clusterStateRegistry.UpdateNodes(allNodes, nodeInfosForGroups, currentTime)
if err != nil {
klog.Errorf("Failed to update node registry: %v", err)
@@ -768,23 +772,23 @@ func allPodsAreNew(pods []*apiv1.Pod, currentTime time.Time) bool {
return found && oldest.Add(unschedulablePodWithGpuTimeBuffer).After(currentTime)
}

- func deepCopyNodeInfo(nodeTemplate *schedulernodeinfo.NodeInfo, index int) *schedulernodeinfo.NodeInfo {
+ func deepCopyNodeInfo(nodeTemplate *schedulerframework.NodeInfo, index int) *schedulerframework.NodeInfo {
node := nodeTemplate.Node().DeepCopy()
node.Name = fmt.Sprintf("%s-%d", node.Name, index)
node.UID = uuid.NewUUID()
- nodeInfo := schedulernodeinfo.NewNodeInfo()
+ nodeInfo := schedulerframework.NewNodeInfo()
nodeInfo.SetNode(node)
- for _, podTemplate := range nodeTemplate.Pods() {
- pod := podTemplate.DeepCopy()
- pod.Name = fmt.Sprintf("%s-%d", podTemplate.Name, index)
+ for _, podInfo := range nodeTemplate.Pods {
+ pod := podInfo.Pod.DeepCopy()
+ pod.Name = fmt.Sprintf("%s-%d", podInfo.Pod.Name, index)
pod.UID = uuid.NewUUID()
nodeInfo.AddPod(pod)
}
return nodeInfo
}

- func getUpcomingNodeInfos(registry *clusterstate.ClusterStateRegistry, nodeInfos map[string]*schedulernodeinfo.NodeInfo) []*schedulernodeinfo.NodeInfo {
- upcomingNodes := make([]*schedulernodeinfo.NodeInfo, 0)
+ func getUpcomingNodeInfos(registry *clusterstate.ClusterStateRegistry, nodeInfos map[string]*schedulerframework.NodeInfo) []*schedulerframework.NodeInfo {
+ upcomingNodes := make([]*schedulerframework.NodeInfo, 0)
for nodeGroup, numberOfNodes := range registry.GetUpcomingNodes() {
nodeTemplate, found := nodeInfos[nodeGroup]
if !found {
@@ -43,7 +43,7 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/kubernetes/fake"
v1appslister "k8s.io/client-go/listers/apps/v1"
- schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+ schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
@@ -153,7 +153,7 @@ func TestStaticAutoscalerRunOnce(t *testing.T) {
p2 := BuildTestPod("p2", 600, 100)

tn := BuildTestNode("tn", 1000, 1000)
- tni := schedulernodeinfo.NewNodeInfo()
+ tni := schedulerframework.NewNodeInfo()
tni.SetNode(tn)

provider := testprovider.NewTestAutoprovisioningCloudProvider(
@@ -163,7 +163,7 @@ func TestStaticAutoscalerRunOnce(t *testing.T) {
return onScaleDownMock.ScaleDown(id, name)
},
nil, nil,
- nil, map[string]*schedulernodeinfo.NodeInfo{"ng1": tni, "ng2": tni})
+ nil, map[string]*schedulerframework.NodeInfo{"ng1": tni, "ng2": tni})
provider.AddNodeGroup("ng1", 1, 10, 1)
provider.AddNode("ng1", n1)
ng1 := reflect.ValueOf(provider.GetNodeGroup("ng1")).Interface().(*testprovider.TestNodeGroup)
@@ -323,15 +323,15 @@ func TestStaticAutoscalerRunOnceWithAutoprovisionedEnabled(t *testing.T) {

tn1 := BuildTestNode("tn1", 100, 1000)
SetNodeReadyState(tn1, true, time.Now())
- tni1 := schedulernodeinfo.NewNodeInfo()
+ tni1 := schedulerframework.NewNodeInfo()
tni1.SetNode(tn1)
tn2 := BuildTestNode("tn2", 1000, 1000)
SetNodeReadyState(tn2, true, time.Now())
- tni2 := schedulernodeinfo.NewNodeInfo()
+ tni2 := schedulerframework.NewNodeInfo()
tni2.SetNode(tn2)
tn3 := BuildTestNode("tn3", 100, 1000)
SetNodeReadyState(tn2, true, time.Now())
- tni3 := schedulernodeinfo.NewNodeInfo()
+ tni3 := schedulerframework.NewNodeInfo()
tni3.SetNode(tn3)

provider := testprovider.NewTestAutoprovisioningCloudProvider(
@@ -344,7 +344,7 @@ func TestStaticAutoscalerRunOnceWithAutoprovisionedEnabled(t *testing.T) {
}, func(id string) error {
return onNodeGroupDeleteMock.Delete(id)
},
- []string{"TN1", "TN2"}, map[string]*schedulernodeinfo.NodeInfo{"TN1": tni1, "TN2": tni2, "ng1": tni3})
+ []string{"TN1", "TN2"}, map[string]*schedulerframework.NodeInfo{"TN1": tni1, "TN2": tni2, "ng1": tni3})
provider.AddNodeGroup("ng1", 1, 10, 1)
provider.AddAutoprovisionedNodeGroup("autoprovisioned-TN1", 0, 10, 0, "TN1")
autoprovisionedTN1 := reflect.ValueOf(provider.GetNodeGroup("autoprovisioned-TN1")).Interface().(*testprovider.TestNodeGroup)
@@ -33,22 +33,22 @@ import (
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
"k8s.io/autoscaler/cluster-autoscaler/utils/taints"
- schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+ schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"

"k8s.io/klog"
)

// GetNodeInfosForGroups finds NodeInfos for all node groups used to manage the given nodes. It also returns a node group to sample node mapping.
- func GetNodeInfosForGroups(nodes []*apiv1.Node, nodeInfoCache map[string]*schedulernodeinfo.NodeInfo, cloudProvider cloudprovider.CloudProvider, listers kube_util.ListerRegistry,
+ func GetNodeInfosForGroups(nodes []*apiv1.Node, nodeInfoCache map[string]*schedulerframework.NodeInfo, cloudProvider cloudprovider.CloudProvider, listers kube_util.ListerRegistry,
// TODO(mwielgus): This returns map keyed by url, while most code (including scheduler) uses node.Name for a key.
// TODO(mwielgus): Review error policy - sometimes we may continue with partial errors.
- daemonsets []*appsv1.DaemonSet, predicateChecker simulator.PredicateChecker, ignoredTaints taints.TaintKeySet) (map[string]*schedulernodeinfo.NodeInfo, errors.AutoscalerError) {
- result := make(map[string]*schedulernodeinfo.NodeInfo)
+ daemonsets []*appsv1.DaemonSet, predicateChecker simulator.PredicateChecker, ignoredTaints taints.TaintKeySet) (map[string]*schedulerframework.NodeInfo, errors.AutoscalerError) {
+ result := make(map[string]*schedulerframework.NodeInfo)
seenGroups := make(map[string]bool)

podsForNodes, err := getPodsForNodes(listers)
if err != nil {
- return map[string]*schedulernodeinfo.NodeInfo{}, err
+ return map[string]*schedulerframework.NodeInfo{}, err
}

// processNode returns information whether the nodeTemplate was generated and if there was an error.
@@ -84,7 +84,7 @@ func GetNodeInfosForGroups(nodes []*apiv1.Node, nodeInfoCache map[string]*schedu
}
added, id, typedErr := processNode(node)
if typedErr != nil {
- return map[string]*schedulernodeinfo.NodeInfo{}, typedErr
+ return map[string]*schedulerframework.NodeInfo{}, typedErr
}
if added && nodeInfoCache != nil {
if nodeInfoCopy, err := deepCopyNodeInfo(result[id]); err == nil {
@@ -117,7 +117,7 @@ func GetNodeInfosForGroups(nodes []*apiv1.Node, nodeInfoCache map[string]*schedu
continue
} else {
klog.Errorf("Unable to build proper template node for %s: %v", id, err)
- return map[string]*schedulernodeinfo.NodeInfo{}, errors.ToAutoscalerError(errors.CloudProviderError, err)
+ return map[string]*schedulerframework.NodeInfo{}, errors.ToAutoscalerError(errors.CloudProviderError, err)
}
}
result[id] = nodeInfo
@@ -136,11 +136,11 @@ func GetNodeInfosForGroups(nodes []*apiv1.Node, nodeInfoCache map[string]*schedu
if !kube_util.IsNodeReadyAndSchedulable(node) {
added, _, typedErr := processNode(node)
if typedErr != nil {
- return map[string]*schedulernodeinfo.NodeInfo{}, typedErr
+ return map[string]*schedulerframework.NodeInfo{}, typedErr
}
nodeGroup, err := cloudProvider.NodeGroupForNode(node)
if err != nil {
- return map[string]*schedulernodeinfo.NodeInfo{}, errors.ToAutoscalerError(
+ return map[string]*schedulerframework.NodeInfo{}, errors.ToAutoscalerError(
errors.CloudProviderError, err)
}
if added {
@@ -165,7 +165,7 @@ func getPodsForNodes(listers kube_util.ListerRegistry) (map[string][]*apiv1.Pod,
}

// GetNodeInfoFromTemplate returns NodeInfo object built base on TemplateNodeInfo returned by NodeGroup.TemplateNodeInfo().
- func GetNodeInfoFromTemplate(nodeGroup cloudprovider.NodeGroup, daemonsets []*appsv1.DaemonSet, predicateChecker simulator.PredicateChecker, ignoredTaints taints.TaintKeySet) (*schedulernodeinfo.NodeInfo, errors.AutoscalerError) {
+ func GetNodeInfoFromTemplate(nodeGroup cloudprovider.NodeGroup, daemonsets []*appsv1.DaemonSet, predicateChecker simulator.PredicateChecker, ignoredTaints taints.TaintKeySet) (*schedulerframework.NodeInfo, errors.AutoscalerError) {
id := nodeGroup.Id()
baseNodeInfo, err := nodeGroup.TemplateNodeInfo()
if err != nil {
@@ -176,8 +176,10 @@ func GetNodeInfoFromTemplate(nodeGroup cloudprovider.NodeGroup, daemonsets []*ap
if err != nil {
return nil, errors.ToAutoscalerError(errors.InternalError, err)
}
- pods = append(pods, baseNodeInfo.Pods()...)
- fullNodeInfo := schedulernodeinfo.NewNodeInfo(pods...)
+ for _, podInfo := range baseNodeInfo.Pods {
+ pods = append(pods, podInfo.Pod)
+ }
+ fullNodeInfo := schedulerframework.NewNodeInfo(pods...)
fullNodeInfo.SetNode(baseNodeInfo.Node())
sanitizedNodeInfo, typedErr := sanitizeNodeInfo(fullNodeInfo, id, ignoredTaints)
if typedErr != nil {
@@ -203,21 +205,21 @@ func FilterOutNodesFromNotAutoscaledGroups(nodes []*apiv1.Node, cloudProvider cl
return result, nil
}

- func deepCopyNodeInfo(nodeInfo *schedulernodeinfo.NodeInfo) (*schedulernodeinfo.NodeInfo, errors.AutoscalerError) {
+ func deepCopyNodeInfo(nodeInfo *schedulerframework.NodeInfo) (*schedulerframework.NodeInfo, errors.AutoscalerError) {
newPods := make([]*apiv1.Pod, 0)
- for _, pod := range nodeInfo.Pods() {
- newPods = append(newPods, pod.DeepCopy())
+ for _, podInfo := range nodeInfo.Pods {
+ newPods = append(newPods, podInfo.Pod.DeepCopy())
}

// Build a new node info.
- newNodeInfo := schedulernodeinfo.NewNodeInfo(newPods...)
+ newNodeInfo := schedulerframework.NewNodeInfo(newPods...)
if err := newNodeInfo.SetNode(nodeInfo.Node().DeepCopy()); err != nil {
return nil, errors.ToAutoscalerError(errors.InternalError, err)
}
return newNodeInfo, nil
}

- func sanitizeNodeInfo(nodeInfo *schedulernodeinfo.NodeInfo, nodeGroupName string, ignoredTaints taints.TaintKeySet) (*schedulernodeinfo.NodeInfo, errors.AutoscalerError) {
+ func sanitizeNodeInfo(nodeInfo *schedulerframework.NodeInfo, nodeGroupName string, ignoredTaints taints.TaintKeySet) (*schedulerframework.NodeInfo, errors.AutoscalerError) {
// Sanitize node name.
sanitizedNode, err := sanitizeTemplateNode(nodeInfo.Node(), nodeGroupName, ignoredTaints)
if err != nil {
@@ -226,14 +228,14 @@ func sanitizeNodeInfo(nodeInfo *schedulernodeinfo.NodeInfo, nodeGroupName string

// Update nodename in pods.
sanitizedPods := make([]*apiv1.Pod, 0)
- for _, pod := range nodeInfo.Pods() {
- sanitizedPod := pod.DeepCopy()
+ for _, podInfo := range nodeInfo.Pods {
+ sanitizedPod := podInfo.Pod.DeepCopy()
sanitizedPod.Spec.NodeName = sanitizedNode.Name
sanitizedPods = append(sanitizedPods, sanitizedPod)
}

// Build a new node info.
- sanitizedNodeInfo := schedulernodeinfo.NewNodeInfo(sanitizedPods...)
+ sanitizedNodeInfo := schedulerframework.NewNodeInfo(sanitizedPods...)
if err := sanitizedNodeInfo.SetNode(sanitizedNode); err != nil {
return nil, errors.ToAutoscalerError(errors.InternalError, err)
}
@@ -29,7 +29,7 @@ import (
appsv1 "k8s.io/api/apps/v1"
apiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+ schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

func TestGetNodeInfosForGroups(t *testing.T) {
@@ -43,13 +43,13 @@ func TestGetNodeInfosForGroups(t *testing.T) {
SetNodeReadyState(unready4, false, time.Now())

tn := BuildTestNode("tn", 5000, 5000)
- tni := schedulernodeinfo.NewNodeInfo()
+ tni := schedulerframework.NewNodeInfo()
tni.SetNode(tn)

// Cloud provider with TemplateNodeInfo implemented.
provider1 := testprovider.NewTestAutoprovisioningCloudProvider(
nil, nil, nil, nil, nil,
- map[string]*schedulernodeinfo.NodeInfo{"ng3": tni, "ng4": tni})
+ map[string]*schedulerframework.NodeInfo{"ng3": tni, "ng4": tni})
provider1.AddNodeGroup("ng1", 1, 10, 1) // Nodegroup with ready node.
provider1.AddNode("ng1", ready1)
provider1.AddNodeGroup("ng2", 1, 10, 1) // Nodegroup with ready and unready node.
@@ -108,7 +108,7 @@ func TestGetNodeInfosForGroupsCache(t *testing.T) {
SetNodeReadyState(ready6, true, time.Now())

tn := BuildTestNode("tn", 10000, 10000)
- tni := schedulernodeinfo.NewNodeInfo()
+ tni := schedulerframework.NewNodeInfo()
tni.SetNode(tn)

lastDeletedGroup := ""
@@ -120,7 +120,7 @@ func TestGetNodeInfosForGroupsCache(t *testing.T) {
// Cloud provider with TemplateNodeInfo implemented.
provider1 := testprovider.NewTestAutoprovisioningCloudProvider(
nil, nil, nil, onDeleteGroup, nil,
- map[string]*schedulernodeinfo.NodeInfo{"ng3": tni, "ng4": tni})
+ map[string]*schedulerframework.NodeInfo{"ng3": tni, "ng4": tni})
provider1.AddNodeGroup("ng1", 1, 10, 1) // Nodegroup with ready node.
provider1.AddNode("ng1", ready1)
provider1.AddNodeGroup("ng2", 1, 10, 1) // Nodegroup with ready and unready node.
@@ -138,7 +138,7 @@ func TestGetNodeInfosForGroupsCache(t *testing.T) {
predicateChecker, err := simulator.NewTestPredicateChecker()
assert.NoError(t, err)

- nodeInfoCache := make(map[string]*schedulernodeinfo.NodeInfo)
+ nodeInfoCache := make(map[string]*schedulerframework.NodeInfo)

// Fill cache
res, err := GetNodeInfosForGroups([]*apiv1.Node{unready4, unready3, ready2, ready1}, nodeInfoCache,
@@ -196,10 +196,10 @@ func TestGetNodeInfosForGroupsCache(t *testing.T) {
assert.False(t, found)

// Fill cache manually
- infoNg4Node6 := schedulernodeinfo.NewNodeInfo()
+ infoNg4Node6 := schedulerframework.NewNodeInfo()
err2 := infoNg4Node6.SetNode(ready6.DeepCopy())
assert.NoError(t, err2)
- nodeInfoCache = map[string]*schedulernodeinfo.NodeInfo{"ng4": infoNg4Node6}
+ nodeInfoCache = map[string]*schedulerframework.NodeInfo{"ng4": infoNg4Node6}
// Check if cache was used
res, err = GetNodeInfosForGroups([]*apiv1.Node{ready1, ready2}, nodeInfoCache,
provider1, registry, []*appsv1.DaemonSet{}, predicateChecker, nil)
@@ -225,12 +225,12 @@ func TestSanitizeNodeInfo(t *testing.T) {

node := BuildTestNode("node", 1000, 1000)

- nodeInfo := schedulernodeinfo.NewNodeInfo(pod)
+ nodeInfo := schedulerframework.NewNodeInfo(pod)
nodeInfo.SetNode(node)

res, err := sanitizeNodeInfo(nodeInfo, "test-group", nil)
assert.NoError(t, err)
- assert.Equal(t, 1, len(res.Pods()))
+ assert.Equal(t, 1, len(res.Pods))
}

func TestSanitizeLabels(t *testing.T) {
@@ -25,7 +25,7 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/autoscaler/cluster-autoscaler/simulator"
"k8s.io/klog"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// podInfo contains Pod and score that corresponds to how important it is to handle the pod first.

@@ -59,7 +59,7 @@ func NewBinpackingNodeEstimator(
// Returns the number of nodes needed to accommodate all pods from the list.
func (estimator *BinpackingNodeEstimator) Estimate(
pods []*apiv1.Pod,
nodeTemplate *schedulernodeinfo.NodeInfo) int {
nodeTemplate *schedulerframework.NodeInfo) int {
podInfos := calculatePodScore(pods, nodeTemplate)
sort.Slice(podInfos, func(i, j int) bool { return podInfos[i].score > podInfos[j].score })

@@ -110,7 +110,7 @@ func (estimator *BinpackingNodeEstimator) Estimate(
}

func (estimator *BinpackingNodeEstimator) addNewNodeToSnapshot(
template *schedulernodeinfo.NodeInfo,
template *schedulerframework.NodeInfo,
nameTimestamp time.Time,
nameIndex int) (string, error) {

@@ -120,7 +120,11 @@ func (estimator *BinpackingNodeEstimator) addNewNodeToSnapshot(
newNode.Labels = make(map[string]string)
}
newNode.Labels["kubernetes.io/hostname"] = newNode.Name
if err := estimator.clusterSnapshot.AddNodeWithPods(newNode, template.Pods()); err != nil {
var pods []*apiv1.Pod
for _, podInfo := range template.Pods {
pods = append(pods, podInfo.Pod)
}
if err := estimator.clusterSnapshot.AddNodeWithPods(newNode, pods); err != nil {
return "", err
}
return newNode.Name, nil

@@ -129,7 +133,7 @@ func (estimator *BinpackingNodeEstimator) addNewNodeToSnapshot(
// Calculates score for all pods and returns podInfo structure.
// Score is defined as cpu_sum/node_capacity + mem_sum/node_capacity.
// Pods that have bigger requirements should be processed first, thus have higher scores.
func calculatePodScore(pods []*apiv1.Pod, nodeTemplate *schedulernodeinfo.NodeInfo) []*podInfo {
func calculatePodScore(pods []*apiv1.Pod, nodeTemplate *schedulerframework.NodeInfo) []*podInfo {
podInfos := make([]*podInfo, 0, len(pods))

for _, pod := range pods {
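The estimator hunk above now converts `template.Pods` inline before calling `AddNodeWithPods`, which still expects plain `[]*apiv1.Pod`. A hedged sketch of how that call site could be factored out, assuming only the `AddNodeWithPods(node, pods)` shape shown in the diff (the `snapshotWithPods` interface and `addTemplateToSnapshot` name are illustrative):

```go
package example

import (
	apiv1 "k8s.io/api/core/v1"
	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// snapshotWithPods covers the single method needed here; the real type is the
// autoscaler's cluster snapshot.
type snapshotWithPods interface {
	AddNodeWithPods(node *apiv1.Node, pods []*apiv1.Pod) error
}

// addTemplateToSnapshot converts the framework PodInfo slice back to plain
// pods before handing the node to the snapshot.
func addTemplateToSnapshot(snapshot snapshotWithPods, node *apiv1.Node, template *schedulerframework.NodeInfo) error {
	pods := make([]*apiv1.Pod, 0, len(template.Pods))
	for _, podInfo := range template.Pods {
		pods = append(pods, podInfo.Pod)
	}
	return snapshot.AddNodeWithPods(node, pods)
}
```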
@ -25,7 +25,7 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/simulator"
|
||||
. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/units"
|
||||
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
|
@ -70,7 +70,7 @@ func TestBinpackingEstimate(t *testing.T) {
|
|||
node.Status.Allocatable = node.Status.Capacity
|
||||
SetNodeReadyState(node, true, time.Time{})
|
||||
|
||||
nodeInfo := schedulernodeinfo.NewNodeInfo()
|
||||
nodeInfo := schedulerframework.NewNodeInfo()
|
||||
nodeInfo.SetNode(node)
|
||||
estimate := estimator.Estimate(pods, nodeInfo)
|
||||
assert.Equal(t, 5, estimate)
|
||||
|
|
@ -103,7 +103,7 @@ func TestBinpackingEstimateWithPorts(t *testing.T) {
|
|||
node.Status.Allocatable = node.Status.Capacity
|
||||
SetNodeReadyState(node, true, time.Time{})
|
||||
|
||||
nodeInfo := schedulernodeinfo.NewNodeInfo()
|
||||
nodeInfo := schedulerframework.NewNodeInfo()
|
||||
nodeInfo.SetNode(node)
|
||||
estimate := estimator.Estimate(pods, nodeInfo)
|
||||
assert.Equal(t, 8, estimate)
|
||||
|
|
|
|||
|
|
@ -21,7 +21,7 @@ import (
|
|||
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator"
|
||||
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
|
||||
)
|
||||
|
||||
const (
|
||||
|
|
@ -34,7 +34,7 @@ var AvailableEstimators = []string{BinpackingEstimatorName}
|
|||
|
||||
// Estimator calculates the number of nodes of given type needed to schedule pods.
|
||||
type Estimator interface {
|
||||
Estimate([]*apiv1.Pod, *schedulernodeinfo.NodeInfo) int
|
||||
Estimate([]*apiv1.Pod, *schedulerframework.NodeInfo) int
|
||||
}
|
||||
|
||||
// EstimatorBuilder creates a new estimator object.
|
||||
|
|
|
|||
|
|
@ -19,7 +19,7 @@ package expander
|
|||
import (
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
|
||||
)
|
||||
|
||||
var (
|
||||
|
|
@ -48,5 +48,5 @@ type Option struct {
|
|||
|
||||
// Strategy describes an interface for selecting the best option when scaling up
|
||||
type Strategy interface {
|
||||
BestOption(options []Option, nodeInfo map[string]*schedulernodeinfo.NodeInfo) *Option
|
||||
BestOption(options []Option, nodeInfo map[string]*schedulerframework.NodeInfo) *Option
|
||||
}
|
||||
|
|
|
|||
|
|
@ -19,7 +19,7 @@ package mostpods
|
|||
import (
|
||||
"k8s.io/autoscaler/cluster-autoscaler/expander"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/expander/random"
|
||||
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
|
||||
)
|
||||
|
||||
type mostpods struct {
|
||||
|
|
@ -32,7 +32,7 @@ func NewStrategy() expander.Strategy {
|
|||
}
|
||||
|
||||
// BestOption Selects the expansion option that schedules the most pods
|
||||
func (m *mostpods) BestOption(expansionOptions []expander.Option, nodeInfo map[string]*schedulernodeinfo.NodeInfo) *expander.Option {
|
||||
func (m *mostpods) BestOption(expansionOptions []expander.Option, nodeInfo map[string]*schedulerframework.NodeInfo) *expander.Option {
|
||||
var maxPods int
|
||||
var maxOptions []expander.Option
|
||||
|
||||
|
|
|
|||
|
|
@ -28,7 +28,7 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/expander"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/units"
|
||||
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
|
||||
|
||||
"k8s.io/klog"
|
||||
)
|
||||
|
|
@ -87,7 +87,7 @@ func NewStrategy(cloudProvider cloudprovider.CloudProvider,
|
|||
}
|
||||
|
||||
// BestOption selects option based on cost and preferred node type.
|
||||
func (p *priceBased) BestOption(expansionOptions []expander.Option, nodeInfos map[string]*schedulernodeinfo.NodeInfo) *expander.Option {
|
||||
func (p *priceBased) BestOption(expansionOptions []expander.Option, nodeInfos map[string]*schedulerframework.NodeInfo) *expander.Option {
|
||||
var bestOption *expander.Option
|
||||
bestOptionScore := 0.0
|
||||
now := time.Now()
|
||||
|
|
|
|||
|
|
@ -28,7 +28,7 @@ import (
|
|||
cloudprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||
testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test"
|
||||
. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
|
||||
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
|
@ -77,13 +77,13 @@ func TestPriceExpander(t *testing.T) {
|
|||
ng2, _ := provider.NodeGroupForNode(n2)
|
||||
ng3, _ := provider.NewNodeGroup("MT1", nil, nil, nil, nil)
|
||||
|
||||
ni1 := schedulernodeinfo.NewNodeInfo()
|
||||
ni1 := schedulerframework.NewNodeInfo()
|
||||
ni1.SetNode(n1)
|
||||
ni2 := schedulernodeinfo.NewNodeInfo()
|
||||
ni2 := schedulerframework.NewNodeInfo()
|
||||
ni2.SetNode(n2)
|
||||
ni3 := schedulernodeinfo.NewNodeInfo()
|
||||
ni3 := schedulerframework.NewNodeInfo()
|
||||
ni3.SetNode(n3)
|
||||
nodeInfosForGroups := map[string]*schedulernodeinfo.NodeInfo{
|
||||
nodeInfosForGroups := map[string]*schedulerframework.NodeInfo{
|
||||
"ng1": ni1, "ng2": ni2,
|
||||
}
|
||||
var pricingModel cloudprovider.PricingModel
|
||||
|
|
|
|||
|
|
@ -31,7 +31,7 @@ import (
|
|||
v1lister "k8s.io/client-go/listers/core/v1"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/klog"
|
||||
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
|
||||
)
|
||||
|
||||
const (
|
||||
|
|
@ -120,7 +120,7 @@ func (p *priority) parsePrioritiesYAMLString(prioritiesYAML string) (priorities,
|
|||
return newPriorities, nil
|
||||
}
|
||||
|
||||
func (p *priority) BestOption(expansionOptions []expander.Option, nodeInfo map[string]*schedulernodeinfo.NodeInfo) *expander.Option {
|
||||
func (p *priority) BestOption(expansionOptions []expander.Option, nodeInfo map[string]*schedulerframework.NodeInfo) *expander.Option {
|
||||
if len(expansionOptions) <= 0 {
|
||||
return nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -20,7 +20,7 @@ import (
|
|||
"math/rand"
|
||||
|
||||
"k8s.io/autoscaler/cluster-autoscaler/expander"
|
||||
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
|
||||
)
|
||||
|
||||
type random struct {
|
||||
|
|
@ -32,7 +32,7 @@ func NewStrategy() expander.Strategy {
|
|||
}
|
||||
|
||||
// RandomExpansion Selects from the expansion options at random
|
||||
func (r *random) BestOption(expansionOptions []expander.Option, nodeInfo map[string]*schedulernodeinfo.NodeInfo) *expander.Option {
|
||||
func (r *random) BestOption(expansionOptions []expander.Option, nodeInfo map[string]*schedulerframework.NodeInfo) *expander.Option {
|
||||
if len(expansionOptions) <= 0 {
|
||||
return nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -22,7 +22,7 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/expander"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/expander/random"
|
||||
"k8s.io/klog"
|
||||
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
|
||||
)
|
||||
|
||||
type leastwaste struct {
|
||||
|
|
@ -35,7 +35,7 @@ func NewStrategy() expander.Strategy {
|
|||
}
|
||||
|
||||
// BestOption Finds the option that wastes the least fraction of CPU and Memory
|
||||
func (l *leastwaste) BestOption(expansionOptions []expander.Option, nodeInfo map[string]*schedulernodeinfo.NodeInfo) *expander.Option {
|
||||
func (l *leastwaste) BestOption(expansionOptions []expander.Option, nodeInfo map[string]*schedulerframework.NodeInfo) *expander.Option {
|
||||
var leastWastedScore float64
|
||||
var leastWastedOptions []expander.Option
|
||||
|
||||
|
|
|
|||
|
|
@ -27,7 +27,7 @@ import (
|
|||
apiv1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/expander"
|
||||
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
|
||||
)
|
||||
|
||||
type FakeNodeGroup struct {
|
||||
|
|
@ -45,7 +45,7 @@ func (f *FakeNodeGroup) Debug() string { return f.id }
|
|||
func (f *FakeNodeGroup) Nodes() ([]cloudprovider.Instance, error) {
|
||||
return []cloudprovider.Instance{}, nil
|
||||
}
|
||||
func (f *FakeNodeGroup) TemplateNodeInfo() (*schedulernodeinfo.NodeInfo, error) {
|
||||
func (f *FakeNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
|
||||
return nil, cloudprovider.ErrNotImplemented
|
||||
}
|
||||
func (f *FakeNodeGroup) Exist() bool { return true }
|
||||
|
|
@ -55,7 +55,7 @@ func (f *FakeNodeGroup) Create() (cloudprovider.NodeGroup, error) {
|
|||
func (f *FakeNodeGroup) Delete() error { return cloudprovider.ErrNotImplemented }
|
||||
func (f *FakeNodeGroup) Autoprovisioned() bool { return false }
|
||||
|
||||
func makeNodeInfo(cpu int64, memory int64, pods int64) *schedulernodeinfo.NodeInfo {
|
||||
func makeNodeInfo(cpu int64, memory int64, pods int64) *schedulerframework.NodeInfo {
|
||||
node := &apiv1.Node{
|
||||
Status: apiv1.NodeStatus{
|
||||
Capacity: apiv1.ResourceList{
|
||||
|
|
@ -68,7 +68,7 @@ func makeNodeInfo(cpu int64, memory int64, pods int64) *schedulernodeinfo.NodeIn
|
|||
node.Status.Allocatable = node.Status.Capacity
|
||||
SetNodeReadyState(node, true, time.Time{})
|
||||
|
||||
nodeInfo := schedulernodeinfo.NewNodeInfo()
|
||||
nodeInfo := schedulerframework.NewNodeInfo()
|
||||
nodeInfo.SetNode(node)
|
||||
|
||||
return nodeInfo
|
||||
|
|
@ -79,7 +79,7 @@ func TestLeastWaste(t *testing.T) {
|
|||
memoryPerPod := int64(1000 * 1024 * 1024)
|
||||
e := NewStrategy()
|
||||
balancedNodeInfo := makeNodeInfo(16*cpuPerPod, 16*memoryPerPod, 100)
|
||||
nodeMap := map[string]*schedulernodeinfo.NodeInfo{"balanced": balancedNodeInfo}
|
||||
nodeMap := map[string]*schedulerframework.NodeInfo{"balanced": balancedNodeInfo}
|
||||
balancedOption := expander.Option{NodeGroup: &FakeNodeGroup{"balanced"}, NodeCount: 1}
|
||||
|
||||
// Test without any pods, one node info
|
||||
|
|
|
|||
|
|
@ -20,14 +20,14 @@ import (
|
|||
apiv1 "k8s.io/api/core/v1"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/context"
|
||||
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
|
||||
)
|
||||
|
||||
// NodeGroupListProcessor processes lists of NodeGroups considered in scale-up.
|
||||
type NodeGroupListProcessor interface {
|
||||
Process(context *context.AutoscalingContext, nodeGroups []cloudprovider.NodeGroup,
|
||||
nodeInfos map[string]*schedulernodeinfo.NodeInfo,
|
||||
unschedulablePods []*apiv1.Pod) ([]cloudprovider.NodeGroup, map[string]*schedulernodeinfo.NodeInfo, error)
|
||||
nodeInfos map[string]*schedulerframework.NodeInfo,
|
||||
unschedulablePods []*apiv1.Pod) ([]cloudprovider.NodeGroup, map[string]*schedulerframework.NodeInfo, error)
|
||||
CleanUp()
|
||||
}
|
||||
|
||||
|
|
@ -41,8 +41,8 @@ func NewDefaultNodeGroupListProcessor() NodeGroupListProcessor {
|
|||
}
|
||||
|
||||
// Process processes lists of unschedulable and scheduled pods before scaling of the cluster.
|
||||
func (p *NoOpNodeGroupListProcessor) Process(context *context.AutoscalingContext, nodeGroups []cloudprovider.NodeGroup, nodeInfos map[string]*schedulernodeinfo.NodeInfo,
|
||||
unschedulablePods []*apiv1.Pod) ([]cloudprovider.NodeGroup, map[string]*schedulernodeinfo.NodeInfo, error) {
|
||||
func (p *NoOpNodeGroupListProcessor) Process(context *context.AutoscalingContext, nodeGroups []cloudprovider.NodeGroup, nodeInfos map[string]*schedulerframework.NodeInfo,
|
||||
unschedulablePods []*apiv1.Pod) ([]cloudprovider.NodeGroup, map[string]*schedulerframework.NodeInfo, error) {
|
||||
return nodeGroups, nodeInfos, nil
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -17,7 +17,7 @@ limitations under the License.
|
|||
package nodegroupset
|
||||
|
||||
import (
|
||||
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
|
||||
)
|
||||
|
||||
// CreateAwsNodeInfoComparator returns a comparator that checks if two nodes should be considered
|
||||
|
|
@ -40,7 +40,7 @@ func CreateAwsNodeInfoComparator(extraIgnoredLabels []string) NodeInfoComparator
|
|||
awsIgnoredLabels[k] = true
|
||||
}
|
||||
|
||||
return func(n1, n2 *schedulernodeinfo.NodeInfo) bool {
|
||||
return func(n1, n2 *schedulerframework.NodeInfo) bool {
|
||||
return IsCloudProviderNodeInfoSimilar(n1, n2, awsIgnoredLabels)
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -17,13 +17,13 @@ limitations under the License.
|
|||
package nodegroupset
|
||||
|
||||
import (
|
||||
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
|
||||
)
|
||||
|
||||
// AzureNodepoolLabel is a label specifying which Azure node pool a particular node belongs to.
|
||||
const AzureNodepoolLabel = "agentpool"
|
||||
|
||||
func nodesFromSameAzureNodePool(n1, n2 *schedulernodeinfo.NodeInfo) bool {
|
||||
func nodesFromSameAzureNodePool(n1, n2 *schedulerframework.NodeInfo) bool {
|
||||
n1AzureNodePool := n1.Node().Labels[AzureNodepoolLabel]
|
||||
n2AzureNodePool := n2.Node().Labels[AzureNodepoolLabel]
|
||||
return n1AzureNodePool != "" && n1AzureNodePool == n2AzureNodePool
|
||||
|
|
@ -42,7 +42,7 @@ func CreateAzureNodeInfoComparator(extraIgnoredLabels []string) NodeInfoComparat
|
|||
azureIgnoredLabels[k] = true
|
||||
}
|
||||
|
||||
return func(n1, n2 *schedulernodeinfo.NodeInfo) bool {
|
||||
return func(n1, n2 *schedulerframework.NodeInfo) bool {
|
||||
if nodesFromSameAzureNodePool(n1, n2) {
|
||||
return true
|
||||
}
|
||||
|
|
|
|||
|
|
@ -23,7 +23,7 @@ import (
|
|||
testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/context"
|
||||
. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
|
||||
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
|
@ -88,12 +88,12 @@ func TestFindSimilarNodeGroupsAzureByLabel(t *testing.T) {
|
|||
provider.AddNode("ng1", n1)
|
||||
provider.AddNode("ng2", n2)
|
||||
|
||||
ni1 := schedulernodeinfo.NewNodeInfo()
|
||||
ni1 := schedulerframework.NewNodeInfo()
|
||||
ni1.SetNode(n1)
|
||||
ni2 := schedulernodeinfo.NewNodeInfo()
|
||||
ni2 := schedulerframework.NewNodeInfo()
|
||||
ni2.SetNode(n2)
|
||||
|
||||
nodeInfosForGroups := map[string]*schedulernodeinfo.NodeInfo{
|
||||
nodeInfosForGroups := map[string]*schedulerframework.NodeInfo{
|
||||
"ng1": ni1, "ng2": ni2,
|
||||
}
|
||||
|
||||
|
|
@ -117,7 +117,7 @@ func TestFindSimilarNodeGroupsAzureByLabel(t *testing.T) {
|
|||
n3 := BuildTestNode("n1", 1000, 1000)
|
||||
provider.AddNodeGroup("ng3", 1, 10, 1)
|
||||
provider.AddNode("ng3", n3)
|
||||
ni3 := schedulernodeinfo.NewNodeInfo()
|
||||
ni3 := schedulerframework.NewNodeInfo()
|
||||
ni3.SetNode(n3)
|
||||
nodeInfosForGroups["ng3"] = ni3
|
||||
ng3, _ := provider.NodeGroupForNode(n3)
|
||||
|
|
|
|||
|
|
@ -22,7 +22,7 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/context"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
|
||||
|
||||
"k8s.io/klog"
|
||||
)
|
||||
|
|
@ -35,7 +35,7 @@ type BalancingNodeGroupSetProcessor struct {
|
|||
// FindSimilarNodeGroups returns a list of NodeGroups similar to the given one using the
|
||||
// BalancingNodeGroupSetProcessor's comparator function.
|
||||
func (b *BalancingNodeGroupSetProcessor) FindSimilarNodeGroups(context *context.AutoscalingContext, nodeGroup cloudprovider.NodeGroup,
|
||||
nodeInfosForGroups map[string]*schedulernodeinfo.NodeInfo) ([]cloudprovider.NodeGroup, errors.AutoscalerError) {
|
||||
nodeInfosForGroups map[string]*schedulerframework.NodeInfo) ([]cloudprovider.NodeGroup, errors.AutoscalerError) {
|
||||
|
||||
result := []cloudprovider.NodeGroup{}
|
||||
nodeGroupId := nodeGroup.Id()
|
||||
|
|
|
|||
|
|
@ -19,7 +19,7 @@ package nodegroupset
|
|||
import (
|
||||
"testing"
|
||||
|
||||
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
|
||||
|
||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||
testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test"
|
||||
|
|
@ -29,7 +29,7 @@ import (
|
|||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func buildBasicNodeGroups(context *context.AutoscalingContext) (*schedulernodeinfo.NodeInfo, *schedulernodeinfo.NodeInfo, *schedulernodeinfo.NodeInfo) {
|
||||
func buildBasicNodeGroups(context *context.AutoscalingContext) (*schedulerframework.NodeInfo, *schedulerframework.NodeInfo, *schedulerframework.NodeInfo) {
|
||||
n1 := BuildTestNode("n1", 1000, 1000)
|
||||
n2 := BuildTestNode("n2", 1000, 1000)
|
||||
n3 := BuildTestNode("n3", 2000, 2000)
|
||||
|
|
@ -41,11 +41,11 @@ func buildBasicNodeGroups(context *context.AutoscalingContext) (*schedulernodein
|
|||
provider.AddNode("ng2", n2)
|
||||
provider.AddNode("ng3", n3)
|
||||
|
||||
ni1 := schedulernodeinfo.NewNodeInfo()
|
||||
ni1 := schedulerframework.NewNodeInfo()
|
||||
ni1.SetNode(n1)
|
||||
ni2 := schedulernodeinfo.NewNodeInfo()
|
||||
ni2 := schedulerframework.NewNodeInfo()
|
||||
ni2.SetNode(n2)
|
||||
ni3 := schedulernodeinfo.NewNodeInfo()
|
||||
ni3 := schedulerframework.NewNodeInfo()
|
||||
ni3.SetNode(n3)
|
||||
|
||||
context.CloudProvider = provider
|
||||
|
|
@ -56,11 +56,11 @@ func basicSimilarNodeGroupsTest(
|
|||
t *testing.T,
|
||||
context *context.AutoscalingContext,
|
||||
processor NodeGroupSetProcessor,
|
||||
ni1 *schedulernodeinfo.NodeInfo,
|
||||
ni2 *schedulernodeinfo.NodeInfo,
|
||||
ni3 *schedulernodeinfo.NodeInfo,
|
||||
ni1 *schedulerframework.NodeInfo,
|
||||
ni2 *schedulerframework.NodeInfo,
|
||||
ni3 *schedulerframework.NodeInfo,
|
||||
) {
|
||||
nodeInfosForGroups := map[string]*schedulernodeinfo.NodeInfo{
|
||||
nodeInfosForGroups := map[string]*schedulerframework.NodeInfo{
|
||||
"ng1": ni1, "ng2": ni2, "ng3": ni3,
|
||||
}
|
||||
|
||||
|
|
@ -103,7 +103,7 @@ func TestFindSimilarNodeGroupsCustomComparator(t *testing.T) {
|
|||
ni1, ni2, ni3 := buildBasicNodeGroups(context)
|
||||
|
||||
processor := &BalancingNodeGroupSetProcessor{
|
||||
Comparator: func(n1, n2 *schedulernodeinfo.NodeInfo) bool {
|
||||
Comparator: func(n1, n2 *schedulerframework.NodeInfo) bool {
|
||||
return (n1.Node().Name == "n1" && n2.Node().Name == "n2") ||
|
||||
(n1.Node().Name == "n2" && n2.Node().Name == "n1")
|
||||
},
|
||||
|
|
|
|||
|
|
@ -21,7 +21,7 @@ import (
|
|||
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
|
||||
)
|
||||
|
||||
const (
|
||||
|
|
@ -51,7 +51,7 @@ var BasicIgnoredLabels = map[string]bool{
|
|||
|
||||
// NodeInfoComparator is a function that tells if two nodes are from NodeGroups
|
||||
// similar enough to be considered a part of a single NodeGroupSet.
|
||||
type NodeInfoComparator func(n1, n2 *schedulernodeinfo.NodeInfo) bool
|
||||
type NodeInfoComparator func(n1, n2 *schedulerframework.NodeInfo) bool
|
||||
|
||||
func compareResourceMapsWithTolerance(resources map[apiv1.ResourceName][]resource.Quantity,
|
||||
maxDifferenceRatio float64) bool {
|
||||
|
|
@ -68,7 +68,7 @@ func compareResourceMapsWithTolerance(resources map[apiv1.ResourceName][]resourc
|
|||
return true
|
||||
}
|
||||
|
||||
func compareLabels(nodes []*schedulernodeinfo.NodeInfo, ignoredLabels map[string]bool) bool {
|
||||
func compareLabels(nodes []*schedulerframework.NodeInfo, ignoredLabels map[string]bool) bool {
|
||||
labels := make(map[string][]string)
|
||||
for _, node := range nodes {
|
||||
for label, value := range node.Node().ObjectMeta.Labels {
|
||||
|
|
@ -96,7 +96,7 @@ func CreateGenericNodeInfoComparator(extraIgnoredLabels []string) NodeInfoCompar
|
|||
genericIgnoredLabels[k] = true
|
||||
}
|
||||
|
||||
return func(n1, n2 *schedulernodeinfo.NodeInfo) bool {
|
||||
return func(n1, n2 *schedulerframework.NodeInfo) bool {
|
||||
return IsCloudProviderNodeInfoSimilar(n1, n2, genericIgnoredLabels)
|
||||
}
|
||||
}
|
||||
|
|
@ -106,11 +106,11 @@ func CreateGenericNodeInfoComparator(extraIgnoredLabels []string) NodeInfoCompar
|
|||
// somewhat arbitrary, but generally we check if resources provided by both nodes
|
||||
// are similar enough to likely be the same type of machine and if the set of labels
|
||||
// is the same (except for a set of labels passed in to be ignored like hostname or zone).
|
||||
func IsCloudProviderNodeInfoSimilar(n1, n2 *schedulernodeinfo.NodeInfo, ignoredLabels map[string]bool) bool {
|
||||
func IsCloudProviderNodeInfoSimilar(n1, n2 *schedulerframework.NodeInfo, ignoredLabels map[string]bool) bool {
|
||||
capacity := make(map[apiv1.ResourceName][]resource.Quantity)
|
||||
allocatable := make(map[apiv1.ResourceName][]resource.Quantity)
|
||||
free := make(map[apiv1.ResourceName][]resource.Quantity)
|
||||
nodes := []*schedulernodeinfo.NodeInfo{n1, n2}
|
||||
nodes := []*schedulerframework.NodeInfo{n1, n2}
|
||||
for _, node := range nodes {
|
||||
for res, quantity := range node.Node().Status.Capacity {
|
||||
capacity[res] = append(capacity[res], quantity)
|
||||
|
|
@@ -118,8 +118,7 @@ func IsCloudProviderNodeInfoSimilar(n1, n2 *schedulernodeinfo.NodeInfo, ignoredL
for res, quantity := range node.Node().Status.Allocatable {
allocatable[res] = append(allocatable[res], quantity)
}
requested := node.RequestedResource()
for res, quantity := range (&requested).ResourceList() {
for res, quantity := range node.Requested.ResourceList() {
freeRes := node.Node().Status.Allocatable[res].DeepCopy()
freeRes.Sub(quantity)
free[res] = append(free[res], freeRes)
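Besides `Pods`, requested resources moved from the `RequestedResource()` getter (a value that had to be taken by address) to the exported `Requested` field, as the hunk above shows. A hedged sketch of computing free resources with the new field, assuming only the `Requested.ResourceList()` and `Allocatable` accesses used in the patch (`freeResources` is an illustrative name):

```go
package example

import (
	apiv1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// freeResources mirrors the loop above: allocatable minus what is already
// requested on the node.
func freeResources(node *schedulerframework.NodeInfo) map[apiv1.ResourceName]resource.Quantity {
	free := make(map[apiv1.ResourceName]resource.Quantity)
	for res, requested := range node.Requested.ResourceList() {
		// Old style: r := node.RequestedResource(); (&r).ResourceList()
		freeRes := node.Node().Status.Allocatable[res].DeepCopy()
		freeRes.Sub(requested)
		free[res] = freeRes
	}
	return free
}
```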
@ -23,7 +23,7 @@ import (
|
|||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
||||
. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
|
||||
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
|
@ -33,9 +33,9 @@ func checkNodesSimilar(t *testing.T, n1, n2 *apiv1.Node, comparator NodeInfoComp
|
|||
}
|
||||
|
||||
func checkNodesSimilarWithPods(t *testing.T, n1, n2 *apiv1.Node, pods1, pods2 []*apiv1.Pod, comparator NodeInfoComparator, shouldEqual bool) {
|
||||
ni1 := schedulernodeinfo.NewNodeInfo(pods1...)
|
||||
ni1 := schedulerframework.NewNodeInfo(pods1...)
|
||||
ni1.SetNode(n1)
|
||||
ni2 := schedulernodeinfo.NewNodeInfo(pods2...)
|
||||
ni2 := schedulerframework.NewNodeInfo(pods2...)
|
||||
ni2.SetNode(n2)
|
||||
assert.Equal(t, shouldEqual, comparator(ni1, ni2))
|
||||
}
|
||||
|
|
|
|||
|
|
@ -22,7 +22,7 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/context"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
|
||||
)
|
||||
|
||||
// ScaleUpInfo contains information about planned scale-up of a single NodeGroup
|
||||
|
|
@ -45,7 +45,7 @@ func (s ScaleUpInfo) String() string {
|
|||
// NodeGroupSetProcessor finds nodegroups that are similar and allows balancing scale-up between them.
|
||||
type NodeGroupSetProcessor interface {
|
||||
FindSimilarNodeGroups(context *context.AutoscalingContext, nodeGroup cloudprovider.NodeGroup,
|
||||
nodeInfosForGroups map[string]*schedulernodeinfo.NodeInfo) ([]cloudprovider.NodeGroup, errors.AutoscalerError)
|
||||
nodeInfosForGroups map[string]*schedulerframework.NodeInfo) ([]cloudprovider.NodeGroup, errors.AutoscalerError)
|
||||
|
||||
BalanceScaleUpBetweenGroups(context *context.AutoscalingContext, groups []cloudprovider.NodeGroup, newNodes int) ([]ScaleUpInfo, errors.AutoscalerError)
|
||||
CleanUp()
|
||||
|
|
@ -57,7 +57,7 @@ type NoOpNodeGroupSetProcessor struct {
|
|||
|
||||
// FindSimilarNodeGroups returns a list of NodeGroups similar to the one provided in parameter.
|
||||
func (n *NoOpNodeGroupSetProcessor) FindSimilarNodeGroups(context *context.AutoscalingContext, nodeGroup cloudprovider.NodeGroup,
|
||||
nodeInfosForGroups map[string]*schedulernodeinfo.NodeInfo) ([]cloudprovider.NodeGroup, errors.AutoscalerError) {
|
||||
nodeInfosForGroups map[string]*schedulerframework.NodeInfo) ([]cloudprovider.NodeGroup, errors.AutoscalerError) {
|
||||
return []cloudprovider.NodeGroup{}, nil
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -18,13 +18,13 @@ package nodeinfos
|
|||
|
||||
import (
|
||||
"k8s.io/autoscaler/cluster-autoscaler/context"
|
||||
"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
|
||||
)
|
||||
|
||||
// NodeInfoProcessor processes nodeInfos after they're created.
|
||||
type NodeInfoProcessor interface {
|
||||
// Process processes a map of nodeInfos for node groups.
|
||||
Process(ctx *context.AutoscalingContext, nodeInfosForNodeGroups map[string]*nodeinfo.NodeInfo) (map[string]*nodeinfo.NodeInfo, error)
|
||||
Process(ctx *context.AutoscalingContext, nodeInfosForNodeGroups map[string]*schedulerframework.NodeInfo) (map[string]*schedulerframework.NodeInfo, error)
|
||||
// CleanUp cleans up processor's internal structures.
|
||||
CleanUp()
|
||||
}
|
||||
|
|
@ -34,7 +34,7 @@ type NoOpNodeInfoProcessor struct {
|
|||
}
|
||||
|
||||
// Process returns unchanged nodeInfos.
|
||||
func (p *NoOpNodeInfoProcessor) Process(ctx *context.AutoscalingContext, nodeInfosForNodeGroups map[string]*nodeinfo.NodeInfo) (map[string]*nodeinfo.NodeInfo, error) {
|
||||
func (p *NoOpNodeInfoProcessor) Process(ctx *context.AutoscalingContext, nodeInfosForNodeGroups map[string]*schedulerframework.NodeInfo) (map[string]*schedulerframework.NodeInfo, error) {
|
||||
return nodeInfosForNodeGroups, nil
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@@ -28,7 +28,7 @@ On Cluster Autoscaler startup kubemark Cloud Provider will parse the config pass
* `IncreaseSize(delta int)` - creation of #delta singleton Replication Controllers in external cluster with label `'autoscaling.k8s.io/nodegroup'=Name()`
* `DeleteNodes([]*apiv1.Node)` - removal of specified Replication Controllers
* `DecreaseTargetSize(delta int) error` - removal of Replication Controllers that have not yet been created
* `TemplateNodeInfo() (*schedulernodeinfo.NodeInfo, error)` - will return ErrNotImplemented
* `TemplateNodeInfo() (*schedulerframework.NodeInfo, error)` - will return ErrNotImplemented
* `MaxSize()` - specified via config (`--nodes={MIN}:{MAX}:{NG_LABEL_VALUE}`)
* `MinSize()` - specified via config
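A minimal sketch of the method shape the README describes, with the new return type (the `NodeGroup` receiver is a stand-in for the kubemark node group type):

```go
package example

import (
	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// NodeGroup stands in for the kubemark node group; only the method shape matters.
type NodeGroup struct{}

// TemplateNodeInfo now returns the framework NodeInfo type and, as the README
// states, simply reports that node templates are not implemented.
func (ng *NodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
	return nil, cloudprovider.ErrNotImplemented
}
```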
@ -20,9 +20,7 @@ import (
|
|||
"fmt"
|
||||
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
|
||||
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
|
||||
)
|
||||
|
||||
// BasicClusterSnapshot is simple, reference implementation of ClusterSnapshot.
|
||||
|
|
@ -33,21 +31,21 @@ type BasicClusterSnapshot struct {
|
|||
}
|
||||
|
||||
type internalBasicSnapshotData struct {
|
||||
nodeInfoMap map[string]*schedulernodeinfo.NodeInfo
|
||||
nodeInfoMap map[string]*schedulerframework.NodeInfo
|
||||
}
|
||||
|
||||
func (data *internalBasicSnapshotData) listNodeInfos() ([]*schedulernodeinfo.NodeInfo, error) {
|
||||
nodeInfoList := make([]*schedulernodeinfo.NodeInfo, 0, len(data.nodeInfoMap))
|
||||
func (data *internalBasicSnapshotData) listNodeInfos() ([]*schedulerframework.NodeInfo, error) {
|
||||
nodeInfoList := make([]*schedulerframework.NodeInfo, 0, len(data.nodeInfoMap))
|
||||
for _, v := range data.nodeInfoMap {
|
||||
nodeInfoList = append(nodeInfoList, v)
|
||||
}
|
||||
return nodeInfoList, nil
|
||||
}
|
||||
|
||||
func (data *internalBasicSnapshotData) listNodeInfosThatHavePodsWithAffinityList() ([]*schedulernodeinfo.NodeInfo, error) {
|
||||
havePodsWithAffinityList := make([]*schedulernodeinfo.NodeInfo, 0, len(data.nodeInfoMap))
|
||||
func (data *internalBasicSnapshotData) listNodeInfosThatHavePodsWithAffinityList() ([]*schedulerframework.NodeInfo, error) {
|
||||
havePodsWithAffinityList := make([]*schedulerframework.NodeInfo, 0, len(data.nodeInfoMap))
|
||||
for _, v := range data.nodeInfoMap {
|
||||
if len(v.PodsWithAffinity()) > 0 {
|
||||
if len(v.PodsWithAffinity) > 0 {
|
||||
havePodsWithAffinityList = append(havePodsWithAffinityList, v)
|
||||
}
|
||||
}
|
||||
|
|
@ -55,38 +53,21 @@ func (data *internalBasicSnapshotData) listNodeInfosThatHavePodsWithAffinityList
|
|||
return havePodsWithAffinityList, nil
|
||||
}
|
||||
|
||||
func (data *internalBasicSnapshotData) getNodeInfo(nodeName string) (*schedulernodeinfo.NodeInfo, error) {
|
||||
func (data *internalBasicSnapshotData) getNodeInfo(nodeName string) (*schedulerframework.NodeInfo, error) {
|
||||
if v, ok := data.nodeInfoMap[nodeName]; ok {
|
||||
return v, nil
|
||||
}
|
||||
return nil, errNodeNotFound
|
||||
}
|
||||
|
||||
func (data *internalBasicSnapshotData) listPods(selector labels.Selector) ([]*apiv1.Pod, error) {
|
||||
alwaysTrue := func(p *apiv1.Pod) bool { return true }
|
||||
return data.filteredListPods(alwaysTrue, selector)
|
||||
}
|
||||
|
||||
func (data *internalBasicSnapshotData) filteredListPods(podFilter schedulerlisters.PodFilter, selector labels.Selector) ([]*apiv1.Pod, error) {
|
||||
pods := make([]*apiv1.Pod, 0)
|
||||
for _, n := range data.nodeInfoMap {
|
||||
for _, pod := range n.Pods() {
|
||||
if podFilter(pod) && selector.Matches(labels.Set(pod.Labels)) {
|
||||
pods = append(pods, pod)
|
||||
}
|
||||
}
|
||||
}
|
||||
return pods, nil
|
||||
}
|
||||
|
||||
func newInternalBasicSnapshotData() *internalBasicSnapshotData {
|
||||
return &internalBasicSnapshotData{
|
||||
nodeInfoMap: make(map[string]*schedulernodeinfo.NodeInfo),
|
||||
nodeInfoMap: make(map[string]*schedulerframework.NodeInfo),
|
||||
}
|
||||
}
|
||||
|
||||
func (data *internalBasicSnapshotData) clone() *internalBasicSnapshotData {
|
||||
clonedNodeInfoMap := make(map[string]*schedulernodeinfo.NodeInfo)
|
||||
clonedNodeInfoMap := make(map[string]*schedulerframework.NodeInfo)
|
||||
for k, v := range data.nodeInfoMap {
|
||||
clonedNodeInfoMap[k] = v.Clone()
|
||||
}
|
||||
|
|
@ -99,7 +80,7 @@ func (data *internalBasicSnapshotData) addNode(node *apiv1.Node) error {
|
|||
if _, found := data.nodeInfoMap[node.Name]; found {
|
||||
return fmt.Errorf("node %s already in snapshot", node.Name)
|
||||
}
|
||||
nodeInfo := schedulernodeinfo.NewNodeInfo()
|
||||
nodeInfo := schedulerframework.NewNodeInfo()
|
||||
err := nodeInfo.SetNode(node)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot set node in NodeInfo; %v", err)
|
||||
|
|
@ -138,9 +119,9 @@ func (data *internalBasicSnapshotData) removePod(namespace, podName, nodeName st
|
|||
if !found {
|
||||
return errNodeNotFound
|
||||
}
|
||||
for _, pod := range nodeInfo.Pods() {
|
||||
if pod.Namespace == namespace && pod.Name == podName {
|
||||
err := nodeInfo.RemovePod(pod)
|
||||
for _, podInfo := range nodeInfo.Pods {
|
||||
if podInfo.Pod.Namespace == namespace && podInfo.Pod.Name == podName {
|
||||
err := nodeInfo.RemovePod(podInfo.Pod)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot remove pod; %v", err)
|
||||
}
|
||||
|
|
@ -238,39 +219,23 @@ func (snapshot *BasicClusterSnapshot) Clear() {
|
|||
// implementation of SharedLister interface
|
||||
|
||||
type basicClusterSnapshotNodeLister BasicClusterSnapshot
|
||||
type basicClusterSnapshotPodLister BasicClusterSnapshot
|
||||
|
||||
// Pods exposes snapshot as PodLister
|
||||
func (snapshot *BasicClusterSnapshot) Pods() schedulerlisters.PodLister {
|
||||
return (*basicClusterSnapshotPodLister)(snapshot)
|
||||
}
|
||||
|
||||
// List returns the list of pods in the snapshot.
|
||||
func (snapshot *basicClusterSnapshotPodLister) List(selector labels.Selector) ([]*apiv1.Pod, error) {
|
||||
return (*BasicClusterSnapshot)(snapshot).getInternalData().listPods(selector)
|
||||
}
|
||||
|
||||
// FilteredList returns a filtered list of pods in the snapshot.
|
||||
func (snapshot *basicClusterSnapshotPodLister) FilteredList(podFilter schedulerlisters.PodFilter, selector labels.Selector) ([]*apiv1.Pod, error) {
|
||||
return (*BasicClusterSnapshot)(snapshot).getInternalData().filteredListPods(podFilter, selector)
|
||||
}
|
||||
|
||||
// NodeInfos exposes snapshot as NodeInfoLister.
|
||||
func (snapshot *BasicClusterSnapshot) NodeInfos() schedulerlisters.NodeInfoLister {
|
||||
func (snapshot *BasicClusterSnapshot) NodeInfos() schedulerframework.NodeInfoLister {
|
||||
return (*basicClusterSnapshotNodeLister)(snapshot)
|
||||
}
|
||||
|
||||
// List returns the list of nodes in the snapshot.
|
||||
func (snapshot *basicClusterSnapshotNodeLister) List() ([]*schedulernodeinfo.NodeInfo, error) {
|
||||
func (snapshot *basicClusterSnapshotNodeLister) List() ([]*schedulerframework.NodeInfo, error) {
|
||||
return (*BasicClusterSnapshot)(snapshot).getInternalData().listNodeInfos()
|
||||
}
|
||||
|
||||
// HavePodsWithAffinityList returns the list of nodes with at least one pods with inter-pod affinity
|
||||
func (snapshot *basicClusterSnapshotNodeLister) HavePodsWithAffinityList() ([]*schedulernodeinfo.NodeInfo, error) {
|
||||
func (snapshot *basicClusterSnapshotNodeLister) HavePodsWithAffinityList() ([]*schedulerframework.NodeInfo, error) {
|
||||
return (*BasicClusterSnapshot)(snapshot).getInternalData().listNodeInfosThatHavePodsWithAffinityList()
|
||||
}
|
||||
|
||||
// Returns the NodeInfo of the given node name.
|
||||
func (snapshot *basicClusterSnapshotNodeLister) Get(nodeName string) (*schedulernodeinfo.NodeInfo, error) {
|
||||
func (snapshot *basicClusterSnapshotNodeLister) Get(nodeName string) (*schedulerframework.NodeInfo, error) {
|
||||
return (*BasicClusterSnapshot)(snapshot).getInternalData().getNodeInfo(nodeName)
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -32,7 +32,7 @@ import (
apiv1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1beta1"
"k8s.io/apimachinery/pkg/api/resource"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"

"k8s.io/klog"
)

@@ -215,7 +215,7 @@ func FindEmptyNodesToRemove(snapshot ClusterSnapshot, candidates []string) []str
// CalculateUtilization calculates utilization of a node, defined as maximum of (cpu, memory) or gpu utilization
// based on if the node has GPU or not. Per resource utilization is the sum of requests for it divided by allocatable.
// It also returns the individual cpu, memory and gpu utilization.
func CalculateUtilization(node *apiv1.Node, nodeInfo *schedulernodeinfo.NodeInfo, skipDaemonSetPods, skipMirrorPods bool, gpuLabel string) (utilInfo UtilizationInfo, err error) {
func CalculateUtilization(node *apiv1.Node, nodeInfo *schedulerframework.NodeInfo, skipDaemonSetPods, skipMirrorPods bool, gpuLabel string) (utilInfo UtilizationInfo, err error) {
if gpu.NodeHasGpu(gpuLabel, node) {
gpuUtil, err := calculateUtilizationOfResource(node, nodeInfo, gpu.ResourceNvidiaGPU, skipDaemonSetPods, skipMirrorPods)
if err != nil {

@@ -250,7 +250,7 @@ func CalculateUtilization(node *apiv1.Node, nodeInfo *schedulernodeinfo.NodeInfo
return utilization, nil
}

func calculateUtilizationOfResource(node *apiv1.Node, nodeInfo *schedulernodeinfo.NodeInfo, resourceName apiv1.ResourceName, skipDaemonSetPods, skipMirrorPods bool) (float64, error) {
func calculateUtilizationOfResource(node *apiv1.Node, nodeInfo *schedulerframework.NodeInfo, resourceName apiv1.ResourceName, skipDaemonSetPods, skipMirrorPods bool) (float64, error) {
nodeAllocatable, found := node.Status.Allocatable[resourceName]
if !found {
return 0, fmt.Errorf("failed to get %v from %s", resourceName, node.Name)

@@ -259,16 +259,16 @@ func calculateUtilizationOfResource(node *apiv1.Node, nodeInfo *schedulernodeinf
return 0, fmt.Errorf("%v is 0 at %s", resourceName, node.Name)
}
podsRequest := resource.MustParse("0")
for _, pod := range nodeInfo.Pods() {
for _, podInfo := range nodeInfo.Pods {
// factor daemonset pods out of the utilization calculations
if skipDaemonSetPods && pod_util.IsDaemonSetPod(pod) {
if skipDaemonSetPods && pod_util.IsDaemonSetPod(podInfo.Pod) {
continue
}
// factor mirror pods out of the utilization calculations
if skipMirrorPods && pod_util.IsMirrorPod(pod) {
if skipMirrorPods && pod_util.IsMirrorPod(podInfo.Pod) {
continue
}
for _, container := range pod.Spec.Containers {
for _, container := range podInfo.Pod.Spec.Containers {
if resourceValue, found := container.Resources.Requests[resourceName]; found {
podsRequest.Add(resourceValue)
}
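A hedged usage sketch of the updated `CalculateUtilization` signature, building the `NodeInfo` the new way via `NewNodeInfo` plus `SetNode`. The import path for the package holding `CalculateUtilization` (the autoscaler simulator package) and the `utilizationOf` helper are assumptions for illustration:

```go
package example

import (
	apiv1 "k8s.io/api/core/v1"
	// Assumed path: CalculateUtilization lives alongside FindEmptyNodesToRemove
	// in the autoscaler's simulator package.
	"k8s.io/autoscaler/cluster-autoscaler/simulator"
	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// utilizationOf shows the new call shape: the NodeInfo is built from the
// framework package, then passed in as before.
func utilizationOf(node *apiv1.Node, gpuLabel string, pods ...*apiv1.Pod) (float64, error) {
	nodeInfo := schedulerframework.NewNodeInfo(pods...)
	if err := nodeInfo.SetNode(node); err != nil {
		return 0, err
	}
	utilInfo, err := simulator.CalculateUtilization(node, nodeInfo, false, false, gpuLabel)
	if err != nil {
		return 0, err
	}
	return utilInfo.Utilization, nil
}
```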
@@ -20,13 +20,13 @@ import (
"errors"

apiv1 "k8s.io/api/core/v1"
schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// ClusterSnapshot is abstraction of cluster state used for predicate simulations.
// It exposes mutation methods and can be viewed as scheduler's SharedLister.
type ClusterSnapshot interface {
schedulerlisters.SharedLister
schedulerframework.SharedLister
// AddNode adds node to the snapshot.
AddNode(node *apiv1.Node) error
// AddNodes adds nodes to the snapshot.
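With `PodLister` removed, `ClusterSnapshot` only has to satisfy `schedulerframework.SharedLister`, i.e. `NodeInfos()`. Callers that previously went through `Pods().List(...)` can recover the pod list by walking the node infos, as the snapshot tests further down do. A minimal sketch of that replacement pattern (the `allPods` helper is illustrative):

```go
package example

import (
	apiv1 "k8s.io/api/core/v1"
	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// allPods lists every pod known to a SharedLister by iterating node infos,
// replacing the removed PodLister-based listing.
func allPods(lister schedulerframework.SharedLister) ([]*apiv1.Pod, error) {
	nodeInfos, err := lister.NodeInfos().List()
	if err != nil {
		return nil, err
	}
	var pods []*apiv1.Pod
	for _, nodeInfo := range nodeInfos {
		for _, podInfo := range nodeInfo.Pods {
			pods = append(pods, podInfo.Pod)
		}
	}
	return pods, nil
}
```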
@ -154,13 +154,6 @@ func BenchmarkAddPods(b *testing.B) {
|
|||
if err != nil {
|
||||
assert.NoError(b, err)
|
||||
}
|
||||
/*
|
||||
// uncomment to test effect of pod caching
|
||||
_, err = clusterSnapshot.Pods().List(labels.Everything())
|
||||
if err != nil {
|
||||
assert.NoError(b, err)
|
||||
}
|
||||
*/
|
||||
b.StartTimer()
|
||||
for _, pod := range pods {
|
||||
err = clusterSnapshot.AddPod(pod, pod.Spec.NodeName)
|
||||
|
|
@ -271,74 +264,3 @@ func BenchmarkBuildNodeInfoList(b *testing.B) {
|
|||
})
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkBuildPodList(b *testing.B) {
|
||||
testCases := []struct {
|
||||
nodeCount int
|
||||
}{
|
||||
{
|
||||
nodeCount: 1000,
|
||||
},
|
||||
{
|
||||
nodeCount: 5000,
|
||||
},
|
||||
{
|
||||
nodeCount: 15000,
|
||||
},
|
||||
}
|
||||
for _, modifiedPodCount := range []int{0, 1, 100} {
|
||||
for _, extraNodeCount := range []int{0, 1, 100} {
|
||||
for _, tc := range testCases {
|
||||
b.Run(fmt.Sprintf("%s: modified %v, added nodes %v, nodes %v", "delta", modifiedPodCount, extraNodeCount, tc.nodeCount), func(b *testing.B) {
|
||||
nodes := createTestNodes(tc.nodeCount)
|
||||
pods := createTestPods(tc.nodeCount * 30)
|
||||
assignPodsToNodes(pods, nodes)
|
||||
|
||||
modifiedPods := createTestPods(modifiedPodCount)
|
||||
assignPodsToNodes(modifiedPods, nodes)
|
||||
|
||||
newNodes := createTestNodesWithPrefix("new-", extraNodeCount)
|
||||
newPods := createTestPods(extraNodeCount * 30)
|
||||
assignPodsToNodes(newPods, newNodes)
|
||||
|
||||
snapshot := NewDeltaClusterSnapshot()
|
||||
|
||||
if err := snapshot.AddNodes(nodes); err != nil {
|
||||
assert.NoError(b, err)
|
||||
}
|
||||
for _, pod := range pods {
|
||||
if err := snapshot.AddPod(pod, pod.Spec.NodeName); err != nil {
|
||||
assert.NoError(b, err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := snapshot.Fork(); err != nil {
|
||||
assert.NoError(b, err)
|
||||
}
|
||||
for _, pod := range modifiedPods {
|
||||
if err := snapshot.AddPod(pod, pod.Spec.NodeName); err != nil {
|
||||
assert.NoError(b, err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := snapshot.AddNodes(newNodes); err != nil {
|
||||
assert.NoError(b, err)
|
||||
}
|
||||
for _, pod := range newPods {
|
||||
if err := snapshot.AddPod(pod, pod.Spec.NodeName); err != nil {
|
||||
assert.NoError(b, err)
|
||||
}
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
list := snapshot.data.buildPodList()
|
||||
if len(list) != tc.nodeCount*30+modifiedPodCount+extraNodeCount*30 {
|
||||
assert.Equal(b, tc.nodeCount*30+modifiedPodCount+extraNodeCount*30, len(list))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -25,8 +25,7 @@ import (
|
|||
. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
|
||||
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
|
@ -44,7 +43,7 @@ func nodeNames(nodes []*apiv1.Node) []string {
|
|||
return names
|
||||
}
|
||||
|
||||
func extractNodes(nodeInfos []*schedulernodeinfo.NodeInfo) []*apiv1.Node {
|
||||
func extractNodes(nodeInfos []*schedulerframework.NodeInfo) []*apiv1.Node {
|
||||
nodes := []*apiv1.Node{}
|
||||
for _, ni := range nodeInfos {
|
||||
nodes = append(nodes, ni.Node())
|
||||
|
|
@@ -65,8 +64,12 @@ func compareStates(t *testing.T, a, b snapshotState) {
func getSnapshotState(t *testing.T, snapshot ClusterSnapshot) snapshotState {
nodes, err := snapshot.NodeInfos().List()
assert.NoError(t, err)
pods, err := snapshot.Pods().List(labels.Everything())
assert.NoError(t, err)
var pods []*apiv1.Pod
for _, nodeInfo := range nodes {
for _, podInfo := range nodeInfo.Pods {
pods = append(pods, podInfo.Pod)
}
}
return snapshotState{extractNodes(nodes), pods}
}

@@ -213,9 +216,7 @@ func TestForking(t *testing.T) {
snapshot := startSnapshot(t, snapshotFactory, tc.state)

// Allow caches to be build.
_, err := snapshot.Pods().List(labels.Everything())
assert.NoError(t, err)
_, err = snapshot.NodeInfos().List()
_, err := snapshot.NodeInfos().List()
assert.NoError(t, err)
_, err = snapshot.NodeInfos().HavePodsWithAffinityList()
assert.NoError(t, err)
@ -26,7 +26,7 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/utils/drain"
|
||||
. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
|
||||
"k8s.io/kubernetes/pkg/kubelet/types"
|
||||
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
|
@ -36,7 +36,7 @@ func TestUtilization(t *testing.T) {
|
|||
pod := BuildTestPod("p1", 100, 200000)
|
||||
pod2 := BuildTestPod("p2", -1, -1)
|
||||
|
||||
nodeInfo := schedulernodeinfo.NewNodeInfo(pod, pod, pod2)
|
||||
nodeInfo := schedulerframework.NewNodeInfo(pod, pod, pod2)
|
||||
node := BuildTestNode("node1", 2000, 2000000)
|
||||
SetNodeReadyState(node, true, time.Time{})
|
||||
|
||||
|
|
@ -56,12 +56,12 @@ func TestUtilization(t *testing.T) {
|
|||
daemonSetPod4.OwnerReferences = GenerateOwnerReferences("ds", "CustomDaemonSet", "crd/v1", "")
|
||||
daemonSetPod4.Annotations = map[string]string{"cluster-autoscaler.kubernetes.io/daemonset-pod": "true"}
|
||||
|
||||
nodeInfo = schedulernodeinfo.NewNodeInfo(pod, pod, pod2, daemonSetPod3, daemonSetPod4)
|
||||
nodeInfo = schedulerframework.NewNodeInfo(pod, pod, pod2, daemonSetPod3, daemonSetPod4)
|
||||
utilInfo, err = CalculateUtilization(node, nodeInfo, true, false, gpuLabel)
|
||||
assert.NoError(t, err)
|
||||
assert.InEpsilon(t, 2.0/10, utilInfo.Utilization, 0.01)
|
||||
|
||||
nodeInfo = schedulernodeinfo.NewNodeInfo(pod, pod2, daemonSetPod3)
|
||||
nodeInfo = schedulerframework.NewNodeInfo(pod, pod2, daemonSetPod3)
|
||||
utilInfo, err = CalculateUtilization(node, nodeInfo, false, false, gpuLabel)
|
||||
assert.NoError(t, err)
|
||||
assert.InEpsilon(t, 2.0/10, utilInfo.Utilization, 0.01)
|
||||
|
|
@ -71,12 +71,12 @@ func TestUtilization(t *testing.T) {
|
|||
types.ConfigMirrorAnnotationKey: "",
|
||||
}
|
||||
|
||||
nodeInfo = schedulernodeinfo.NewNodeInfo(pod, pod, pod2, mirrorPod4)
|
||||
nodeInfo = schedulerframework.NewNodeInfo(pod, pod, pod2, mirrorPod4)
|
||||
utilInfo, err = CalculateUtilization(node, nodeInfo, false, true, gpuLabel)
|
||||
assert.NoError(t, err)
|
||||
assert.InEpsilon(t, 2.0/10, utilInfo.Utilization, 0.01)
|
||||
|
||||
nodeInfo = schedulernodeinfo.NewNodeInfo(pod, pod2, mirrorPod4)
|
||||
nodeInfo = schedulerframework.NewNodeInfo(pod, pod2, mirrorPod4)
|
||||
utilInfo, err = CalculateUtilization(node, nodeInfo, false, false, gpuLabel)
|
||||
assert.NoError(t, err)
|
||||
assert.InEpsilon(t, 2.0/10, utilInfo.Utilization, 0.01)
|
||||
|
|
@ -86,7 +86,7 @@ func TestUtilization(t *testing.T) {
|
|||
gpuPod := BuildTestPod("gpu_pod", 100, 200000)
|
||||
RequestGpuForPod(gpuPod, 1)
|
||||
TolerateGpuForPod(gpuPod)
|
||||
nodeInfo = schedulernodeinfo.NewNodeInfo(pod, pod, gpuPod)
|
||||
nodeInfo = schedulerframework.NewNodeInfo(pod, pod, gpuPod)
|
||||
utilInfo, err = CalculateUtilization(gpuNode, nodeInfo, false, false, gpuLabel)
|
||||
assert.NoError(t, err)
|
||||
assert.InEpsilon(t, 1/1, utilInfo.Utilization, 0.01)
|
||||
|
|
@ -94,16 +94,16 @@ func TestUtilization(t *testing.T) {
|
|||
// Node with Unready GPU
|
||||
gpuNode = BuildTestNode("gpu_node", 2000, 2000000)
|
||||
AddGpuLabelToNode(gpuNode)
|
||||
nodeInfo = schedulernodeinfo.NewNodeInfo(pod, pod)
|
||||
nodeInfo = schedulerframework.NewNodeInfo(pod, pod)
|
||||
utilInfo, err = CalculateUtilization(gpuNode, nodeInfo, false, false, gpuLabel)
|
||||
assert.NoError(t, err)
|
||||
assert.Zero(t, utilInfo.Utilization)
|
||||
}
|
||||
|
||||
func nodeInfos(nodes []*apiv1.Node) []*schedulernodeinfo.NodeInfo {
|
||||
result := make([]*schedulernodeinfo.NodeInfo, len(nodes))
|
||||
func nodeInfos(nodes []*apiv1.Node) []*schedulerframework.NodeInfo {
|
||||
result := make([]*schedulerframework.NodeInfo, len(nodes))
|
||||
for i, node := range nodes {
|
||||
ni := schedulernodeinfo.NewNodeInfo()
|
||||
ni := schedulerframework.NewNodeInfo()
|
||||
ni.SetNode(node)
|
||||
result[i] = ni
|
||||
}
|
||||
|
|
@ -262,22 +262,22 @@ type findNodesToRemoveTestConfig struct {
|
|||
|
||||
func TestFindNodesToRemove(t *testing.T) {
|
||||
emptyNode := BuildTestNode("n1", 1000, 2000000)
|
||||
emptyNodeInfo := schedulernodeinfo.NewNodeInfo()
|
||||
emptyNodeInfo := schedulerframework.NewNodeInfo()
|
||||
emptyNodeInfo.SetNode(emptyNode)
|
||||
|
||||
// two small pods backed by ReplicaSet
|
||||
drainableNode := BuildTestNode("n2", 1000, 2000000)
|
||||
drainableNodeInfo := schedulernodeinfo.NewNodeInfo()
|
||||
drainableNodeInfo := schedulerframework.NewNodeInfo()
|
||||
drainableNodeInfo.SetNode(drainableNode)
|
||||
|
||||
// one small pod, not backed by anything
|
||||
nonDrainableNode := BuildTestNode("n3", 1000, 2000000)
|
||||
nonDrainableNodeInfo := schedulernodeinfo.NewNodeInfo()
|
||||
nonDrainableNodeInfo := schedulerframework.NewNodeInfo()
|
||||
nonDrainableNodeInfo.SetNode(nonDrainableNode)
|
||||
|
||||
// one very large pod
|
||||
fullNode := BuildTestNode("n4", 1000, 2000000)
|
||||
fullNodeInfo := schedulernodeinfo.NewNodeInfo()
|
||||
fullNodeInfo := schedulerframework.NewNodeInfo()
|
||||
fullNodeInfo.SetNode(fullNode)
|
||||
|
||||
SetNodeReadyState(emptyNode, true, time.Time{})
|
||||
|
|
|
|||
|
|
@ -18,16 +18,13 @@ package simulator
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
scheduler_listers "k8s.io/kubernetes/pkg/scheduler/listers"
|
||||
scheduler_nodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
|
||||
)
|
||||
|
||||
// DelegatingSchedulerSharedLister is an implementation of scheduler.SharedLister which
|
||||
// passes logic to delegate. Delegate can be updated.
|
||||
type DelegatingSchedulerSharedLister struct {
|
||||
delegate scheduler_listers.SharedLister
|
||||
delegate schedulerframework.SharedLister
|
||||
}
|
||||
|
||||
// NewDelegatingSchedulerSharedLister creates new NewDelegatingSchedulerSharedLister
|
||||
|
|
@ -37,18 +34,13 @@ func NewDelegatingSchedulerSharedLister() *DelegatingSchedulerSharedLister {
|
|||
}
|
||||
}
|
||||
|
||||
// Pods returns a PodLister
|
||||
func (lister *DelegatingSchedulerSharedLister) Pods() scheduler_listers.PodLister {
|
||||
return lister.delegate.Pods()
|
||||
}
|
||||
|
||||
// NodeInfos returns a NodeInfoLister.
|
||||
func (lister *DelegatingSchedulerSharedLister) NodeInfos() scheduler_listers.NodeInfoLister {
|
||||
func (lister *DelegatingSchedulerSharedLister) NodeInfos() schedulerframework.NodeInfoLister {
|
||||
return lister.delegate.NodeInfos()
|
||||
}
|
||||
|
||||
// UpdateDelegate updates the delegate
|
||||
func (lister *DelegatingSchedulerSharedLister) UpdateDelegate(delegate scheduler_listers.SharedLister) {
|
||||
func (lister *DelegatingSchedulerSharedLister) UpdateDelegate(delegate schedulerframework.SharedLister) {
|
||||
lister.delegate = delegate
|
||||
}
|
||||
|
||||
|
|
@ -58,41 +50,25 @@ func (lister *DelegatingSchedulerSharedLister) ResetDelegate() {
|
|||
}
|
||||
|
||||
type unsetSharedLister struct{}
|
||||
type unsetPodLister unsetSharedLister
|
||||
type unsetNodeInfoLister unsetSharedLister
|
||||
|
||||
// List always returns an error
|
||||
func (lister *unsetPodLister) List(labels.Selector) ([]*apiv1.Pod, error) {
|
||||
return nil, fmt.Errorf("lister not set in delegate")
|
||||
}
|
||||
|
||||
// FilteredList always returns an error
|
||||
func (lister *unsetPodLister) FilteredList(podFilter scheduler_listers.PodFilter, selector labels.Selector) ([]*apiv1.Pod, error) {
|
||||
return nil, fmt.Errorf("lister not set in delegate")
|
||||
}
|
||||
|
||||
// List always returns an error
|
||||
func (lister *unsetNodeInfoLister) List() ([]*scheduler_nodeinfo.NodeInfo, error) {
|
||||
func (lister *unsetNodeInfoLister) List() ([]*schedulerframework.NodeInfo, error) {
|
||||
return nil, fmt.Errorf("lister not set in delegate")
|
||||
}
|
||||
|
||||
// HavePodsWithAffinityList always returns an error
|
||||
func (lister *unsetNodeInfoLister) HavePodsWithAffinityList() ([]*scheduler_nodeinfo.NodeInfo, error) {
|
||||
func (lister *unsetNodeInfoLister) HavePodsWithAffinityList() ([]*schedulerframework.NodeInfo, error) {
|
||||
return nil, fmt.Errorf("lister not set in delegate")
|
||||
}
|
||||
|
||||
// Get always returns an error
|
||||
func (lister *unsetNodeInfoLister) Get(nodeName string) (*scheduler_nodeinfo.NodeInfo, error) {
|
||||
func (lister *unsetNodeInfoLister) Get(nodeName string) (*schedulerframework.NodeInfo, error) {
|
||||
return nil, fmt.Errorf("lister not set in delegate")
|
||||
}
|
||||
|
||||
// Pods returns a fake PodLister which always returns an error
|
||||
func (lister *unsetSharedLister) Pods() scheduler_listers.PodLister {
|
||||
return (*unsetPodLister)(lister)
|
||||
}
|
||||
|
||||
// Pods returns a fake NodeInfoLister which always returns an error
|
||||
func (lister *unsetSharedLister) NodeInfos() scheduler_listers.NodeInfoLister {
|
||||
func (lister *unsetSharedLister) NodeInfos() schedulerframework.NodeInfoLister {
|
||||
return (*unsetNodeInfoLister)(lister)
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@@ -20,9 +20,7 @@ import (
"fmt"

apiv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// DeltaClusterSnapshot is an implementation of ClusterSnapshot optimized for typical Cluster Autoscaler usage - (fork, add stuff, revert), repeated many times per loop.

@@ -46,29 +44,27 @@ type DeltaClusterSnapshot struct {
}

type deltaSnapshotNodeLister DeltaClusterSnapshot
type deltaSnapshotPodLister DeltaClusterSnapshot

type internalDeltaSnapshotData struct {
baseData *internalDeltaSnapshotData

addedNodeInfoMap map[string]*schedulernodeinfo.NodeInfo
modifiedNodeInfoMap map[string]*schedulernodeinfo.NodeInfo
addedNodeInfoMap map[string]*schedulerframework.NodeInfo
modifiedNodeInfoMap map[string]*schedulerframework.NodeInfo
deletedNodeInfos map[string]bool

nodeInfoList []*schedulernodeinfo.NodeInfo
podList []*apiv1.Pod
havePodsWithAffinity []*schedulernodeinfo.NodeInfo
nodeInfoList []*schedulerframework.NodeInfo
havePodsWithAffinity []*schedulerframework.NodeInfo
}

func newInternalDeltaSnapshotData() *internalDeltaSnapshotData {
return &internalDeltaSnapshotData{
addedNodeInfoMap: make(map[string]*schedulernodeinfo.NodeInfo),
modifiedNodeInfoMap: make(map[string]*schedulernodeinfo.NodeInfo),
addedNodeInfoMap: make(map[string]*schedulerframework.NodeInfo),
modifiedNodeInfoMap: make(map[string]*schedulerframework.NodeInfo),
deletedNodeInfos: make(map[string]bool),
}
}

func (data *internalDeltaSnapshotData) getNodeInfo(name string) (*schedulernodeinfo.NodeInfo, bool) {
func (data *internalDeltaSnapshotData) getNodeInfo(name string) (*schedulerframework.NodeInfo, bool) {
if data == nil {
return nil, false
}

@@ -81,7 +77,7 @@ func (data *internalDeltaSnapshotData) getNodeInfo(name string) (*schedulernodei
return data.baseData.getNodeInfo(name)
}

func (data *internalDeltaSnapshotData) getNodeInfoLocal(name string) (*schedulernodeinfo.NodeInfo, bool) {
func (data *internalDeltaSnapshotData) getNodeInfoLocal(name string) (*schedulerframework.NodeInfo, bool) {
if data == nil {
return nil, false
}

@@ -94,7 +90,7 @@ func (data *internalDeltaSnapshotData) getNodeInfoLocal(name string) (*scheduler
return nil, false
}

func (data *internalDeltaSnapshotData) getNodeInfoList() []*schedulernodeinfo.NodeInfo {
func (data *internalDeltaSnapshotData) getNodeInfoList() []*schedulerframework.NodeInfo {
if data == nil {
return nil
}

@@ -105,13 +101,13 @@ func (data *internalDeltaSnapshotData) getNodeInfoList() []*schedulernodeinfo.No
}

// Contains costly copying throughout the struct chain. Use wisely.
func (data *internalDeltaSnapshotData) buildNodeInfoList() []*schedulernodeinfo.NodeInfo {
func (data *internalDeltaSnapshotData) buildNodeInfoList() []*schedulerframework.NodeInfo {
baseList := data.baseData.getNodeInfoList()
totalLen := len(baseList) + len(data.addedNodeInfoMap)
var nodeInfoList []*schedulernodeinfo.NodeInfo
var nodeInfoList []*schedulerframework.NodeInfo

if len(data.deletedNodeInfos) > 0 || len(data.modifiedNodeInfoMap) > 0 {
nodeInfoList = make([]*schedulernodeinfo.NodeInfo, 0, totalLen)
nodeInfoList = make([]*schedulerframework.NodeInfo, 0, totalLen)
for _, bni := range baseList {
if data.deletedNodeInfos[bni.Node().Name] {
continue

@@ -123,7 +119,7 @@ func (data *internalDeltaSnapshotData) buildNodeInfoList() []*schedulernodeinfo.
nodeInfoList = append(nodeInfoList, bni)
}
} else {
nodeInfoList = make([]*schedulernodeinfo.NodeInfo, len(baseList), totalLen)
nodeInfoList = make([]*schedulerframework.NodeInfo, len(baseList), totalLen)
copy(nodeInfoList, baseList)
}

@@ -145,14 +141,14 @@ func (data *internalDeltaSnapshotData) addNodes(nodes []*apiv1.Node) error {
}

func (data *internalDeltaSnapshotData) addNode(node *apiv1.Node) error {
nodeInfo := schedulernodeinfo.NewNodeInfo()
nodeInfo := schedulerframework.NewNodeInfo()
if err := nodeInfo.SetNode(node); err != nil {
return fmt.Errorf("cannot set node in NodeInfo: %v", err)
}
return data.addNodeInfo(nodeInfo)
}

func (data *internalDeltaSnapshotData) addNodeInfo(nodeInfo *schedulernodeinfo.NodeInfo) error {
func (data *internalDeltaSnapshotData) addNodeInfo(nodeInfo *schedulerframework.NodeInfo) error {
if _, found := data.getNodeInfo(nodeInfo.Node().Name); found {
return fmt.Errorf("node %s already in snapshot", nodeInfo.Node().Name)
}

@@ -168,7 +164,7 @@ func (data *internalDeltaSnapshotData) addNodeInfo(nodeInfo *schedulernodeinfo.N
data.nodeInfoList = append(data.nodeInfoList, nodeInfo)
}

if len(nodeInfo.Pods()) > 0 {
if len(nodeInfo.Pods) > 0 {
data.clearPodCaches()
}

@@ -181,7 +177,6 @@ func (data *internalDeltaSnapshotData) clearCaches() {
}

func (data *internalDeltaSnapshotData) clearPodCaches() {
data.podList = nil
data.havePodsWithAffinity = nil
}

@@ -218,7 +213,7 @@ func (data *internalDeltaSnapshotData) removeNode(nodeName string) error {
return nil
}

func (data *internalDeltaSnapshotData) nodeInfoToModify(nodeName string) (*schedulernodeinfo.NodeInfo, bool) {
func (data *internalDeltaSnapshotData) nodeInfoToModify(nodeName string) (*schedulerframework.NodeInfo, bool) {
dni, found := data.getNodeInfoLocal(nodeName)
if !found {
if _, found := data.deletedNodeInfos[nodeName]; found {

@@ -258,9 +253,9 @@ func (data *internalDeltaSnapshotData) removePod(namespace, name, nodeName strin
}

podFound := false
for _, pod := range ni.Pods() {
if pod.Namespace == namespace && pod.Name == name {
if err := ni.RemovePod(pod); err != nil {
for _, podInfo := range ni.Pods {
if podInfo.Pod.Namespace == namespace && podInfo.Pod.Name == name {
if err := ni.RemovePod(podInfo.Pod); err != nil {
return fmt.Errorf("cannot remove pod; %v", err)
}
podFound = true

@@ -276,35 +271,6 @@ func (data *internalDeltaSnapshotData) removePod(namespace, name, nodeName strin
return nil
}

func (data *internalDeltaSnapshotData) getPodList() []*apiv1.Pod {
if data == nil {
return nil
}
if data.podList == nil {
data.podList = data.buildPodList()
}
return data.podList
}

func (data *internalDeltaSnapshotData) buildPodList() []*apiv1.Pod {
if len(data.deletedNodeInfos) > 0 || len(data.modifiedNodeInfoMap) > 0 {
podList := []*apiv1.Pod{}
nodeInfos := data.getNodeInfoList()
for _, ni := range nodeInfos {
podList = append(podList, ni.Pods()...)
}
return podList
}

basePodList := data.baseData.getPodList()
podList := make([]*apiv1.Pod, len(basePodList), len(basePodList))
copy(podList, basePodList)
for _, ni := range data.addedNodeInfoMap {
podList = append(podList, ni.Pods()...)
}
return podList
}

func (data *internalDeltaSnapshotData) fork() *internalDeltaSnapshotData {
forkedData := newInternalDeltaSnapshotData()
forkedData.baseData = data

@@ -338,21 +304,21 @@ func (data *internalDeltaSnapshotData) commit() (*internalDeltaSnapshotData, err
}

// List returns list of all node infos.
func (snapshot *deltaSnapshotNodeLister) List() ([]*schedulernodeinfo.NodeInfo, error) {
func (snapshot *deltaSnapshotNodeLister) List() ([]*schedulerframework.NodeInfo, error) {
return snapshot.data.getNodeInfoList(), nil
}

// HavePodsWithAffinityList returns list of all node infos with pods that have affinity constrints.
func (snapshot *deltaSnapshotNodeLister) HavePodsWithAffinityList() ([]*schedulernodeinfo.NodeInfo, error) {
func (snapshot *deltaSnapshotNodeLister) HavePodsWithAffinityList() ([]*schedulerframework.NodeInfo, error) {
data := snapshot.data
if data.havePodsWithAffinity != nil {
return data.havePodsWithAffinity, nil
}

nodeInfoList := snapshot.data.getNodeInfoList()
havePodsWithAffinityList := make([]*schedulernodeinfo.NodeInfo, 0, len(nodeInfoList))
havePodsWithAffinityList := make([]*schedulerframework.NodeInfo, 0, len(nodeInfoList))
for _, node := range nodeInfoList {
if len(node.PodsWithAffinity()) > 0 {
if len(node.PodsWithAffinity) > 0 {
havePodsWithAffinityList = append(havePodsWithAffinityList, node)
}
}

@@ -361,11 +327,11 @@ func (snapshot *deltaSnapshotNodeLister) HavePodsWithAffinityList() ([]*schedule
}

// Get returns node info by node name.
func (snapshot *deltaSnapshotNodeLister) Get(nodeName string) (*schedulernodeinfo.NodeInfo, error) {
func (snapshot *deltaSnapshotNodeLister) Get(nodeName string) (*schedulerframework.NodeInfo, error) {
return (*DeltaClusterSnapshot)(snapshot).getNodeInfo(nodeName)
}

func (snapshot *DeltaClusterSnapshot) getNodeInfo(nodeName string) (*schedulernodeinfo.NodeInfo, error) {
func (snapshot *DeltaClusterSnapshot) getNodeInfo(nodeName string) (*schedulerframework.NodeInfo, error) {
data := snapshot.data
node, found := data.getNodeInfo(nodeName)
if !found {

@@ -374,50 +340,8 @@ func (snapshot *DeltaClusterSnapshot) getNodeInfo(nodeName string) (*schedulerno
return node, nil
}

// List returns all pods matching selector.
func (snapshot *deltaSnapshotPodLister) List(selector labels.Selector) ([]*apiv1.Pod, error) {
data := snapshot.data
if data.podList == nil {
data.podList = data.buildPodList()
}

if selector.Empty() {
// no restrictions, yay
return data.podList, nil
}

selectedPods := make([]*apiv1.Pod, 0, len(data.podList))
for _, pod := range data.podList {
if selector.Matches(labels.Set(pod.Labels)) {
selectedPods = append(selectedPods, pod)
}
}
return selectedPods, nil
}

// FilteredList returns all pods matching selector and filter.
func (snapshot *deltaSnapshotPodLister) FilteredList(podFilter schedulerlisters.PodFilter, selector labels.Selector) ([]*apiv1.Pod, error) {
data := snapshot.data
if data.podList == nil {
data.podList = data.buildPodList()
}

selectedPods := make([]*apiv1.Pod, 0, len(data.podList))
for _, pod := range data.podList {
if podFilter(pod) && selector.Matches(labels.Set(pod.Labels)) {
selectedPods = append(selectedPods, pod)
}
}
return selectedPods, nil
}

// Pods returns pod lister.
func (snapshot *DeltaClusterSnapshot) Pods() schedulerlisters.PodLister {
return (*deltaSnapshotPodLister)(snapshot)
}

// NodeInfos returns node lister.
func (snapshot *DeltaClusterSnapshot) NodeInfos() schedulerlisters.NodeInfoLister {
func (snapshot *DeltaClusterSnapshot) NodeInfos() schedulerframework.NodeInfoLister {
return (*deltaSnapshotNodeLister)(snapshot)
}
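With deltaSnapshotPodLister and the cached podList removed, pods in a snapshot are now reached through the NodeInfo lister only. A rough sketch of the replacement path (the helper name and package clause are assumptions, not part of the commit):

package simulator

import (
	apiv1 "k8s.io/api/core/v1"
	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// snapshotPods collects the raw pods from a NodeInfoLister; each entry of
// NodeInfo.Pods is a *schedulerframework.PodInfo wrapping an *apiv1.Pod.
func snapshotPods(nodeInfos schedulerframework.NodeInfoLister) ([]*apiv1.Pod, error) {
	infos, err := nodeInfos.List()
	if err != nil {
		return nil, err
	}
	var pods []*apiv1.Pod
	for _, ni := range infos {
		for _, podInfo := range ni.Pods {
			pods = append(pods, podInfo.Pod)
		}
	}
	return pods, nil
}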
@@ -26,7 +26,7 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/autoscaler/cluster-autoscaler/utils/drain"
kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// FastGetPodsToMove returns a list of pods that should be moved elsewhere if the node

@@ -34,10 +34,14 @@ import (
// Based on kubectl drain code. It makes an assumption that RC, DS, Jobs and RS were deleted
// along with their pods (no abandoned pods with dangling created-by annotation). Useful for fast
// checks.
func FastGetPodsToMove(nodeInfo *schedulernodeinfo.NodeInfo, skipNodesWithSystemPods bool, skipNodesWithLocalStorage bool,
func FastGetPodsToMove(nodeInfo *schedulerframework.NodeInfo, skipNodesWithSystemPods bool, skipNodesWithLocalStorage bool,
pdbs []*policyv1.PodDisruptionBudget) ([]*apiv1.Pod, *drain.BlockingPod, error) {
var pods []*apiv1.Pod
for _, podInfo := range nodeInfo.Pods {
pods = append(pods, podInfo.Pod)
}
pods, blockingPod, err := drain.GetPodsForDeletionOnNodeDrain(
nodeInfo.Pods(),
pods,
pdbs,
skipNodesWithSystemPods,
skipNodesWithLocalStorage,

@@ -60,11 +64,15 @@ func FastGetPodsToMove(nodeInfo *schedulernodeinfo.NodeInfo, skipNodesWithSystem
// is drained. Raises error if there is an unreplicated pod.
// Based on kubectl drain code. It checks whether RC, DS, Jobs and RS that created these pods
// still exist.
func DetailedGetPodsForMove(nodeInfo *schedulernodeinfo.NodeInfo, skipNodesWithSystemPods bool,
func DetailedGetPodsForMove(nodeInfo *schedulerframework.NodeInfo, skipNodesWithSystemPods bool,
skipNodesWithLocalStorage bool, listers kube_util.ListerRegistry, minReplicaCount int32,
pdbs []*policyv1.PodDisruptionBudget) ([]*apiv1.Pod, *drain.BlockingPod, error) {
var pods []*apiv1.Pod
for _, podInfo := range nodeInfo.Pods {
pods = append(pods, podInfo.Pod)
}
pods, blockingPod, err := drain.GetPodsForDeletionOnNodeDrain(
nodeInfo.Pods(),
pods,
pdbs,
skipNodesWithSystemPods,
skipNodesWithLocalStorage,
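The PodInfo-unwrapping loop above is now duplicated in FastGetPodsToMove and DetailedGetPodsForMove. A small helper along these lines would capture the pattern; the function name and package clause are hypothetical, not something the commit adds:

package simulator

import (
	apiv1 "k8s.io/api/core/v1"
	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// podsFromNodeInfo unwraps NodeInfo.Pods ([]*schedulerframework.PodInfo)
// back into the []*apiv1.Pod slice expected by the drain helpers.
func podsFromNodeInfo(nodeInfo *schedulerframework.NodeInfo) []*apiv1.Pod {
	pods := make([]*apiv1.Pod, 0, len(nodeInfo.Pods))
	for _, podInfo := range nodeInfo.Pods {
		pods = append(pods, podInfo.Pod)
	}
	return pods
}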
@@ -26,7 +26,7 @@ import (
"k8s.io/autoscaler/cluster-autoscaler/utils/drain"
. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
"k8s.io/kubernetes/pkg/kubelet/types"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"

"github.com/stretchr/testify/assert"
)

@@ -40,7 +40,7 @@ func TestFastGetPodsToMove(t *testing.T) {
Namespace: "ns",
},
}
_, blockingPod, err := FastGetPodsToMove(schedulernodeinfo.NewNodeInfo(pod1), true, true, nil)
_, blockingPod, err := FastGetPodsToMove(schedulerframework.NewNodeInfo(pod1), true, true, nil)
assert.Error(t, err)
assert.Equal(t, &drain.BlockingPod{Pod: pod1, Reason: drain.NotReplicated}, blockingPod)

@@ -52,7 +52,7 @@ func TestFastGetPodsToMove(t *testing.T) {
OwnerReferences: GenerateOwnerReferences("rs", "ReplicaSet", "extensions/v1beta1", ""),
},
}
r2, blockingPod, err := FastGetPodsToMove(schedulernodeinfo.NewNodeInfo(pod2), true, true, nil)
r2, blockingPod, err := FastGetPodsToMove(schedulerframework.NewNodeInfo(pod2), true, true, nil)
assert.NoError(t, err)
assert.Nil(t, blockingPod)
assert.Equal(t, 1, len(r2))

@@ -68,7 +68,7 @@ func TestFastGetPodsToMove(t *testing.T) {
},
},
}
r3, blockingPod, err := FastGetPodsToMove(schedulernodeinfo.NewNodeInfo(pod3), true, true, nil)
r3, blockingPod, err := FastGetPodsToMove(schedulerframework.NewNodeInfo(pod3), true, true, nil)
assert.NoError(t, err)
assert.Nil(t, blockingPod)
assert.Equal(t, 0, len(r3))

@@ -81,7 +81,7 @@ func TestFastGetPodsToMove(t *testing.T) {
OwnerReferences: GenerateOwnerReferences("ds", "DaemonSet", "extensions/v1beta1", ""),
},
}
r4, blockingPod, err := FastGetPodsToMove(schedulernodeinfo.NewNodeInfo(pod2, pod3, pod4), true, true, nil)
r4, blockingPod, err := FastGetPodsToMove(schedulerframework.NewNodeInfo(pod2, pod3, pod4), true, true, nil)
assert.NoError(t, err)
assert.Nil(t, blockingPod)
assert.Equal(t, 1, len(r4))

@@ -95,7 +95,7 @@ func TestFastGetPodsToMove(t *testing.T) {
OwnerReferences: GenerateOwnerReferences("rs", "ReplicaSet", "extensions/v1beta1", ""),
},
}
_, blockingPod, err = FastGetPodsToMove(schedulernodeinfo.NewNodeInfo(pod5), true, true, nil)
_, blockingPod, err = FastGetPodsToMove(schedulerframework.NewNodeInfo(pod5), true, true, nil)
assert.Error(t, err)
assert.Equal(t, &drain.BlockingPod{Pod: pod5, Reason: drain.UnmovableKubeSystemPod}, blockingPod)

@@ -116,7 +116,7 @@ func TestFastGetPodsToMove(t *testing.T) {
},
},
}
_, blockingPod, err = FastGetPodsToMove(schedulernodeinfo.NewNodeInfo(pod6), true, true, nil)
_, blockingPod, err = FastGetPodsToMove(schedulerframework.NewNodeInfo(pod6), true, true, nil)
assert.Error(t, err)
assert.Equal(t, &drain.BlockingPod{Pod: pod6, Reason: drain.LocalStorageRequested}, blockingPod)

@@ -139,7 +139,7 @@ func TestFastGetPodsToMove(t *testing.T) {
},
},
}
r7, blockingPod, err := FastGetPodsToMove(schedulernodeinfo.NewNodeInfo(pod7), true, true, nil)
r7, blockingPod, err := FastGetPodsToMove(schedulerframework.NewNodeInfo(pod7), true, true, nil)
assert.NoError(t, err)
assert.Nil(t, blockingPod)
assert.Equal(t, 1, len(r7))

@@ -175,7 +175,7 @@ func TestFastGetPodsToMove(t *testing.T) {
},
}

_, blockingPod, err = FastGetPodsToMove(schedulernodeinfo.NewNodeInfo(pod8), true, true, []*policyv1.PodDisruptionBudget{pdb8})
_, blockingPod, err = FastGetPodsToMove(schedulerframework.NewNodeInfo(pod8), true, true, []*policyv1.PodDisruptionBudget{pdb8})
assert.Error(t, err)
assert.Equal(t, &drain.BlockingPod{Pod: pod8, Reason: drain.NotEnoughPdb}, blockingPod)

@@ -209,7 +209,7 @@ func TestFastGetPodsToMove(t *testing.T) {
},
}

r9, blockingPod, err := FastGetPodsToMove(schedulernodeinfo.NewNodeInfo(pod9), true, true, []*policyv1.PodDisruptionBudget{pdb9})
r9, blockingPod, err := FastGetPodsToMove(schedulerframework.NewNodeInfo(pod9), true, true, []*policyv1.PodDisruptionBudget{pdb9})
assert.NoError(t, err)
assert.Nil(t, blockingPod)
assert.Equal(t, 1, len(r9))
@@ -18,7 +18,7 @@ package simulator

import (
apiv1 "k8s.io/api/core/v1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"

"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
pod_util "k8s.io/autoscaler/cluster-autoscaler/utils/pod"

@@ -34,12 +34,12 @@ func getRequiredPodsForNode(nodename string, podsForNodes map[string][]*apiv1.Po
}

// BuildNodeInfoForNode build a NodeInfo structure for the given node as if the node was just created.
func BuildNodeInfoForNode(node *apiv1.Node, podsForNodes map[string][]*apiv1.Pod) (*schedulernodeinfo.NodeInfo, errors.AutoscalerError) {
func BuildNodeInfoForNode(node *apiv1.Node, podsForNodes map[string][]*apiv1.Pod) (*schedulerframework.NodeInfo, errors.AutoscalerError) {
requiredPods, err := getRequiredPodsForNode(node.Name, podsForNodes)
if err != nil {
return nil, err
}
result := schedulernodeinfo.NewNodeInfo(requiredPods...)
result := schedulerframework.NewNodeInfo(requiredPods...)
if err := result.SetNode(node); err != nil {
return nil, errors.ToAutoscalerError(errors.InternalError, err)
}
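For reference, the construction pattern used by BuildNodeInfoForNode is unchanged under the new package: NewNodeInfo still accepts raw *apiv1.Pod values (wrapping them into PodInfo entries internally), and SetNode can still fail. A minimal sketch, with the helper name and package clause assumed for illustration:

package simulator

import (
	apiv1 "k8s.io/api/core/v1"
	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// newNodeInfo builds a NodeInfo for a node plus its pods and propagates the
// SetNode error instead of ignoring it.
func newNodeInfo(node *apiv1.Node, pods ...*apiv1.Pod) (*schedulerframework.NodeInfo, error) {
	nodeInfo := schedulerframework.NewNodeInfo(pods...)
	if err := nodeInfo.SetNode(node); err != nil {
		return nil, err
	}
	return nodeInfo, nil
}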
@@ -29,8 +29,7 @@ import (
volume_scheduling "k8s.io/kubernetes/pkg/controller/volume/scheduling"
scheduler_apis_config "k8s.io/kubernetes/pkg/scheduler/apis/config"
scheduler_plugins "k8s.io/kubernetes/pkg/scheduler/framework/plugins"
scheduler_framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
scheduler_nodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"

// We need to import provider to initialize default scheduler.
"k8s.io/kubernetes/pkg/scheduler/algorithmprovider"

@@ -39,7 +38,7 @@ import (
// SchedulerBasedPredicateChecker checks whether all required predicates pass for given Pod and Node.
// The verification is done by calling out to scheduler code.
type SchedulerBasedPredicateChecker struct {
framework scheduler_framework.Framework
framework schedulerframework.Framework
delegatingSharedLister *DelegatingSchedulerSharedLister
nodeLister v1listers.NodeLister
podLister v1listers.PodLister

@@ -62,13 +61,13 @@ func NewSchedulerBasedPredicateChecker(kubeClient kube_client.Interface, stop <-
time.Duration(10)*time.Second,
)

framework, err := scheduler_framework.NewFramework(
framework, err := schedulerframework.NewFramework(
scheduler_plugins.NewInTreeRegistry(),
plugins,
nil, // This is fine.
scheduler_framework.WithInformerFactory(informerFactory),
scheduler_framework.WithSnapshotSharedLister(sharedLister),
scheduler_framework.WithVolumeBinder(volumeBinder),
schedulerframework.WithInformerFactory(informerFactory),
schedulerframework.WithSnapshotSharedLister(sharedLister),
schedulerframework.WithVolumeBinder(volumeBinder),
)

if err != nil {

@@ -106,7 +105,7 @@ func (p *SchedulerBasedPredicateChecker) FitsAnyNode(clusterSnapshot ClusterSnap
p.delegatingSharedLister.UpdateDelegate(clusterSnapshot)
defer p.delegatingSharedLister.ResetDelegate()

state := scheduler_framework.NewCycleState()
state := schedulerframework.NewCycleState()
preFilterStatus := p.framework.RunPreFilterPlugins(context.TODO(), state, pod)
if !preFilterStatus.IsSuccess() {
return "", fmt.Errorf("error running pre filter plugins for pod %s; %s", pod.Name, preFilterStatus.Message())

@@ -147,7 +146,7 @@ func (p *SchedulerBasedPredicateChecker) CheckPredicates(clusterSnapshot Cluster
p.delegatingSharedLister.UpdateDelegate(clusterSnapshot)
defer p.delegatingSharedLister.ResetDelegate()

state := scheduler_framework.NewCycleState()
state := schedulerframework.NewCycleState()
preFilterStatus := p.framework.RunPreFilterPlugins(context.TODO(), state, pod)
if !preFilterStatus.IsSuccess() {
return NewPredicateError(

@@ -180,7 +179,7 @@ func (p *SchedulerBasedPredicateChecker) CheckPredicates(clusterSnapshot Cluster
return nil
}

func (p *SchedulerBasedPredicateChecker) buildDebugInfo(filterName string, nodeInfo *scheduler_nodeinfo.NodeInfo) func() string {
func (p *SchedulerBasedPredicateChecker) buildDebugInfo(filterName string, nodeInfo *schedulerframework.NodeInfo) func() string {
switch filterName {
case "TaintToleration":
taints := nodeInfo.Node().Spec.Taints
@@ -20,17 +20,17 @@ import (
"time"

"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// Backoff allows time-based backing off of node groups considered in scale up algorithm
type Backoff interface {
// Backoff execution for the given node group. Returns time till execution is backed off.
Backoff(nodeGroup cloudprovider.NodeGroup, nodeInfo *schedulernodeinfo.NodeInfo, errorClass cloudprovider.InstanceErrorClass, errorCode string, currentTime time.Time) time.Time
Backoff(nodeGroup cloudprovider.NodeGroup, nodeInfo *schedulerframework.NodeInfo, errorClass cloudprovider.InstanceErrorClass, errorCode string, currentTime time.Time) time.Time
// IsBackedOff returns true if execution is backed off for the given node group.
IsBackedOff(nodeGroup cloudprovider.NodeGroup, nodeInfo *schedulernodeinfo.NodeInfo, currentTime time.Time) bool
IsBackedOff(nodeGroup cloudprovider.NodeGroup, nodeInfo *schedulerframework.NodeInfo, currentTime time.Time) bool
// RemoveBackoff removes backoff data for the given node group.
RemoveBackoff(nodeGroup cloudprovider.NodeGroup, nodeInfo *schedulernodeinfo.NodeInfo)
RemoveBackoff(nodeGroup cloudprovider.NodeGroup, nodeInfo *schedulerframework.NodeInfo)
// RemoveStaleBackoffData removes stale backoff data.
RemoveStaleBackoffData(currentTime time.Time)
}
@@ -21,7 +21,7 @@ import (

"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"

schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// Backoff handles backing off executions.

@@ -66,7 +66,7 @@ func NewIdBasedExponentialBackoff(initialBackoffDuration time.Duration, maxBacko
}

// Backoff execution for the given node group. Returns time till execution is backed off.
func (b *exponentialBackoff) Backoff(nodeGroup cloudprovider.NodeGroup, nodeInfo *schedulernodeinfo.NodeInfo, errorClass cloudprovider.InstanceErrorClass, errorCode string, currentTime time.Time) time.Time {
func (b *exponentialBackoff) Backoff(nodeGroup cloudprovider.NodeGroup, nodeInfo *schedulerframework.NodeInfo, errorClass cloudprovider.InstanceErrorClass, errorCode string, currentTime time.Time) time.Time {
duration := b.initialBackoffDuration
key := b.nodeGroupKey(nodeGroup)
if backoffInfo, found := b.backoffInfo[key]; found {

@@ -92,13 +92,13 @@ func (b *exponentialBackoff) Backoff(nodeGroup cloudprovider.NodeGroup, nodeInfo
}

// IsBackedOff returns true if execution is backed off for the given node group.
func (b *exponentialBackoff) IsBackedOff(nodeGroup cloudprovider.NodeGroup, nodeInfo *schedulernodeinfo.NodeInfo, currentTime time.Time) bool {
func (b *exponentialBackoff) IsBackedOff(nodeGroup cloudprovider.NodeGroup, nodeInfo *schedulerframework.NodeInfo, currentTime time.Time) bool {
backoffInfo, found := b.backoffInfo[b.nodeGroupKey(nodeGroup)]
return found && backoffInfo.backoffUntil.After(currentTime)
}

// RemoveBackoff removes backoff data for the given node group.
func (b *exponentialBackoff) RemoveBackoff(nodeGroup cloudprovider.NodeGroup, nodeInfo *schedulernodeinfo.NodeInfo) {
func (b *exponentialBackoff) RemoveBackoff(nodeGroup cloudprovider.NodeGroup, nodeInfo *schedulerframework.NodeInfo) {
delete(b.backoffInfo, b.nodeGroupKey(nodeGroup))
}
@@ -24,11 +24,11 @@ import (

appsv1 "k8s.io/api/apps/v1"
apiv1 "k8s.io/api/core/v1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// GetDaemonSetPodsForNode returns daemonset nodes for the given pod.
func GetDaemonSetPodsForNode(nodeInfo *schedulernodeinfo.NodeInfo, daemonsets []*appsv1.DaemonSet, predicateChecker simulator.PredicateChecker) ([]*apiv1.Pod, error) {
func GetDaemonSetPodsForNode(nodeInfo *schedulerframework.NodeInfo, daemonsets []*appsv1.DaemonSet, predicateChecker simulator.PredicateChecker) ([]*apiv1.Pod, error) {
result := make([]*apiv1.Pod, 0)

// here we can use empty snapshot

@@ -36,7 +36,11 @@ func GetDaemonSetPodsForNode(nodeInfo *schedulernodeinfo.NodeInfo, daemonsets []

// add a node with pods - node info is created by cloud provider,
// we don't know whether it'll have pods or not.
if err := clusterSnapshot.AddNodeWithPods(nodeInfo.Node(), nodeInfo.Pods()); err != nil {
var pods []*apiv1.Pod
for _, podInfo := range nodeInfo.Pods {
pods = append(pods, podInfo.Pod)
}
if err := clusterSnapshot.AddNodeWithPods(nodeInfo.Node(), pods); err != nil {
return nil, err
}
@@ -27,7 +27,7 @@ import (
appsv1 "k8s.io/api/apps/v1"
apiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"

"github.com/stretchr/testify/assert"
)

@@ -35,7 +35,7 @@ import (
func TestGetDaemonSetPodsForNode(t *testing.T) {
node := BuildTestNode("node", 1000, 1000)
SetNodeReadyState(node, true, time.Now())
nodeInfo := schedulernodeinfo.NewNodeInfo()
nodeInfo := schedulerframework.NewNodeInfo()
nodeInfo.SetNode(node)

predicateChecker, err := simulator.NewTestPredicateChecker()
@@ -18,28 +18,28 @@ package scheduler

import (
apiv1 "k8s.io/api/core/v1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// CreateNodeNameToInfoMap obtains a list of pods and pivots that list into a map where the keys are node names
// and the values are the aggregated information for that node. Pods waiting lower priority pods preemption
// (pod.Status.NominatedNodeName is set) are also added to list of pods for a node.
func CreateNodeNameToInfoMap(pods []*apiv1.Pod, nodes []*apiv1.Node) map[string]*schedulernodeinfo.NodeInfo {
nodeNameToNodeInfo := make(map[string]*schedulernodeinfo.NodeInfo)
func CreateNodeNameToInfoMap(pods []*apiv1.Pod, nodes []*apiv1.Node) map[string]*schedulerframework.NodeInfo {
nodeNameToNodeInfo := make(map[string]*schedulerframework.NodeInfo)
for _, pod := range pods {
nodeName := pod.Spec.NodeName
if nodeName == "" {
nodeName = pod.Status.NominatedNodeName
}
if _, ok := nodeNameToNodeInfo[nodeName]; !ok {
nodeNameToNodeInfo[nodeName] = schedulernodeinfo.NewNodeInfo()
nodeNameToNodeInfo[nodeName] = schedulerframework.NewNodeInfo()
}
nodeNameToNodeInfo[nodeName].AddPod(pod)
}

for _, node := range nodes {
if _, ok := nodeNameToNodeInfo[node.Name]; !ok {
nodeNameToNodeInfo[node.Name] = schedulernodeinfo.NewNodeInfo()
nodeNameToNodeInfo[node.Name] = schedulerframework.NewNodeInfo()
}
nodeNameToNodeInfo[node.Name].SetNode(node)
}
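A short usage sketch of the map built above: as the updated test below shows, callers now index the NodeInfo.Pods slice of *PodInfo entries and take .Pod to get the original object back. The helper name here is hypothetical and only illustrates the access pattern:

package scheduler

import (
	apiv1 "k8s.io/api/core/v1"
)

// firstPodOnNode reads a pod back out of the node-name-to-info map now that
// NodeInfo.Pods holds *PodInfo entries rather than *apiv1.Pod directly.
func firstPodOnNode(nodeName string, pods []*apiv1.Pod, nodes []*apiv1.Node) *apiv1.Pod {
	infoMap := CreateNodeNameToInfoMap(pods, nodes)
	ni, ok := infoMap[nodeName]
	if !ok || len(ni.Pods) == 0 {
		return nil
	}
	return ni.Pods[0].Pod
}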
@@ -44,9 +44,9 @@ func TestCreateNodeNameToInfoMap(t *testing.T) {

res := CreateNodeNameToInfoMap([]*apiv1.Pod{p1, p2, p3, podWaitingForPreemption}, []*apiv1.Node{n1, n2})
assert.Equal(t, 2, len(res))
assert.Equal(t, p1, res["node1"].Pods()[0])
assert.Equal(t, podWaitingForPreemption, res["node1"].Pods()[1])
assert.Equal(t, p1, res["node1"].Pods[0].Pod)
assert.Equal(t, podWaitingForPreemption, res["node1"].Pods[1].Pod)
assert.Equal(t, n1, res["node1"].Node())
assert.Equal(t, p2, res["node2"].Pods()[0])
assert.Equal(t, p2, res["node2"].Pods[0].Pod)
assert.Equal(t, n2, res["node2"].Node())
}