Move schedulercache to package nodeinfo
parent 4f7600911f
commit 128729bae9
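The change below is a mechanical rename of the scheduler NodeInfo package: every import of k8s.io/kubernetes/pkg/scheduler/cache (aliased schedulercache) becomes k8s.io/kubernetes/pkg/scheduler/nodeinfo (aliased schedulernodeinfo), while the NodeInfo API used by the autoscaler stays the same. The sketch below is illustrative only and is not part of the commit; it just shows the new import path together with the NewNodeInfo/SetNode calls that appear throughout the diff (the example package and buildNodeInfo helper are hypothetical).

```go
package example // hypothetical package, for illustration only

import (
	apiv1 "k8s.io/api/core/v1"

	// Before this commit: schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// buildNodeInfo wraps a node and its pods in the renamed NodeInfo type,
// mirroring the NewNodeInfo/SetNode pattern used by the cloud providers below.
func buildNodeInfo(node *apiv1.Node, pods ...*apiv1.Pod) (*schedulernodeinfo.NodeInfo, error) {
	nodeInfo := schedulernodeinfo.NewNodeInfo(pods...)
	if err := nodeInfo.SetNode(node); err != nil {
		return nil, err
	}
	return nodeInfo, nil
}
```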
@@ -22,7 +22,7 @@ import (
 apiv1 "k8s.io/api/core/v1"
 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 "k8s.io/klog"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
 
 // Asg implements NodeGroup interface.
@@ -173,7 +173,7 @@ func (asg *Asg) Nodes() ([]cloudprovider.Instance, error) {
 }
 
 // TemplateNodeInfo returns a node template for this node group.
-func (asg *Asg) TemplateNodeInfo() (*schedulercache.NodeInfo, error) {
+func (asg *Asg) TemplateNodeInfo() (*schedulernodeinfo.NodeInfo, error) {
 template, err := asg.manager.getAsgTemplate(asg.id)
 if err != nil {
 return nil, err
@@ -185,7 +185,7 @@ func (asg *Asg) TemplateNodeInfo() (*schedulercache.NodeInfo, error) {
 return nil, err
 }
 
-nodeInfo := schedulercache.NewNodeInfo(cloudprovider.BuildKubeProxy(asg.id))
+nodeInfo := schedulernodeinfo.NewNodeInfo(cloudprovider.BuildKubeProxy(asg.id))
 nodeInfo.SetNode(node)
 return nodeInfo, nil
 }

@@ -29,7 +29,7 @@ import (
 "k8s.io/autoscaler/cluster-autoscaler/config"
 "k8s.io/autoscaler/cluster-autoscaler/utils/errors"
 "k8s.io/klog"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
 
 const (
@@ -297,7 +297,7 @@ func (ng *AwsNodeGroup) Nodes() ([]cloudprovider.Instance, error) {
 }
 
 // TemplateNodeInfo returns a node template for this node group.
-func (ng *AwsNodeGroup) TemplateNodeInfo() (*schedulercache.NodeInfo, error) {
+func (ng *AwsNodeGroup) TemplateNodeInfo() (*schedulernodeinfo.NodeInfo, error) {
 template, err := ng.awsManager.getAsgTemplate(ng.asg)
 if err != nil {
 return nil, err
@@ -308,7 +308,7 @@ func (ng *AwsNodeGroup) TemplateNodeInfo() (*schedulercache.NodeInfo, error) {
 return nil, err
 }
 
-nodeInfo := schedulercache.NewNodeInfo(cloudprovider.BuildKubeProxy(ng.asg.Name))
+nodeInfo := schedulernodeinfo.NewNodeInfo(cloudprovider.BuildKubeProxy(ng.asg.Name))
 nodeInfo.SetNode(node)
 return nodeInfo, nil
 }

@@ -32,7 +32,7 @@ import (
 apiv1 "k8s.io/api/core/v1"
 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 "k8s.io/autoscaler/cluster-autoscaler/config/dynamic"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
 
 // AgentPool implements NodeGroup interface for agent pools deployed by acs-engine.
@@ -375,7 +375,7 @@ func (as *AgentPool) Debug() string {
 }
 
 // TemplateNodeInfo returns a node template for this agent pool.
-func (as *AgentPool) TemplateNodeInfo() (*schedulercache.NodeInfo, error) {
+func (as *AgentPool) TemplateNodeInfo() (*schedulernodeinfo.NodeInfo, error) {
 return nil, cloudprovider.ErrNotImplemented
 }
 

@@ -26,7 +26,7 @@ import (
 apiv1 "k8s.io/api/core/v1"
 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 "k8s.io/autoscaler/cluster-autoscaler/config/dynamic"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
 
 //ContainerServiceAgentPool implements NodeGroup interface for agent pool deployed in ACS/AKS
@@ -458,7 +458,7 @@ func (agentPool *ContainerServiceAgentPool) Nodes() ([]cloudprovider.Instance, e
 }
 
 //TemplateNodeInfo is not implemented.
-func (agentPool *ContainerServiceAgentPool) TemplateNodeInfo() (*schedulercache.NodeInfo, error) {
+func (agentPool *ContainerServiceAgentPool) TemplateNodeInfo() (*schedulernodeinfo.NodeInfo, error) {
 return nil, cloudprovider.ErrNotImplemented
 }
 

@@ -31,7 +31,7 @@ import (
 "k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
 "k8s.io/klog"
 kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 
 "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
 )
@@ -458,7 +458,7 @@ func (scaleSet *ScaleSet) buildNodeFromTemplate(template compute.VirtualMachineS
 }
 
 // TemplateNodeInfo returns a node template for this scale set.
-func (scaleSet *ScaleSet) TemplateNodeInfo() (*schedulercache.NodeInfo, error) {
+func (scaleSet *ScaleSet) TemplateNodeInfo() (*schedulernodeinfo.NodeInfo, error) {
 template, err := scaleSet.getVMSSInfo()
 if err != nil {
 return nil, err
@@ -469,7 +469,7 @@ func (scaleSet *ScaleSet) TemplateNodeInfo() (*schedulercache.NodeInfo, error) {
 return nil, err
 }
 
-nodeInfo := schedulercache.NewNodeInfo(cloudprovider.BuildKubeProxy(scaleSet.Name))
+nodeInfo := schedulernodeinfo.NewNodeInfo(cloudprovider.BuildKubeProxy(scaleSet.Name))
 nodeInfo.SetNode(node)
 return nodeInfo, nil
 }

@@ -29,7 +29,7 @@ import (
 "k8s.io/autoscaler/cluster-autoscaler/config/dynamic"
 "k8s.io/autoscaler/cluster-autoscaler/utils/errors"
 "k8s.io/klog"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
 
 const (
@@ -304,13 +304,13 @@ func (asg *Asg) Nodes() ([]cloudprovider.Instance, error) {
 return instances, nil
 }
 
-// TemplateNodeInfo returns a schedulercache.NodeInfo structure of an empty
+// TemplateNodeInfo returns a schedulernodeinfo.NodeInfo structure of an empty
 // (as if just started) node. This will be used in scale-up simulations to
 // predict what would a new node look like if a node group was expanded. The returned
 // NodeInfo is expected to have a fully populated Node object, with all of the labels,
 // capacity and allocatable information as well as all pods that are started on
 // the node by default, using manifest (most likely only kube-proxy). Implementation optional.
-func (asg *Asg) TemplateNodeInfo() (*schedulercache.NodeInfo, error) {
+func (asg *Asg) TemplateNodeInfo() (*schedulernodeinfo.NodeInfo, error) {
 template, err := asg.baiducloudManager.getAsgTemplate(asg.Name)
 if err != nil {
 return nil, err
@@ -319,7 +319,7 @@ func (asg *Asg) TemplateNodeInfo() (*schedulercache.NodeInfo, error) {
 if err != nil {
 return nil, err
 }
-nodeInfo := schedulercache.NewNodeInfo(cloudprovider.BuildKubeProxy(asg.Name))
+nodeInfo := schedulernodeinfo.NewNodeInfo(cloudprovider.BuildKubeProxy(asg.Name))
 nodeInfo.SetNode(node)
 return nodeInfo, nil
 }

@@ -22,7 +22,7 @@ import (
 apiv1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/resource"
 "k8s.io/autoscaler/cluster-autoscaler/utils/errors"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
 
 // CloudProvider contains configuration info and functions for interacting with
@@ -117,13 +117,13 @@ type NodeGroup interface {
 // Other fields are optional.
 Nodes() ([]Instance, error)
 
-// TemplateNodeInfo returns a schedulercache.NodeInfo structure of an empty
+// TemplateNodeInfo returns a schedulernodeinfo.NodeInfo structure of an empty
 // (as if just started) node. This will be used in scale-up simulations to
 // predict what would a new node look like if a node group was expanded. The returned
 // NodeInfo is expected to have a fully populated Node object, with all of the labels,
 // capacity and allocatable information as well as all pods that are started on
 // the node by default, using manifest (most likely only kube-proxy). Implementation optional.
-TemplateNodeInfo() (*schedulercache.NodeInfo, error)
+TemplateNodeInfo() (*schedulernodeinfo.NodeInfo, error)
 
 // Exist checks if the node group really exists on the cloud provider side. Allows to tell the
 // theoretical node group from the real one. Implementation required.

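For reference, the provider-side pattern that satisfies the renamed NodeGroup interface above looks roughly like the sketch below. It is assembled from the provider hunks in this diff; MyNodeGroup and the buildTemplateNode helper are hypothetical and the fragment assumes the cloudprovider and schedulernodeinfo imports shown elsewhere in the commit.

```go
// Hedged sketch, not code from this commit: only the TemplateNodeInfo shape matters here.
func (ng *MyNodeGroup) TemplateNodeInfo() (*schedulernodeinfo.NodeInfo, error) {
	node, err := ng.buildTemplateNode() // hypothetical helper returning a template *apiv1.Node
	if err != nil {
		return nil, err
	}
	// Same NewNodeInfo/BuildKubeProxy/SetNode sequence the real providers use.
	nodeInfo := schedulernodeinfo.NewNodeInfo(cloudprovider.BuildKubeProxy(ng.Id()))
	nodeInfo.SetNode(node)
	return nodeInfo, nil
}
```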
@@ -28,7 +28,7 @@ import (
 "k8s.io/autoscaler/cluster-autoscaler/config"
 "k8s.io/autoscaler/cluster-autoscaler/utils/errors"
 "k8s.io/klog"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
 
 const (
@@ -303,12 +303,12 @@ func (mig *gceMig) Autoprovisioned() bool {
 }
 
 // TemplateNodeInfo returns a node template for this node group.
-func (mig *gceMig) TemplateNodeInfo() (*schedulercache.NodeInfo, error) {
+func (mig *gceMig) TemplateNodeInfo() (*schedulernodeinfo.NodeInfo, error) {
 node, err := mig.gceManager.GetMigTemplateNode(mig)
 if err != nil {
 return nil, err
 }
-nodeInfo := schedulercache.NewNodeInfo(cloudprovider.BuildKubeProxy(mig.Id()))
+nodeInfo := schedulernodeinfo.NewNodeInfo(cloudprovider.BuildKubeProxy(mig.Id()))
 nodeInfo.SetNode(node)
 return nodeInfo, nil
 }

@@ -31,7 +31,7 @@ import (
 "k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
 "k8s.io/klog"
 kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
 
 const (
@@ -406,12 +406,12 @@ func (mig *GkeMig) Autoprovisioned() bool {
 }
 
 // TemplateNodeInfo returns a node template for this node group.
-func (mig *GkeMig) TemplateNodeInfo() (*schedulercache.NodeInfo, error) {
+func (mig *GkeMig) TemplateNodeInfo() (*schedulernodeinfo.NodeInfo, error) {
 node, err := mig.gkeManager.GetMigTemplateNode(mig)
 if err != nil {
 return nil, err
 }
-nodeInfo := schedulercache.NewNodeInfo(cloudprovider.BuildKubeProxy(mig.Id()))
+nodeInfo := schedulernodeinfo.NewNodeInfo(cloudprovider.BuildKubeProxy(mig.Id()))
 nodeInfo.SetNode(node)
 return nodeInfo, nil
 }

@@ -35,7 +35,7 @@ import (
 "k8s.io/client-go/rest"
 "k8s.io/client-go/tools/clientcmd"
 "k8s.io/kubernetes/pkg/kubemark"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 
 "k8s.io/klog"
 )
@@ -245,7 +245,7 @@ func (nodeGroup *NodeGroup) DecreaseTargetSize(delta int) error {
 }
 
 // TemplateNodeInfo returns a node template for this node group.
-func (nodeGroup *NodeGroup) TemplateNodeInfo() (*schedulercache.NodeInfo, error) {
+func (nodeGroup *NodeGroup) TemplateNodeInfo() (*schedulernodeinfo.NodeInfo, error) {
 return nil, cloudprovider.ErrNotImplemented
 }
 

@@ -16,7 +16,7 @@ limitations under the License.
 
 package mocks
 
-import cache "k8s.io/kubernetes/pkg/scheduler/cache"
+import cache "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 import cloudprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 import mock "github.com/stretchr/testify/mock"
 import v1 "k8s.io/api/core/v1"

@@ -24,7 +24,7 @@ import (
 "k8s.io/apimachinery/pkg/api/resource"
 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 "k8s.io/autoscaler/cluster-autoscaler/utils/errors"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
 
 // OnScaleUpFunc is a function called on node group increase in TestCloudProvider.
@@ -50,7 +50,7 @@ type TestCloudProvider struct {
 onNodeGroupCreate func(string) error
 onNodeGroupDelete func(string) error
 machineTypes []string
-machineTemplates map[string]*schedulercache.NodeInfo
+machineTemplates map[string]*schedulernodeinfo.NodeInfo
 resourceLimiter *cloudprovider.ResourceLimiter
 }
 
@@ -68,7 +68,7 @@ func NewTestCloudProvider(onScaleUp OnScaleUpFunc, onScaleDown OnScaleDownFunc)
 // NewTestAutoprovisioningCloudProvider builds new TestCloudProvider with autoprovisioning support
 func NewTestAutoprovisioningCloudProvider(onScaleUp OnScaleUpFunc, onScaleDown OnScaleDownFunc,
 onNodeGroupCreate OnNodeGroupCreateFunc, onNodeGroupDelete OnNodeGroupDeleteFunc,
-machineTypes []string, machineTemplates map[string]*schedulercache.NodeInfo) *TestCloudProvider {
+machineTypes []string, machineTemplates map[string]*schedulernodeinfo.NodeInfo) *TestCloudProvider {
 return &TestCloudProvider{
 nodes: make(map[string]string),
 groups: make(map[string]cloudprovider.NodeGroup),
@@ -375,7 +375,7 @@ func (tng *TestNodeGroup) Autoprovisioned() bool {
 }
 
 // TemplateNodeInfo returns a node template for this node group.
-func (tng *TestNodeGroup) TemplateNodeInfo() (*schedulercache.NodeInfo, error) {
+func (tng *TestNodeGroup) TemplateNodeInfo() (*schedulernodeinfo.NodeInfo, error) {
 if tng.cloudProvider.machineTemplates == nil {
 return nil, cloudprovider.ErrNotImplemented
 }

@@ -34,7 +34,7 @@ import (
 apiv1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/sets"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 
 "k8s.io/klog"
 )
@@ -120,7 +120,7 @@ type ClusterStateRegistry struct {
 scaleUpRequests map[string]*ScaleUpRequest // nodeGroupName -> ScaleUpRequest
 scaleDownRequests []*ScaleDownRequest
 nodes []*apiv1.Node
-nodeInfosForGroups map[string]*schedulercache.NodeInfo
+nodeInfosForGroups map[string]*schedulernodeinfo.NodeInfo
 cloudProvider cloudprovider.CloudProvider
 perNodeGroupReadiness map[string]Readiness
 totalReadiness Readiness
@@ -270,7 +270,7 @@ func (csr *ClusterStateRegistry) registerFailedScaleUpNoLock(nodeGroup cloudprov
 }
 
 // UpdateNodes updates the state of the nodes in the ClusterStateRegistry and recalculates the stats
-func (csr *ClusterStateRegistry) UpdateNodes(nodes []*apiv1.Node, nodeInfosForGroups map[string]*schedulercache.NodeInfo, currentTime time.Time) error {
+func (csr *ClusterStateRegistry) UpdateNodes(nodes []*apiv1.Node, nodeInfosForGroups map[string]*schedulernodeinfo.NodeInfo, currentTime time.Time) error {
 csr.updateNodeGroupMetrics()
 targetSizes, err := getTargetSizes(csr.cloudProvider)
 if err != nil {

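A caller of the renamed UpdateNodes signature might look like the following sketch; the helper, its arguments, and the "ng1" key are assumptions for illustration and are not code from this commit.

```go
// Illustrative only: feeding the renamed map type into ClusterStateRegistry.UpdateNodes.
func updateState(csr *clusterstate.ClusterStateRegistry, nodes []*apiv1.Node, groupInfo *schedulernodeinfo.NodeInfo) error {
	nodeInfosForGroups := map[string]*schedulernodeinfo.NodeInfo{"ng1": groupInfo}
	return csr.UpdateNodes(nodes, nodeInfosForGroups, time.Now())
}
```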
@@ -40,7 +40,7 @@ import (
 "k8s.io/autoscaler/cluster-autoscaler/utils/backoff"
 kube_client "k8s.io/client-go/kubernetes"
 kube_record "k8s.io/client-go/tools/record"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
 
 type nodeConfig struct {
@@ -144,8 +144,8 @@ type mockAutoprovisioningNodeGroupListProcessor struct {
 t *testing.T
 }
 
-func (p *mockAutoprovisioningNodeGroupListProcessor) Process(context *context.AutoscalingContext, nodeGroups []cloudprovider.NodeGroup, nodeInfos map[string]*schedulercache.NodeInfo,
-unschedulablePods []*apiv1.Pod) ([]cloudprovider.NodeGroup, map[string]*schedulercache.NodeInfo, error) {
+func (p *mockAutoprovisioningNodeGroupListProcessor) Process(context *context.AutoscalingContext, nodeGroups []cloudprovider.NodeGroup, nodeInfos map[string]*schedulernodeinfo.NodeInfo,
+unschedulablePods []*apiv1.Pod) ([]cloudprovider.NodeGroup, map[string]*schedulernodeinfo.NodeInfo, error) {
 
 machines, err := context.CloudProvider.GetAvailableMachineTypes()
 assert.NoError(p.t, err)

@@ -36,7 +36,7 @@ import (
 "k8s.io/autoscaler/cluster-autoscaler/utils/errors"
 "k8s.io/autoscaler/cluster-autoscaler/utils/glogx"
 "k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 
 "k8s.io/klog"
 )
@@ -49,7 +49,7 @@ const scaleUpLimitUnknown = math.MaxInt64
 
 func computeScaleUpResourcesLeftLimits(
 nodeGroups []cloudprovider.NodeGroup,
-nodeInfos map[string]*schedulercache.NodeInfo,
+nodeInfos map[string]*schedulernodeinfo.NodeInfo,
 nodesFromNotAutoscaledGroups []*apiv1.Node,
 resourceLimiter *cloudprovider.ResourceLimiter) (scaleUpResourcesLimits, errors.AutoscalerError) {
 totalCores, totalMem, errCoresMem := calculateScaleUpCoresMemoryTotal(nodeGroups, nodeInfos, nodesFromNotAutoscaledGroups)
@@ -103,7 +103,7 @@ func computeScaleUpResourcesLeftLimits(
 
 func calculateScaleUpCoresMemoryTotal(
 nodeGroups []cloudprovider.NodeGroup,
-nodeInfos map[string]*schedulercache.NodeInfo,
+nodeInfos map[string]*schedulernodeinfo.NodeInfo,
 nodesFromNotAutoscaledGroups []*apiv1.Node) (int64, int64, errors.AutoscalerError) {
 var coresTotal int64
 var memoryTotal int64
@@ -135,7 +135,7 @@ func calculateScaleUpCoresMemoryTotal(
 
 func calculateScaleUpGpusTotal(
 nodeGroups []cloudprovider.NodeGroup,
-nodeInfos map[string]*schedulercache.NodeInfo,
+nodeInfos map[string]*schedulernodeinfo.NodeInfo,
 nodesFromNotAutoscaledGroups []*apiv1.Node) (map[string]int64, errors.AutoscalerError) {
 
 result := make(map[string]int64)
@@ -178,7 +178,7 @@ func computeBelowMax(total int64, max int64) int64 {
 return 0
 }
 
-func computeScaleUpResourcesDelta(nodeInfo *schedulercache.NodeInfo, nodeGroup cloudprovider.NodeGroup, resourceLimiter *cloudprovider.ResourceLimiter) (scaleUpResourcesDelta, errors.AutoscalerError) {
+func computeScaleUpResourcesDelta(nodeInfo *schedulernodeinfo.NodeInfo, nodeGroup cloudprovider.NodeGroup, resourceLimiter *cloudprovider.ResourceLimiter) (scaleUpResourcesDelta, errors.AutoscalerError) {
 resultScaleUpDelta := make(scaleUpResourcesDelta)
 
 nodeCPU, nodeMemory := getNodeInfoCoresAndMemory(nodeInfo)
@@ -222,7 +222,7 @@ func (limits *scaleUpResourcesLimits) checkScaleUpDeltaWithinLimits(delta scaleU
 return scaleUpLimitsNotExceeded()
 }
 
-func getNodeInfoCoresAndMemory(nodeInfo *schedulercache.NodeInfo) (int64, int64) {
+func getNodeInfoCoresAndMemory(nodeInfo *schedulernodeinfo.NodeInfo) (int64, int64) {
 return getNodeCoresAndMemory(nodeInfo.Node())
 }
 
@@ -244,7 +244,7 @@ var (
 // false if it didn't and error if an error occurred. Assumes that all nodes in the cluster are
 // ready and in sync with instance groups.
 func ScaleUp(context *context.AutoscalingContext, processors *ca_processors.AutoscalingProcessors, clusterStateRegistry *clusterstate.ClusterStateRegistry, unschedulablePods []*apiv1.Pod,
-nodes []*apiv1.Node, daemonSets []*appsv1.DaemonSet, nodeInfos map[string]*schedulercache.NodeInfo) (*status.ScaleUpStatus, errors.AutoscalerError) {
+nodes []*apiv1.Node, daemonSets []*appsv1.DaemonSet, nodeInfos map[string]*schedulernodeinfo.NodeInfo) (*status.ScaleUpStatus, errors.AutoscalerError) {
 // From now on we only care about unschedulable pods that were marked after the newest
 // node became available for the scheduler.
 if len(unschedulablePods) == 0 {
@@ -283,7 +283,7 @@ func ScaleUp(context *context.AutoscalingContext, processors *ca_processors.Auto
 return &status.ScaleUpStatus{Result: status.ScaleUpError}, errLimits.AddPrefix("Could not compute total resources: ")
 }
 
-upcomingNodes := make([]*schedulercache.NodeInfo, 0)
+upcomingNodes := make([]*schedulernodeinfo.NodeInfo, 0)
 for nodeGroup, numberOfNodes := range clusterStateRegistry.GetUpcomingNodes() {
 nodeTemplate, found := nodeInfos[nodeGroup]
 if !found {
@@ -554,7 +554,7 @@ type podsPredicatePassingCheckFunctions struct {
 func getPodsPredicatePassingCheckFunctions(
 context *context.AutoscalingContext,
 unschedulablePods []*apiv1.Pod,
-nodeInfos map[string]*schedulercache.NodeInfo) podsPredicatePassingCheckFunctions {
+nodeInfos map[string]*schedulernodeinfo.NodeInfo) podsPredicatePassingCheckFunctions {
 
 podsPassingPredicatesCache := make(map[string][]*apiv1.Pod)
 podsNotPassingPredicatesCache := make(map[string]map[*apiv1.Pod]status.Reasons)
@@ -705,7 +705,7 @@ func executeScaleUp(context *context.AutoscalingContext, clusterStateRegistry *c
 func applyScaleUpResourcesLimits(
 newNodes int,
 scaleUpResourcesLeft scaleUpResourcesLimits,
-nodeInfo *schedulercache.NodeInfo,
+nodeInfo *schedulernodeinfo.NodeInfo,
 nodeGroup cloudprovider.NodeGroup,
 resourceLimiter *cloudprovider.ResourceLimiter) (int, errors.AutoscalerError) {
 

@@ -37,7 +37,7 @@ import (
 appsv1 "k8s.io/api/apps/v1"
 apiv1 "k8s.io/api/core/v1"
 "k8s.io/client-go/kubernetes/fake"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 
 "github.com/stretchr/testify/assert"
 "k8s.io/autoscaler/cluster-autoscaler/expander"
@@ -314,7 +314,7 @@ type assertingStrategy struct {
 t *testing.T
 }
 
-func (s assertingStrategy) BestOption(options []expander.Option, nodeInfo map[string]*schedulercache.NodeInfo) *expander.Option {
+func (s assertingStrategy) BestOption(options []expander.Option, nodeInfo map[string]*schedulernodeinfo.NodeInfo) *expander.Option {
 if len(s.expectedScaleUpOptions) > 0 {
 // empty s.expectedScaleUpOptions means we do not want to do assertion on contents of actual scaleUp options
 
@@ -740,7 +740,7 @@ func TestScaleUpAutoprovisionedNodeGroup(t *testing.T) {
 
 t1 := BuildTestNode("t1", 4000, 1000000)
 SetNodeReadyState(t1, true, time.Time{})
-ti1 := schedulercache.NewNodeInfo()
+ti1 := schedulernodeinfo.NewNodeInfo()
 ti1.SetNode(t1)
 
 provider := testprovider.NewTestAutoprovisioningCloudProvider(
@@ -750,7 +750,7 @@ func TestScaleUpAutoprovisionedNodeGroup(t *testing.T) {
 }, nil, func(nodeGroup string) error {
 createdGroups <- nodeGroup
 return nil
-}, nil, []string{"T1"}, map[string]*schedulercache.NodeInfo{"T1": ti1})
+}, nil, []string{"T1"}, map[string]*schedulernodeinfo.NodeInfo{"T1": ti1})
 
 options := config.AutoscalingOptions{
 EstimatorName: estimator.BinpackingEstimatorName,

@@ -40,7 +40,7 @@ import (
 "k8s.io/autoscaler/cluster-autoscaler/utils/tpu"
 
 "k8s.io/klog"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
 
 const (
@@ -68,7 +68,7 @@ type StaticAutoscaler struct {
 processors *ca_processors.AutoscalingProcessors
 initialized bool
 // Caches nodeInfo computed for previously seen nodes
-nodeInfoCache map[string]*schedulercache.NodeInfo
+nodeInfoCache map[string]*schedulernodeinfo.NodeInfo
 }
 
 // NewStaticAutoscaler creates an instance of Autoscaler filled with provided parameters
@@ -101,7 +101,7 @@ func NewStaticAutoscaler(
 scaleDown: scaleDown,
 processors: processors,
 clusterStateRegistry: clusterStateRegistry,
-nodeInfoCache: make(map[string]*schedulercache.NodeInfo),
+nodeInfoCache: make(map[string]*schedulernodeinfo.NodeInfo),
 }
 }
 
@@ -536,7 +536,7 @@ func (a *StaticAutoscaler) actOnEmptyCluster(allNodes, readyNodes []*apiv1.Node,
 return false
 }
 
-func (a *StaticAutoscaler) updateClusterState(allNodes []*apiv1.Node, nodeInfosForGroups map[string]*schedulercache.NodeInfo, currentTime time.Time) errors.AutoscalerError {
+func (a *StaticAutoscaler) updateClusterState(allNodes []*apiv1.Node, nodeInfosForGroups map[string]*schedulernodeinfo.NodeInfo, currentTime time.Time) errors.AutoscalerError {
 err := a.AutoscalingContext.CloudProvider.Refresh()
 if err != nil {
 klog.Errorf("Failed to refresh cloud provider config: %v", err)

@@ -39,7 +39,7 @@ import (
 "k8s.io/apimachinery/pkg/labels"
 "k8s.io/client-go/kubernetes/fake"
 v1appslister "k8s.io/client-go/listers/apps/v1"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 
 "github.com/stretchr/testify/assert"
 "github.com/stretchr/testify/mock"
@@ -158,7 +158,7 @@ func TestStaticAutoscalerRunOnce(t *testing.T) {
 p2 := BuildTestPod("p2", 600, 100)
 
 tn := BuildTestNode("tn", 1000, 1000)
-tni := schedulercache.NewNodeInfo()
+tni := schedulernodeinfo.NewNodeInfo()
 tni.SetNode(tn)
 
 provider := testprovider.NewTestAutoprovisioningCloudProvider(
@@ -168,7 +168,7 @@ func TestStaticAutoscalerRunOnce(t *testing.T) {
 return onScaleDownMock.ScaleDown(id, name)
 },
 nil, nil,
-nil, map[string]*schedulercache.NodeInfo{"ng1": tni})
+nil, map[string]*schedulernodeinfo.NodeInfo{"ng1": tni})
 provider.AddNodeGroup("ng1", 1, 10, 1)
 provider.AddNode("ng1", n1)
 ng1 := reflect.ValueOf(provider.GetNodeGroup("ng1")).Interface().(*testprovider.TestNodeGroup)
@@ -324,15 +324,15 @@ func TestStaticAutoscalerRunOnceWithAutoprovisionedEnabled(t *testing.T) {
 
 tn1 := BuildTestNode("tn1", 100, 1000)
 SetNodeReadyState(tn1, true, time.Now())
-tni1 := schedulercache.NewNodeInfo()
+tni1 := schedulernodeinfo.NewNodeInfo()
 tni1.SetNode(tn1)
 tn2 := BuildTestNode("tn2", 1000, 1000)
 SetNodeReadyState(tn2, true, time.Now())
-tni2 := schedulercache.NewNodeInfo()
+tni2 := schedulernodeinfo.NewNodeInfo()
 tni2.SetNode(tn2)
 tn3 := BuildTestNode("tn3", 100, 1000)
 SetNodeReadyState(tn2, true, time.Now())
-tni3 := schedulercache.NewNodeInfo()
+tni3 := schedulernodeinfo.NewNodeInfo()
 tni3.SetNode(tn3)
 
 provider := testprovider.NewTestAutoprovisioningCloudProvider(
@@ -345,7 +345,7 @@ func TestStaticAutoscalerRunOnceWithAutoprovisionedEnabled(t *testing.T) {
 }, func(id string) error {
 return onNodeGroupDeleteMock.Delete(id)
 },
-[]string{"TN1", "TN2"}, map[string]*schedulercache.NodeInfo{"TN1": tni1, "TN2": tni2, "ng1": tni3})
+[]string{"TN1", "TN2"}, map[string]*schedulernodeinfo.NodeInfo{"TN1": tni1, "TN2": tni2, "ng1": tni3})
 provider.AddNodeGroup("ng1", 1, 10, 1)
 provider.AddAutoprovisionedNodeGroup("autoprovisioned-TN1", 0, 10, 0, "TN1")
 autoprovisionedTN1 := reflect.ValueOf(provider.GetNodeGroup("autoprovisioned-TN1")).Interface().(*testprovider.TestNodeGroup)

@@ -44,7 +44,7 @@ import (
 apiv1 "k8s.io/api/core/v1"
 apiequality "k8s.io/apimachinery/pkg/api/equality"
 kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 
 "k8s.io/klog"
 )
@@ -209,7 +209,7 @@ func filterOutExpendablePods(pods []*apiv1.Pod, expendablePodsPriorityCutoff int
 }
 
 // checkPodsSchedulableOnNode checks if pods can be scheduled on the given node.
-func checkPodsSchedulableOnNode(context *context.AutoscalingContext, pods []*apiv1.Pod, nodeGroupId string, nodeInfo *schedulercache.NodeInfo) map[*apiv1.Pod]*simulator.PredicateError {
+func checkPodsSchedulableOnNode(context *context.AutoscalingContext, pods []*apiv1.Pod, nodeGroupId string, nodeInfo *schedulernodeinfo.NodeInfo) map[*apiv1.Pod]*simulator.PredicateError {
 schedulingErrors := map[*apiv1.Pod]*simulator.PredicateError{}
 loggingQuota := glogx.PodsLoggingQuota()
 podSchedulable := make(podSchedulableMap)
@@ -248,9 +248,9 @@ func checkPodsSchedulableOnNode(context *context.AutoscalingContext, pods []*api
 // TODO(mwielgus): This returns map keyed by url, while most code (including scheduler) uses node.Name for a key.
 //
 // TODO(mwielgus): Review error policy - sometimes we may continue with partial errors.
-func getNodeInfosForGroups(nodes []*apiv1.Node, nodeInfoCache map[string]*schedulercache.NodeInfo, cloudProvider cloudprovider.CloudProvider, listers kube_util.ListerRegistry,
-daemonsets []*appsv1.DaemonSet, predicateChecker *simulator.PredicateChecker) (map[string]*schedulercache.NodeInfo, errors.AutoscalerError) {
-result := make(map[string]*schedulercache.NodeInfo)
+func getNodeInfosForGroups(nodes []*apiv1.Node, nodeInfoCache map[string]*schedulernodeinfo.NodeInfo, cloudProvider cloudprovider.CloudProvider, listers kube_util.ListerRegistry,
+daemonsets []*appsv1.DaemonSet, predicateChecker *simulator.PredicateChecker) (map[string]*schedulernodeinfo.NodeInfo, errors.AutoscalerError) {
+result := make(map[string]*schedulernodeinfo.NodeInfo)
 seenGroups := make(map[string]bool)
 
 // processNode returns information whether the nodeTemplate was generated and if there was an error.
@@ -286,7 +286,7 @@ func getNodeInfosForGroups(nodes []*apiv1.Node, nodeInfoCache map[string]*schedu
 }
 added, id, typedErr := processNode(node)
 if typedErr != nil {
-return map[string]*schedulercache.NodeInfo{}, typedErr
+return map[string]*schedulernodeinfo.NodeInfo{}, typedErr
 }
 if added && nodeInfoCache != nil {
 if nodeInfoCopy, err := deepCopyNodeInfo(result[id]); err == nil {
@@ -319,7 +319,7 @@ func getNodeInfosForGroups(nodes []*apiv1.Node, nodeInfoCache map[string]*schedu
 continue
 } else {
 klog.Errorf("Unable to build proper template node for %s: %v", id, err)
-return map[string]*schedulercache.NodeInfo{}, errors.ToAutoscalerError(errors.CloudProviderError, err)
+return map[string]*schedulernodeinfo.NodeInfo{}, errors.ToAutoscalerError(errors.CloudProviderError, err)
 }
 }
 result[id] = nodeInfo
@@ -338,11 +338,11 @@ func getNodeInfosForGroups(nodes []*apiv1.Node, nodeInfoCache map[string]*schedu
 if !kube_util.IsNodeReadyAndSchedulable(node) {
 added, _, typedErr := processNode(node)
 if typedErr != nil {
-return map[string]*schedulercache.NodeInfo{}, typedErr
+return map[string]*schedulernodeinfo.NodeInfo{}, typedErr
 }
 nodeGroup, err := cloudProvider.NodeGroupForNode(node)
 if err != nil {
-return map[string]*schedulercache.NodeInfo{}, errors.ToAutoscalerError(
+return map[string]*schedulernodeinfo.NodeInfo{}, errors.ToAutoscalerError(
 errors.CloudProviderError, err)
 }
 if added {
@@ -355,7 +355,7 @@ func getNodeInfosForGroups(nodes []*apiv1.Node, nodeInfoCache map[string]*schedu
 }
 
 // getNodeInfoFromTemplate returns NodeInfo object built base on TemplateNodeInfo returned by NodeGroup.TemplateNodeInfo().
-func getNodeInfoFromTemplate(nodeGroup cloudprovider.NodeGroup, daemonsets []*appsv1.DaemonSet, predicateChecker *simulator.PredicateChecker) (*schedulercache.NodeInfo, errors.AutoscalerError) {
+func getNodeInfoFromTemplate(nodeGroup cloudprovider.NodeGroup, daemonsets []*appsv1.DaemonSet, predicateChecker *simulator.PredicateChecker) (*schedulernodeinfo.NodeInfo, errors.AutoscalerError) {
 id := nodeGroup.Id()
 baseNodeInfo, err := nodeGroup.TemplateNodeInfo()
 if err != nil {
@@ -364,7 +364,7 @@ func getNodeInfoFromTemplate(nodeGroup cloudprovider.NodeGroup, daemonsets []*ap
 
 pods := daemonset.GetDaemonSetPodsForNode(baseNodeInfo, daemonsets, predicateChecker)
 pods = append(pods, baseNodeInfo.Pods()...)
-fullNodeInfo := schedulercache.NewNodeInfo(pods...)
+fullNodeInfo := schedulernodeinfo.NewNodeInfo(pods...)
 fullNodeInfo.SetNode(baseNodeInfo.Node())
 sanitizedNodeInfo, typedErr := sanitizeNodeInfo(fullNodeInfo, id)
 if typedErr != nil {
@@ -390,21 +390,21 @@ func filterOutNodesFromNotAutoscaledGroups(nodes []*apiv1.Node, cloudProvider cl
 return result, nil
 }
 
-func deepCopyNodeInfo(nodeInfo *schedulercache.NodeInfo) (*schedulercache.NodeInfo, errors.AutoscalerError) {
+func deepCopyNodeInfo(nodeInfo *schedulernodeinfo.NodeInfo) (*schedulernodeinfo.NodeInfo, errors.AutoscalerError) {
 newPods := make([]*apiv1.Pod, 0)
 for _, pod := range nodeInfo.Pods() {
 newPods = append(newPods, pod.DeepCopy())
 }
 
 // Build a new node info.
-newNodeInfo := schedulercache.NewNodeInfo(newPods...)
+newNodeInfo := schedulernodeinfo.NewNodeInfo(newPods...)
 if err := newNodeInfo.SetNode(nodeInfo.Node().DeepCopy()); err != nil {
 return nil, errors.ToAutoscalerError(errors.InternalError, err)
 }
 return newNodeInfo, nil
 }
 
-func sanitizeNodeInfo(nodeInfo *schedulercache.NodeInfo, nodeGroupName string) (*schedulercache.NodeInfo, errors.AutoscalerError) {
+func sanitizeNodeInfo(nodeInfo *schedulernodeinfo.NodeInfo, nodeGroupName string) (*schedulernodeinfo.NodeInfo, errors.AutoscalerError) {
 // Sanitize node name.
 sanitizedNode, err := sanitizeTemplateNode(nodeInfo.Node(), nodeGroupName)
 if err != nil {
@@ -420,7 +420,7 @@ func sanitizeNodeInfo(nodeInfo *schedulercache.NodeInfo, nodeGroupName string) (
 }
 
 // Build a new node info.
-sanitizedNodeInfo := schedulercache.NewNodeInfo(sanitizedPods...)
+sanitizedNodeInfo := schedulernodeinfo.NewNodeInfo(sanitizedPods...)
 if err := sanitizedNodeInfo.SetNode(sanitizedNode); err != nil {
 return nil, errors.ToAutoscalerError(errors.InternalError, err)
 }

@@ -40,7 +40,7 @@ import (
 kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
 
 "github.com/stretchr/testify/assert"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
 
 const MiB = 1024 * 1024
@@ -354,7 +354,7 @@ func TestFilterSchedulablePodsForNode(t *testing.T) {
 
 tn := BuildTestNode("T1-abc", 2000, 2000000)
 SetNodeReadyState(tn, true, time.Time{})
-tni := schedulercache.NewNodeInfo()
+tni := schedulernodeinfo.NewNodeInfo()
 tni.SetNode(tn)
 
 context := &context.AutoscalingContext{
@@ -389,13 +389,13 @@ func TestGetNodeInfosForGroups(t *testing.T) {
 SetNodeReadyState(unready4, false, time.Now())
 
 tn := BuildTestNode("tn", 5000, 5000)
-tni := schedulercache.NewNodeInfo()
+tni := schedulernodeinfo.NewNodeInfo()
 tni.SetNode(tn)
 
 // Cloud provider with TemplateNodeInfo implemented.
 provider1 := testprovider.NewTestAutoprovisioningCloudProvider(
 nil, nil, nil, nil, nil,
-map[string]*schedulercache.NodeInfo{"ng3": tni, "ng4": tni})
+map[string]*schedulernodeinfo.NodeInfo{"ng3": tni, "ng4": tni})
 provider1.AddNodeGroup("ng1", 1, 10, 1) // Nodegroup with ready node.
 provider1.AddNode("ng1", ready1)
 provider1.AddNodeGroup("ng2", 1, 10, 1) // Nodegroup with ready and unready node.
@@ -453,7 +453,7 @@ func TestGetNodeInfosForGroupsCache(t *testing.T) {
 SetNodeReadyState(ready6, true, time.Now())
 
 tn := BuildTestNode("tn", 10000, 10000)
-tni := schedulercache.NewNodeInfo()
+tni := schedulernodeinfo.NewNodeInfo()
 tni.SetNode(tn)
 
 lastDeletedGroup := ""
@@ -465,7 +465,7 @@ func TestGetNodeInfosForGroupsCache(t *testing.T) {
 // Cloud provider with TemplateNodeInfo implemented.
 provider1 := testprovider.NewTestAutoprovisioningCloudProvider(
 nil, nil, nil, onDeleteGroup, nil,
-map[string]*schedulercache.NodeInfo{"ng3": tni, "ng4": tni})
+map[string]*schedulernodeinfo.NodeInfo{"ng3": tni, "ng4": tni})
 provider1.AddNodeGroup("ng1", 1, 10, 1) // Nodegroup with ready node.
 provider1.AddNode("ng1", ready1)
 provider1.AddNodeGroup("ng2", 1, 10, 1) // Nodegroup with ready and unready node.
@@ -482,7 +482,7 @@ func TestGetNodeInfosForGroupsCache(t *testing.T) {
 
 predicateChecker := simulator.NewTestPredicateChecker()
 
-nodeInfoCache := make(map[string]*schedulercache.NodeInfo)
+nodeInfoCache := make(map[string]*schedulernodeinfo.NodeInfo)
 
 // Fill cache
 res, err := getNodeInfosForGroups([]*apiv1.Node{unready4, unready3, ready2, ready1}, nodeInfoCache,
@@ -540,10 +540,10 @@ func TestGetNodeInfosForGroupsCache(t *testing.T) {
 assert.False(t, found)
 
 // Fill cache manually
-infoNg4Node6 := schedulercache.NewNodeInfo()
+infoNg4Node6 := schedulernodeinfo.NewNodeInfo()
 err2 := infoNg4Node6.SetNode(ready6.DeepCopy())
 assert.NoError(t, err2)
-nodeInfoCache = map[string]*schedulercache.NodeInfo{"ng4": infoNg4Node6}
+nodeInfoCache = map[string]*schedulernodeinfo.NodeInfo{"ng4": infoNg4Node6}
 // Check if cache was used
 res, err = getNodeInfosForGroups([]*apiv1.Node{ready1, ready2}, nodeInfoCache,
 provider1, registry, []*appsv1.DaemonSet{}, predicateChecker)
@@ -617,7 +617,7 @@ func TestSanitizeNodeInfo(t *testing.T) {
 
 node := BuildTestNode("node", 1000, 1000)
 
-nodeInfo := schedulercache.NewNodeInfo(pod)
+nodeInfo := schedulernodeinfo.NewNodeInfo(pod)
 nodeInfo.SetNode(node)
 
 res, err := sanitizeNodeInfo(nodeInfo, "test-group")

@@ -24,7 +24,7 @@ import (
 apiv1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/resource"
 "k8s.io/klog"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
 
 const basicEstimatorDeprecationMessage = "WARNING: basic estimator is deprecated. It will be removed in Cluster Autoscaler 1.5."
@@ -98,7 +98,7 @@ func (basicEstimator *BasicNodeEstimator) GetDebug() string {
 }
 
 // Estimate estimates the number needed of nodes of the given shape.
-func (basicEstimator *BasicNodeEstimator) Estimate(pods []*apiv1.Pod, nodeInfo *schedulercache.NodeInfo, upcomingNodes []*schedulercache.NodeInfo) int {
+func (basicEstimator *BasicNodeEstimator) Estimate(pods []*apiv1.Pod, nodeInfo *schedulernodeinfo.NodeInfo, upcomingNodes []*schedulernodeinfo.NodeInfo) int {
 for _, pod := range pods {
 basicEstimator.Add(pod)
 }

@@ -22,7 +22,7 @@ import (
 apiv1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/resource"
 "k8s.io/autoscaler/cluster-autoscaler/utils/units"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 
 "github.com/stretchr/testify/assert"
 )
@@ -64,11 +64,11 @@ func TestEstimate(t *testing.T) {
 },
 },
 }
-nodeInfo := schedulercache.NewNodeInfo()
+nodeInfo := schedulernodeinfo.NewNodeInfo()
 nodeInfo.SetNode(node)
 
 estimator := NewBasicNodeEstimator()
-estimate := estimator.Estimate(pods, nodeInfo, []*schedulercache.NodeInfo{})
+estimate := estimator.Estimate(pods, nodeInfo, []*schedulernodeinfo.NodeInfo{})
 
 // Check result.
 assert.Equal(t, 3, estimate)
@@ -101,11 +101,11 @@ func TestEstimateWithComing(t *testing.T) {
 },
 }
 node.Status.Allocatable = node.Status.Capacity
-nodeInfo := schedulercache.NewNodeInfo()
+nodeInfo := schedulernodeinfo.NewNodeInfo()
 nodeInfo.SetNode(node)
 
 estimator := NewBasicNodeEstimator()
-estimate := estimator.Estimate(pods, nodeInfo, []*schedulercache.NodeInfo{nodeInfo, nodeInfo})
+estimate := estimator.Estimate(pods, nodeInfo, []*schedulernodeinfo.NodeInfo{nodeInfo, nodeInfo})
 
 // Check result.
 assert.Equal(t, 1, estimate)
@@ -142,11 +142,11 @@ func TestEstimateWithPorts(t *testing.T) {
 },
 },
 }
-nodeInfo := schedulercache.NewNodeInfo()
+nodeInfo := schedulernodeinfo.NewNodeInfo()
 nodeInfo.SetNode(node)
 
 estimator := NewBasicNodeEstimator()
-estimate := estimator.Estimate(pods, nodeInfo, []*schedulercache.NodeInfo{})
+estimate := estimator.Estimate(pods, nodeInfo, []*schedulernodeinfo.NodeInfo{})
 assert.Contains(t, estimator.GetDebug(), "CPU")
 assert.Equal(t, 5, estimate)
 }

@@ -23,7 +23,7 @@ import (
 "k8s.io/apimachinery/pkg/api/resource"
 "k8s.io/autoscaler/cluster-autoscaler/simulator"
 schedulerUtils "k8s.io/autoscaler/cluster-autoscaler/utils/scheduler"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
 
 // podInfo contains Pod and score that corresponds to how important it is to handle the pod first.
@@ -51,13 +51,13 @@ func NewBinpackingNodeEstimator(predicateChecker *simulator.PredicateChecker) *B
 // still be maintained.
 // It is assumed that all pods from the given list can fit to nodeTemplate.
 // Returns the number of nodes needed to accommodate all pods from the list.
-func (estimator *BinpackingNodeEstimator) Estimate(pods []*apiv1.Pod, nodeTemplate *schedulercache.NodeInfo,
-upcomingNodes []*schedulercache.NodeInfo) int {
+func (estimator *BinpackingNodeEstimator) Estimate(pods []*apiv1.Pod, nodeTemplate *schedulernodeinfo.NodeInfo,
+upcomingNodes []*schedulernodeinfo.NodeInfo) int {
 
 podInfos := calculatePodScore(pods, nodeTemplate)
 sort.Slice(podInfos, func(i, j int) bool { return podInfos[i].score > podInfos[j].score })
 
-newNodes := make([]*schedulercache.NodeInfo, 0)
+newNodes := make([]*schedulernodeinfo.NodeInfo, 0)
 newNodes = append(newNodes, upcomingNodes...)
 
 for _, podInfo := range podInfos {
@@ -79,7 +79,7 @@ func (estimator *BinpackingNodeEstimator) Estimate(pods []*apiv1.Pod, nodeTempla
 // Calculates score for all pods and returns podInfo structure.
 // Score is defined as cpu_sum/node_capacity + mem_sum/node_capacity.
 // Pods that have bigger requirements should be processed first, thus have higher scores.
-func calculatePodScore(pods []*apiv1.Pod, nodeTemplate *schedulercache.NodeInfo) []*podInfo {
+func calculatePodScore(pods []*apiv1.Pod, nodeTemplate *schedulernodeinfo.NodeInfo) []*podInfo {
 podInfos := make([]*podInfo, 0, len(pods))
 
 for _, pod := range pods {

@@ -25,7 +25,7 @@ import (
 "k8s.io/autoscaler/cluster-autoscaler/simulator"
 . "k8s.io/autoscaler/cluster-autoscaler/utils/test"
 "k8s.io/autoscaler/cluster-autoscaler/utils/units"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 
 "github.com/stretchr/testify/assert"
 )
@@ -53,9 +53,9 @@ func TestBinpackingEstimate(t *testing.T) {
 node.Status.Allocatable = node.Status.Capacity
 SetNodeReadyState(node, true, time.Time{})
 
-nodeInfo := schedulercache.NewNodeInfo()
+nodeInfo := schedulernodeinfo.NewNodeInfo()
 nodeInfo.SetNode(node)
-estimate := estimator.Estimate(pods, nodeInfo, []*schedulercache.NodeInfo{})
+estimate := estimator.Estimate(pods, nodeInfo, []*schedulernodeinfo.NodeInfo{})
 assert.Equal(t, 5, estimate)
 }
 
@@ -82,9 +82,9 @@ func TestBinpackingEstimateComingNodes(t *testing.T) {
 node.Status.Allocatable = node.Status.Capacity
 SetNodeReadyState(node, true, time.Time{})
 
-nodeInfo := schedulercache.NewNodeInfo()
+nodeInfo := schedulernodeinfo.NewNodeInfo()
 nodeInfo.SetNode(node)
-estimate := estimator.Estimate(pods, nodeInfo, []*schedulercache.NodeInfo{nodeInfo, nodeInfo})
+estimate := estimator.Estimate(pods, nodeInfo, []*schedulernodeinfo.NodeInfo{nodeInfo, nodeInfo})
 // 5 - 2 nodes that are coming.
 assert.Equal(t, 3, estimate)
 }
@@ -116,8 +116,8 @@ func TestBinpackingEstimateWithPorts(t *testing.T) {
 node.Status.Allocatable = node.Status.Capacity
 SetNodeReadyState(node, true, time.Time{})
 
-nodeInfo := schedulercache.NewNodeInfo()
+nodeInfo := schedulernodeinfo.NewNodeInfo()
 nodeInfo.SetNode(node)
-estimate := estimator.Estimate(pods, nodeInfo, []*schedulercache.NodeInfo{})
+estimate := estimator.Estimate(pods, nodeInfo, []*schedulernodeinfo.NodeInfo{})
 assert.Equal(t, 8, estimate)
 }

@@ -22,7 +22,7 @@ import (
 apiv1 "k8s.io/api/core/v1"
 "k8s.io/autoscaler/cluster-autoscaler/simulator"
 "k8s.io/klog"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
 
 const (
@@ -41,7 +41,7 @@ var AvailableEstimators = []string{BinpackingEstimatorName, deprecated(BasicEsti
 
 // Estimator calculates the number of nodes of given type needed to schedule pods.
 type Estimator interface {
-Estimate([]*apiv1.Pod, *schedulercache.NodeInfo, []*schedulercache.NodeInfo) int
+Estimate([]*apiv1.Pod, *schedulernodeinfo.NodeInfo, []*schedulernodeinfo.NodeInfo) int
 }
 
 // EstimatorBuilder creates a new estimator object.

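The renamed Estimator interface above can be satisfied as in the sketch below; the one-pod-per-node policy is purely illustrative and not an estimator that exists in the repository.

```go
// onePodPerNodeEstimator is a toy Estimator used only to illustrate the renamed
// signature: one new node per pending pod, minus the nodes already upcoming.
type onePodPerNodeEstimator struct{}

func (e *onePodPerNodeEstimator) Estimate(pods []*apiv1.Pod, nodeTemplate *schedulernodeinfo.NodeInfo,
	upcomingNodes []*schedulernodeinfo.NodeInfo) int {
	needed := len(pods) - len(upcomingNodes)
	if needed < 0 {
		return 0
	}
	return needed
}
```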
@@ -19,7 +19,7 @@ package expander
 import (
 apiv1 "k8s.io/api/core/v1"
 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
 
 var (
@@ -46,5 +46,5 @@ type Option struct {
 
 // Strategy describes an interface for selecting the best option when scaling up
 type Strategy interface {
-BestOption(options []Option, nodeInfo map[string]*schedulercache.NodeInfo) *Option
+BestOption(options []Option, nodeInfo map[string]*schedulernodeinfo.NodeInfo) *Option
 }

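A minimal Strategy against the renamed BestOption signature could look like this sketch, written as if inside the expander package; firstOption is hypothetical and ignores the per-group NodeInfo map entirely.

```go
type firstOption struct{}

// BestOption returns the first expansion option, if any; the nodeInfo map is
// keyed by node group id and now holds *schedulernodeinfo.NodeInfo values.
func (f *firstOption) BestOption(options []Option, nodeInfo map[string]*schedulernodeinfo.NodeInfo) *Option {
	if len(options) == 0 {
		return nil
	}
	return &options[0]
}
```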
@@ -19,7 +19,7 @@ package mostpods
 import (
 "k8s.io/autoscaler/cluster-autoscaler/expander"
 "k8s.io/autoscaler/cluster-autoscaler/expander/random"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
 
 type mostpods struct {
@@ -32,7 +32,7 @@ func NewStrategy() expander.Strategy {
 }
 
 // BestOption Selects the expansion option that schedules the most pods
-func (m *mostpods) BestOption(expansionOptions []expander.Option, nodeInfo map[string]*schedulercache.NodeInfo) *expander.Option {
+func (m *mostpods) BestOption(expansionOptions []expander.Option, nodeInfo map[string]*schedulernodeinfo.NodeInfo) *expander.Option {
 var maxPods int
 var maxOptions []expander.Option
 

@@ -28,7 +28,7 @@ import (
 "k8s.io/autoscaler/cluster-autoscaler/expander"
 "k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
 "k8s.io/autoscaler/cluster-autoscaler/utils/units"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 
 "k8s.io/klog"
 )
@@ -87,7 +87,7 @@ func NewStrategy(pricingModel cloudprovider.PricingModel,
 }
 
 // BestOption selects option based on cost and preferred node type.
-func (p *priceBased) BestOption(expansionOptions []expander.Option, nodeInfos map[string]*schedulercache.NodeInfo) *expander.Option {
+func (p *priceBased) BestOption(expansionOptions []expander.Option, nodeInfos map[string]*schedulernodeinfo.NodeInfo) *expander.Option {
 var bestOption *expander.Option
 bestOptionScore := 0.0
 now := time.Now()

@@ -27,7 +27,7 @@ import (
 apiv1 "k8s.io/api/core/v1"
 testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test"
 . "k8s.io/autoscaler/cluster-autoscaler/utils/test"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 
 "github.com/stretchr/testify/assert"
 )
@@ -76,13 +76,13 @@ func TestPriceExpander(t *testing.T) {
 ng2, _ := provider.NodeGroupForNode(n2)
 ng3, _ := provider.NewNodeGroup("MT1", nil, nil, nil, nil)
 
-ni1 := schedulercache.NewNodeInfo()
+ni1 := schedulernodeinfo.NewNodeInfo()
 ni1.SetNode(n1)
-ni2 := schedulercache.NewNodeInfo()
+ni2 := schedulernodeinfo.NewNodeInfo()
 ni2.SetNode(n2)
-ni3 := schedulercache.NewNodeInfo()
+ni3 := schedulernodeinfo.NewNodeInfo()
 ni3.SetNode(n3)
-nodeInfosForGroups := map[string]*schedulercache.NodeInfo{
+nodeInfosForGroups := map[string]*schedulernodeinfo.NodeInfo{
 "ng1": ni1, "ng2": ni2,
 }
 

@@ -20,7 +20,7 @@ import (
 "math/rand"
 
 "k8s.io/autoscaler/cluster-autoscaler/expander"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
 
 type random struct {
@@ -32,7 +32,7 @@ func NewStrategy() expander.Strategy {
 }
 
 // RandomExpansion Selects from the expansion options at random
-func (r *random) BestOption(expansionOptions []expander.Option, nodeInfo map[string]*schedulercache.NodeInfo) *expander.Option {
+func (r *random) BestOption(expansionOptions []expander.Option, nodeInfo map[string]*schedulernodeinfo.NodeInfo) *expander.Option {
 if len(expansionOptions) <= 0 {
 return nil
 }

@@ -22,7 +22,7 @@ import (
 "k8s.io/autoscaler/cluster-autoscaler/expander"
 "k8s.io/autoscaler/cluster-autoscaler/expander/random"
 "k8s.io/klog"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
 
 type leastwaste struct {
@@ -35,7 +35,7 @@ func NewStrategy() expander.Strategy {
 }
 
 // BestOption Finds the option that wastes the least fraction of CPU and Memory
-func (l *leastwaste) BestOption(expansionOptions []expander.Option, nodeInfo map[string]*schedulercache.NodeInfo) *expander.Option {
+func (l *leastwaste) BestOption(expansionOptions []expander.Option, nodeInfo map[string]*schedulernodeinfo.NodeInfo) *expander.Option {
 var leastWastedScore float64
 var leastWastedOptions []expander.Option
 

@@ -27,7 +27,7 @@ import (
 apiv1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/resource"
 "k8s.io/autoscaler/cluster-autoscaler/expander"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
 
 type FakeNodeGroup struct {
@@ -45,7 +45,7 @@ func (f *FakeNodeGroup) Debug() string { return f.id }
 func (f *FakeNodeGroup) Nodes() ([]cloudprovider.Instance, error) {
 return []cloudprovider.Instance{}, nil
 }
-func (f *FakeNodeGroup) TemplateNodeInfo() (*schedulercache.NodeInfo, error) {
+func (f *FakeNodeGroup) TemplateNodeInfo() (*schedulernodeinfo.NodeInfo, error) {
 return nil, cloudprovider.ErrNotImplemented
 }
 func (f *FakeNodeGroup) Exist() bool { return true }
@@ -55,7 +55,7 @@ func (f *FakeNodeGroup) Create() (cloudprovider.NodeGroup, error) {
 func (f *FakeNodeGroup) Delete() error { return cloudprovider.ErrNotImplemented }
 func (f *FakeNodeGroup) Autoprovisioned() bool { return false }
 
-func makeNodeInfo(cpu int64, memory int64, pods int64) *schedulercache.NodeInfo {
+func makeNodeInfo(cpu int64, memory int64, pods int64) *schedulernodeinfo.NodeInfo {
 node := &apiv1.Node{
 Status: apiv1.NodeStatus{
 Capacity: apiv1.ResourceList{
@@ -68,7 +68,7 @@ func makeNodeInfo(cpu int64, memory int64, pods int64) *schedulercache.NodeInfo
 node.Status.Allocatable = node.Status.Capacity
 SetNodeReadyState(node, true, time.Time{})
 
-nodeInfo := schedulercache.NewNodeInfo()
+nodeInfo := schedulernodeinfo.NewNodeInfo()
 nodeInfo.SetNode(node)
 
 return nodeInfo
@@ -79,7 +79,7 @@ func TestLeastWaste(t *testing.T) {
 memoryPerPod := int64(1000 * 1024 * 1024)
 e := NewStrategy()
 balancedNodeInfo := makeNodeInfo(16*cpuPerPod, 16*memoryPerPod, 100)
-nodeMap := map[string]*schedulercache.NodeInfo{"balanced": balancedNodeInfo}
+nodeMap := map[string]*schedulernodeinfo.NodeInfo{"balanced": balancedNodeInfo}
 balancedOption := expander.Option{NodeGroup: &FakeNodeGroup{"balanced"}, NodeCount: 1}
 
 // Test without any pods, one node info

@@ -20,14 +20,14 @@ import (
 apiv1 "k8s.io/api/core/v1"
 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 "k8s.io/autoscaler/cluster-autoscaler/context"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
 
 // NodeGroupListProcessor processes lists of NodeGroups considered in scale-up.
 type NodeGroupListProcessor interface {
 Process(context *context.AutoscalingContext, nodeGroups []cloudprovider.NodeGroup,
-nodeInfos map[string]*schedulercache.NodeInfo,
-unschedulablePods []*apiv1.Pod) ([]cloudprovider.NodeGroup, map[string]*schedulercache.NodeInfo, error)
+nodeInfos map[string]*schedulernodeinfo.NodeInfo,
+unschedulablePods []*apiv1.Pod) ([]cloudprovider.NodeGroup, map[string]*schedulernodeinfo.NodeInfo, error)
 CleanUp()
 }
 
@@ -41,8 +41,8 @@ func NewDefaultNodeGroupListProcessor() NodeGroupListProcessor {
 }
 
 // Process processes lists of unschedulable and scheduled pods before scaling of the cluster.
-func (p *NoOpNodeGroupListProcessor) Process(context *context.AutoscalingContext, nodeGroups []cloudprovider.NodeGroup, nodeInfos map[string]*schedulercache.NodeInfo,
-unschedulablePods []*apiv1.Pod) ([]cloudprovider.NodeGroup, map[string]*schedulercache.NodeInfo, error) {
+func (p *NoOpNodeGroupListProcessor) Process(context *context.AutoscalingContext, nodeGroups []cloudprovider.NodeGroup, nodeInfos map[string]*schedulernodeinfo.NodeInfo,
+unschedulablePods []*apiv1.Pod) ([]cloudprovider.NodeGroup, map[string]*schedulernodeinfo.NodeInfo, error) {
 return nodeGroups, nodeInfos, nil
 }
 

@@ -22,7 +22,7 @@ import (
 "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 "k8s.io/autoscaler/cluster-autoscaler/context"
 "k8s.io/autoscaler/cluster-autoscaler/utils/errors"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 
 "k8s.io/klog"
 )
@@ -35,7 +35,7 @@ type BalancingNodeGroupSetProcessor struct {
 // FindSimilarNodeGroups returns a list of NodeGroups similar to the given one.
 // Two groups are similar if the NodeInfos for them compare equal using IsNodeInfoSimilar.
 func (b *BalancingNodeGroupSetProcessor) FindSimilarNodeGroups(context *context.AutoscalingContext, nodeGroup cloudprovider.NodeGroup,
-nodeInfosForGroups map[string]*schedulercache.NodeInfo) ([]cloudprovider.NodeGroup, errors.AutoscalerError) {
+nodeInfosForGroups map[string]*schedulernodeinfo.NodeInfo) ([]cloudprovider.NodeGroup, errors.AutoscalerError) {
 
 result := []cloudprovider.NodeGroup{}
 nodeGroupId := nodeGroup.Id()

@ -23,7 +23,7 @@ import (
|
|||
testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/context"
|
||||
. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
|
||||
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
|
||||
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
|
@ -42,14 +42,14 @@ func basicSimilarNodeGroupsTest(t *testing.T, processor NodeGroupSetProcessor) {
|
|||
provider.AddNode("ng2", n2)
|
||||
provider.AddNode("ng3", n3)
|
||||
|
||||
ni1 := schedulercache.NewNodeInfo()
|
||||
ni1 := schedulernodeinfo.NewNodeInfo()
|
||||
ni1.SetNode(n1)
|
||||
ni2 := schedulercache.NewNodeInfo()
|
||||
ni2 := schedulernodeinfo.NewNodeInfo()
|
||||
ni2.SetNode(n2)
|
||||
ni3 := schedulercache.NewNodeInfo()
|
||||
ni3 := schedulernodeinfo.NewNodeInfo()
|
||||
ni3.SetNode(n3)
|
||||
|
||||
nodeInfosForGroups := map[string]*schedulercache.NodeInfo{
|
||||
nodeInfosForGroups := map[string]*schedulernodeinfo.NodeInfo{
|
||||
"ng1": ni1, "ng2": ni2, "ng3": ni3,
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -22,7 +22,7 @@ import (
|
|||
apiv1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
|
||||
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
|
||||
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
|
||||
)
|
||||
|
||||
const (
|
||||
|
|
@ -36,7 +36,7 @@ const (
|
|||
|
||||
// NodeInfoComparator is a function that tells if two nodes are from NodeGroups
|
||||
// similar enough to be considered a part of a single NodeGroupSet.
|
||||
type NodeInfoComparator func(n1, n2 *schedulercache.NodeInfo) bool
|
||||
type NodeInfoComparator func(n1, n2 *schedulernodeinfo.NodeInfo) bool
|
||||
|
||||
func compareResourceMapsWithTolerance(resources map[apiv1.ResourceName][]resource.Quantity,
|
||||
maxDifferenceRatio float64) bool {
|
||||
|
|
@ -58,11 +58,11 @@ func compareResourceMapsWithTolerance(resources map[apiv1.ResourceName][]resourc
|
|||
// somewhat arbitrary, but generally we check if resources provided by both nodes
|
||||
// are similar enough to likely be the same type of machine and if the set of labels
|
||||
// is the same (except for a pre-defined set of labels like hostname or zone).
|
||||
func IsNodeInfoSimilar(n1, n2 *schedulercache.NodeInfo) bool {
|
||||
func IsNodeInfoSimilar(n1, n2 *schedulernodeinfo.NodeInfo) bool {
|
||||
capacity := make(map[apiv1.ResourceName][]resource.Quantity)
|
||||
allocatable := make(map[apiv1.ResourceName][]resource.Quantity)
|
||||
free := make(map[apiv1.ResourceName][]resource.Quantity)
|
||||
nodes := []*schedulercache.NodeInfo{n1, n2}
|
||||
nodes := []*schedulernodeinfo.NodeInfo{n1, n2}
|
||||
for _, node := range nodes {
|
||||
for res, quantity := range node.Node().Status.Capacity {
|
||||
capacity[res] = append(capacity[res], quantity)
|
||||
|
|
|
|||
|
|
@ -24,7 +24,7 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
||||
. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
|
||||
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
|
||||
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
|
||||
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
|
@ -34,9 +34,9 @@ func checkNodesSimilar(t *testing.T, n1, n2 *apiv1.Node, comparator NodeInfoComp
|
|||
}
|
||||
|
||||
func checkNodesSimilarWithPods(t *testing.T, n1, n2 *apiv1.Node, pods1, pods2 []*apiv1.Pod, comparator NodeInfoComparator, shouldEqual bool) {
|
||||
ni1 := schedulercache.NewNodeInfo(pods1...)
|
||||
ni1 := schedulernodeinfo.NewNodeInfo(pods1...)
|
||||
ni1.SetNode(n1)
|
||||
ni2 := schedulercache.NewNodeInfo(pods2...)
|
||||
ni2 := schedulernodeinfo.NewNodeInfo(pods2...)
|
||||
ni2.SetNode(n2)
|
||||
assert.Equal(t, shouldEqual, comparator(ni1, ni2))
|
||||
}
|
||||
|
|
|
|||
|
|
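NodeInfoComparator is an ordinary function type, so IsNodeInfoSimilar can be passed around as a value and swapped for a provider-specific comparator. A small illustrative sketch, assuming it lives in the same nodegroupset package as the code above; groupsLookAlike is hypothetical:

```go
// groupsLookAlike wraps two plain nodes in NodeInfos and applies a pluggable comparator,
// e.g. groupsLookAlike(n1, n2, IsNodeInfoSimilar).
func groupsLookAlike(n1, n2 *apiv1.Node, comparator NodeInfoComparator) bool {
	ni1 := schedulernodeinfo.NewNodeInfo()
	ni1.SetNode(n1)
	ni2 := schedulernodeinfo.NewNodeInfo()
	ni2.SetNode(n2)
	return comparator(ni1, ni2)
}
```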
@ -17,13 +17,13 @@ limitations under the License.
package nodegroupset

import (
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// GkeNodepoolLabel is a label specifying GKE node pool particular node belongs to.
const GkeNodepoolLabel = "cloud.google.com/gke-nodepool"

func nodesFromSameGkeNodePool(n1, n2 *schedulercache.NodeInfo) bool {
func nodesFromSameGkeNodePool(n1, n2 *schedulernodeinfo.NodeInfo) bool {
n1GkeNodePool := n1.Node().Labels[GkeNodepoolLabel]
n2GkeNodePool := n2.Node().Labels[GkeNodepoolLabel]
return n1GkeNodePool != "" && n1GkeNodePool == n2GkeNodePool

@ -32,7 +32,7 @@ func nodesFromSameGkeNodePool(n1, n2 *schedulercache.NodeInfo) bool {
// IsGkeNodeInfoSimilar compares if two nodes should be considered part of the
// same NodeGroupSet. This is true if they either belong to the same GKE nodepool
// or match usual conditions checked by IsNodeInfoSimilar.
func IsGkeNodeInfoSimilar(n1, n2 *schedulercache.NodeInfo) bool {
func IsGkeNodeInfoSimilar(n1, n2 *schedulernodeinfo.NodeInfo) bool {
if nodesFromSameGkeNodePool(n1, n2) {
return true
}

@ -23,7 +23,7 @@ import (
testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test"
"k8s.io/autoscaler/cluster-autoscaler/context"
. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"

"github.com/stretchr/testify/assert"
)

@ -76,12 +76,12 @@ func TestFindSimilarNodeGroupsGkeByLabel(t *testing.T) {
provider.AddNode("ng1", n1)
provider.AddNode("ng2", n2)

ni1 := schedulercache.NewNodeInfo()
ni1 := schedulernodeinfo.NewNodeInfo()
ni1.SetNode(n1)
ni2 := schedulercache.NewNodeInfo()
ni2 := schedulernodeinfo.NewNodeInfo()
ni2.SetNode(n2)

nodeInfosForGroups := map[string]*schedulercache.NodeInfo{
nodeInfosForGroups := map[string]*schedulernodeinfo.NodeInfo{
"ng1": ni1, "ng2": ni2,
}

@ -22,7 +22,7 @@ import (
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
"k8s.io/autoscaler/cluster-autoscaler/context"
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// ScaleUpInfo contains information about planned scale-up of a single NodeGroup

@ -45,7 +45,7 @@ func (s ScaleUpInfo) String() string {
// NodeGroupSetProcessor finds nodegroups that are similar and allows balancing scale-up between them.
type NodeGroupSetProcessor interface {
FindSimilarNodeGroups(context *context.AutoscalingContext, nodeGroup cloudprovider.NodeGroup,
nodeInfosForGroups map[string]*schedulercache.NodeInfo) ([]cloudprovider.NodeGroup, errors.AutoscalerError)
nodeInfosForGroups map[string]*schedulernodeinfo.NodeInfo) ([]cloudprovider.NodeGroup, errors.AutoscalerError)

BalanceScaleUpBetweenGroups(context *context.AutoscalingContext, groups []cloudprovider.NodeGroup, newNodes int) ([]ScaleUpInfo, errors.AutoscalerError)
CleanUp()

@ -57,7 +57,7 @@ type NoOpNodeGroupSetProcessor struct {

// FindSimilarNodeGroups returns a list of NodeGroups similar to the one provided in parameter.
func (n *NoOpNodeGroupSetProcessor) FindSimilarNodeGroups(context *context.AutoscalingContext, nodeGroup cloudprovider.NodeGroup,
nodeInfosForGroups map[string]*schedulercache.NodeInfo) ([]cloudprovider.NodeGroup, errors.AutoscalerError) {
nodeInfosForGroups map[string]*schedulernodeinfo.NodeInfo) ([]cloudprovider.NodeGroup, errors.AutoscalerError) {
return []cloudprovider.NodeGroup{}, nil
}
@ -28,7 +28,7 @@ On Cluster Autoscaler startup kubemark Cloud Provider will parse the config pass
* `IncreaseSize(delta int)` - creation of #delta singleton Replication Controllers in external cluster with label `'autoscaling.k8s.io/nodegroup'=Name()`
* `DeleteNodes([]*apiv1.Node)` - removal of specified Replication Controllers
* `DecreaseTargetSize(delta int) error` - removal of Replication Controllers that have not yet been created
* `TemplateNodeInfo() (*schedulercache.NodeInfo, error)` - will return ErrNotImplemented
* `TemplateNodeInfo() (*schedulernodeinfo.NodeInfo, error)` - will return ErrNotImplemented
* `MaxSize()` - specified via config (`--nodes={MIN}:{MAX}:{NG_LABEL_VALUE}`)
* `MinSize()` - specified via config
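The bullet above keeps the documented behavior intact: the kubemark provider still returns cloudprovider.ErrNotImplemented, just with the renamed return type. A hypothetical sketch of such an opt-out implementation (the real kubemark provider code is not shown in this excerpt):

```go
package example

import (
	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// stubNodeGroup declines to provide a node template, mirroring the documented behavior.
type stubNodeGroup struct{}

func (s *stubNodeGroup) TemplateNodeInfo() (*schedulernodeinfo.NodeInfo, error) {
	return nil, cloudprovider.ErrNotImplemented
}
```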
@ -34,7 +34,7 @@ import (
policyv1 "k8s.io/api/policy/v1beta1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"

"k8s.io/klog"
)

@ -152,7 +152,7 @@ func FindEmptyNodesToRemove(candidates []*apiv1.Node, pods []*apiv1.Pod) []*apiv
// CalculateUtilization calculates utilization of a node, defined as maximum of (cpu, memory) utilization.
// Per resource utilization is the sum of requests for it divided by allocatable. It also returns the individual
// cpu and memory utilization.
func CalculateUtilization(node *apiv1.Node, nodeInfo *schedulercache.NodeInfo, skipDaemonSetPods, skipMirrorPods bool) (utilInfo UtilizationInfo, err error) {
func CalculateUtilization(node *apiv1.Node, nodeInfo *schedulernodeinfo.NodeInfo, skipDaemonSetPods, skipMirrorPods bool) (utilInfo UtilizationInfo, err error) {
cpu, err := calculateUtilizationOfResource(node, nodeInfo, apiv1.ResourceCPU, skipDaemonSetPods, skipMirrorPods)
if err != nil {
return UtilizationInfo{}, err

@ -164,7 +164,7 @@ func CalculateUtilization(node *apiv1.Node, nodeInfo *schedulercache.NodeInfo, s
return UtilizationInfo{CpuUtil: cpu, MemUtil: mem, Utilization: math.Max(cpu, mem)}, nil
}

func calculateUtilizationOfResource(node *apiv1.Node, nodeInfo *schedulercache.NodeInfo, resourceName apiv1.ResourceName, skipDaemonSetPods, skipMirrorPods bool) (float64, error) {
func calculateUtilizationOfResource(node *apiv1.Node, nodeInfo *schedulernodeinfo.NodeInfo, resourceName apiv1.ResourceName, skipDaemonSetPods, skipMirrorPods bool) (float64, error) {
nodeAllocatable, found := node.Status.Allocatable[resourceName]
if !found {
return 0, fmt.Errorf("failed to get %v from %s", resourceName, node.Name)

@ -192,11 +192,11 @@ func calculateUtilizationOfResource(node *apiv1.Node, nodeInfo *schedulercache.N
}

// TODO: We don't need to pass list of nodes here as they are already available in nodeInfos.
func findPlaceFor(removedNode string, pods []*apiv1.Pod, nodes []*apiv1.Node, nodeInfos map[string]*schedulercache.NodeInfo,
func findPlaceFor(removedNode string, pods []*apiv1.Pod, nodes []*apiv1.Node, nodeInfos map[string]*schedulernodeinfo.NodeInfo,
predicateChecker *PredicateChecker, oldHints map[string]string, newHints map[string]string, usageTracker *UsageTracker,
timestamp time.Time) error {

newNodeInfos := make(map[string]*schedulercache.NodeInfo)
newNodeInfos := make(map[string]*schedulernodeinfo.NodeInfo)
for k, v := range nodeInfos {
newNodeInfos[k] = v
}

@ -225,7 +225,7 @@ func findPlaceFor(removedNode string, pods []*apiv1.Pod, nodes []*apiv1.Node, no
klog.V(4).Infof("Pod %s/%s can be moved to %s", pod.Namespace, pod.Name, nodename)
podsOnNode := nodeInfo.Pods()
podsOnNode = append(podsOnNode, pod)
newNodeInfo := schedulercache.NewNodeInfo(podsOnNode...)
newNodeInfo := schedulernodeinfo.NewNodeInfo(podsOnNode...)
newNodeInfo.SetNode(nodeInfo.Node())
newNodeInfos[nodename] = newNodeInfo
newHints[podKey(pod)] = nodename
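CalculateUtilization keeps its signature apart from the NodeInfo parameter type, so callers only need the new import. A minimal sketch of the call pattern; printUtilization is illustrative, and the simulator import path is assumed from this repository's layout:

```go
package example

import (
	"fmt"

	apiv1 "k8s.io/api/core/v1"
	"k8s.io/autoscaler/cluster-autoscaler/simulator"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// printUtilization shows the call pattern after the rename: the NodeInfo handed
// to CalculateUtilization now comes from scheduler/nodeinfo.
func printUtilization(node *apiv1.Node, pods ...*apiv1.Pod) error {
	nodeInfo := schedulernodeinfo.NewNodeInfo(pods...)
	if err := nodeInfo.SetNode(node); err != nil {
		return err
	}
	utilInfo, err := simulator.CalculateUtilization(node, nodeInfo, false, false)
	if err != nil {
		return err
	}
	fmt.Printf("cpu=%.2f mem=%.2f max=%.2f\n", utilInfo.CpuUtil, utilInfo.MemUtil, utilInfo.Utilization)
	return nil
}
```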
@ -25,7 +25,7 @@ import (
policyv1 "k8s.io/api/policy/v1beta1"
. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
"k8s.io/kubernetes/pkg/kubelet/types"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"

"github.com/stretchr/testify/assert"
)

@ -34,7 +34,7 @@ func TestUtilization(t *testing.T) {
pod := BuildTestPod("p1", 100, 200000)
pod2 := BuildTestPod("p2", -1, -1)

nodeInfo := schedulercache.NewNodeInfo(pod, pod, pod2)
nodeInfo := schedulernodeinfo.NewNodeInfo(pod, pod, pod2)
node := BuildTestNode("node1", 2000, 2000000)
SetNodeReadyState(node, true, time.Time{})

@ -50,12 +50,12 @@ func TestUtilization(t *testing.T) {
daemonSetPod3 := BuildTestPod("p3", 100, 200000)
daemonSetPod3.OwnerReferences = GenerateOwnerReferences("ds", "DaemonSet", "apps/v1", "")

nodeInfo = schedulercache.NewNodeInfo(pod, pod, pod2, daemonSetPod3)
nodeInfo = schedulernodeinfo.NewNodeInfo(pod, pod, pod2, daemonSetPod3)
utilInfo, err = CalculateUtilization(node, nodeInfo, true, false)
assert.NoError(t, err)
assert.InEpsilon(t, 2.0/10, utilInfo.Utilization, 0.01)

nodeInfo = schedulercache.NewNodeInfo(pod, pod2, daemonSetPod3)
nodeInfo = schedulernodeinfo.NewNodeInfo(pod, pod2, daemonSetPod3)
utilInfo, err = CalculateUtilization(node, nodeInfo, false, false)
assert.NoError(t, err)
assert.InEpsilon(t, 2.0/10, utilInfo.Utilization, 0.01)

@ -65,12 +65,12 @@ func TestUtilization(t *testing.T) {
types.ConfigMirrorAnnotationKey: "",
}

nodeInfo = schedulercache.NewNodeInfo(pod, pod, pod2, mirrorPod4)
nodeInfo = schedulernodeinfo.NewNodeInfo(pod, pod, pod2, mirrorPod4)
utilInfo, err = CalculateUtilization(node, nodeInfo, false, true)
assert.NoError(t, err)
assert.InEpsilon(t, 2.0/10, utilInfo.Utilization, 0.01)

nodeInfo = schedulercache.NewNodeInfo(pod, pod2, mirrorPod4)
nodeInfo = schedulernodeinfo.NewNodeInfo(pod, pod2, mirrorPod4)
utilInfo, err = CalculateUtilization(node, nodeInfo, false, false)
assert.NoError(t, err)
assert.InEpsilon(t, 2.0/10, utilInfo.Utilization, 0.01)

@ -81,9 +81,9 @@ func TestFindPlaceAllOk(t *testing.T) {
new1 := BuildTestPod("p2", 600, 500000)
new2 := BuildTestPod("p3", 500, 500000)

nodeInfos := map[string]*schedulercache.NodeInfo{
"n1": schedulercache.NewNodeInfo(pod1),
"n2": schedulercache.NewNodeInfo(),
nodeInfos := map[string]*schedulernodeinfo.NodeInfo{
"n1": schedulernodeinfo.NewNodeInfo(pod1),
"n2": schedulernodeinfo.NewNodeInfo(),
}
node1 := BuildTestNode("n1", 1000, 2000000)
SetNodeReadyState(node1, true, time.Time{})

@ -115,10 +115,10 @@ func TestFindPlaceAllBas(t *testing.T) {
new2 := BuildTestPod("p3", 500, 500000)
new3 := BuildTestPod("p4", 700, 500000)

nodeInfos := map[string]*schedulercache.NodeInfo{
"n1": schedulercache.NewNodeInfo(pod1),
"n2": schedulercache.NewNodeInfo(),
"nbad": schedulercache.NewNodeInfo(),
nodeInfos := map[string]*schedulernodeinfo.NodeInfo{
"n1": schedulernodeinfo.NewNodeInfo(pod1),
"n2": schedulernodeinfo.NewNodeInfo(),
"nbad": schedulernodeinfo.NewNodeInfo(),
}
nodebad := BuildTestNode("nbad", 1000, 2000000)
node1 := BuildTestNode("n1", 1000, 2000000)

@ -151,9 +151,9 @@ func TestFindPlaceAllBas(t *testing.T) {
func TestFindNone(t *testing.T) {
pod1 := BuildTestPod("p1", 300, 500000)

nodeInfos := map[string]*schedulercache.NodeInfo{
"n1": schedulercache.NewNodeInfo(pod1),
"n2": schedulercache.NewNodeInfo(),
nodeInfos := map[string]*schedulernodeinfo.NodeInfo{
"n1": schedulernodeinfo.NewNodeInfo(pod1),
"n2": schedulernodeinfo.NewNodeInfo(),
}
node1 := BuildTestNode("n1", 1000, 2000000)
SetNodeReadyState(node1, true, time.Time{})
@ -26,7 +26,7 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/autoscaler/cluster-autoscaler/utils/drain"
kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// FastGetPodsToMove returns a list of pods that should be moved elsewhere if the node

@ -34,7 +34,7 @@ import (
// Based on kubectl drain code. It makes an assumption that RC, DS, Jobs and RS were deleted
// along with their pods (no abandoned pods with dangling created-by annotation). Useful for fast
// checks.
func FastGetPodsToMove(nodeInfo *schedulercache.NodeInfo, skipNodesWithSystemPods bool, skipNodesWithLocalStorage bool,
func FastGetPodsToMove(nodeInfo *schedulernodeinfo.NodeInfo, skipNodesWithSystemPods bool, skipNodesWithLocalStorage bool,
pdbs []*policyv1.PodDisruptionBudget) ([]*apiv1.Pod, error) {
pods, err := drain.GetPodsForDeletionOnNodeDrain(
nodeInfo.Pods(),

@ -61,7 +61,7 @@ func FastGetPodsToMove(nodeInfo *schedulercache.NodeInfo, skipNodesWithSystemPod
// is drained. Raises error if there is an unreplicated pod.
// Based on kubectl drain code. It checks whether RC, DS, Jobs and RS that created these pods
// still exist.
func DetailedGetPodsForMove(nodeInfo *schedulercache.NodeInfo, skipNodesWithSystemPods bool,
func DetailedGetPodsForMove(nodeInfo *schedulernodeinfo.NodeInfo, skipNodesWithSystemPods bool,
skipNodesWithLocalStorage bool, listers kube_util.ListerRegistry, minReplicaCount int32,
pdbs []*policyv1.PodDisruptionBudget) ([]*apiv1.Pod, error) {
pods, err := drain.GetPodsForDeletionOnNodeDrain(
@ -25,7 +25,7 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
"k8s.io/kubernetes/pkg/kubelet/types"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"

"github.com/stretchr/testify/assert"
)

@ -39,7 +39,7 @@ func TestFastGetPodsToMove(t *testing.T) {
Namespace: "ns",
},
}
_, err := FastGetPodsToMove(schedulercache.NewNodeInfo(pod1), true, true, nil)
_, err := FastGetPodsToMove(schedulernodeinfo.NewNodeInfo(pod1), true, true, nil)
assert.Error(t, err)

// Replicated pod

@ -50,7 +50,7 @@ func TestFastGetPodsToMove(t *testing.T) {
OwnerReferences: GenerateOwnerReferences("rs", "ReplicaSet", "extensions/v1beta1", ""),
},
}
r2, err := FastGetPodsToMove(schedulercache.NewNodeInfo(pod2), true, true, nil)
r2, err := FastGetPodsToMove(schedulernodeinfo.NewNodeInfo(pod2), true, true, nil)
assert.NoError(t, err)
assert.Equal(t, 1, len(r2))
assert.Equal(t, pod2, r2[0])

@ -65,7 +65,7 @@ func TestFastGetPodsToMove(t *testing.T) {
},
},
}
r3, err := FastGetPodsToMove(schedulercache.NewNodeInfo(pod3), true, true, nil)
r3, err := FastGetPodsToMove(schedulernodeinfo.NewNodeInfo(pod3), true, true, nil)
assert.NoError(t, err)
assert.Equal(t, 0, len(r3))

@ -77,7 +77,7 @@ func TestFastGetPodsToMove(t *testing.T) {
OwnerReferences: GenerateOwnerReferences("ds", "DaemonSet", "extensions/v1beta1", ""),
},
}
r4, err := FastGetPodsToMove(schedulercache.NewNodeInfo(pod2, pod3, pod4), true, true, nil)
r4, err := FastGetPodsToMove(schedulernodeinfo.NewNodeInfo(pod2, pod3, pod4), true, true, nil)
assert.NoError(t, err)
assert.Equal(t, 1, len(r4))
assert.Equal(t, pod2, r4[0])

@ -90,7 +90,7 @@ func TestFastGetPodsToMove(t *testing.T) {
OwnerReferences: GenerateOwnerReferences("rs", "ReplicaSet", "extensions/v1beta1", ""),
},
}
_, err = FastGetPodsToMove(schedulercache.NewNodeInfo(pod5), true, true, nil)
_, err = FastGetPodsToMove(schedulernodeinfo.NewNodeInfo(pod5), true, true, nil)
assert.Error(t, err)

// Local storage

@ -110,7 +110,7 @@ func TestFastGetPodsToMove(t *testing.T) {
},
},
}
_, err = FastGetPodsToMove(schedulercache.NewNodeInfo(pod6), true, true, nil)
_, err = FastGetPodsToMove(schedulernodeinfo.NewNodeInfo(pod6), true, true, nil)
assert.Error(t, err)

// Non-local storage

@ -132,7 +132,7 @@ func TestFastGetPodsToMove(t *testing.T) {
},
},
}
r7, err := FastGetPodsToMove(schedulercache.NewNodeInfo(pod7), true, true, nil)
r7, err := FastGetPodsToMove(schedulernodeinfo.NewNodeInfo(pod7), true, true, nil)
assert.NoError(t, err)
assert.Equal(t, 1, len(r7))

@ -167,7 +167,7 @@ func TestFastGetPodsToMove(t *testing.T) {
},
}

_, err = FastGetPodsToMove(schedulercache.NewNodeInfo(pod8), true, true, []*policyv1.PodDisruptionBudget{pdb8})
_, err = FastGetPodsToMove(schedulernodeinfo.NewNodeInfo(pod8), true, true, []*policyv1.PodDisruptionBudget{pdb8})
assert.Error(t, err)

// Pdb allowing

@ -200,7 +200,7 @@ func TestFastGetPodsToMove(t *testing.T) {
},
}

r9, err := FastGetPodsToMove(schedulercache.NewNodeInfo(pod9), true, true, []*policyv1.PodDisruptionBudget{pdb9})
r9, err := FastGetPodsToMove(schedulernodeinfo.NewNodeInfo(pod9), true, true, []*policyv1.PodDisruptionBudget{pdb9})
assert.NoError(t, err)
assert.Equal(t, 1, len(r9))
}
@ -24,7 +24,7 @@ import (
"k8s.io/autoscaler/cluster-autoscaler/utils/drain"
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// GetRequiredPodsForNode returns a list of pods that would appear on the node if the

@ -75,12 +75,12 @@ func GetRequiredPodsForNode(nodename string, listers kube_util.ListerRegistry) (
}

// BuildNodeInfoForNode build a NodeInfo structure for the given node as if the node was just created.
func BuildNodeInfoForNode(node *apiv1.Node, listers kube_util.ListerRegistry) (*schedulercache.NodeInfo, errors.AutoscalerError) {
func BuildNodeInfoForNode(node *apiv1.Node, listers kube_util.ListerRegistry) (*schedulernodeinfo.NodeInfo, errors.AutoscalerError) {
requiredPods, err := GetRequiredPodsForNode(node.Name, listers)
if err != nil {
return nil, err
}
result := schedulercache.NewNodeInfo(requiredPods...)
result := schedulernodeinfo.NewNodeInfo(requiredPods...)
if err := result.SetNode(node); err != nil {
return nil, errors.ToAutoscalerError(errors.InternalError, err)
}
@ -26,7 +26,7 @@ import (
kube_client "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
"k8s.io/kubernetes/pkg/scheduler/factory"

// We need to import provider to initialize default scheduler.

@ -132,7 +132,7 @@ func NewPredicateChecker(kubeClient kube_client.Interface, stop <-chan struct{})
}, nil
}

func isNodeReadyAndSchedulablePredicate(pod *apiv1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool,
func isNodeReadyAndSchedulablePredicate(pod *apiv1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool,
[]algorithm.PredicateFailureReason, error) {
ready := kube_util.IsNodeReadyAndSchedulable(nodeInfo.Node())
if !ready {

@ -148,7 +148,7 @@ func NewTestPredicateChecker() *PredicateChecker {
{name: "default", predicate: predicates.GeneralPredicates},
{name: "ready", predicate: isNodeReadyAndSchedulablePredicate},
},
predicateMetadataProducer: func(_ *apiv1.Pod, _ map[string]*schedulercache.NodeInfo) algorithm.PredicateMetadata {
predicateMetadataProducer: func(_ *apiv1.Pod, _ map[string]*schedulernodeinfo.NodeInfo) algorithm.PredicateMetadata {
return nil
},
}

@ -172,7 +172,7 @@ func (p *PredicateChecker) IsAffinityPredicateEnabled() bool {
// improve the performance of running predicates, especially MatchInterPodAffinity predicate. However, calculating
// predicateMetadata is also quite expensive, so it's not always the best option to run this method.
// Please refer to https://github.com/kubernetes/autoscaler/issues/257 for more details.
func (p *PredicateChecker) GetPredicateMetadata(pod *apiv1.Pod, nodeInfos map[string]*schedulercache.NodeInfo) algorithm.PredicateMetadata {
func (p *PredicateChecker) GetPredicateMetadata(pod *apiv1.Pod, nodeInfos map[string]*schedulernodeinfo.NodeInfo) algorithm.PredicateMetadata {
// Skip precomputation if affinity predicate is disabled - it's not worth it performance-wise.
if !p.enableAffinityPredicate {
return nil

@ -181,7 +181,7 @@ func (p *PredicateChecker) GetPredicateMetadata(pod *apiv1.Pod, nodeInfos map[st
}

// FitsAny checks if the given pod can be place on any of the given nodes.
func (p *PredicateChecker) FitsAny(pod *apiv1.Pod, nodeInfos map[string]*schedulercache.NodeInfo) (string, error) {
func (p *PredicateChecker) FitsAny(pod *apiv1.Pod, nodeInfos map[string]*schedulernodeinfo.NodeInfo) (string, error) {
for name, nodeInfo := range nodeInfos {
// Be sure that the node is schedulable.
if nodeInfo.Node().Spec.Unschedulable {

@ -268,7 +268,7 @@ func (pe *PredicateError) PredicateName() string {
// it was calculated using NodeInfo map representing different cluster state and the
// performance gains of CheckPredicates won't always offset the cost of GetPredicateMetadata.
// Alternatively you can pass nil as predicateMetadata.
func (p *PredicateChecker) CheckPredicates(pod *apiv1.Pod, predicateMetadata algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) *PredicateError {
func (p *PredicateChecker) CheckPredicates(pod *apiv1.Pod, predicateMetadata algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) *PredicateError {
for _, predInfo := range p.predicates {
// Skip affinity predicate if it has been disabled.
if !p.enableAffinityPredicate && predInfo.name == affinityPredicateName {
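The PredicateChecker methods shown above (FitsAny, CheckPredicates, GetPredicateMetadata) all consume NodeInfos from the renamed package. A sketch of how a caller might drive them; tryToPlace is hypothetical, and passing nil predicate metadata follows the comment on CheckPredicates above:

```go
package example

import (
	"fmt"

	apiv1 "k8s.io/api/core/v1"
	"k8s.io/autoscaler/cluster-autoscaler/simulator"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// tryToPlace checks whether pod fits any of the given nodes using NodeInfos
// built from scheduler/nodeinfo, then re-checks the chosen node directly.
func tryToPlace(checker *simulator.PredicateChecker, pod *apiv1.Pod, nodes []*apiv1.Node) (string, error) {
	nodeInfos := make(map[string]*schedulernodeinfo.NodeInfo)
	for _, node := range nodes {
		ni := schedulernodeinfo.NewNodeInfo()
		if err := ni.SetNode(node); err != nil {
			return "", err
		}
		nodeInfos[node.Name] = ni
	}
	name, err := checker.FitsAny(pod, nodeInfos)
	if err != nil {
		return "", fmt.Errorf("pod %s/%s does not fit any node: %v", pod.Namespace, pod.Name, err)
	}
	// nil predicate metadata is allowed per the comment on CheckPredicates.
	if predErr := checker.CheckPredicates(pod, nil, nodeInfos[name]); predErr != nil {
		return "", fmt.Errorf("predicate check failed on %s", name)
	}
	return name, nil
}
```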
@ -22,7 +22,7 @@ import (
"time"

. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"

"github.com/stretchr/testify/assert"
)

@ -33,9 +33,9 @@ func TestPredicates(t *testing.T) {
p3 := BuildTestPod("p3", 8000, 0)
p4 := BuildTestPod("p4", 500, 500000)

ni1 := schedulercache.NewNodeInfo(p1)
ni2 := schedulercache.NewNodeInfo()
nodeInfos := map[string]*schedulercache.NodeInfo{
ni1 := schedulernodeinfo.NewNodeInfo(p1)
ni2 := schedulernodeinfo.NewNodeInfo()
nodeInfos := map[string]*schedulernodeinfo.NodeInfo{
"n1": ni1,
"n2": ni2,
}
@ -20,17 +20,17 @@ import (
"time"

"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// Backoff allows time-based backing off of node groups considered in scale up algorithm
type Backoff interface {
// Backoff execution for the given node group. Returns time till execution is backed off.
Backoff(nodeGroup cloudprovider.NodeGroup, nodeInfo *schedulercache.NodeInfo, errorClass cloudprovider.InstanceErrorClass, errorCode string, currentTime time.Time) time.Time
Backoff(nodeGroup cloudprovider.NodeGroup, nodeInfo *schedulernodeinfo.NodeInfo, errorClass cloudprovider.InstanceErrorClass, errorCode string, currentTime time.Time) time.Time
// IsBackedOff returns true if execution is backed off for the given node group.
IsBackedOff(nodeGroup cloudprovider.NodeGroup, nodeInfo *schedulercache.NodeInfo, currentTime time.Time) bool
IsBackedOff(nodeGroup cloudprovider.NodeGroup, nodeInfo *schedulernodeinfo.NodeInfo, currentTime time.Time) bool
// RemoveBackoff removes backoff data for the given node group.
RemoveBackoff(nodeGroup cloudprovider.NodeGroup, nodeInfo *schedulercache.NodeInfo)
RemoveBackoff(nodeGroup cloudprovider.NodeGroup, nodeInfo *schedulernodeinfo.NodeInfo)
// RemoveStaleBackoffData removes stale backoff data.
RemoveStaleBackoffData(currentTime time.Time)
}
@ -21,7 +21,7 @@ import (

"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"

schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// Backoff handles backing off executions.

@ -66,7 +66,7 @@ func NewIdBasedExponentialBackoff(initialBackoffDuration time.Duration, maxBacko
}

// Backoff execution for the given node group. Returns time till execution is backed off.
func (b *exponentialBackoff) Backoff(nodeGroup cloudprovider.NodeGroup, nodeInfo *schedulercache.NodeInfo, errorClass cloudprovider.InstanceErrorClass, errorCode string, currentTime time.Time) time.Time {
func (b *exponentialBackoff) Backoff(nodeGroup cloudprovider.NodeGroup, nodeInfo *schedulernodeinfo.NodeInfo, errorClass cloudprovider.InstanceErrorClass, errorCode string, currentTime time.Time) time.Time {
duration := b.initialBackoffDuration
key := b.nodeGroupKey(nodeGroup)
if backoffInfo, found := b.backoffInfo[key]; found {

@ -90,13 +90,13 @@ func (b *exponentialBackoff) Backoff(nodeGroup cloudprovider.NodeGroup, nodeInfo
}

// IsBackedOff returns true if execution is backed off for the given node group.
func (b *exponentialBackoff) IsBackedOff(nodeGroup cloudprovider.NodeGroup, nodeInfo *schedulercache.NodeInfo, currentTime time.Time) bool {
func (b *exponentialBackoff) IsBackedOff(nodeGroup cloudprovider.NodeGroup, nodeInfo *schedulernodeinfo.NodeInfo, currentTime time.Time) bool {
backoffInfo, found := b.backoffInfo[b.nodeGroupKey(nodeGroup)]
return found && backoffInfo.backoffUntil.After(currentTime)
}

// RemoveBackoff removes backoff data for the given node group.
func (b *exponentialBackoff) RemoveBackoff(nodeGroup cloudprovider.NodeGroup, nodeInfo *schedulercache.NodeInfo) {
func (b *exponentialBackoff) RemoveBackoff(nodeGroup cloudprovider.NodeGroup, nodeInfo *schedulernodeinfo.NodeInfo) {
delete(b.backoffInfo, b.nodeGroupKey(nodeGroup))
}
|
|
@ -24,11 +24,11 @@ import (
|
|||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
|
||||
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
|
||||
)
|
||||
|
||||
// GetDaemonSetPodsForNode returns daemonset nodes for the given pod.
|
||||
func GetDaemonSetPodsForNode(nodeInfo *schedulercache.NodeInfo, daemonsets []*appsv1.DaemonSet, predicateChecker *simulator.PredicateChecker) []*apiv1.Pod {
|
||||
func GetDaemonSetPodsForNode(nodeInfo *schedulernodeinfo.NodeInfo, daemonsets []*appsv1.DaemonSet, predicateChecker *simulator.PredicateChecker) []*apiv1.Pod {
|
||||
result := make([]*apiv1.Pod, 0)
|
||||
for _, ds := range daemonsets {
|
||||
pod := newPod(ds, nodeInfo.Node().Name)
|
||||
|
|
|
|||
|
|
@ -27,7 +27,7 @@ import (
appsv1 "k8s.io/api/apps/v1"
apiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"

"github.com/stretchr/testify/assert"
)

@ -35,7 +35,7 @@ import (
func TestGetDaemonSetPodsForNode(t *testing.T) {
node := BuildTestNode("node", 1000, 1000)
SetNodeReadyState(node, true, time.Now())
nodeInfo := schedulercache.NewNodeInfo()
nodeInfo := schedulernodeinfo.NewNodeInfo()
nodeInfo.SetNode(node)

predicateChecker := simulator.NewTestPredicateChecker()
@ -19,28 +19,28 @@ package scheduler
import (
apiv1 "k8s.io/api/core/v1"
"k8s.io/klog"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// CreateNodeNameToInfoMap obtains a list of pods and pivots that list into a map where the keys are node names
// and the values are the aggregated information for that node. Pods waiting lower priority pods preemption
// (pod.Status.NominatedNodeName is set) are also added to list of pods for a node.
func CreateNodeNameToInfoMap(pods []*apiv1.Pod, nodes []*apiv1.Node) map[string]*schedulercache.NodeInfo {
nodeNameToNodeInfo := make(map[string]*schedulercache.NodeInfo)
func CreateNodeNameToInfoMap(pods []*apiv1.Pod, nodes []*apiv1.Node) map[string]*schedulernodeinfo.NodeInfo {
nodeNameToNodeInfo := make(map[string]*schedulernodeinfo.NodeInfo)
for _, pod := range pods {
nodeName := pod.Spec.NodeName
if nodeName == "" {
nodeName = pod.Status.NominatedNodeName
}
if _, ok := nodeNameToNodeInfo[nodeName]; !ok {
nodeNameToNodeInfo[nodeName] = schedulercache.NewNodeInfo()
nodeNameToNodeInfo[nodeName] = schedulernodeinfo.NewNodeInfo()
}
nodeNameToNodeInfo[nodeName].AddPod(pod)
}

for _, node := range nodes {
if _, ok := nodeNameToNodeInfo[node.Name]; !ok {
nodeNameToNodeInfo[node.Name] = schedulercache.NewNodeInfo()
nodeNameToNodeInfo[node.Name] = schedulernodeinfo.NewNodeInfo()
}
nodeNameToNodeInfo[node.Name].SetNode(node)
}

@ -60,10 +60,10 @@ func CreateNodeNameToInfoMap(pods []*apiv1.Pod, nodes []*apiv1.Node) map[string]
}

// NodeWithPod function returns NodeInfo, which is a copy of nodeInfo argument with an additional pod scheduled on it.
func NodeWithPod(nodeInfo *schedulercache.NodeInfo, pod *apiv1.Pod) *schedulercache.NodeInfo {
func NodeWithPod(nodeInfo *schedulernodeinfo.NodeInfo, pod *apiv1.Pod) *schedulernodeinfo.NodeInfo {
podsOnNode := nodeInfo.Pods()
podsOnNode = append(podsOnNode, pod)
newNodeInfo := schedulercache.NewNodeInfo(podsOnNode...)
newNodeInfo := schedulernodeinfo.NewNodeInfo(podsOnNode...)
if err := newNodeInfo.SetNode(nodeInfo.Node()); err != nil {
klog.Errorf("error setting node for NodeInfo %s, because of %s", nodeInfo.Node().Name, err.Error())
}
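CreateNodeNameToInfoMap and NodeWithPod together let callers build a scheduling snapshot and test a placement without mutating it. An illustrative sketch; snapshotWithExtraPod is hypothetical and the utils/scheduler import path is assumed from this repository's layout:

```go
package example

import (
	apiv1 "k8s.io/api/core/v1"
	scheduler_util "k8s.io/autoscaler/cluster-autoscaler/utils/scheduler"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// snapshotWithExtraPod builds the name->NodeInfo map from the current pods and
// nodes, then simulates adding one more pod to a chosen node.
func snapshotWithExtraPod(pods []*apiv1.Pod, nodes []*apiv1.Node, extra *apiv1.Pod, nodeName string) map[string]*schedulernodeinfo.NodeInfo {
	nodeInfos := scheduler_util.CreateNodeNameToInfoMap(pods, nodes)
	if ni, found := nodeInfos[nodeName]; found {
		// NodeWithPod copies the NodeInfo, so the original entry is left untouched.
		nodeInfos[nodeName] = scheduler_util.NodeWithPod(ni, extra)
	}
	return nodeInfos
}
```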