unexporting methods in utils.go

Vivek Bagade 2019-01-22 21:27:56 +05:30
parent 0a82f0e548
commit 79ef3a6940
7 changed files with 40 additions and 40 deletions
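Context for the diff below: the commit renames a set of package-level helpers (declared in utils.go and called from the other files in the same core package) from exported, uppercase-initial names to unexported, lowercase-initial names, so they are no longer visible outside the package. As a reminder of the Go visibility rule this relies on, here is a minimal standalone sketch; it is illustrative only, with made-up names, not code from this commit:

// Sketch of Go's export rule (illustrative, not repository code):
// an identifier is exported only if it begins with an uppercase letter;
// a lowercase-initial identifier is visible only within its own package.
package main

import "fmt"

// FilterPods is exported: other packages importing this one could call it.
func FilterPods(pods []string) []string { return pods }

// filterPods is unexported: only code in this package can call it, which is
// the effect the commit gives the helpers in utils.go.
func filterPods(pods []string) []string { return pods }

func main() {
	fmt.Println(FilterPods([]string{"p1"}))
	fmt.Println(filterPods([]string{"p2"}))
}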

cluster-autoscaler/core/scale_down.go

@ -358,7 +358,7 @@ func (sd *ScaleDown) UpdateUnneededNodes(
currentlyUnneededNodes := make([]*apiv1.Node, 0)
// Only scheduled non expendable pods and pods waiting for lower priority pods preemption can prevent node delete.
- nonExpendablePods := FilterOutExpendablePods(pods, sd.context.ExpendablePodsPriorityCutoff)
+ nonExpendablePods := filterOutExpendablePods(pods, sd.context.ExpendablePodsPriorityCutoff)
nodeNameToNodeInfo := scheduler_util.CreateNodeNameToInfoMap(nonExpendablePods, nodes)
utilizationMap := make(map[string]simulator.UtilizationInfo)
@ -729,7 +729,7 @@ func (sd *ScaleDown) TryToScaleDown(allNodes []*apiv1.Node, pods []*apiv1.Pod, p
findNodesToRemoveStart := time.Now()
// Only scheduled non expendable pods are taken into account and have to be moved.
- nonExpendablePods := FilterOutExpendablePods(pods, sd.context.ExpendablePodsPriorityCutoff)
+ nonExpendablePods := filterOutExpendablePods(pods, sd.context.ExpendablePodsPriorityCutoff)
// We look for only 1 node so new hints may be incomplete.
nodesToRemove, _, _, err := simulator.FindNodesToRemove(candidates, nodesWithoutMaster, nonExpendablePods, sd.context.ListerRegistry,
sd.context.PredicateChecker, 1, false,

cluster-autoscaler/core/scale_up.go

@ -264,7 +264,7 @@ func ScaleUp(context *context.AutoscalingContext, processors *ca_processors.Auto
}
glogx.V(1).Over(loggingQuota).Infof("%v other pods are also unschedulable", -loggingQuota.Left())
- nodesFromNotAutoscaledGroups, err := FilterOutNodesFromNotAutoscaledGroups(nodes, context.CloudProvider)
+ nodesFromNotAutoscaledGroups, err := filterOutNodesFromNotAutoscaledGroups(nodes, context.CloudProvider)
if err != nil {
return &status.ScaleUpStatus{Result: status.ScaleUpError}, err.AddPrefix("failed to filter out nodes which are from not autoscaled groups: ")
}
@ -447,7 +447,7 @@ func ScaleUp(context *context.AutoscalingContext, processors *ca_processors.Auto
// If possible replace candidate node-info with node info based on crated node group. The latter
// one should be more in line with nodes which will be created by node group.
- mainCreatedNodeInfo, err := GetNodeInfoFromTemplate(createNodeGroupResult.MainCreatedNodeGroup, daemonSets, context.PredicateChecker)
+ mainCreatedNodeInfo, err := getNodeInfoFromTemplate(createNodeGroupResult.MainCreatedNodeGroup, daemonSets, context.PredicateChecker)
if err == nil {
nodeInfos[createNodeGroupResult.MainCreatedNodeGroup.Id()] = mainCreatedNodeInfo
} else {
@ -461,7 +461,7 @@ func ScaleUp(context *context.AutoscalingContext, processors *ca_processors.Auto
}
for _, nodeGroup := range createNodeGroupResult.ExtraCreatedNodeGroups {
- nodeInfo, err := GetNodeInfoFromTemplate(nodeGroup, daemonSets, context.PredicateChecker)
+ nodeInfo, err := getNodeInfoFromTemplate(nodeGroup, daemonSets, context.PredicateChecker)
if err != nil {
klog.Warningf("Cannot build node info for newly created extra node group %v; balancing similar node groups will not work; err=%v", nodeGroup.Id(), err)
@ -569,7 +569,7 @@ func getPodsPredicatePassingCheckFunctions(
podsPassing := make([]*apiv1.Pod, 0)
podsNotPassing := make(map[*apiv1.Pod]status.Reasons)
- schedulableOnNode := CheckPodsSchedulableOnNode(context, unschedulablePods, nodeGroupId, nodeInfo)
+ schedulableOnNode := checkPodsSchedulableOnNode(context, unschedulablePods, nodeGroupId, nodeInfo)
for pod, err := range schedulableOnNode {
if err == nil {
podsPassing = append(podsPassing, pod)

cluster-autoscaler/core/scale_up_test.go

@ -414,7 +414,7 @@ func simpleScaleUpTest(t *testing.T, config *scaleTestConfig) {
}
context.ExpanderStrategy = expander
- nodeInfos, _ := GetNodeInfosForGroups(nodes, provider, listers, []*appsv1.DaemonSet{}, context.PredicateChecker)
+ nodeInfos, _ := getNodeInfosForGroups(nodes, provider, listers, []*appsv1.DaemonSet{}, context.PredicateChecker)
clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())
clusterState.UpdateNodes(nodes, nodeInfos, time.Now())
@ -501,7 +501,7 @@ func TestScaleUpNodeComingNoScale(t *testing.T) {
context := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider)
nodes := []*apiv1.Node{n1, n2}
- nodeInfos, _ := GetNodeInfosForGroups(nodes, provider, listers, []*appsv1.DaemonSet{}, context.PredicateChecker)
+ nodeInfos, _ := getNodeInfosForGroups(nodes, provider, listers, []*appsv1.DaemonSet{}, context.PredicateChecker)
clusterState := clusterstate.NewClusterStateRegistry(
provider,
clusterstate.ClusterStateRegistryConfig{MaxNodeProvisionTime: 5 * time.Minute},
@ -547,7 +547,7 @@ func TestScaleUpNodeComingHasScale(t *testing.T) {
context := NewScaleTestAutoscalingContext(defaultOptions, &fake.Clientset{}, listers, provider)
nodes := []*apiv1.Node{n1, n2}
- nodeInfos, _ := GetNodeInfosForGroups(nodes, provider, listers, []*appsv1.DaemonSet{}, context.PredicateChecker)
+ nodeInfos, _ := getNodeInfosForGroups(nodes, provider, listers, []*appsv1.DaemonSet{}, context.PredicateChecker)
clusterState := clusterstate.NewClusterStateRegistry(
provider,
clusterstate.ClusterStateRegistryConfig{
@ -601,7 +601,7 @@ func TestScaleUpUnhealthy(t *testing.T) {
context := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider)
nodes := []*apiv1.Node{n1, n2}
- nodeInfos, _ := GetNodeInfosForGroups(nodes, provider, listers, []*appsv1.DaemonSet{}, context.PredicateChecker)
+ nodeInfos, _ := getNodeInfosForGroups(nodes, provider, listers, []*appsv1.DaemonSet{}, context.PredicateChecker)
clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())
clusterState.UpdateNodes(nodes, nodeInfos, time.Now())
p3 := BuildTestPod("p-new", 550, 0)
@ -640,7 +640,7 @@ func TestScaleUpNoHelp(t *testing.T) {
context := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider)
nodes := []*apiv1.Node{n1}
- nodeInfos, _ := GetNodeInfosForGroups(nodes, provider, listers, []*appsv1.DaemonSet{}, context.PredicateChecker)
+ nodeInfos, _ := getNodeInfosForGroups(nodes, provider, listers, []*appsv1.DaemonSet{}, context.PredicateChecker)
clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())
clusterState.UpdateNodes(nodes, nodeInfos, time.Now())
p3 := BuildTestPod("p-new", 500, 0)
@ -704,7 +704,7 @@ func TestScaleUpBalanceGroups(t *testing.T) {
}
context := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider)
- nodeInfos, _ := GetNodeInfosForGroups(nodes, provider, listers, []*appsv1.DaemonSet{}, context.PredicateChecker)
+ nodeInfos, _ := getNodeInfosForGroups(nodes, provider, listers, []*appsv1.DaemonSet{}, context.PredicateChecker)
clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())
clusterState.UpdateNodes(nodes, nodeInfos, time.Now())
@ -769,7 +769,7 @@ func TestScaleUpAutoprovisionedNodeGroup(t *testing.T) {
processors.NodeGroupManager = &mockAutoprovisioningNodeGroupManager{t}
nodes := []*apiv1.Node{}
- nodeInfos, _ := GetNodeInfosForGroups(nodes, provider, context.ListerRegistry, []*appsv1.DaemonSet{}, context.PredicateChecker)
+ nodeInfos, _ := getNodeInfosForGroups(nodes, provider, context.ListerRegistry, []*appsv1.DaemonSet{}, context.PredicateChecker)
scaleUpStatus, err := ScaleUp(&context, processors, clusterState, []*apiv1.Pod{p1}, nodes, []*appsv1.DaemonSet{}, nodeInfos)
assert.NoError(t, err)

cluster-autoscaler/core/static_autoscaler.go

@ -149,7 +149,7 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) errors.AutoscalerError
return errors.ToAutoscalerError(errors.ApiCallError, err)
}
- nodeInfosForGroups, autoscalerError := GetNodeInfosForGroups(readyNodes, autoscalingContext.CloudProvider, autoscalingContext.ListerRegistry,
+ nodeInfosForGroups, autoscalerError := getNodeInfosForGroups(readyNodes, autoscalingContext.CloudProvider, autoscalingContext.ListerRegistry,
daemonsets, autoscalingContext.PredicateChecker)
if err != nil {
return autoscalerError.AddPrefix("failed to build node infos for node groups: ")
@ -278,7 +278,7 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) errors.AutoscalerError
// Some unschedulable pods can be waiting for lower priority pods preemption so they have nominated node to run.
// Such pods don't require scale up but should be considered during scale down.
- unschedulablePods, unschedulableWaitingForLowerPriorityPreemption := FilterOutExpendableAndSplit(unschedulablePodsWithoutTPUs, a.ExpendablePodsPriorityCutoff)
+ unschedulablePods, unschedulableWaitingForLowerPriorityPreemption := filterOutExpendableAndSplit(unschedulablePodsWithoutTPUs, a.ExpendablePodsPriorityCutoff)
klog.V(4).Infof("Filtering out schedulables")
filterOutSchedulableStart := time.Now()
@ -547,7 +547,7 @@ func (a *StaticAutoscaler) updateClusterState(allNodes []*apiv1.Node, nodeInfosF
func (a *StaticAutoscaler) onEmptyCluster(status string, emitEvent bool) {
klog.Warningf(status)
a.scaleDown.CleanUpUnneededNodes()
- UpdateEmptyClusterStateMetrics()
+ updateEmptyClusterStateMetrics()
if a.AutoscalingContext.WriteStatusConfigMap {
utils.WriteStatusConfigMap(a.AutoscalingContext.ClientSet, a.AutoscalingContext.ConfigNamespace, status, a.AutoscalingContext.LogRecorder)
}

cluster-autoscaler/core/static_autoscaler_test.go

@ -512,7 +512,7 @@ func TestStaticAutoscalerRunOnceWithALongUnregisteredNode(t *testing.T) {
// broken node detected as unregistered
nodes := []*apiv1.Node{n1}
- //nodeInfos, _ := GetNodeInfosForGroups(nodes, provider, listerRegistry, []*appsv1.DaemonSet{}, context.PredicateChecker)
+ //nodeInfos, _ := getNodeInfosForGroups(nodes, provider, listerRegistry, []*appsv1.DaemonSet{}, context.PredicateChecker)
clusterState.UpdateNodes(nodes, nil, now)
// broken node failed to register in time

cluster-autoscaler/core/utils.go

@ -111,8 +111,8 @@ func (podMap podSchedulableMap) set(pod *apiv1.Pod, err *simulator.PredicateErro
// It takes into account pods that are bound to node and will be scheduled after lower priority pod preemption.
func FilterOutSchedulable(unschedulableCandidates []*apiv1.Pod, nodes []*apiv1.Node, allScheduled []*apiv1.Pod, podsWaitingForLowerPriorityPreemption []*apiv1.Pod,
predicateChecker *simulator.PredicateChecker, expendablePodsPriorityCutoff int) []*apiv1.Pod {
- unschedulablePods := []*apiv1.Pod{}
- nonExpendableScheduled := FilterOutExpendablePods(allScheduled, expendablePodsPriorityCutoff)
+ var unschedulablePods []*apiv1.Pod
+ nonExpendableScheduled := filterOutExpendablePods(allScheduled, expendablePodsPriorityCutoff)
nodeNameToNodeInfo := scheduler_util.CreateNodeNameToInfoMap(append(nonExpendableScheduled, podsWaitingForLowerPriorityPreemption...), nodes)
podSchedulable := make(podSchedulableMap)
loggingQuota := glogx.PodsLoggingQuota()
@ -147,10 +147,10 @@ func FilterOutSchedulable(unschedulableCandidates []*apiv1.Pod, nodes []*apiv1.N
return unschedulablePods
}
- // FilterOutExpendableAndSplit filters out expendable pods and splits into:
+ // filterOutExpendableAndSplit filters out expendable pods and splits into:
// - waiting for lower priority pods preemption
// - other pods.
- func FilterOutExpendableAndSplit(unschedulableCandidates []*apiv1.Pod, expendablePodsPriorityCutoff int) ([]*apiv1.Pod, []*apiv1.Pod) {
+ func filterOutExpendableAndSplit(unschedulableCandidates []*apiv1.Pod, expendablePodsPriorityCutoff int) ([]*apiv1.Pod, []*apiv1.Pod) {
unschedulableNonExpendable := []*apiv1.Pod{}
waitingForLowerPriorityPreemption := []*apiv1.Pod{}
for _, pod := range unschedulableCandidates {
@ -166,8 +166,8 @@ func FilterOutExpendableAndSplit(unschedulableCandidates []*apiv1.Pod, expendabl
return unschedulableNonExpendable, waitingForLowerPriorityPreemption
}
- // FilterOutExpendablePods filters out expendable pods.
- func FilterOutExpendablePods(pods []*apiv1.Pod, expendablePodsPriorityCutoff int) []*apiv1.Pod {
+ // filterOutExpendablePods filters out expendable pods.
+ func filterOutExpendablePods(pods []*apiv1.Pod, expendablePodsPriorityCutoff int) []*apiv1.Pod {
result := []*apiv1.Pod{}
for _, pod := range pods {
if pod.Spec.Priority == nil || int(*pod.Spec.Priority) >= expendablePodsPriorityCutoff {
@ -177,8 +177,8 @@ func FilterOutExpendablePods(pods []*apiv1.Pod, expendablePodsPriorityCutoff int
return result
}
- // CheckPodsSchedulableOnNode checks if pods can be scheduled on the given node.
- func CheckPodsSchedulableOnNode(context *context.AutoscalingContext, pods []*apiv1.Pod, nodeGroupId string, nodeInfo *schedulercache.NodeInfo) map[*apiv1.Pod]*simulator.PredicateError {
+ // checkPodsSchedulableOnNode checks if pods can be scheduled on the given node.
+ func checkPodsSchedulableOnNode(context *context.AutoscalingContext, pods []*apiv1.Pod, nodeGroupId string, nodeInfo *schedulercache.NodeInfo) map[*apiv1.Pod]*simulator.PredicateError {
schedulingErrors := map[*apiv1.Pod]*simulator.PredicateError{}
loggingQuota := glogx.PodsLoggingQuota()
podSchedulable := make(podSchedulableMap)
@ -213,11 +213,11 @@ func CheckPodsSchedulableOnNode(context *context.AutoscalingContext, pods []*api
return schedulingErrors
}
- // GetNodeInfosForGroups finds NodeInfos for all node groups used to manage the given nodes. It also returns a node group to sample node mapping.
+ // getNodeInfosForGroups finds NodeInfos for all node groups used to manage the given nodes. It also returns a node group to sample node mapping.
// TODO(mwielgus): This returns map keyed by url, while most code (including scheduler) uses node.Name for a key.
//
// TODO(mwielgus): Review error policy - sometimes we may continue with partial errors.
- func GetNodeInfosForGroups(nodes []*apiv1.Node, cloudProvider cloudprovider.CloudProvider, listers kube_util.ListerRegistry,
+ func getNodeInfosForGroups(nodes []*apiv1.Node, cloudProvider cloudprovider.CloudProvider, listers kube_util.ListerRegistry,
daemonsets []*appsv1.DaemonSet, predicateChecker *simulator.PredicateChecker) (map[string]*schedulercache.NodeInfo, errors.AutoscalerError) {
result := make(map[string]*schedulercache.NodeInfo)
@ -265,7 +265,7 @@ func GetNodeInfosForGroups(nodes []*apiv1.Node, cloudProvider cloudprovider.Clou
// No good template, trying to generate one. This is called only if there are no
// working nodes in the node groups. By default CA tries to use a real-world example.
- nodeInfo, err := GetNodeInfoFromTemplate(nodeGroup, daemonsets, predicateChecker)
+ nodeInfo, err := getNodeInfoFromTemplate(nodeGroup, daemonsets, predicateChecker)
if err != nil {
if err == cloudprovider.ErrNotImplemented {
continue
@ -299,8 +299,8 @@ func GetNodeInfosForGroups(nodes []*apiv1.Node, cloudProvider cloudprovider.Clou
return result, nil
}
- // GetNodeInfoFromTemplate returns NodeInfo object built base on TemplateNodeInfo returned by NodeGroup.TemplateNodeInfo().
- func GetNodeInfoFromTemplate(nodeGroup cloudprovider.NodeGroup, daemonsets []*appsv1.DaemonSet, predicateChecker *simulator.PredicateChecker) (*schedulercache.NodeInfo, errors.AutoscalerError) {
+ // getNodeInfoFromTemplate returns NodeInfo object built base on TemplateNodeInfo returned by NodeGroup.TemplateNodeInfo().
+ func getNodeInfoFromTemplate(nodeGroup cloudprovider.NodeGroup, daemonsets []*appsv1.DaemonSet, predicateChecker *simulator.PredicateChecker) (*schedulercache.NodeInfo, errors.AutoscalerError) {
id := nodeGroup.Id()
baseNodeInfo, err := nodeGroup.TemplateNodeInfo()
if err != nil {
@ -318,9 +318,9 @@ func GetNodeInfoFromTemplate(nodeGroup cloudprovider.NodeGroup, daemonsets []*ap
return sanitizedNodeInfo, nil
}
- // FilterOutNodesFromNotAutoscaledGroups return subset of input nodes for which cloud provider does not
+ // filterOutNodesFromNotAutoscaledGroups return subset of input nodes for which cloud provider does not
// return autoscaled node group.
- func FilterOutNodesFromNotAutoscaledGroups(nodes []*apiv1.Node, cloudProvider cloudprovider.CloudProvider) ([]*apiv1.Node, errors.AutoscalerError) {
+ func filterOutNodesFromNotAutoscaledGroups(nodes []*apiv1.Node, cloudProvider cloudprovider.CloudProvider) ([]*apiv1.Node, errors.AutoscalerError) {
result := make([]*apiv1.Node, 0)
for _, node := range nodes {
@ -595,9 +595,9 @@ func getOldestCreateTimeWithGpu(pods []*apiv1.Pod) (bool, time.Time) {
return gpuFound, oldest
}
- // UpdateEmptyClusterStateMetrics updates metrics related to empty cluster's state.
+ // updateEmptyClusterStateMetrics updates metrics related to empty cluster's state.
// TODO(aleksandra-malinowska): use long unregistered value from ClusterStateRegistry.
- func UpdateEmptyClusterStateMetrics() {
+ func updateEmptyClusterStateMetrics() {
metrics.UpdateClusterSafeToAutoscale(false)
metrics.UpdateNodesCount(0, 0, 0, 0, 0)
}

cluster-autoscaler/core/utils_test.go

@ -204,7 +204,7 @@ func TestFilterOutExpendableAndSplit(t *testing.T) {
podWaitingForPreemption2.Spec.Priority = &priority100
podWaitingForPreemption2.Annotations = map[string]string{scheduler_util.NominatedNodeAnnotationKey: "node1"}
- res1, res2 := FilterOutExpendableAndSplit([]*apiv1.Pod{p1, p2, podWaitingForPreemption1, podWaitingForPreemption2}, 0)
+ res1, res2 := filterOutExpendableAndSplit([]*apiv1.Pod{p1, p2, podWaitingForPreemption1, podWaitingForPreemption2}, 0)
assert.Equal(t, 2, len(res1))
assert.Equal(t, p1, res1[0])
assert.Equal(t, p2, res1[1])
@ -212,7 +212,7 @@ func TestFilterOutExpendableAndSplit(t *testing.T) {
assert.Equal(t, podWaitingForPreemption1, res2[0])
assert.Equal(t, podWaitingForPreemption2, res2[1])
- res1, res2 = FilterOutExpendableAndSplit([]*apiv1.Pod{p1, p2, podWaitingForPreemption1, podWaitingForPreemption2}, 10)
+ res1, res2 = filterOutExpendableAndSplit([]*apiv1.Pod{p1, p2, podWaitingForPreemption1, podWaitingForPreemption2}, 10)
assert.Equal(t, 1, len(res1))
assert.Equal(t, p2, res1[0])
assert.Equal(t, 1, len(res2))
@ -233,7 +233,7 @@ func TestFilterOutExpendablePods(t *testing.T) {
podWaitingForPreemption2.Spec.Priority = &priority2
podWaitingForPreemption2.Annotations = map[string]string{scheduler_util.NominatedNodeAnnotationKey: "node1"}
- res := FilterOutExpendablePods([]*apiv1.Pod{p1, p2, podWaitingForPreemption1, podWaitingForPreemption2}, 0)
+ res := filterOutExpendablePods([]*apiv1.Pod{p1, p2, podWaitingForPreemption1, podWaitingForPreemption2}, 0)
assert.Equal(t, 3, len(res))
assert.Equal(t, p1, res[0])
assert.Equal(t, p2, res[1])
@ -279,7 +279,7 @@ func TestFilterSchedulablePodsForNode(t *testing.T) {
PredicateChecker: simulator.NewTestPredicateChecker(),
}
- res := CheckPodsSchedulableOnNode(context, unschedulablePods, "T1-abc", tni)
+ res := checkPodsSchedulableOnNode(context, unschedulablePods, "T1-abc", tni)
wantedSchedulable := []*apiv1.Pod{p1, p3_1, p3_2}
wantedUnschedulable := []*apiv1.Pod{p2_1, p2_2}
@ -341,7 +341,7 @@ func TestGetNodeInfosForGroups(t *testing.T) {
predicateChecker := simulator.NewTestPredicateChecker()
- res, err := GetNodeInfosForGroups([]*apiv1.Node{n1, n2, n3, n4}, provider1, registry,
+ res, err := getNodeInfosForGroups([]*apiv1.Node{n1, n2, n3, n4}, provider1, registry,
[]*appsv1.DaemonSet{}, predicateChecker)
assert.NoError(t, err)
assert.Equal(t, 4, len(res))
@ -355,7 +355,7 @@ func TestGetNodeInfosForGroups(t *testing.T) {
assert.True(t, found)
// Test for a nodegroup without nodes and TemplateNodeInfo not implemented by cloud proivder
- res, err = GetNodeInfosForGroups([]*apiv1.Node{}, provider2, registry,
+ res, err = getNodeInfosForGroups([]*apiv1.Node{}, provider2, registry,
[]*appsv1.DaemonSet{}, predicateChecker)
assert.NoError(t, err)
assert.Equal(t, 0, len(res))