Change parameter order of TemplateNodeInfoProvider
Every other processor (and, I think, every function in CA?) that takes AutoscalingContext has it as the first parameter. Changing the new processor for consistency.
commit a0109324a2
parent 39b10bf7a7
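The change boils down to moving the *context.AutoscalingContext argument to the front of Process. As a minimal sketch (not part of this commit), an implementation that satisfies the reordered interface would look roughly like the following; staticTemplateNodeInfoProvider is a hypothetical type, and the import paths are assumed to match the ones cluster-autoscaler already uses elsewhere:

// A minimal sketch (not part of this commit) of a provider that satisfies the
// reordered interface.
package example

import (
	appsv1 "k8s.io/api/apps/v1"
	apiv1 "k8s.io/api/core/v1"
	"k8s.io/autoscaler/cluster-autoscaler/context"
	"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
	"k8s.io/autoscaler/cluster-autoscaler/utils/taints"
	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
)

// staticTemplateNodeInfoProvider is a hypothetical implementation that returns
// a precomputed nodeInfo map; it exists only to show the new parameter order.
type staticTemplateNodeInfoProvider struct {
	result map[string]*schedulerframework.NodeInfo
}

// Process now takes the AutoscalingContext first, like the other CA processors.
func (p *staticTemplateNodeInfoProvider) Process(ctx *context.AutoscalingContext, nodes []*apiv1.Node, daemonsets []*appsv1.DaemonSet, ignoredTaints taints.TaintKeySet) (map[string]*schedulerframework.NodeInfo, errors.AutoscalerError) {
	return p.result, nil
}

// CleanUp cleans up the provider's internal structures (nothing to do here).
func (p *staticTemplateNodeInfoProvider) CleanUp() {}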
@@ -529,7 +529,7 @@ func runSimpleScaleUpTest(t *testing.T, config *scaleTestConfig) *scaleTestResul
 }
 context.ExpanderStrategy = expander

-nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider().Process(nodes, &context, []*appsv1.DaemonSet{}, nil)
+nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider().Process(&context, nodes, []*appsv1.DaemonSet{}, nil)
 clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())
 clusterState.UpdateNodes(nodes, nodeInfos, time.Now())

@@ -688,7 +688,7 @@ func TestScaleUpUnhealthy(t *testing.T) {
 assert.NoError(t, err)

 nodes := []*apiv1.Node{n1, n2}
-nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider().Process(nodes, &context, []*appsv1.DaemonSet{}, nil)
+nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider().Process(&context, nodes, []*appsv1.DaemonSet{}, nil)
 clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())
 clusterState.UpdateNodes(nodes, nodeInfos, time.Now())
 p3 := BuildTestPod("p-new", 550, 0)

@@ -728,7 +728,7 @@ func TestScaleUpNoHelp(t *testing.T) {
 assert.NoError(t, err)

 nodes := []*apiv1.Node{n1}
-nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider().Process(nodes, &context, []*appsv1.DaemonSet{}, nil)
+nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider().Process(&context, nodes, []*appsv1.DaemonSet{}, nil)
 clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())
 clusterState.UpdateNodes(nodes, nodeInfos, time.Now())
 p3 := BuildTestPod("p-new", 500, 0)

@@ -793,7 +793,7 @@ func TestScaleUpBalanceGroups(t *testing.T) {
 context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil)
 assert.NoError(t, err)

-nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider().Process(nodes, &context, []*appsv1.DaemonSet{}, nil)
+nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider().Process(&context, nodes, []*appsv1.DaemonSet{}, nil)
 clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())
 clusterState.UpdateNodes(nodes, nodeInfos, time.Now())

@@ -861,7 +861,7 @@ func TestScaleUpAutoprovisionedNodeGroup(t *testing.T) {
 processors.NodeGroupManager = &mockAutoprovisioningNodeGroupManager{t, 0}

 nodes := []*apiv1.Node{}
-nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider().Process(nodes, &context, []*appsv1.DaemonSet{}, nil)
+nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider().Process(&context, nodes, []*appsv1.DaemonSet{}, nil)

 scaleUpStatus, err := ScaleUp(&context, processors, clusterState, []*apiv1.Pod{p1}, nodes, []*appsv1.DaemonSet{}, nodeInfos, nil)
 assert.NoError(t, err)

@@ -914,7 +914,7 @@ func TestScaleUpBalanceAutoprovisionedNodeGroups(t *testing.T) {
 processors.NodeGroupManager = &mockAutoprovisioningNodeGroupManager{t, 2}

 nodes := []*apiv1.Node{}
-nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider().Process(nodes, &context, []*appsv1.DaemonSet{}, nil)
+nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider().Process(&context, nodes, []*appsv1.DaemonSet{}, nil)

 scaleUpStatus, err := ScaleUp(&context, processors, clusterState, []*apiv1.Pod{p1, p2, p3}, nodes, []*appsv1.DaemonSet{}, nodeInfos, nil)
 assert.NoError(t, err)

@@ -277,7 +277,7 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) errors.AutoscalerError
 return typedErr.AddPrefix("Initialize ClusterSnapshot")
 }

-nodeInfosForGroups, autoscalerError := a.processors.TemplateNodeInfoProvider.Process(readyNodes, autoscalingContext, daemonsets, a.ignoredTaints)
+nodeInfosForGroups, autoscalerError := a.processors.TemplateNodeInfoProvider.Process(autoscalingContext, readyNodes, daemonsets, a.ignoredTaints)
 if autoscalerError != nil {
 klog.Errorf("Failed to get node infos for groups: %v", autoscalerError)
 return autoscalerError.AddPrefix("failed to build node infos for node groups: ")

@@ -51,7 +51,7 @@ func (p *MixedTemplateNodeInfoProvider) CleanUp() {
 }

 // Process returns the nodeInfos set for this cluster
-func (p *MixedTemplateNodeInfoProvider) Process(nodes []*apiv1.Node, ctx *context.AutoscalingContext, daemonsets []*appsv1.DaemonSet, ignoredTaints taints.TaintKeySet) (map[string]*schedulerframework.NodeInfo, errors.AutoscalerError) {
+func (p *MixedTemplateNodeInfoProvider) Process(ctx *context.AutoscalingContext, nodes []*apiv1.Node, daemonsets []*appsv1.DaemonSet, ignoredTaints taints.TaintKeySet) (map[string]*schedulerframework.NodeInfo, errors.AutoscalerError) {
 // TODO(mwielgus): This returns map keyed by url, while most code (including scheduler) uses node.Name for a key.
 // TODO(mwielgus): Review error policy - sometimes we may continue with partial errors.
 result := make(map[string]*schedulerframework.NodeInfo)

@@ -76,7 +76,7 @@ func TestGetNodeInfosForGroups(t *testing.T) {
 ListerRegistry: registry,
 },
 }
-res, err := NewMixedTemplateNodeInfoProvider().Process([]*apiv1.Node{unready4, unready3, ready2, ready1}, &ctx, []*appsv1.DaemonSet{}, nil)
+res, err := NewMixedTemplateNodeInfoProvider().Process(&ctx, []*apiv1.Node{unready4, unready3, ready2, ready1}, []*appsv1.DaemonSet{}, nil)
 assert.NoError(t, err)
 assert.Equal(t, 4, len(res))
 info, found := res["ng1"]

@@ -100,7 +100,7 @@ func TestGetNodeInfosForGroups(t *testing.T) {
 ListerRegistry: registry,
 },
 }
-res, err = NewMixedTemplateNodeInfoProvider().Process([]*apiv1.Node{}, &ctx, []*appsv1.DaemonSet{}, nil)
+res, err = NewMixedTemplateNodeInfoProvider().Process(&ctx, []*apiv1.Node{}, []*appsv1.DaemonSet{}, nil)
 assert.NoError(t, err)
 assert.Equal(t, 0, len(res))
 }

@@ -159,7 +159,7 @@ func TestGetNodeInfosForGroupsCache(t *testing.T) {
 },
 }
 niProcessor := NewMixedTemplateNodeInfoProvider()
-res, err := niProcessor.Process([]*apiv1.Node{unready4, unready3, ready2, ready1}, &ctx, []*appsv1.DaemonSet{}, nil)
+res, err := niProcessor.Process(&ctx, []*apiv1.Node{unready4, unready3, ready2, ready1}, []*appsv1.DaemonSet{}, nil)
 assert.NoError(t, err)
 // Check results
 assert.Equal(t, 4, len(res))

@@ -193,7 +193,7 @@ func TestGetNodeInfosForGroupsCache(t *testing.T) {
 assert.Equal(t, "ng3", lastDeletedGroup)

 // Check cache with all nodes removed
-res, err = niProcessor.Process([]*apiv1.Node{unready4, unready3, ready2, ready1}, &ctx, []*appsv1.DaemonSet{}, nil)
+res, err = niProcessor.Process(&ctx, []*apiv1.Node{unready4, unready3, ready2, ready1}, []*appsv1.DaemonSet{}, nil)
 assert.NoError(t, err)
 // Check results
 assert.Equal(t, 2, len(res))

@@ -215,7 +215,7 @@ func TestGetNodeInfosForGroupsCache(t *testing.T) {
 infoNg4Node6 := schedulerframework.NewNodeInfo()
 infoNg4Node6.SetNode(ready6.DeepCopy())
 niProcessor.nodeInfoCache = map[string]*schedulerframework.NodeInfo{"ng4": infoNg4Node6}
-res, err = niProcessor.Process([]*apiv1.Node{unready4, unready3, ready2, ready1}, &ctx, []*appsv1.DaemonSet{}, nil)
+res, err = niProcessor.Process(&ctx, []*apiv1.Node{unready4, unready3, ready2, ready1}, []*appsv1.DaemonSet{}, nil)
 // Check if cache was used
 assert.NoError(t, err)
 assert.Equal(t, 2, len(res))

@@ -29,7 +29,7 @@ import (
 // TemplateNodeInfoProvider is provides the initial nodeInfos set.
 type TemplateNodeInfoProvider interface {
 // Process returns a map of nodeInfos for node groups.
-Process(nodes []*apiv1.Node, ctx *context.AutoscalingContext, daemonsets []*appsv1.DaemonSet, ignoredTaints taints.TaintKeySet) (map[string]*schedulerframework.NodeInfo, errors.AutoscalerError)
+Process(ctx *context.AutoscalingContext, nodes []*apiv1.Node, daemonsets []*appsv1.DaemonSet, ignoredTaints taints.TaintKeySet) (map[string]*schedulerframework.NodeInfo, errors.AutoscalerError)
 // CleanUp cleans up processor's internal structures.
 CleanUp()
 }
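
For out-of-tree implementations of this interface, a compile-time assertion makes the reorder show up at build time rather than at runtime. Another sketch, not part of this commit: it reuses the hypothetical staticTemplateNodeInfoProvider from the sketch above, and the import path is assumed from the nodeinfosprovider alias the tests use.

import "k8s.io/autoscaler/cluster-autoscaler/processors/nodeinfosprovider"

// Fails to compile if staticTemplateNodeInfoProvider stops matching the
// reordered TemplateNodeInfoProvider interface (e.g. old parameter order).
var _ nodeinfosprovider.TemplateNodeInfoProvider = (*staticTemplateNodeInfoProvider)(nil)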