Remove obsolete implementations of node-related processors

Łukasz Osipiuk 2018-09-04 13:34:26 +02:00
parent 0f38558f1c
commit 84d8f6fd31
9 changed files with 101 additions and 451 deletions

View File

@@ -17,15 +17,25 @@ limitations under the License.
package core
import (
"testing"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
"k8s.io/autoscaler/cluster-autoscaler/clusterstate/utils" "k8s.io/autoscaler/cluster-autoscaler/clusterstate/utils"
"k8s.io/autoscaler/cluster-autoscaler/config" "k8s.io/autoscaler/cluster-autoscaler/config"
"k8s.io/autoscaler/cluster-autoscaler/context" "k8s.io/autoscaler/cluster-autoscaler/context"
"k8s.io/autoscaler/cluster-autoscaler/expander/random" "k8s.io/autoscaler/cluster-autoscaler/expander/random"
"k8s.io/autoscaler/cluster-autoscaler/metrics"
"k8s.io/autoscaler/cluster-autoscaler/simulator" "k8s.io/autoscaler/cluster-autoscaler/simulator"
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
"k8s.io/autoscaler/cluster-autoscaler/utils/labels"
"github.com/stretchr/testify/assert"
apiv1 "k8s.io/api/core/v1"
kube_client "k8s.io/client-go/kubernetes" kube_client "k8s.io/client-go/kubernetes"
kube_record "k8s.io/client-go/tools/record" kube_record "k8s.io/client-go/tools/record"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
)
type nodeConfig struct {
@@ -76,5 +86,68 @@ func NewScaleTestAutoscalingContext(options config.AutoscalingOptions, fakeClien
PredicateChecker: simulator.NewTestPredicateChecker(),
ExpanderStrategy: random.NewStrategy(),
}
}
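// mockAutoprovisioningNodeGroupManager is a test double for the NodeGroupManager processor:
// it creates node groups and removes unneeded (empty) autoprovisioned ones directly through
// the cloud provider, asserting that each call succeeds.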
type mockAutoprovisioningNodeGroupManager struct {
t *testing.T
}
func (p *mockAutoprovisioningNodeGroupManager) CreateNodeGroup(context *context.AutoscalingContext, nodeGroup cloudprovider.NodeGroup) (cloudprovider.NodeGroup, errors.AutoscalerError) {
newNodeGroup, err := nodeGroup.Create()
assert.NoError(p.t, err)
metrics.RegisterNodeGroupCreation()
return newNodeGroup, nil
}
func (p *mockAutoprovisioningNodeGroupManager) RemoveUnneededNodeGroups(context *context.AutoscalingContext) error {
if !context.AutoscalingOptions.NodeAutoprovisioningEnabled {
return nil
}
nodeGroups := context.CloudProvider.NodeGroups()
for _, nodeGroup := range nodeGroups {
if !nodeGroup.Autoprovisioned() {
continue
}
targetSize, err := nodeGroup.TargetSize()
assert.NoError(p.t, err)
if targetSize > 0 {
continue
}
nodes, err := nodeGroup.Nodes()
assert.NoError(p.t, err)
if len(nodes) > 0 {
continue
}
err = nodeGroup.Delete()
assert.NoError(p.t, err)
}
return nil
}
func (p *mockAutoprovisioningNodeGroupManager) CleanUp() {
}
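// mockAutoprovisioningNodeGroupListProcessor is a test double for the NodeGroupListProcessor
// processor: for every available machine type it adds one autoprovisioned candidate node group
// (with its template NodeInfo) to the lists considered during scale-up.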
type mockAutoprovisioningNodeGroupListProcessor struct {
t *testing.T
}
func (p *mockAutoprovisioningNodeGroupListProcessor) Process(context *context.AutoscalingContext, nodeGroups []cloudprovider.NodeGroup, nodeInfos map[string]*schedulercache.NodeInfo,
unschedulablePods []*apiv1.Pod) ([]cloudprovider.NodeGroup, map[string]*schedulercache.NodeInfo, error) {
machines, err := context.CloudProvider.GetAvailableMachineTypes()
assert.NoError(p.t, err)
bestLabels := labels.BestLabelSet(unschedulablePods)
for _, machineType := range machines {
nodeGroup, err := context.CloudProvider.NewNodeGroup(machineType, bestLabels, map[string]string{}, []apiv1.Taint{}, map[string]resource.Quantity{})
assert.NoError(p.t, err)
nodeInfo, err := nodeGroup.TemplateNodeInfo()
assert.NoError(p.t, err)
nodeInfos[nodeGroup.Id()] = nodeInfo
nodeGroups = append(nodeGroups, nodeGroup)
}
return nodeGroups, nodeInfos, nil
}
func (p *mockAutoprovisioningNodeGroupListProcessor) CleanUp() {
}

View File

@@ -29,7 +29,6 @@ import (
"k8s.io/autoscaler/cluster-autoscaler/config"
"k8s.io/autoscaler/cluster-autoscaler/estimator"
ca_processors "k8s.io/autoscaler/cluster-autoscaler/processors"
"k8s.io/autoscaler/cluster-autoscaler/processors/nodegroups"
. "k8s.io/autoscaler/cluster-autoscaler/utils/test" . "k8s.io/autoscaler/cluster-autoscaler/utils/test"
"k8s.io/autoscaler/cluster-autoscaler/utils/units" "k8s.io/autoscaler/cluster-autoscaler/utils/units"
kube_record "k8s.io/client-go/tools/record" kube_record "k8s.io/client-go/tools/record"
@ -816,8 +815,8 @@ func TestScaleUpAutoprovisionedNodeGroup(t *testing.T) {
clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder) clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder)
processors := ca_processors.TestProcessors() processors := ca_processors.TestProcessors()
processors.NodeGroupListProcessor = nodegroups.NewAutoprovisioningNodeGroupListProcessor() processors.NodeGroupListProcessor = &mockAutoprovisioningNodeGroupListProcessor{t}
processors.NodeGroupManager = nodegroups.NewDefaultNodeGroupManager() processors.NodeGroupManager = &mockAutoprovisioningNodeGroupManager{t}
status, err := ScaleUp(&context, processors, clusterState, []*apiv1.Pod{p1}, []*apiv1.Node{}, []*extensionsv1.DaemonSet{}) status, err := ScaleUp(&context, processors, clusterState, []*apiv1.Pod{p1}, []*apiv1.Node{}, []*extensionsv1.DaemonSet{})
assert.NoError(t, err) assert.NoError(t, err)

View File

@@ -26,7 +26,6 @@ import (
"k8s.io/autoscaler/cluster-autoscaler/config"
"k8s.io/autoscaler/cluster-autoscaler/estimator"
ca_processors "k8s.io/autoscaler/cluster-autoscaler/processors"
"k8s.io/autoscaler/cluster-autoscaler/processors/nodegroups"
kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes" kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
scheduler_util "k8s.io/autoscaler/cluster-autoscaler/utils/scheduler" scheduler_util "k8s.io/autoscaler/cluster-autoscaler/utils/scheduler"
. "k8s.io/autoscaler/cluster-autoscaler/utils/test" . "k8s.io/autoscaler/cluster-autoscaler/utils/test"
@ -286,6 +285,8 @@ func TestStaticAutoscalerRunOnceWithAutoprovisionedEnabled(t *testing.T) {
onScaleDownMock := &onScaleDownMock{} onScaleDownMock := &onScaleDownMock{}
onNodeGroupCreateMock := &onNodeGroupCreateMock{} onNodeGroupCreateMock := &onNodeGroupCreateMock{}
onNodeGroupDeleteMock := &onNodeGroupDeleteMock{} onNodeGroupDeleteMock := &onNodeGroupDeleteMock{}
nodeGroupManager := &mockAutoprovisioningNodeGroupManager{t}
nodeGroupListProcessor := &mockAutoprovisioningNodeGroupListProcessor{t}
n1 := BuildTestNode("n1", 100, 1000) n1 := BuildTestNode("n1", 100, 1000)
SetNodeReadyState(n1, true, time.Now()) SetNodeReadyState(n1, true, time.Now())
@ -328,7 +329,8 @@ func TestStaticAutoscalerRunOnceWithAutoprovisionedEnabled(t *testing.T) {
assert.NotNil(t, provider) assert.NotNil(t, provider)
processors := ca_processors.TestProcessors() processors := ca_processors.TestProcessors()
processors.NodeGroupListProcessor = nodegroups.NewAutoprovisioningNodeGroupListProcessor() processors.NodeGroupManager = nodeGroupManager
processors.NodeGroupListProcessor = nodeGroupListProcessor
// Create context with mocked lister registry. // Create context with mocked lister registry.
options := config.AutoscalingOptions{ options := config.AutoscalingOptions{

View File

@@ -1,101 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package nodegroups
import (
"github.com/golang/glog"
apiv1 "k8s.io/api/core/v1"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
"k8s.io/autoscaler/cluster-autoscaler/context"
"k8s.io/autoscaler/cluster-autoscaler/metrics"
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
)
// AutoprovisioningNodeGroupManager is responsible for creating/deleting autoprovisioned node groups.
type AutoprovisioningNodeGroupManager struct {
}
// NewAutoprovisioningNodeGroupManager creates an instance of NodeGroupManager.
func NewAutoprovisioningNodeGroupManager() NodeGroupManager {
return &AutoprovisioningNodeGroupManager{}
}
// CreateNodeGroup creates autoprovisioned node group.
func (p *AutoprovisioningNodeGroupManager) CreateNodeGroup(context *context.AutoscalingContext, nodeGroup cloudprovider.NodeGroup) (cloudprovider.NodeGroup, errors.AutoscalerError) {
if !context.AutoscalingOptions.NodeAutoprovisioningEnabled {
return nil, errors.NewAutoscalerError(errors.InternalError, "tried to create a node group %s, but autoprovisioning is disabled", nodeGroup.Id())
}
oldId := nodeGroup.Id()
newNodeGroup, err := nodeGroup.Create()
if err != nil {
context.LogRecorder.Eventf(apiv1.EventTypeWarning, "FailedToCreateNodeGroup",
"NodeAutoprovisioning: attempt to create node group %v failed: %v", oldId, err)
// TODO(maciekpytel): add some metric here after figuring out failure scenarios
return nil, errors.ToAutoscalerError(errors.CloudProviderError, err)
}
newId := newNodeGroup.Id()
if newId != oldId {
glog.V(2).Infof("Created node group %s based on template node group %s, will use new node group in scale-up", newId, oldId)
}
context.LogRecorder.Eventf(apiv1.EventTypeNormal, "CreatedNodeGroup",
"NodeAutoprovisioning: created new node group %v", newId)
metrics.RegisterNodeGroupCreation()
return newNodeGroup, nil
}
// RemoveUnneededNodeGroups removes node groups that are not needed anymore.
func (p *AutoprovisioningNodeGroupManager) RemoveUnneededNodeGroups(context *context.AutoscalingContext) error {
if !context.AutoscalingOptions.NodeAutoprovisioningEnabled {
return nil
}
nodeGroups := context.CloudProvider.NodeGroups()
for _, nodeGroup := range nodeGroups {
if !nodeGroup.Autoprovisioned() {
continue
}
targetSize, err := nodeGroup.TargetSize()
if err != nil {
return err
}
if targetSize > 0 {
continue
}
nodes, err := nodeGroup.Nodes()
if err != nil {
return err
}
if len(nodes) > 0 {
continue
}
ngId := nodeGroup.Id()
if err := nodeGroup.Delete(); err != nil {
context.LogRecorder.Eventf(apiv1.EventTypeWarning, "FailedToDeleteNodeGroup",
"NodeAutoprovisioning: attempt to delete node group %v failed: %v", ngId, err)
// TODO(maciekpytel): add some metric here after figuring out failure scenarios
return err
}
context.LogRecorder.Eventf(apiv1.EventTypeNormal, "DeletedNodeGroup",
"NodeAutoprovisioning: removed node group %v", ngId)
metrics.RegisterNodeGroupDeletion()
}
return nil
}
// CleanUp cleans up the processor's internal structures.
func (p *AutoprovisioningNodeGroupManager) CleanUp() {
}

View File

@@ -1,122 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package nodegroups
import (
"fmt"
"testing"
"github.com/golang/glog"
"github.com/stretchr/testify/assert"
testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test"
"k8s.io/autoscaler/cluster-autoscaler/clusterstate/utils"
"k8s.io/autoscaler/cluster-autoscaler/config"
"k8s.io/autoscaler/cluster-autoscaler/context"
kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
"k8s.io/client-go/kubernetes/fake"
kube_record "k8s.io/client-go/tools/record"
)
func TestAutoprovisioningNodeGroupManager(t *testing.T) {
manager := NewAutoprovisioningNodeGroupManager()
fakeClient := &fake.Clientset{}
fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
tests := []struct {
name string
createNodeGroupErr error
wantError bool
}{
{
name: "create node group",
},
{
name: "failed to create node group",
createNodeGroupErr: fmt.Errorf("some error"),
wantError: true,
},
}
for _, tc := range tests {
provider := testprovider.NewTestAutoprovisioningCloudProvider(nil, nil,
func(string) error { return tc.createNodeGroupErr }, nil, nil, nil)
context := &context.AutoscalingContext{
AutoscalingOptions: config.AutoscalingOptions{
NodeAutoprovisioningEnabled: true,
},
CloudProvider: provider,
AutoscalingKubeClients: context.AutoscalingKubeClients{
LogRecorder: fakeLogRecorder,
},
}
nodeGroup, err := provider.NewNodeGroup("T1", nil, nil, nil, nil)
assert.NoError(t, err)
_, err = manager.CreateNodeGroup(context, nodeGroup)
if tc.wantError {
if err == nil {
glog.Errorf("%s: Got no error, want error", tc.name)
}
} else {
if err != nil {
glog.Errorf("%s: Unexpected error %v", tc.name, err)
}
if len(provider.NodeGroups()) != 1 {
glog.Errorf("%s: Unexpected number of node groups %d, want 1", tc.name, len(provider.NodeGroups()))
}
}
}
}
func TestRemoveUnneededNodeGroups(t *testing.T) {
manager := NewAutoprovisioningNodeGroupManager()
n1 := BuildTestNode("n1", 1000, 1000)
n2 := BuildTestNode("n2", 1000, 1000)
provider := testprovider.NewTestAutoprovisioningCloudProvider(
nil, nil,
nil, func(id string) error {
if id == "ng2" {
return nil
}
return fmt.Errorf("Node group %s shouldn't be deleted", id)
},
nil, nil)
assert.NotNil(t, provider)
provider.AddNodeGroup("ng1", 1, 10, 1)
provider.AddAutoprovisionedNodeGroup("ng2", 0, 10, 0, "mt1")
provider.AddAutoprovisionedNodeGroup("ng3", 0, 10, 1, "mt1")
provider.AddAutoprovisionedNodeGroup("ng4", 0, 10, 0, "mt1")
provider.AddNode("ng3", n1)
provider.AddNode("ng4", n2)
fakeClient := &fake.Clientset{}
fakeRecorder := kube_util.CreateEventRecorder(fakeClient)
fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", fakeRecorder, false)
context := &context.AutoscalingContext{
AutoscalingOptions: config.AutoscalingOptions{
NodeAutoprovisioningEnabled: true,
},
CloudProvider: provider,
AutoscalingKubeClients: context.AutoscalingKubeClients{
LogRecorder: fakeLogRecorder,
},
}
assert.NoError(t, manager.RemoveUnneededNodeGroups(context))
}

View File

@@ -1,117 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package nodegroups
import (
apiv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
"k8s.io/autoscaler/cluster-autoscaler/context"
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
"k8s.io/autoscaler/cluster-autoscaler/utils/labels"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
"github.com/golang/glog"
)
// AutoprovisioningNodeGroupListProcessor adds autoprovisioning candidates to consider in scale-up.
type AutoprovisioningNodeGroupListProcessor struct {
}
// NewAutoprovisioningNodeGroupListProcessor creates an instance of NodeGroupListProcessor.
func NewAutoprovisioningNodeGroupListProcessor() NodeGroupListProcessor {
return &AutoprovisioningNodeGroupListProcessor{}
}
// Process processes lists of unschedulable and scheduled pods before scaling of the cluster.
func (p *AutoprovisioningNodeGroupListProcessor) Process(context *context.AutoscalingContext, nodeGroups []cloudprovider.NodeGroup, nodeInfos map[string]*schedulercache.NodeInfo,
unschedulablePods []*apiv1.Pod) ([]cloudprovider.NodeGroup, map[string]*schedulercache.NodeInfo, error) {
if !context.NodeAutoprovisioningEnabled {
return nodeGroups, nodeInfos, nil
}
autoprovisionedNodeGroupCount := 0
for _, group := range nodeGroups {
if group.Autoprovisioned() {
autoprovisionedNodeGroupCount++
}
}
if autoprovisionedNodeGroupCount >= context.MaxAutoprovisionedNodeGroupCount {
glog.V(4).Infof("Max autoprovisioned node group count reached")
return nodeGroups, nodeInfos, nil
}
newGroupsCount := 0
newNodeGroups := addAllMachineTypesForConfig(context, map[string]string{}, map[string]resource.Quantity{},
nodeInfos, unschedulablePods)
newGroupsCount += len(newNodeGroups)
nodeGroups = append(nodeGroups, newNodeGroups...)
gpuRequests := gpu.GetGpuRequests(unschedulablePods)
for _, gpuRequestInfo := range gpuRequests {
glog.V(4).Info("Adding node groups using GPU to NAP simulations")
extraResources := map[string]resource.Quantity{
gpu.ResourceNvidiaGPU: gpuRequestInfo.MaxRequest,
}
newNodeGroups := addAllMachineTypesForConfig(context, gpuRequestInfo.SystemLabels, extraResources,
nodeInfos, gpuRequestInfo.Pods)
newGroupsCount += len(newNodeGroups)
nodeGroups = append(nodeGroups, newNodeGroups...)
}
glog.V(4).Infof("Considering %v potential node groups in NAP simulations", newGroupsCount)
return nodeGroups, nodeInfos, nil
}
func addAllMachineTypesForConfig(context *context.AutoscalingContext, systemLabels map[string]string, extraResources map[string]resource.Quantity,
nodeInfos map[string]*schedulercache.NodeInfo, unschedulablePods []*apiv1.Pod) []cloudprovider.NodeGroup {
nodeGroups := make([]cloudprovider.NodeGroup, 0)
machines, err := context.CloudProvider.GetAvailableMachineTypes()
if err != nil {
glog.Warningf("Failed to get machine types: %v", err)
return nodeGroups
}
bestLabels := labels.BestLabelSet(unschedulablePods)
taints := make([]apiv1.Taint, 0)
for _, machineType := range machines {
nodeGroup, err := context.CloudProvider.NewNodeGroup(machineType, bestLabels, systemLabels, taints, extraResources)
if err != nil {
// We don't check if a given node group setup is allowed.
// It's fine if it isn't, just don't consider it an option.
if err != cloudprovider.ErrIllegalConfiguration {
glog.Warningf("Unable to build temporary node group for %s: %v", machineType, err)
}
continue
}
nodeInfo, err := nodeGroup.TemplateNodeInfo()
if err != nil {
glog.Warningf("Unable to build template for node group for %s: %v", nodeGroup.Id(), err)
continue
}
nodeInfos[nodeGroup.Id()] = nodeInfo
nodeGroups = append(nodeGroups, nodeGroup)
}
return nodeGroups
}
// CleanUp cleans up the processor's internal structures.
func (p *AutoprovisioningNodeGroupListProcessor) CleanUp() {
}

View File

@@ -1,102 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package nodegroups
import (
"testing"
apiv1 "k8s.io/api/core/v1"
testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test"
"k8s.io/autoscaler/cluster-autoscaler/config"
"k8s.io/autoscaler/cluster-autoscaler/context"
. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
"github.com/stretchr/testify/assert"
)
func TestAutoprovisioningNGLProcessor(t *testing.T) {
processor := NewAutoprovisioningNodeGroupListProcessor()
t1 := BuildTestNode("t1", 4000, 1000000)
ti1 := schedulercache.NewNodeInfo()
ti1.SetNode(t1)
p1 := BuildTestPod("p1", 100, 100)
n1 := BuildTestNode("ng1-xxx", 4000, 1000000)
ni1 := schedulercache.NewNodeInfo()
ni1.SetNode(n1)
provider := testprovider.NewTestAutoprovisioningCloudProvider(nil, nil,
nil, nil,
[]string{"T1"}, map[string]*schedulercache.NodeInfo{"T1": ti1})
provider.AddNodeGroup("ng1", 1, 5, 3)
context := &context.AutoscalingContext{
AutoscalingOptions: config.AutoscalingOptions{
MaxAutoprovisionedNodeGroupCount: 1,
NodeAutoprovisioningEnabled: true,
},
CloudProvider: provider,
}
nodeGroups := provider.NodeGroups()
nodeInfos := map[string]*schedulercache.NodeInfo{
"ng1": ni1,
}
var err error
nodeGroups, nodeInfos, err = processor.Process(context, nodeGroups, nodeInfos, []*apiv1.Pod{p1})
assert.NoError(t, err)
assert.Equal(t, 2, len(nodeGroups))
assert.Equal(t, 2, len(nodeInfos))
}
func TestAutoprovisioningNGLProcessorTooMany(t *testing.T) {
processor := NewAutoprovisioningNodeGroupListProcessor()
t1 := BuildTestNode("T1-abc", 4000, 1000000)
ti1 := schedulercache.NewNodeInfo()
ti1.SetNode(t1)
x1 := BuildTestNode("X1-cde", 4000, 1000000)
xi1 := schedulercache.NewNodeInfo()
xi1.SetNode(x1)
p1 := BuildTestPod("p1", 100, 100)
provider := testprovider.NewTestAutoprovisioningCloudProvider(nil, nil,
nil, nil,
[]string{"T1", "X1"},
map[string]*schedulercache.NodeInfo{"T1": ti1, "X1": xi1})
provider.AddAutoprovisionedNodeGroup("autoprovisioned-X1", 0, 1000, 0, "X1")
context := &context.AutoscalingContext{
AutoscalingOptions: config.AutoscalingOptions{
MaxAutoprovisionedNodeGroupCount: 1,
NodeAutoprovisioningEnabled: true,
},
CloudProvider: provider,
}
nodeGroups := provider.NodeGroups()
nodeInfos := map[string]*schedulercache.NodeInfo{"X1": xi1}
var err error
nodeGroups, nodeInfos, err = processor.Process(context, nodeGroups, nodeInfos, []*apiv1.Pod{p1})
assert.NoError(t, err)
assert.Equal(t, 1, len(nodeGroups))
assert.Equal(t, 1, len(nodeInfos))
}

View File

@@ -37,8 +37,7 @@ type NoOpNodeGroupListProcessor struct {
// NewDefaultNodeGroupListProcessor creates an instance of NodeGroupListProcessor.
func NewDefaultNodeGroupListProcessor() NodeGroupListProcessor {
- // TODO(maciekpytel): Use a better default
- return NewAutoprovisioningNodeGroupListProcessor()
+ return &NoOpNodeGroupListProcessor{}
}
// Process processes lists of unschedulable and scheduled pods before scaling of the cluster.

View File

@@ -29,7 +29,26 @@ type NodeGroupManager interface {
CleanUp()
}
// NoOpNodeGroupManager is a no-op implementation of NodeGroupManager.
// It does not remove any node groups and its CreateNodeGroup method always returns an error.
// To be used together with NoOpNodeGroupListProcessor.
type NoOpNodeGroupManager struct {
}
// CreateNodeGroup always returns internal error. It must not be called on NoOpNodeGroupManager.
func (*NoOpNodeGroupManager) CreateNodeGroup(context *context.AutoscalingContext, nodeGroup cloudprovider.NodeGroup) (cloudprovider.NodeGroup, errors.AutoscalerError) {
return nil, errors.NewAutoscalerError(errors.InternalError, "not implemented")
}
// RemoveUnneededNodeGroups does nothing in NoOpNodeGroupManager
func (*NoOpNodeGroupManager) RemoveUnneededNodeGroups(context *context.AutoscalingContext) error {
return nil
}
// CleanUp does nothing in NoOpNodeGroupManager
func (*NoOpNodeGroupManager) CleanUp() {}
// NewDefaultNodeGroupManager creates an instance of NodeGroupManager.
func NewDefaultNodeGroupManager() NodeGroupManager {
- return NewAutoprovisioningNodeGroupManager()
+ return &NoOpNodeGroupManager{}
}