Clean up NodeGroupConfigProcessor interface

Bartłomiej Wróblewski 2023-08-03 11:22:29 +00:00
parent 9adef5af0e
commit e39d1b028d
13 changed files with 83 additions and 88 deletions
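In short, the per-NodeGroup getters on NodeGroupConfigProcessor no longer take an *context.AutoscalingContext; the delegating implementation is constructed with the config.NodeGroupAutoscalingOptions defaults it needs instead. A minimal sketch of the new call pattern (illustrative only, not taken from the diff; the wrapper function, variable names, and option values are made up, and the import paths are inferred from the package names used below):

package example

import (
	"time"

	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
	"k8s.io/autoscaler/cluster-autoscaler/config"
	"k8s.io/autoscaler/cluster-autoscaler/processors/nodegroupconfig"
)

// exampleUsage shows the post-cleanup wiring: defaults go into the constructor
// once, and each getter then needs only the node group.
func exampleUsage(ng cloudprovider.NodeGroup) (time.Duration, error) {
	defaults := config.NodeGroupAutoscalingOptions{
		ScaleDownUnneededTime:         10 * time.Minute, // made-up default
		ScaleDownUtilizationThreshold: 0.5,              // made-up default
	}
	p := nodegroupconfig.NewDefaultNodeGroupConfigProcessor(defaults)
	return p.GetScaleDownUnneededTime(ng) // was GetScaleDownUnneededTime(autoscalingContext, ng) before this commit
}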

View File

@ -36,5 +36,5 @@ type defultMaxNodeProvisionTimeProvider struct {
// GetMaxNodeProvisionTime returns MaxNodeProvisionTime value that should be used for the given NodeGroup.
func (p *defultMaxNodeProvisionTimeProvider) GetMaxNodeProvisionTime(nodeGroup cloudprovider.NodeGroup) (time.Duration, error) {
- return p.nodeGroupConfigProcessor.GetMaxNodeProvisionTime(p.context, nodeGroup)
+ return p.nodeGroupConfigProcessor.GetMaxNodeProvisionTime(nodeGroup)
}

View File

@ -94,7 +94,7 @@ func NewAutoscaler(opts AutoscalerOptions) (Autoscaler, errors.AutoscalerError)
// Initialize default options if not provided.
func initializeDefaultOptions(opts *AutoscalerOptions) error {
if opts.Processors == nil {
- opts.Processors = ca_processors.DefaultProcessors()
+ opts.Processors = ca_processors.DefaultProcessors(opts.AutoscalingOptions)
}
if opts.AutoscalingKubeClients == nil {
opts.AutoscalingKubeClients = context.NewAutoscalingKubeClients(opts.AutoscalingOptions, opts.KubeClient, opts.EventsKubeClient)

View File

@ -59,7 +59,7 @@ type Actuator struct {
// from NodeGroupConfigProcessor interface
type actuatorNodeGroupConfigGetter interface {
// GetIgnoreDaemonSetsUtilization returns IgnoreDaemonSetsUtilization value that should be used for a given NodeGroup.
- GetIgnoreDaemonSetsUtilization(context *context.AutoscalingContext, nodeGroup cloudprovider.NodeGroup) (bool, error)
+ GetIgnoreDaemonSetsUtilization(nodeGroup cloudprovider.NodeGroup) (bool, error)
}
// NewActuator returns a new instance of Actuator.
@ -274,7 +274,7 @@ func (a *Actuator) scaleDownNodeToReport(node *apiv1.Node, drain bool) (*status.
return nil, err
}
- ignoreDaemonSetsUtilization, err := a.configGetter.GetIgnoreDaemonSetsUtilization(a.ctx, nodeGroup)
+ ignoreDaemonSetsUtilization, err := a.configGetter.GetIgnoreDaemonSetsUtilization(nodeGroup)
if err != nil {
return nil, err
}

View File

@ -979,7 +979,7 @@ func TestStartDeletion(t *testing.T) {
ctx: &ctx, clusterState: csr, nodeDeletionTracker: ndt,
nodeDeletionScheduler: NewGroupDeletionScheduler(&ctx, ndt, ndb, evictor),
budgetProcessor: budgets.NewScaleDownBudgetProcessor(&ctx),
- configGetter: nodegroupconfig.NewDefaultNodeGroupConfigProcessor(),
+ configGetter: nodegroupconfig.NewDefaultNodeGroupConfigProcessor(ctx.NodeGroupDefaults),
}
gotStatus, gotErr := actuator.StartDeletion(allEmptyNodes, allDrainNodes)
if diff := cmp.Diff(tc.wantErr, gotErr, cmpopts.EquateErrors()); diff != "" {

View File

@ -46,11 +46,11 @@ type Checker struct {
type nodeGroupConfigGetter interface {
// GetScaleDownUtilizationThreshold returns ScaleDownUtilizationThreshold value that should be used for a given NodeGroup.
- GetScaleDownUtilizationThreshold(context *context.AutoscalingContext, nodeGroup cloudprovider.NodeGroup) (float64, error)
+ GetScaleDownUtilizationThreshold(nodeGroup cloudprovider.NodeGroup) (float64, error)
// GetScaleDownGpuUtilizationThreshold returns ScaleDownGpuUtilizationThreshold value that should be used for a given NodeGroup.
- GetScaleDownGpuUtilizationThreshold(context *context.AutoscalingContext, nodeGroup cloudprovider.NodeGroup) (float64, error)
+ GetScaleDownGpuUtilizationThreshold(nodeGroup cloudprovider.NodeGroup) (float64, error)
// GetIgnoreDaemonSetsUtilization returns IgnoreDaemonSetsUtilization value that should be used for a given NodeGroup.
- GetIgnoreDaemonSetsUtilization(context *context.AutoscalingContext, nodeGroup cloudprovider.NodeGroup) (bool, error)
+ GetIgnoreDaemonSetsUtilization(nodeGroup cloudprovider.NodeGroup) (bool, error)
}
// NewChecker creates a new Checker object.
@ -132,7 +132,7 @@ func (c *Checker) unremovableReasonAndNodeUtilization(context *context.Autoscali
return simulator.NotAutoscaled, nil
}
- ignoreDaemonSetsUtilization, err := c.configGetter.GetIgnoreDaemonSetsUtilization(context, nodeGroup)
+ ignoreDaemonSetsUtilization, err := c.configGetter.GetIgnoreDaemonSetsUtilization(nodeGroup)
if err != nil {
klog.Warningf("Couldn't retrieve `IgnoreDaemonSetsUtilization` option for node %v: %v", node.Name, err)
return simulator.UnexpectedError, nil
@ -174,12 +174,12 @@ func (c *Checker) isNodeBelowUtilizationThreshold(context *context.AutoscalingCo
var err error
gpuConfig := context.CloudProvider.GetNodeGpuConfig(node)
if gpuConfig != nil {
- threshold, err = c.configGetter.GetScaleDownGpuUtilizationThreshold(context, nodeGroup)
+ threshold, err = c.configGetter.GetScaleDownGpuUtilizationThreshold(nodeGroup)
if err != nil {
return false, err
}
} else {
- threshold, err = c.configGetter.GetScaleDownUtilizationThreshold(context, nodeGroup)
+ threshold, err = c.configGetter.GetScaleDownUtilizationThreshold(nodeGroup)
if err != nil {
return false, err
}
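Because these getters no longer need an AutoscalingContext, a test double for nodeGroupConfigGetter reduces to returning fixed values. A hypothetical stub (not part of the commit; it assumes it sits in the same package as the interface above, with cloudprovider imported):

// fixedConfigGetter is an illustrative stub satisfying nodeGroupConfigGetter.
type fixedConfigGetter struct {
	threshold    float64
	gpuThreshold float64
	ignoreDS     bool
}

// Each method ignores the node group and returns the configured value.
func (f *fixedConfigGetter) GetScaleDownUtilizationThreshold(cloudprovider.NodeGroup) (float64, error) {
	return f.threshold, nil
}

func (f *fixedConfigGetter) GetScaleDownGpuUtilizationThreshold(cloudprovider.NodeGroup) (float64, error) {
	return f.gpuThreshold, nil
}

func (f *fixedConfigGetter) GetIgnoreDaemonSetsUtilization(cloudprovider.NodeGroup) (bool, error) {
	return f.ignoreDS, nil
}

Such a stub could be handed to NewChecker in tests without building a full AutoscalingContext, which is the simplification this signature change buys.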

View File

@ -154,8 +154,6 @@ func TestFilterOutUnremovable(t *testing.T) {
tc := tc
t.Run(tc.desc, func(t *testing.T) {
t.Parallel()
- s := nodegroupconfig.DelegatingNodeGroupConfigProcessor{}
- c := NewChecker(&s)
options := config.AutoscalingOptions{
UnremovableNodeRecheckTimeout: 5 * time.Minute,
ScaleDownUnreadyEnabled: tc.scaleDownUnready,
@ -167,6 +165,8 @@ func TestFilterOutUnremovable(t *testing.T) {
IgnoreDaemonSetsUtilization: tc.ignoreDaemonSetsUtilization,
},
}
+ s := nodegroupconfig.NewDefaultNodeGroupConfigProcessor(options.NodeGroupDefaults)
+ c := NewChecker(s)
provider := testprovider.NewTestCloudProvider(nil, nil)
provider.AddNodeGroup("ng1", 1, 10, 2)
for _, n := range tc.nodes {

View File

@ -49,9 +49,9 @@ type node struct {
type scaleDownTimeGetter interface {
// GetScaleDownUnneededTime returns ScaleDownUnneededTime value that should be used for a given NodeGroup.
- GetScaleDownUnneededTime(context *context.AutoscalingContext, nodeGroup cloudprovider.NodeGroup) (time.Duration, error)
+ GetScaleDownUnneededTime(nodeGroup cloudprovider.NodeGroup) (time.Duration, error)
// GetScaleDownUnreadyTime returns ScaleDownUnreadyTime value that should be used for a given NodeGroup.
- GetScaleDownUnreadyTime(context *context.AutoscalingContext, nodeGroup cloudprovider.NodeGroup) (time.Duration, error)
+ GetScaleDownUnreadyTime(nodeGroup cloudprovider.NodeGroup) (time.Duration, error)
}
// NewNodes returns a new initialized Nodes object.
@ -162,7 +162,7 @@ func (n *Nodes) unremovableReason(context *context.AutoscalingContext, v *node,
if ready {
// Check how long a ready node was underutilized.
- unneededTime, err := n.sdtg.GetScaleDownUnneededTime(context, nodeGroup)
+ unneededTime, err := n.sdtg.GetScaleDownUnneededTime(nodeGroup)
if err != nil {
klog.Errorf("Error trying to get ScaleDownUnneededTime for node %s (in group: %s)", node.Name, nodeGroup.Id())
return simulator.UnexpectedError
@ -172,7 +172,7 @@ func (n *Nodes) unremovableReason(context *context.AutoscalingContext, v *node,
}
} else {
// Unready nodes may be deleted after a different time than underutilized nodes.
- unreadyTime, err := n.sdtg.GetScaleDownUnreadyTime(context, nodeGroup)
+ unreadyTime, err := n.sdtg.GetScaleDownUnreadyTime(nodeGroup)
if err != nil {
klog.Errorf("Error trying to get ScaleDownUnreadyTime for node %s (in group: %s)", node.Name, nodeGroup.Id())
return simulator.UnexpectedError

View File

@ -25,7 +25,6 @@ import (
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test"
"k8s.io/autoscaler/cluster-autoscaler/config"
- "k8s.io/autoscaler/cluster-autoscaler/context"
"k8s.io/autoscaler/cluster-autoscaler/core/scaledown/resource"
"k8s.io/autoscaler/cluster-autoscaler/core/scaledown/status"
. "k8s.io/autoscaler/cluster-autoscaler/core/test"
@ -235,10 +234,10 @@ func (f *fakeActuationStatus) DeletionsCount(nodeGroup string) int {
type fakeScaleDownTimeGetter struct{}
- func (f *fakeScaleDownTimeGetter) GetScaleDownUnneededTime(context *context.AutoscalingContext, nodeGroup cloudprovider.NodeGroup) (time.Duration, error) {
+ func (f *fakeScaleDownTimeGetter) GetScaleDownUnneededTime(cloudprovider.NodeGroup) (time.Duration, error) {
return 0 * time.Second, nil
}
- func (f *fakeScaleDownTimeGetter) GetScaleDownUnreadyTime(context *context.AutoscalingContext, nodeGroup cloudprovider.NodeGroup) (time.Duration, error) {
+ func (f *fakeScaleDownTimeGetter) GetScaleDownUnreadyTime(cloudprovider.NodeGroup) (time.Duration, error) {
return 0 * time.Second, nil
}

View File

@ -186,7 +186,7 @@ func NewTestProcessors(context *context.AutoscalingContext) *processors.Autoscal
NodeGroupManager: nodegroups.NewDefaultNodeGroupManager(),
NodeInfoProcessor: nodeinfos.NewDefaultNodeInfoProcessor(),
TemplateNodeInfoProvider: nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false),
- NodeGroupConfigProcessor: nodegroupconfig.NewDefaultNodeGroupConfigProcessor(),
+ NodeGroupConfigProcessor: nodegroupconfig.NewDefaultNodeGroupConfigProcessor(context.NodeGroupDefaults),
CustomResourcesProcessor: customresources.NewDefaultCustomResourcesProcessor(),
ActionableClusterProcessor: actionablecluster.NewDefaultActionableClusterProcessor(),
ScaleDownCandidatesNotifier: scaledowncandidates.NewObserversList(),

View File

@ -452,7 +452,7 @@ func buildAutoscaler(debuggingSnapshotter debuggingsnapshot.DebuggingSnapshotter
DeleteOptions: deleteOptions,
}
- opts.Processors = ca_processors.DefaultProcessors()
+ opts.Processors = ca_processors.DefaultProcessors(autoscalingOptions)
opts.Processors.TemplateNodeInfoProvider = nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nodeInfoCacheExpireTime, *forceDaemonSets)
opts.Processors.PodListProcessor = podlistprocessor.NewDefaultPodListProcessor(opts.PredicateChecker)
scaleDownCandidatesComparers := []scaledowncandidates.CandidatesComparer{}

View File

@ -20,23 +20,23 @@ import (
"time"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
- "k8s.io/autoscaler/cluster-autoscaler/context"
+ "k8s.io/autoscaler/cluster-autoscaler/config"
)
// NodeGroupConfigProcessor provides config values for a particular NodeGroup.
type NodeGroupConfigProcessor interface {
// GetScaleDownUnneededTime returns ScaleDownUnneededTime value that should be used for a given NodeGroup.
- GetScaleDownUnneededTime(context *context.AutoscalingContext, nodeGroup cloudprovider.NodeGroup) (time.Duration, error)
+ GetScaleDownUnneededTime(nodeGroup cloudprovider.NodeGroup) (time.Duration, error)
// GetScaleDownUnreadyTime returns ScaleDownUnreadyTime value that should be used for a given NodeGroup.
- GetScaleDownUnreadyTime(context *context.AutoscalingContext, nodeGroup cloudprovider.NodeGroup) (time.Duration, error)
+ GetScaleDownUnreadyTime(nodeGroup cloudprovider.NodeGroup) (time.Duration, error)
// GetScaleDownUtilizationThreshold returns ScaleDownUtilizationThreshold value that should be used for a given NodeGroup.
- GetScaleDownUtilizationThreshold(context *context.AutoscalingContext, nodeGroup cloudprovider.NodeGroup) (float64, error)
+ GetScaleDownUtilizationThreshold(nodeGroup cloudprovider.NodeGroup) (float64, error)
// GetScaleDownGpuUtilizationThreshold returns ScaleDownGpuUtilizationThreshold value that should be used for a given NodeGroup.
- GetScaleDownGpuUtilizationThreshold(context *context.AutoscalingContext, nodeGroup cloudprovider.NodeGroup) (float64, error)
+ GetScaleDownGpuUtilizationThreshold(nodeGroup cloudprovider.NodeGroup) (float64, error)
// GetMaxNodeProvisionTime returns MaxNodeProvisionTime value that should be used for a given NodeGroup.
- GetMaxNodeProvisionTime(context *context.AutoscalingContext, nodeGroup cloudprovider.NodeGroup) (time.Duration, error)
+ GetMaxNodeProvisionTime(nodeGroup cloudprovider.NodeGroup) (time.Duration, error)
// GetIgnoreDaemonSetsUtilization returns IgnoreDaemonSetsUtilization value that should be used for a given NodeGroup.
- GetIgnoreDaemonSetsUtilization(context *context.AutoscalingContext, nodeGroup cloudprovider.NodeGroup) (bool, error)
+ GetIgnoreDaemonSetsUtilization(nodeGroup cloudprovider.NodeGroup) (bool, error)
// CleanUp cleans up processor's internal structures.
CleanUp()
}
@ -45,76 +45,77 @@ type NodeGroupConfigProcessor interface {
// for each NodeGroup. If NodeGroup doesn't return a value default config is
// used instead.
type DelegatingNodeGroupConfigProcessor struct {
+ nodeGroupDefaults config.NodeGroupAutoscalingOptions
}
// GetScaleDownUnneededTime returns ScaleDownUnneededTime value that should be used for a given NodeGroup.
- func (p *DelegatingNodeGroupConfigProcessor) GetScaleDownUnneededTime(context *context.AutoscalingContext, nodeGroup cloudprovider.NodeGroup) (time.Duration, error) {
- ngConfig, err := nodeGroup.GetOptions(context.NodeGroupDefaults)
+ func (p *DelegatingNodeGroupConfigProcessor) GetScaleDownUnneededTime(nodeGroup cloudprovider.NodeGroup) (time.Duration, error) {
+ ngConfig, err := nodeGroup.GetOptions(p.nodeGroupDefaults)
if err != nil && err != cloudprovider.ErrNotImplemented {
return time.Duration(0), err
}
if ngConfig == nil || err == cloudprovider.ErrNotImplemented {
- return context.NodeGroupDefaults.ScaleDownUnneededTime, nil
+ return p.nodeGroupDefaults.ScaleDownUnneededTime, nil
}
return ngConfig.ScaleDownUnneededTime, nil
}
// GetScaleDownUnreadyTime returns ScaleDownUnreadyTime value that should be used for a given NodeGroup.
- func (p *DelegatingNodeGroupConfigProcessor) GetScaleDownUnreadyTime(context *context.AutoscalingContext, nodeGroup cloudprovider.NodeGroup) (time.Duration, error) {
- ngConfig, err := nodeGroup.GetOptions(context.NodeGroupDefaults)
+ func (p *DelegatingNodeGroupConfigProcessor) GetScaleDownUnreadyTime(nodeGroup cloudprovider.NodeGroup) (time.Duration, error) {
+ ngConfig, err := nodeGroup.GetOptions(p.nodeGroupDefaults)
if err != nil && err != cloudprovider.ErrNotImplemented {
return time.Duration(0), err
}
if ngConfig == nil || err == cloudprovider.ErrNotImplemented {
- return context.NodeGroupDefaults.ScaleDownUnreadyTime, nil
+ return p.nodeGroupDefaults.ScaleDownUnreadyTime, nil
}
return ngConfig.ScaleDownUnreadyTime, nil
}
// GetScaleDownUtilizationThreshold returns ScaleDownUtilizationThreshold value that should be used for a given NodeGroup.
- func (p *DelegatingNodeGroupConfigProcessor) GetScaleDownUtilizationThreshold(context *context.AutoscalingContext, nodeGroup cloudprovider.NodeGroup) (float64, error) {
- ngConfig, err := nodeGroup.GetOptions(context.NodeGroupDefaults)
+ func (p *DelegatingNodeGroupConfigProcessor) GetScaleDownUtilizationThreshold(nodeGroup cloudprovider.NodeGroup) (float64, error) {
+ ngConfig, err := nodeGroup.GetOptions(p.nodeGroupDefaults)
if err != nil && err != cloudprovider.ErrNotImplemented {
return 0.0, err
}
if ngConfig == nil || err == cloudprovider.ErrNotImplemented {
- return context.NodeGroupDefaults.ScaleDownUtilizationThreshold, nil
+ return p.nodeGroupDefaults.ScaleDownUtilizationThreshold, nil
}
return ngConfig.ScaleDownUtilizationThreshold, nil
}
// GetScaleDownGpuUtilizationThreshold returns ScaleDownGpuUtilizationThreshold value that should be used for a given NodeGroup.
- func (p *DelegatingNodeGroupConfigProcessor) GetScaleDownGpuUtilizationThreshold(context *context.AutoscalingContext, nodeGroup cloudprovider.NodeGroup) (float64, error) {
- ngConfig, err := nodeGroup.GetOptions(context.NodeGroupDefaults)
+ func (p *DelegatingNodeGroupConfigProcessor) GetScaleDownGpuUtilizationThreshold(nodeGroup cloudprovider.NodeGroup) (float64, error) {
+ ngConfig, err := nodeGroup.GetOptions(p.nodeGroupDefaults)
if err != nil && err != cloudprovider.ErrNotImplemented {
return 0.0, err
}
if ngConfig == nil || err == cloudprovider.ErrNotImplemented {
- return context.NodeGroupDefaults.ScaleDownGpuUtilizationThreshold, nil
+ return p.nodeGroupDefaults.ScaleDownGpuUtilizationThreshold, nil
}
return ngConfig.ScaleDownGpuUtilizationThreshold, nil
}
// GetMaxNodeProvisionTime returns MaxNodeProvisionTime value that should be used for a given NodeGroup.
- func (p *DelegatingNodeGroupConfigProcessor) GetMaxNodeProvisionTime(context *context.AutoscalingContext, nodeGroup cloudprovider.NodeGroup) (time.Duration, error) {
- ngConfig, err := nodeGroup.GetOptions(context.NodeGroupDefaults)
+ func (p *DelegatingNodeGroupConfigProcessor) GetMaxNodeProvisionTime(nodeGroup cloudprovider.NodeGroup) (time.Duration, error) {
+ ngConfig, err := nodeGroup.GetOptions(p.nodeGroupDefaults)
if err != nil && err != cloudprovider.ErrNotImplemented {
return time.Duration(0), err
}
if ngConfig == nil || err == cloudprovider.ErrNotImplemented {
- return context.NodeGroupDefaults.MaxNodeProvisionTime, nil
+ return p.nodeGroupDefaults.MaxNodeProvisionTime, nil
}
return ngConfig.MaxNodeProvisionTime, nil
}
// GetIgnoreDaemonSetsUtilization returns IgnoreDaemonSetsUtilization value that should be used for a given NodeGroup.
- func (p *DelegatingNodeGroupConfigProcessor) GetIgnoreDaemonSetsUtilization(context *context.AutoscalingContext, nodeGroup cloudprovider.NodeGroup) (bool, error) {
- ngConfig, err := nodeGroup.GetOptions(context.NodeGroupDefaults)
+ func (p *DelegatingNodeGroupConfigProcessor) GetIgnoreDaemonSetsUtilization(nodeGroup cloudprovider.NodeGroup) (bool, error) {
+ ngConfig, err := nodeGroup.GetOptions(p.nodeGroupDefaults)
if err != nil && err != cloudprovider.ErrNotImplemented {
return false, err
}
if ngConfig == nil || err == cloudprovider.ErrNotImplemented {
- return context.NodeGroupDefaults.IgnoreDaemonSetsUtilization, nil
+ return p.nodeGroupDefaults.IgnoreDaemonSetsUtilization, nil
}
return ngConfig.IgnoreDaemonSetsUtilization, nil
}
@ -124,6 +125,8 @@ func (p *DelegatingNodeGroupConfigProcessor) CleanUp() {
}
// NewDefaultNodeGroupConfigProcessor returns a default instance of NodeGroupConfigProcessor.
- func NewDefaultNodeGroupConfigProcessor() NodeGroupConfigProcessor {
- return &DelegatingNodeGroupConfigProcessor{}
+ func NewDefaultNodeGroupConfigProcessor(nodeGroupDefaults config.NodeGroupAutoscalingOptions) NodeGroupConfigProcessor {
+ return &DelegatingNodeGroupConfigProcessor{
+ nodeGroupDefaults: nodeGroupDefaults,
+ }
}
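To restate the delegation rule from the doc comment above: a per-group value returned by NodeGroup.GetOptions wins, and the constructor-supplied defaults apply when GetOptions returns nil options or cloudprovider.ErrNotImplemented. A small sketch of that behaviour (illustrative only; it assumes it sits in the same package as the processor, with config, cloudprovider, and time imported, and the 15-minute default is made up):

// exampleFallback shows the default-vs-override behaviour of the delegating processor.
func exampleFallback(ng cloudprovider.NodeGroup) (time.Duration, error) {
	p := NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{
		MaxNodeProvisionTime: 15 * time.Minute, // made-up default
	})
	// A node group with no per-group override yields 15m here; one whose
	// GetOptions sets MaxNodeProvisionTime yields that value instead.
	return p.GetMaxNodeProvisionTime(ng)
}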

View File

@ -22,12 +22,10 @@ import (
"testing"
"time"
- "github.com/stretchr/testify/assert"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/mocks"
"k8s.io/autoscaler/cluster-autoscaler/config"
- "k8s.io/autoscaler/cluster-autoscaler/context"
+ "github.com/stretchr/testify/assert"
)
// This test covers all Get* methods implemented by
@ -60,8 +58,8 @@ func TestDelegatingNodeGroupConfigProcessor(t *testing.T) {
IgnoreDaemonSetsUtilization: false,
}
- testUnneededTime := func(t *testing.T, p DelegatingNodeGroupConfigProcessor, c *context.AutoscalingContext, ng cloudprovider.NodeGroup, w Want, we error) {
- res, err := p.GetScaleDownUnneededTime(c, ng)
+ testUnneededTime := func(t *testing.T, p NodeGroupConfigProcessor, ng cloudprovider.NodeGroup, w Want, we error) {
+ res, err := p.GetScaleDownUnneededTime(ng)
assert.Equal(t, err, we)
results := map[Want]time.Duration{
NIL: time.Duration(0),
@ -70,8 +68,8 @@ func TestDelegatingNodeGroupConfigProcessor(t *testing.T) {
}
assert.Equal(t, res, results[w])
}
- testUnreadyTime := func(t *testing.T, p DelegatingNodeGroupConfigProcessor, c *context.AutoscalingContext, ng cloudprovider.NodeGroup, w Want, we error) {
- res, err := p.GetScaleDownUnreadyTime(c, ng)
+ testUnreadyTime := func(t *testing.T, p NodeGroupConfigProcessor, ng cloudprovider.NodeGroup, w Want, we error) {
+ res, err := p.GetScaleDownUnreadyTime(ng)
assert.Equal(t, err, we)
results := map[Want]time.Duration{
NIL: time.Duration(0),
@ -80,8 +78,8 @@ func TestDelegatingNodeGroupConfigProcessor(t *testing.T) {
}
assert.Equal(t, res, results[w])
}
- testUtilizationThreshold := func(t *testing.T, p DelegatingNodeGroupConfigProcessor, c *context.AutoscalingContext, ng cloudprovider.NodeGroup, w Want, we error) {
- res, err := p.GetScaleDownUtilizationThreshold(c, ng)
+ testUtilizationThreshold := func(t *testing.T, p NodeGroupConfigProcessor, ng cloudprovider.NodeGroup, w Want, we error) {
+ res, err := p.GetScaleDownUtilizationThreshold(ng)
assert.Equal(t, err, we)
results := map[Want]float64{
NIL: 0.0,
@ -90,8 +88,8 @@ func TestDelegatingNodeGroupConfigProcessor(t *testing.T) {
}
assert.Equal(t, res, results[w])
}
- testGpuThreshold := func(t *testing.T, p DelegatingNodeGroupConfigProcessor, c *context.AutoscalingContext, ng cloudprovider.NodeGroup, w Want, we error) {
- res, err := p.GetScaleDownGpuUtilizationThreshold(c, ng)
+ testGpuThreshold := func(t *testing.T, p NodeGroupConfigProcessor, ng cloudprovider.NodeGroup, w Want, we error) {
+ res, err := p.GetScaleDownGpuUtilizationThreshold(ng)
assert.Equal(t, err, we)
results := map[Want]float64{
NIL: 0.0,
@ -100,8 +98,8 @@ func TestDelegatingNodeGroupConfigProcessor(t *testing.T) {
}
assert.Equal(t, res, results[w])
}
- testMaxNodeProvisionTime := func(t *testing.T, p DelegatingNodeGroupConfigProcessor, c *context.AutoscalingContext, ng cloudprovider.NodeGroup, w Want, we error) {
- res, err := p.GetMaxNodeProvisionTime(c, ng)
+ testMaxNodeProvisionTime := func(t *testing.T, p NodeGroupConfigProcessor, ng cloudprovider.NodeGroup, w Want, we error) {
+ res, err := p.GetMaxNodeProvisionTime(ng)
assert.Equal(t, err, we)
results := map[Want]time.Duration{
NIL: time.Duration(0),
@ -112,8 +110,8 @@ func TestDelegatingNodeGroupConfigProcessor(t *testing.T) {
}
// for IgnoreDaemonSetsUtilization
- testIgnoreDSUtilization := func(t *testing.T, p DelegatingNodeGroupConfigProcessor, c *context.AutoscalingContext, ng cloudprovider.NodeGroup, w Want, we error) {
- res, err := p.GetIgnoreDaemonSetsUtilization(c, ng)
+ testIgnoreDSUtilization := func(t *testing.T, p NodeGroupConfigProcessor, ng cloudprovider.NodeGroup, w Want, we error) {
+ res, err := p.GetIgnoreDaemonSetsUtilization(ng)
assert.Equal(t, err, we)
results := map[Want]bool{
NIL: false,
@ -123,30 +121,30 @@ func TestDelegatingNodeGroupConfigProcessor(t *testing.T) {
assert.Equal(t, res, results[w])
}
- funcs := map[string]func(*testing.T, DelegatingNodeGroupConfigProcessor, *context.AutoscalingContext, cloudprovider.NodeGroup, Want, error){
+ funcs := map[string]func(*testing.T, NodeGroupConfigProcessor, cloudprovider.NodeGroup, Want, error){
"ScaleDownUnneededTime": testUnneededTime,
"ScaleDownUnreadyTime": testUnreadyTime,
"ScaleDownUtilizationThreshold": testUtilizationThreshold,
"ScaleDownGpuUtilizationThreshold": testGpuThreshold,
"MaxNodeProvisionTime": testMaxNodeProvisionTime,
"IgnoreDaemonSetsUtilization": testIgnoreDSUtilization,
- "MultipleOptions": func(t *testing.T, p DelegatingNodeGroupConfigProcessor, c *context.AutoscalingContext, ng cloudprovider.NodeGroup, w Want, we error) {
- testUnneededTime(t, p, c, ng, w, we)
- testUnreadyTime(t, p, c, ng, w, we)
- testUtilizationThreshold(t, p, c, ng, w, we)
- testGpuThreshold(t, p, c, ng, w, we)
- testMaxNodeProvisionTime(t, p, c, ng, w, we)
- testIgnoreDSUtilization(t, p, c, ng, w, we)
+ "MultipleOptions": func(t *testing.T, p NodeGroupConfigProcessor, ng cloudprovider.NodeGroup, w Want, we error) {
+ testUnneededTime(t, p, ng, w, we)
+ testUnreadyTime(t, p, ng, w, we)
+ testUtilizationThreshold(t, p, ng, w, we)
+ testGpuThreshold(t, p, ng, w, we)
+ testMaxNodeProvisionTime(t, p, ng, w, we)
+ testIgnoreDSUtilization(t, p, ng, w, we)
},
- "RepeatingTheSameCallGivesConsistentResults": func(t *testing.T, p DelegatingNodeGroupConfigProcessor, c *context.AutoscalingContext, ng cloudprovider.NodeGroup, w Want, we error) {
- testUnneededTime(t, p, c, ng, w, we)
- testUnneededTime(t, p, c, ng, w, we)
+ "RepeatingTheSameCallGivesConsistentResults": func(t *testing.T, p NodeGroupConfigProcessor, ng cloudprovider.NodeGroup, w Want, we error) {
+ testUnneededTime(t, p, ng, w, we)
+ testUnneededTime(t, p, ng, w, we)
// throw in a different call
- testGpuThreshold(t, p, c, ng, w, we)
- testUnneededTime(t, p, c, ng, w, we)
+ testGpuThreshold(t, p, ng, w, we)
+ testUnneededTime(t, p, ng, w, we)
// throw in another different call
- testIgnoreDSUtilization(t, p, c, ng, w, we)
- testUnneededTime(t, p, c, ng, w, we)
+ testIgnoreDSUtilization(t, p, ng, w, we)
+ testUnneededTime(t, p, ng, w, we)
},
}
@ -180,15 +178,10 @@ func TestDelegatingNodeGroupConfigProcessor(t *testing.T) {
}
for tn, tc := range cases {
t.Run(fmt.Sprintf("[%s] %s", fname, tn), func(t *testing.T) {
- context := &context.AutoscalingContext{
- AutoscalingOptions: config.AutoscalingOptions{
- NodeGroupDefaults: tc.globalOptions,
- },
- }
ng := &mocks.NodeGroup{}
ng.On("GetOptions", tc.globalOptions).Return(tc.ngOptions, tc.ngError)
- p := DelegatingNodeGroupConfigProcessor{}
- fn(t, p, context, ng, tc.want, tc.wantError)
+ p := NewDefaultNodeGroupConfigProcessor(tc.globalOptions)
+ fn(t, p, ng, tc.want, tc.wantError)
})
}
}

View File

@ -70,7 +70,7 @@ type AutoscalingProcessors struct {
}
// DefaultProcessors returns default set of processors.
- func DefaultProcessors() *AutoscalingProcessors {
+ func DefaultProcessors(options config.AutoscalingOptions) *AutoscalingProcessors {
return &AutoscalingProcessors{
PodListProcessor: pods.NewDefaultPodListProcessor(),
NodeGroupListProcessor: nodegroups.NewDefaultNodeGroupListProcessor(),
@ -87,7 +87,7 @@ func DefaultProcessors() *AutoscalingProcessors {
AutoscalingStatusProcessor: status.NewDefaultAutoscalingStatusProcessor(),
NodeGroupManager: nodegroups.NewDefaultNodeGroupManager(),
NodeInfoProcessor: nodeinfos.NewDefaultNodeInfoProcessor(),
- NodeGroupConfigProcessor: nodegroupconfig.NewDefaultNodeGroupConfigProcessor(),
+ NodeGroupConfigProcessor: nodegroupconfig.NewDefaultNodeGroupConfigProcessor(options.NodeGroupDefaults),
CustomResourcesProcessor: customresources.NewDefaultCustomResourcesProcessor(),
ActionableClusterProcessor: actionablecluster.NewDefaultActionableClusterProcessor(),
TemplateNodeInfoProvider: nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false),
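At the top level, callers of DefaultProcessors now pass their AutoscalingOptions so that NodeGroupDefaults is captured at construction time. A sketch from the caller's side (illustrative only; it assumes the ca_processors alias and config import used elsewhere in this commit, and the threshold value is made up):

// exampleWiring shows options flowing into DefaultProcessors, which forwards
// options.NodeGroupDefaults to NewDefaultNodeGroupConfigProcessor as in the hunk above.
func exampleWiring() {
	autoscalingOptions := config.AutoscalingOptions{
		NodeGroupDefaults: config.NodeGroupAutoscalingOptions{
			ScaleDownUtilizationThreshold: 0.5, // made-up default
		},
	}
	processors := ca_processors.DefaultProcessors(autoscalingOptions)
	_ = processors.NodeGroupConfigProcessor // pre-seeded with NodeGroupDefaults; no context needed at call time
}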