Define ProcessorCallbacks interface
parent 63a214e65d
commit c6115b826e
@@ -22,6 +22,7 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/config"
 	"k8s.io/autoscaler/cluster-autoscaler/estimator"
 	"k8s.io/autoscaler/cluster-autoscaler/expander"
+	processor_callbacks "k8s.io/autoscaler/cluster-autoscaler/processors/callbacks"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator"
 	kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
 	kube_client "k8s.io/client-go/kubernetes"
@@ -45,6 +46,8 @@ type AutoscalingContext struct {
 	ExpanderStrategy expander.Strategy
 	// EstimatorBuilder is the builder function for node count estimator to be used.
 	EstimatorBuilder estimator.EstimatorBuilder
+	// ProcessorCallbacks is interface defining extra callback methods which can be called by processors used in extension points.
+	ProcessorCallbacks processor_callbacks.ProcessorCallbacks
 }

 // AutoscalingKubeClients contains all Kubernetes API clients,
@@ -81,7 +84,9 @@ func NewResourceLimiterFromAutoscalingOptions(options config.AutoscalingOptions)

 // NewAutoscalingContext returns an autoscaling context from all the necessary parameters passed via arguments
 func NewAutoscalingContext(options config.AutoscalingOptions, predicateChecker *simulator.PredicateChecker,
-	autoscalingKubeClients *AutoscalingKubeClients, cloudProvider cloudprovider.CloudProvider, expanderStrategy expander.Strategy, estimatorBuilder estimator.EstimatorBuilder) *AutoscalingContext {
+	autoscalingKubeClients *AutoscalingKubeClients, cloudProvider cloudprovider.CloudProvider,
+	expanderStrategy expander.Strategy, estimatorBuilder estimator.EstimatorBuilder,
+	processorCallbacks processor_callbacks.ProcessorCallbacks) *AutoscalingContext {
 	return &AutoscalingContext{
 		AutoscalingOptions: options,
 		CloudProvider:      cloudProvider,
@@ -89,6 +94,7 @@ func NewAutoscalingContext(options config.AutoscalingOptions, predicateChecker *
 		PredicateChecker:   predicateChecker,
 		ExpanderStrategy:   expanderStrategy,
 		EstimatorBuilder:   estimatorBuilder,
+		ProcessorCallbacks: processorCallbacks,
 	}
 }

@@ -127,7 +127,7 @@ func TestFindUnneededNodes(t *testing.T) {
 		ExpendablePodsPriorityCutoff: 10,
 		UnremovableNodeRecheckTimeout: 5 * time.Minute,
 	}
-	context := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, nil, provider)
+	context := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, nil, provider, nil)

 	clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())
 	sd := NewScaleDown(&context, clusterStateRegistry)
@@ -217,7 +217,7 @@ func TestFindUnneededGPUNodes(t *testing.T) {
 		ScaleDownGpuUtilizationThreshold: 0.3,
 		UnremovableNodeRecheckTimeout: 5 * time.Minute,
 	}
-	context := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, nil, provider)
+	context := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, nil, provider, nil)

 	clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())
 	sd := NewScaleDown(&context, clusterStateRegistry)
@@ -298,7 +298,7 @@ func TestPodsWithPrioritiesFindUnneededNodes(t *testing.T) {
 		ScaleDownUtilizationThreshold: 0.35,
 		ExpendablePodsPriorityCutoff: 10,
 	}
-	context := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, nil, provider)
+	context := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, nil, provider, nil)

 	clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())
 	sd := NewScaleDown(&context, clusterStateRegistry)
@@ -348,7 +348,7 @@ func TestFindUnneededMaxCandidates(t *testing.T) {
 		ScaleDownCandidatesPoolRatio: 1,
 		ScaleDownCandidatesPoolMinCount: 1000,
 	}
-	context := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, nil, provider)
+	context := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, nil, provider, nil)

 	clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())
 	sd := NewScaleDown(&context, clusterStateRegistry)
@@ -414,7 +414,7 @@ func TestFindUnneededEmptyNodes(t *testing.T) {
 		ScaleDownCandidatesPoolRatio: 1.0,
 		ScaleDownCandidatesPoolMinCount: 1000,
 	}
-	context := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, nil, provider)
+	context := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, nil, provider, nil)

 	clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())
 	sd := NewScaleDown(&context, clusterStateRegistry)
@@ -458,7 +458,7 @@ func TestFindUnneededNodePool(t *testing.T) {
 		ScaleDownCandidatesPoolRatio: 0.1,
 		ScaleDownCandidatesPoolMinCount: 10,
 	}
-	context := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, nil, provider)
+	context := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, nil, provider, nil)

 	clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())
 	sd := NewScaleDown(&context, clusterStateRegistry)
@@ -593,7 +593,7 @@ func TestDeleteNode(t *testing.T) {
 	fakeClient.Fake.AddReactor("get", "pods", podNotFoundFunc)

 	// build context
-	context := NewScaleTestAutoscalingContext(config.AutoscalingOptions{}, fakeClient, nil, provider)
+	context := NewScaleTestAutoscalingContext(config.AutoscalingOptions{}, fakeClient, nil, provider, nil)

 	clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())
 	sd := NewScaleDown(&context, clusterStateRegistry)
@@ -826,7 +826,7 @@ func TestScaleDown(t *testing.T) {
 	assert.NoError(t, err)
 	registry := kube_util.NewListerRegistry(nil, nil, nil, nil, nil, nil, nil, jobLister, nil, nil)

-	context := NewScaleTestAutoscalingContext(options, fakeClient, registry, provider)
+	context := NewScaleTestAutoscalingContext(options, fakeClient, registry, provider, nil)

 	clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())
 	scaleDown := NewScaleDown(&context, clusterStateRegistry)
@@ -1039,7 +1039,7 @@ func simpleScaleDownEmpty(t *testing.T, config *scaleTestConfig) {

 	assert.NotNil(t, provider)

-	context := NewScaleTestAutoscalingContext(config.options, fakeClient, nil, provider)
+	context := NewScaleTestAutoscalingContext(config.options, fakeClient, nil, provider, nil)

 	clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())
 	scaleDown := NewScaleDown(&context, clusterStateRegistry)
@@ -1114,7 +1114,7 @@ func TestNoScaleDownUnready(t *testing.T) {
 		ScaleDownUnreadyTime: time.Hour,
 		MaxGracefulTerminationSec: 60,
 	}
-	context := NewScaleTestAutoscalingContext(options, fakeClient, nil, provider)
+	context := NewScaleTestAutoscalingContext(options, fakeClient, nil, provider, nil)

 	// N1 is unready so it requires a bigger unneeded time.
 	clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())
@@ -1218,7 +1218,7 @@ func TestScaleDownNoMove(t *testing.T) {
 	assert.NoError(t, err)
 	registry := kube_util.NewListerRegistry(nil, nil, nil, nil, nil, nil, nil, jobLister, nil, nil)

-	context := NewScaleTestAutoscalingContext(options, fakeClient, registry, provider)
+	context := NewScaleTestAutoscalingContext(options, fakeClient, registry, provider, nil)

 	clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())
 	scaleDown := NewScaleDown(&context, clusterStateRegistry)
@@ -1467,7 +1467,7 @@ func TestSoftTaint(t *testing.T) {
 	assert.NoError(t, err)
 	registry := kube_util.NewListerRegistry(nil, nil, nil, nil, nil, nil, nil, jobLister, nil, nil)

-	context := NewScaleTestAutoscalingContext(options, fakeClient, registry, provider)
+	context := NewScaleTestAutoscalingContext(options, fakeClient, registry, provider, nil)

 	clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())
 	scaleDown := NewScaleDown(&context, clusterStateRegistry)
@@ -1577,7 +1577,7 @@ func TestSoftTaintTimeLimit(t *testing.T) {
 	assert.NoError(t, err)
 	registry := kube_util.NewListerRegistry(nil, nil, nil, nil, nil, nil, nil, jobLister, nil, nil)

-	context := NewScaleTestAutoscalingContext(options, fakeClient, registry, provider)
+	context := NewScaleTestAutoscalingContext(options, fakeClient, registry, provider, nil)

 	clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())
 	scaleDown := NewScaleDown(&context, clusterStateRegistry)

@@ -27,6 +27,7 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/estimator"
 	"k8s.io/autoscaler/cluster-autoscaler/expander/random"
 	"k8s.io/autoscaler/cluster-autoscaler/metrics"
+	processor_callbacks "k8s.io/autoscaler/cluster-autoscaler/processors/callbacks"
 	"k8s.io/autoscaler/cluster-autoscaler/processors/nodegroups"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
@@ -77,7 +78,10 @@ type scaleTestConfig struct {
 }

 // NewScaleTestAutoscalingContext creates a new test autoscaling context for scaling tests.
-func NewScaleTestAutoscalingContext(options config.AutoscalingOptions, fakeClient kube_client.Interface, listers kube_util.ListerRegistry, provider cloudprovider.CloudProvider) context.AutoscalingContext {
+func NewScaleTestAutoscalingContext(
+	options config.AutoscalingOptions, fakeClient kube_client.Interface,
+	listers kube_util.ListerRegistry, provider cloudprovider.CloudProvider,
+	processorCallbacks processor_callbacks.ProcessorCallbacks) context.AutoscalingContext {
 	fakeRecorder := kube_record.NewFakeRecorder(5)
 	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", fakeRecorder, false)
 	// Ignoring error here is safe - if a test doesn't specify valid estimatorName,
@@ -91,10 +95,11 @@ func NewScaleTestAutoscalingContext(options config.AutoscalingOptions, fakeClien
 			LogRecorder:    fakeLogRecorder,
 			ListerRegistry: listers,
 		},
-		CloudProvider:    provider,
-		PredicateChecker: simulator.NewTestPredicateChecker(),
-		ExpanderStrategy: random.NewStrategy(),
-		EstimatorBuilder: estimatorBuilder,
+		CloudProvider:      provider,
+		PredicateChecker:   simulator.NewTestPredicateChecker(),
+		ExpanderStrategy:   random.NewStrategy(),
+		EstimatorBuilder:   estimatorBuilder,
+		ProcessorCallbacks: processorCallbacks,
 	}
 }

@@ -404,7 +404,7 @@ func simpleScaleUpTest(t *testing.T, config *scaleTestConfig) {
 	assert.NotNil(t, provider)

 	// Create context with non-random expander strategy.
-	context := NewScaleTestAutoscalingContext(config.options, &fake.Clientset{}, listers, provider)
+	context := NewScaleTestAutoscalingContext(config.options, &fake.Clientset{}, listers, provider, nil)
 	expander := assertingStrategy{
 		initialNodeConfigs: config.nodes,
 		expectedScaleUpOptions: config.expectedScaleUpOptions,
@@ -497,7 +497,7 @@ func TestScaleUpNodeComingNoScale(t *testing.T) {
 		MaxCoresTotal: config.DefaultMaxClusterCores,
 		MaxMemoryTotal: config.DefaultMaxClusterMemory,
 	}
-	context := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider)
+	context := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil)

 	nodes := []*apiv1.Node{n1, n2}
 	nodeInfos, _ := getNodeInfosForGroups(nodes, nil, provider, listers, []*appsv1.DaemonSet{}, context.PredicateChecker)
@@ -543,7 +543,7 @@ func TestScaleUpNodeComingHasScale(t *testing.T) {
 	provider.AddNode("ng1", n1)
 	provider.AddNode("ng2", n2)

-	context := NewScaleTestAutoscalingContext(defaultOptions, &fake.Clientset{}, listers, provider)
+	context := NewScaleTestAutoscalingContext(defaultOptions, &fake.Clientset{}, listers, provider, nil)

 	nodes := []*apiv1.Node{n1, n2}
 	nodeInfos, _ := getNodeInfosForGroups(nodes, nil, provider, listers, []*appsv1.DaemonSet{}, context.PredicateChecker)
@@ -597,7 +597,7 @@ func TestScaleUpUnhealthy(t *testing.T) {
 		MaxCoresTotal: config.DefaultMaxClusterCores,
 		MaxMemoryTotal: config.DefaultMaxClusterMemory,
 	}
-	context := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider)
+	context := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil)

 	nodes := []*apiv1.Node{n1, n2}
 	nodeInfos, _ := getNodeInfosForGroups(nodes, nil, provider, listers, []*appsv1.DaemonSet{}, context.PredicateChecker)
@@ -636,7 +636,7 @@ func TestScaleUpNoHelp(t *testing.T) {
 		MaxCoresTotal: config.DefaultMaxClusterCores,
 		MaxMemoryTotal: config.DefaultMaxClusterMemory,
 	}
-	context := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider)
+	context := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil)

 	nodes := []*apiv1.Node{n1}
 	nodeInfos, _ := getNodeInfosForGroups(nodes, nil, provider, listers, []*appsv1.DaemonSet{}, context.PredicateChecker)
@@ -701,7 +701,7 @@ func TestScaleUpBalanceGroups(t *testing.T) {
 		MaxCoresTotal: config.DefaultMaxClusterCores,
 		MaxMemoryTotal: config.DefaultMaxClusterMemory,
 	}
-	context := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider)
+	context := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil)

 	nodeInfos, _ := getNodeInfosForGroups(nodes, nil, provider, listers, []*appsv1.DaemonSet{}, context.PredicateChecker)
 	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())
@@ -759,7 +759,7 @@ func TestScaleUpAutoprovisionedNodeGroup(t *testing.T) {
 		NodeAutoprovisioningEnabled: true,
 		MaxAutoprovisionedNodeGroupCount: 10,
 	}
-	context := NewScaleTestAutoscalingContext(options, fakeClient, nil, provider)
+	context := NewScaleTestAutoscalingContext(options, fakeClient, nil, provider, nil)

 	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())

@@ -66,11 +66,30 @@ type StaticAutoscaler struct {
 	lastScaleDownFailTime time.Time
 	scaleDown             *ScaleDown
 	processors            *ca_processors.AutoscalingProcessors
+	processorCallbacks    *staticAutoscalerProcessorCallbacks
 	initialized           bool
 	// Caches nodeInfo computed for previously seen nodes
 	nodeInfoCache map[string]*schedulernodeinfo.NodeInfo
 }

+type staticAutoscalerProcessorCallbacks struct {
+	disableScaleDownForLoop bool
+}
+
+func newStaticAutoscalerProcessorCallbacks() *staticAutoscalerProcessorCallbacks {
+	callbacks := &staticAutoscalerProcessorCallbacks{}
+	callbacks.reset()
+	return callbacks
+}
+
+func (callbacks *staticAutoscalerProcessorCallbacks) DisableScaleDownForLoop() {
+	callbacks.disableScaleDownForLoop = true
+}
+
+func (callbacks *staticAutoscalerProcessorCallbacks) reset() {
+	callbacks.disableScaleDownForLoop = false
+}
+
 // NewStaticAutoscaler creates an instance of Autoscaler filled with provided parameters
 func NewStaticAutoscaler(
 	opts config.AutoscalingOptions,
@@ -81,7 +100,9 @@ func NewStaticAutoscaler(
 	expanderStrategy expander.Strategy,
 	estimatorBuilder estimator.EstimatorBuilder,
 	backoff backoff.Backoff) *StaticAutoscaler {
-	autoscalingContext := context.NewAutoscalingContext(opts, predicateChecker, autoscalingKubeClients, cloudProvider, expanderStrategy, estimatorBuilder)
+
+	processorCallbacks := newStaticAutoscalerProcessorCallbacks()
+	autoscalingContext := context.NewAutoscalingContext(opts, predicateChecker, autoscalingKubeClients, cloudProvider, expanderStrategy, estimatorBuilder, processorCallbacks)

 	clusterStateConfig := clusterstate.ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: opts.MaxTotalUnreadyPercentage,
@@ -100,6 +121,7 @@ func NewStaticAutoscaler(
 		lastScaleDownFailTime: time.Now(),
 		scaleDown:             scaleDown,
 		processors:            processors,
+		processorCallbacks:    processorCallbacks,
 		clusterStateRegistry:  clusterStateRegistry,
 		nodeInfoCache:         make(map[string]*schedulernodeinfo.NodeInfo),
 	}
@@ -128,6 +150,7 @@ func (a *StaticAutoscaler) cleanUpIfRequired() {
 // RunOnce iterates over node groups and scales them up/down if necessary
 func (a *StaticAutoscaler) RunOnce(currentTime time.Time) errors.AutoscalerError {
 	a.cleanUpIfRequired()
+	a.processorCallbacks.reset()

 	unschedulablePodLister := a.UnschedulablePodLister()
 	scheduledPodLister := a.ScheduledPodLister()
@@ -382,6 +405,7 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) errors.AutoscalerError
 		}
 	}

+	scaleDownForbidden = scaleDownForbidden || a.processorCallbacks.disableScaleDownForLoop
 	scaleDownInCooldown := scaleDownForbidden ||
 		a.lastScaleUpTime.Add(a.ScaleDownDelayAfterAdd).After(currentTime) ||
 		a.lastScaleDownFailTime.Add(a.ScaleDownDelayAfterFailure).After(currentTime) ||

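Taken together, the static autoscaler changes above define a simple per-iteration flag: newStaticAutoscalerProcessorCallbacks() builds the callbacks object, RunOnce calls reset() at the start of every loop, any processor may call DisableScaleDownForLoop() during that loop, and the flag is OR-ed into scaleDownForbidden before the cooldown check. The following standalone Go sketch mirrors that lifecycle with simplified names; it is an illustration of the semantics, not the autoscaler's actual wiring.

package main

import "fmt"

// processorCallbacks mirrors staticAutoscalerProcessorCallbacks from the diff above.
type processorCallbacks struct {
	disableScaleDownForLoop bool
}

func (c *processorCallbacks) DisableScaleDownForLoop() { c.disableScaleDownForLoop = true }
func (c *processorCallbacks) reset()                   { c.disableScaleDownForLoop = false }

func main() {
	callbacks := &processorCallbacks{}
	for loop := 1; loop <= 3; loop++ {
		// RunOnce resets the flag at the start of every iteration.
		callbacks.reset()
		if loop == 2 {
			// A processor asks to skip scale-down for this iteration only.
			callbacks.DisableScaleDownForLoop()
		}
		scaleDownForbidden := callbacks.disableScaleDownForLoop
		fmt.Printf("loop %d: scale-down forbidden = %v\n", loop, scaleDownForbidden)
	}
}

Because the object is reset at the top of RunOnce, a processor's request never leaks into later iterations.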
@@ -187,7 +187,8 @@ func TestStaticAutoscalerRunOnce(t *testing.T) {
 		ScaleDownUnneededTime: time.Minute,
 		FilterOutSchedulablePodsUsesPacking: true,
 	}
-	context := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, nil, provider)
+	processorCallbacks := newStaticAutoscalerProcessorCallbacks()
+	context := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, nil, provider, processorCallbacks)
 	listerRegistry := kube_util.NewListerRegistry(allNodeListerMock, readyNodeListerMock, scheduledPodMock,
 		unschedulablePodMock, podDisruptionBudgetListerMock, daemonSetListerMock,
 		nil, nil, nil, nil)
@@ -208,6 +209,7 @@ func TestStaticAutoscalerRunOnce(t *testing.T) {
 		lastScaleDownFailTime: time.Now(),
 		scaleDown: sd,
 		processors: ca_processors.TestProcessors(),
+		processorCallbacks: processorCallbacks,
 		initialized: true,
 	}

@@ -371,7 +373,8 @@ func TestStaticAutoscalerRunOnceWithAutoprovisionedEnabled(t *testing.T) {
 		MaxAutoprovisionedNodeGroupCount: 10, // Pods with null priority are always non expendable. Test if it works.
 		FilterOutSchedulablePodsUsesPacking: true,
 	}
-	context := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, nil, provider)
+	processorCallbacks := newStaticAutoscalerProcessorCallbacks()
+	context := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, nil, provider, processorCallbacks)
 	listerRegistry := kube_util.NewListerRegistry(allNodeListerMock, readyNodeListerMock, scheduledPodMock,
 		unschedulablePodMock, podDisruptionBudgetListerMock, daemonSetListerMock,
 		nil, nil, nil, nil)
@@ -392,6 +395,7 @@ func TestStaticAutoscalerRunOnceWithAutoprovisionedEnabled(t *testing.T) {
 		lastScaleDownFailTime: time.Now(),
 		scaleDown: sd,
 		processors: processors,
+		processorCallbacks: processorCallbacks,
 		initialized: true,
 	}

@@ -500,7 +504,8 @@ func TestStaticAutoscalerRunOnceWithALongUnregisteredNode(t *testing.T) {
 		MaxNodeProvisionTime: 10 * time.Second,
 		FilterOutSchedulablePodsUsesPacking: true,
 	}
-	context := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, nil, provider)
+	processorCallbacks := newStaticAutoscalerProcessorCallbacks()
+	context := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, nil, provider, processorCallbacks)
 	listerRegistry := kube_util.NewListerRegistry(allNodeListerMock, readyNodeListerMock, scheduledPodMock,
 		unschedulablePodMock, podDisruptionBudgetListerMock, daemonSetListerMock,
 		nil, nil, nil, nil)
@@ -529,6 +534,7 @@ func TestStaticAutoscalerRunOnceWithALongUnregisteredNode(t *testing.T) {
 		lastScaleDownFailTime: time.Now(),
 		scaleDown: sd,
 		processors: ca_processors.TestProcessors(),
+		processorCallbacks: processorCallbacks,
 	}

 	// Scale up.
@@ -632,7 +638,8 @@ func TestStaticAutoscalerRunOncePodsWithFilterOutSchedulablePodsUsesPackingFalse
 		//Turn off filtering schedulables using packing
 		FilterOutSchedulablePodsUsesPacking: false,
 	}
-	context := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, nil, provider)
+	processorCallbacks := newStaticAutoscalerProcessorCallbacks()
+	context := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, nil, provider, processorCallbacks)
 	listerRegistry := kube_util.NewListerRegistry(allNodeListerMock, readyNodeListerMock, scheduledPodMock,
 		unschedulablePodMock, podDisruptionBudgetListerMock, daemonSetListerMock,
 		nil, nil, nil, nil)
@@ -653,6 +660,7 @@ func TestStaticAutoscalerRunOncePodsWithFilterOutSchedulablePodsUsesPackingFalse
 		lastScaleDownFailTime: time.Now(),
 		scaleDown: sd,
 		processors: ca_processors.TestProcessors(),
+		processorCallbacks: processorCallbacks,
 	}

 	// Scale up
@@ -747,7 +755,8 @@ func TestStaticAutoscalerRunOncePodsWithPriorities(t *testing.T) {
 		ExpendablePodsPriorityCutoff: 10,
 		FilterOutSchedulablePodsUsesPacking: true,
 	}
-	context := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, nil, provider)
+	processorCallbacks := newStaticAutoscalerProcessorCallbacks()
+	context := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, nil, provider, processorCallbacks)
 	listerRegistry := kube_util.NewListerRegistry(allNodeListerMock, readyNodeListerMock, scheduledPodMock,
 		unschedulablePodMock, podDisruptionBudgetListerMock, daemonSetListerMock,
 		nil, nil, nil, nil)
@@ -768,6 +777,7 @@ func TestStaticAutoscalerRunOncePodsWithPriorities(t *testing.T) {
 		lastScaleDownFailTime: time.Now(),
 		scaleDown: sd,
 		processors: ca_processors.TestProcessors(),
+		processorCallbacks: processorCallbacks,
 	}

 	// Scale up
@@ -834,7 +844,8 @@ func TestStaticAutoscalerOutOfResources(t *testing.T) {
 		ExpendablePodsPriorityCutoff: 10,
 		FilterOutSchedulablePodsUsesPacking: true,
 	}
-	context := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, nil, provider)
+	processorCallbacks := newStaticAutoscalerProcessorCallbacks()
+	context := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, nil, provider, processorCallbacks)

 	clusterStateConfig := clusterstate.ClusterStateRegistryConfig{
 		OkTotalUnreadyCount: 1,
@@ -847,6 +858,7 @@ func TestStaticAutoscalerOutOfResources(t *testing.T) {
 		clusterStateRegistry: clusterState,
 		lastScaleUpTime: time.Now(),
 		lastScaleDownFailTime: time.Now(),
+		processorCallbacks: processorCallbacks,
 	}

 	nodeGroupA := &mockprovider.NodeGroup{}

@@ -0,0 +1,23 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package callbacks
+
+// ProcessorCallbacks is interface defining extra callback methods which can be called by processors used in extension points.
+type ProcessorCallbacks interface {
+	// DisableScaleDownForLoop disables scale down for current loop iteration
+	DisableScaleDownForLoop()
+}
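For context, a processor wired into one of the autoscaler's extension points can reach the new interface through the AutoscalingContext field added in this commit. The sketch below is hypothetical: the maintenanceAwareProcessor type, its Process method, and the injected maintenanceOngoing check are made up for illustration; only ProcessorCallbacks and DisableScaleDownForLoop come from this commit. The nil check matters because the test helpers may pass a nil callbacks object.

package example

import (
	"k8s.io/autoscaler/cluster-autoscaler/context"
)

// maintenanceAwareProcessor is a made-up processor used only to illustrate the callback.
type maintenanceAwareProcessor struct {
	maintenanceOngoing func() bool // injected check, hypothetical
}

// Process would be invoked from an extension point with the shared autoscaling context.
func (p *maintenanceAwareProcessor) Process(ctx *context.AutoscalingContext) {
	if p.maintenanceOngoing() && ctx.ProcessorCallbacks != nil {
		// Skip scale-down only for the current RunOnce iteration;
		// the flag is reset at the start of the next loop.
		ctx.ProcessorCallbacks.DisableScaleDownForLoop()
	}
}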