Move autoscaling options out of static

Aleksandra Malinowska 2018-07-25 10:47:09 +02:00
parent 03d18e9508
commit 0976d2aa07
15 changed files with 67 additions and 69 deletions
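The change renames the package that holds AutoscalingOptions and GpuLimits from k8s.io/autoscaler/cluster-autoscaler/config/static to k8s.io/autoscaler/cluster-autoscaler/config; the option fields themselves appear untouched. As a rough illustration of the effect on callers (a sketch, not code from this commit), a user of the options struct now looks like this:

package main

import (
	"fmt"

	// was: "k8s.io/autoscaler/cluster-autoscaler/config/static"
	"k8s.io/autoscaler/cluster-autoscaler/config"
)

func main() {
	// was: static.AutoscalingOptions{...}; field names are unchanged by the move
	opts := config.AutoscalingOptions{
		CloudProviderName: "gce",
		MaxCoresTotal:     10,
		MinCoresTotal:     1,
	}
	fmt.Println(opts.CloudProviderName)
}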

View File

@@ -25,7 +25,7 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/azure"
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/gce"
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/kubemark"
-	"k8s.io/autoscaler/cluster-autoscaler/config/static"
+	"k8s.io/autoscaler/cluster-autoscaler/config"
 	"k8s.io/client-go/informers"
 	kubeclient "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/rest"
@@ -48,7 +48,7 @@ var AvailableCloudProviders = []string{
 const DefaultCloudProvider = gce.ProviderNameGCE
 
 // NewCloudProvider builds a cloud provider from provided parameters.
-func NewCloudProvider(opts static.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter) cloudprovider.CloudProvider {
+func NewCloudProvider(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter) cloudprovider.CloudProvider {
 	glog.V(1).Infof("Building %s cloud provider.", opts.CloudProviderName)
 	switch opts.CloudProviderName {
 	case gce.ProviderNameGCE:
@@ -78,7 +78,7 @@ func NewCloudProvider(opts static.AutoscalingOptions, do cloudprovider.NodeGroup
 	return nil // This will never happen because the Fatalf will os.Exit
 }
 
-func buildGCE(opts static.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter, mode gce.GcpCloudProviderMode) cloudprovider.CloudProvider {
+func buildGCE(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter, mode gce.GcpCloudProviderMode) cloudprovider.CloudProvider {
 	var config io.ReadCloser
 	if opts.CloudConfig != "" {
 		var err error
@@ -101,7 +101,7 @@ func buildGCE(opts static.AutoscalingOptions, do cloudprovider.NodeGroupDiscover
 	return provider
 }
 
-func buildAWS(opts static.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter) cloudprovider.CloudProvider {
+func buildAWS(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter) cloudprovider.CloudProvider {
 	var config io.ReadCloser
 	if opts.CloudConfig != "" {
 		var err error
@@ -124,7 +124,7 @@ func buildAWS(opts static.AutoscalingOptions, do cloudprovider.NodeGroupDiscover
 	return provider
 }
 
-func buildAzure(opts static.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter) cloudprovider.CloudProvider {
+func buildAzure(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter) cloudprovider.CloudProvider {
 	var config io.ReadCloser
 	if opts.CloudConfig != "" {
 		glog.Info("Creating Azure Manager using cloud-config file: %v", opts.CloudConfig)
@@ -148,7 +148,7 @@ func buildAzure(opts static.AutoscalingOptions, do cloudprovider.NodeGroupDiscov
 	return provider
 }
 
-func buildKubemark(opts static.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter) cloudprovider.CloudProvider {
+func buildKubemark(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter) cloudprovider.CloudProvider {
 	externalConfig, err := rest.InClusterConfig()
 	if err != nil {
 		glog.Fatalf("Failed to get kubeclient config for external cluster: %v", err)

View File

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package static
+package config
 
 import (
 	"time"

View File

@@ -20,7 +20,7 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/builder"
 	"k8s.io/autoscaler/cluster-autoscaler/clusterstate/utils"
-	"k8s.io/autoscaler/cluster-autoscaler/config/static"
+	"k8s.io/autoscaler/cluster-autoscaler/config"
 	"k8s.io/autoscaler/cluster-autoscaler/expander"
 	"k8s.io/autoscaler/cluster-autoscaler/expander/factory"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator"
@@ -34,7 +34,7 @@ import (
 // scale up/scale down functions.
 type AutoscalingContext struct {
 	// Options to customize how autoscaling works
-	static.AutoscalingOptions
+	config.AutoscalingOptions
 	// CloudProvider used in CA.
 	CloudProvider cloudprovider.CloudProvider
 	// ClientSet interface.
@@ -52,7 +52,7 @@ type AutoscalingContext struct {
 
 // NewResourceLimiterFromAutoscalingOptions creates new instance of cloudprovider.ResourceLimiter
 // reading limits from AutoscalingOptions struct.
-func NewResourceLimiterFromAutoscalingOptions(options static.AutoscalingOptions) *cloudprovider.ResourceLimiter {
+func NewResourceLimiterFromAutoscalingOptions(options config.AutoscalingOptions) *cloudprovider.ResourceLimiter {
 	// build min/max maps for resources limits
 	minResources := make(map[string]int64)
 	maxResources := make(map[string]int64)
@@ -70,7 +70,7 @@ func NewResourceLimiterFromAutoscalingOptions(options static.AutoscalingOptions)
 }
 
 // NewAutoscalingContext returns an autoscaling context from all the necessary parameters passed via arguments
-func NewAutoscalingContext(options static.AutoscalingOptions, predicateChecker *simulator.PredicateChecker,
+func NewAutoscalingContext(options config.AutoscalingOptions, predicateChecker *simulator.PredicateChecker,
 	kubeClient kube_client.Interface, kubeEventRecorder kube_record.EventRecorder,
 	logEventRecorder *utils.LogEventRecorder, listerRegistry kube_util.ListerRegistry) (*AutoscalingContext, errors.AutoscalerError) {

View File

@@ -20,7 +20,7 @@ import (
 	"testing"
 
 	"k8s.io/autoscaler/cluster-autoscaler/clusterstate/utils"
-	"k8s.io/autoscaler/cluster-autoscaler/config/static"
+	"k8s.io/autoscaler/cluster-autoscaler/config"
 	"k8s.io/autoscaler/cluster-autoscaler/expander"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator"
 	kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
@@ -36,7 +36,7 @@ func TestNewAutoscalingContext(t *testing.T) {
 	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
 	autoscalingContext, err := NewAutoscalingContext(
-		static.AutoscalingOptions{
+		config.AutoscalingOptions{
 			ExpanderName: expander.RandomExpanderName,
 			MaxCoresTotal: 10,
 			MinCoresTotal: 1,

View File

@@ -19,7 +19,7 @@ package core
 import (
 	"time"
 
-	"k8s.io/autoscaler/cluster-autoscaler/config/static"
+	"k8s.io/autoscaler/cluster-autoscaler/config"
 	ca_processors "k8s.io/autoscaler/cluster-autoscaler/processors"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
@@ -30,7 +30,7 @@ import (
 
 // AutoscalerOptions is the whole set of options for configuring an autoscaler
 type AutoscalerOptions struct {
-	static.AutoscalingOptions
+	config.AutoscalingOptions
 	KubeClient kube_client.Interface
 	KubeEventRecorder kube_record.EventRecorder
 	PredicateChecker *simulator.PredicateChecker

View File

@@ -32,7 +32,6 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/clusterstate"
 	"k8s.io/autoscaler/cluster-autoscaler/clusterstate/utils"
 	"k8s.io/autoscaler/cluster-autoscaler/config"
-	"k8s.io/autoscaler/cluster-autoscaler/config/static"
 	"k8s.io/autoscaler/cluster-autoscaler/context"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator"
 	kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
@@ -127,7 +126,7 @@ func TestFindUnneededNodes(t *testing.T) {
 	provider.AddNode("ng1", n9)
 	context := context.AutoscalingContext{
-		AutoscalingOptions: static.AutoscalingOptions{
+		AutoscalingOptions: config.AutoscalingOptions{
 			ScaleDownUtilizationThreshold: 0.35,
 			ExpendablePodsPriorityCutoff: 10,
 			UnremovableNodeRecheckTimeout: 5 * time.Minute,
@@ -247,7 +246,7 @@ func TestPodsWithPrioritiesFindUnneededNodes(t *testing.T) {
 	provider.AddNode("ng1", n4)
 	context := context.AutoscalingContext{
-		AutoscalingOptions: static.AutoscalingOptions{
+		AutoscalingOptions: config.AutoscalingOptions{
 			ScaleDownUtilizationThreshold: 0.35,
 			ExpendablePodsPriorityCutoff: 10,
 		},
@@ -303,7 +302,7 @@ func TestFindUnneededMaxCandidates(t *testing.T) {
 	numCandidates := 30
 	context := context.AutoscalingContext{
-		AutoscalingOptions: static.AutoscalingOptions{
+		AutoscalingOptions: config.AutoscalingOptions{
 			ScaleDownUtilizationThreshold: 0.35,
 			ScaleDownNonEmptyCandidatesCount: numCandidates,
 			ScaleDownCandidatesPoolRatio: 1,
@@ -376,7 +375,7 @@ func TestFindUnneededEmptyNodes(t *testing.T) {
 	numCandidates := 30
 	context := context.AutoscalingContext{
-		AutoscalingOptions: static.AutoscalingOptions{
+		AutoscalingOptions: config.AutoscalingOptions{
 			ScaleDownUtilizationThreshold: 0.35,
 			ScaleDownNonEmptyCandidatesCount: numCandidates,
 			ScaleDownCandidatesPoolRatio: 1.0,
@@ -427,7 +426,7 @@ func TestFindUnneededNodePool(t *testing.T) {
 	numCandidates := 30
 	context := context.AutoscalingContext{
-		AutoscalingOptions: static.AutoscalingOptions{
+		AutoscalingOptions: config.AutoscalingOptions{
 			ScaleDownUtilizationThreshold: 0.35,
 			ScaleDownNonEmptyCandidatesCount: numCandidates,
 			ScaleDownCandidatesPoolRatio: 0.1,
@@ -574,7 +573,7 @@ func TestDeleteNode(t *testing.T) {
 	// build context
 	context := &context.AutoscalingContext{
-		AutoscalingOptions: static.AutoscalingOptions{},
+		AutoscalingOptions: config.AutoscalingOptions{},
 		ClientSet: fakeClient,
 		Recorder: fakeRecorder,
 		LogRecorder: fakeLogRecorder,
@@ -805,7 +804,7 @@ func TestScaleDown(t *testing.T) {
 	fakeRecorder := kube_util.CreateEventRecorder(fakeClient)
 	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", fakeRecorder, false)
 	context := &context.AutoscalingContext{
-		AutoscalingOptions: static.AutoscalingOptions{
+		AutoscalingOptions: config.AutoscalingOptions{
 			ScaleDownUtilizationThreshold: 0.5,
 			ScaleDownUnneededTime: time.Minute,
 			MaxGracefulTerminationSec: 60,
@@ -860,7 +859,7 @@ func assertSubset(t *testing.T, a []string, b []string) {
 	}
 }
 
-var defaultScaleDownOptions = static.AutoscalingOptions{
+var defaultScaleDownOptions = config.AutoscalingOptions{
 	ScaleDownUtilizationThreshold: 0.5,
 	ScaleDownUnneededTime: time.Minute,
 	MaxGracefulTerminationSec: 60,
@@ -929,7 +928,7 @@ func TestScaleDownEmptyMinMemoryLimitHit(t *testing.T) {
 func TestScaleDownEmptyMinGpuLimitHit(t *testing.T) {
 	options := defaultScaleDownOptions
-	options.GpuTotal = []static.GpuLimits{
+	options.GpuTotal = []config.GpuLimits{
 		{
 			GpuType: gpu.DefaultGPUType,
 			Min: 4,
@@ -1104,7 +1103,7 @@ func TestNoScaleDownUnready(t *testing.T) {
 	fakeRecorder := kube_util.CreateEventRecorder(fakeClient)
 	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", fakeRecorder, false)
 	context := &context.AutoscalingContext{
-		AutoscalingOptions: static.AutoscalingOptions{
+		AutoscalingOptions: config.AutoscalingOptions{
 			ScaleDownUtilizationThreshold: 0.5,
 			ScaleDownUnneededTime: time.Minute,
 			ScaleDownUnreadyTime: time.Hour,
@@ -1212,7 +1211,7 @@ func TestScaleDownNoMove(t *testing.T) {
 	fakeRecorder := kube_util.CreateEventRecorder(fakeClient)
 	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", fakeRecorder, false)
 	context := &context.AutoscalingContext{
-		AutoscalingOptions: static.AutoscalingOptions{
+		AutoscalingOptions: config.AutoscalingOptions{
 			ScaleDownUtilizationThreshold: 0.5,
 			ScaleDownUnneededTime: time.Minute,
 			ScaleDownUnreadyTime: time.Hour,

View File

@@ -16,7 +16,7 @@ limitations under the License.
 
 package core
 
-import "k8s.io/autoscaler/cluster-autoscaler/config/static"
+import "k8s.io/autoscaler/cluster-autoscaler/config"
 
 type nodeConfig struct {
 	name string
@@ -48,5 +48,5 @@ type scaleTestConfig struct {
 	scaleUpOptionToChoose groupSizeChange // this will be selected by assertingStrategy.BestOption
 	expectedFinalScaleUp groupSizeChange // we expect this to be delivered via scale-up event
 	expectedScaleDowns []string
-	options static.AutoscalingOptions
+	options config.AutoscalingOptions
 }

View File

@@ -28,7 +28,6 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/clusterstate"
 	"k8s.io/autoscaler/cluster-autoscaler/clusterstate/utils"
 	"k8s.io/autoscaler/cluster-autoscaler/config"
-	"k8s.io/autoscaler/cluster-autoscaler/config/static"
 	"k8s.io/autoscaler/cluster-autoscaler/context"
 	"k8s.io/autoscaler/cluster-autoscaler/estimator"
 	"k8s.io/autoscaler/cluster-autoscaler/expander/random"
@@ -51,7 +50,7 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/expander"
 )
 
-var defaultOptions = static.AutoscalingOptions{
+var defaultOptions = config.AutoscalingOptions{
 	EstimatorName: estimator.BinpackingEstimatorName,
 	MaxCoresTotal: config.DefaultMaxClusterCores,
 	MaxMemoryTotal: config.DefaultMaxClusterMemory * units.Gigabyte,
@@ -539,7 +538,7 @@ func TestScaleUpNodeComingNoScale(t *testing.T) {
 	clusterState.UpdateNodes([]*apiv1.Node{n1, n2}, time.Now())
 	context := &context.AutoscalingContext{
-		AutoscalingOptions: static.AutoscalingOptions{
+		AutoscalingOptions: config.AutoscalingOptions{
 			EstimatorName: estimator.BinpackingEstimatorName,
 			MaxCoresTotal: config.DefaultMaxClusterCores,
 			MaxMemoryTotal: config.DefaultMaxClusterMemory,
@@ -664,7 +663,7 @@ func TestScaleUpUnhealthy(t *testing.T) {
 	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, fakeLogRecorder)
 	clusterState.UpdateNodes([]*apiv1.Node{n1, n2}, time.Now())
 	context := &context.AutoscalingContext{
-		AutoscalingOptions: static.AutoscalingOptions{
+		AutoscalingOptions: config.AutoscalingOptions{
 			EstimatorName: estimator.BinpackingEstimatorName,
 			MaxCoresTotal: config.DefaultMaxClusterCores,
 			MaxMemoryTotal: config.DefaultMaxClusterMemory,
@@ -716,7 +715,7 @@ func TestScaleUpNoHelp(t *testing.T) {
 	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, fakeLogRecorder)
 	clusterState.UpdateNodes([]*apiv1.Node{n1}, time.Now())
 	context := &context.AutoscalingContext{
-		AutoscalingOptions: static.AutoscalingOptions{
+		AutoscalingOptions: config.AutoscalingOptions{
 			EstimatorName: estimator.BinpackingEstimatorName,
 			MaxCoresTotal: config.DefaultMaxClusterCores,
 			MaxMemoryTotal: config.DefaultMaxClusterMemory,
@@ -798,7 +797,7 @@ func TestScaleUpBalanceGroups(t *testing.T) {
 	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, fakeLogRecorder)
 	clusterState.UpdateNodes(nodes, time.Now())
 	context := &context.AutoscalingContext{
-		AutoscalingOptions: static.AutoscalingOptions{
+		AutoscalingOptions: config.AutoscalingOptions{
 			EstimatorName: estimator.BinpackingEstimatorName,
 			BalanceSimilarNodeGroups: true,
 			MaxCoresTotal: config.DefaultMaxClusterCores,
@@ -862,7 +861,7 @@ func TestScaleUpAutoprovisionedNodeGroup(t *testing.T) {
 	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, fakeLogRecorder)
 	context := &context.AutoscalingContext{
-		AutoscalingOptions: static.AutoscalingOptions{
+		AutoscalingOptions: config.AutoscalingOptions{
 			EstimatorName: estimator.BinpackingEstimatorName,
 			MaxCoresTotal: 5000 * 64,
 			MaxMemoryTotal: 5000 * 64 * 20,

View File

@@ -21,7 +21,7 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/clusterstate"
 	"k8s.io/autoscaler/cluster-autoscaler/clusterstate/utils"
-	"k8s.io/autoscaler/cluster-autoscaler/config/static"
+	"k8s.io/autoscaler/cluster-autoscaler/config"
 	"k8s.io/autoscaler/cluster-autoscaler/context"
 	"k8s.io/autoscaler/cluster-autoscaler/metrics"
 	ca_processors "k8s.io/autoscaler/cluster-autoscaler/processors"
@@ -66,7 +66,7 @@ type StaticAutoscaler struct {
 }
 
 // NewStaticAutoscaler creates an instance of Autoscaler filled with provided parameters
-func NewStaticAutoscaler(opts static.AutoscalingOptions, predicateChecker *simulator.PredicateChecker,
+func NewStaticAutoscaler(opts config.AutoscalingOptions, predicateChecker *simulator.PredicateChecker,
 	kubeClient kube_client.Interface, kubeEventRecorder kube_record.EventRecorder, listerRegistry kube_util.ListerRegistry,
 	processors *ca_processors.AutoscalingProcessors) (*StaticAutoscaler, errors.AutoscalerError) {
 	logRecorder, err := utils.NewStatusMapRecorder(kubeClient, opts.ConfigNamespace, kubeEventRecorder, opts.WriteStatusConfigMap)

View File

@@ -24,7 +24,7 @@ import (
 	testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test"
 	"k8s.io/autoscaler/cluster-autoscaler/clusterstate"
 	"k8s.io/autoscaler/cluster-autoscaler/clusterstate/utils"
-	"k8s.io/autoscaler/cluster-autoscaler/config/static"
+	"k8s.io/autoscaler/cluster-autoscaler/config"
 	"k8s.io/autoscaler/cluster-autoscaler/context"
 	"k8s.io/autoscaler/cluster-autoscaler/estimator"
 	"k8s.io/autoscaler/cluster-autoscaler/expander/random"
@@ -173,7 +173,7 @@ func TestStaticAutoscalerRunOnce(t *testing.T) {
 	clusterState.UpdateNodes([]*apiv1.Node{n1, n2}, time.Now())
 	context := &context.AutoscalingContext{
-		AutoscalingOptions: static.AutoscalingOptions{
+		AutoscalingOptions: config.AutoscalingOptions{
 			EstimatorName: estimator.BinpackingEstimatorName,
 			ScaleDownEnabled: true,
 			ScaleDownUtilizationThreshold: 0.5,
@@ -353,7 +353,7 @@ func TestStaticAutoscalerRunOnceWithAutoprovisionedEnabled(t *testing.T) {
 	processors := ca_processors.TestProcessors()
 	processors.NodeGroupListProcessor = nodegroups.NewAutoprovisioningNodeGroupListProcessor()
 	context := &context.AutoscalingContext{
-		AutoscalingOptions: static.AutoscalingOptions{
+		AutoscalingOptions: config.AutoscalingOptions{
 			EstimatorName: estimator.BinpackingEstimatorName,
 			ScaleDownEnabled: true,
 			ScaleDownUtilizationThreshold: 0.5,
@@ -492,7 +492,7 @@ func TestStaticAutoscalerRunOnceWithALongUnregisteredNode(t *testing.T) {
 	clusterState.UpdateNodes([]*apiv1.Node{n1}, later)
 	context := &context.AutoscalingContext{
-		AutoscalingOptions: static.AutoscalingOptions{
+		AutoscalingOptions: config.AutoscalingOptions{
 			EstimatorName: estimator.BinpackingEstimatorName,
 			ScaleDownEnabled: true,
 			ScaleDownUtilizationThreshold: 0.5,
@@ -629,7 +629,7 @@ func TestStaticAutoscalerRunOncePodsWithPriorities(t *testing.T) {
 	clusterState.UpdateNodes([]*apiv1.Node{n1, n2}, time.Now())
 	context := &context.AutoscalingContext{
-		AutoscalingOptions: static.AutoscalingOptions{
+		AutoscalingOptions: config.AutoscalingOptions{
 			EstimatorName: estimator.BinpackingEstimatorName,
 			ScaleDownEnabled: true,
 			ScaleDownUtilizationThreshold: 0.5,

View File

@@ -24,7 +24,7 @@ import (
 	testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test"
 	"k8s.io/autoscaler/cluster-autoscaler/clusterstate"
 	"k8s.io/autoscaler/cluster-autoscaler/clusterstate/utils"
-	"k8s.io/autoscaler/cluster-autoscaler/config/static"
+	"k8s.io/autoscaler/cluster-autoscaler/config"
 	"k8s.io/autoscaler/cluster-autoscaler/context"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/deletetaint"
@@ -379,7 +379,7 @@ func TestRemoveOldUnregisteredNodes(t *testing.T) {
 	assert.NoError(t, err)
 	context := &context.AutoscalingContext{
-		AutoscalingOptions: static.AutoscalingOptions{
+		AutoscalingOptions: config.AutoscalingOptions{
 			MaxNodeProvisionTime: 45 * time.Minute,
 		},
 		CloudProvider: provider,
@@ -476,7 +476,7 @@ func TestRemoveFixNodeTargetSize(t *testing.T) {
 	assert.NoError(t, err)
 	context := &context.AutoscalingContext{
-		AutoscalingOptions: static.AutoscalingOptions{
+		AutoscalingOptions: config.AutoscalingOptions{
 			MaxNodeProvisionTime: 45 * time.Minute,
 		},
 		CloudProvider: provider,

View File

@@ -33,7 +33,6 @@ import (
 	kube_flag "k8s.io/apiserver/pkg/util/flag"
 	cloudBuilder "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/builder"
 	"k8s.io/autoscaler/cluster-autoscaler/config"
-	"k8s.io/autoscaler/cluster-autoscaler/config/static"
 	"k8s.io/autoscaler/cluster-autoscaler/core"
 	"k8s.io/autoscaler/cluster-autoscaler/estimator"
 	"k8s.io/autoscaler/cluster-autoscaler/expander"
@@ -151,7 +150,7 @@ var (
 	regional = flag.Bool("regional", false, "Cluster is regional.")
 )
 
-func createAutoscalingOptions() static.AutoscalingOptions {
+func createAutoscalingOptions() config.AutoscalingOptions {
 	minCoresTotal, maxCoresTotal, err := parseMinMaxFlag(*coresTotal)
 	if err != nil {
 		glog.Fatalf("Failed to parse flags: %v", err)
@@ -169,7 +168,7 @@ func createAutoscalingOptions() static.AutoscalingOptions {
 		glog.Fatalf("Failed to parse flags: %v", err)
 	}
-	return static.AutoscalingOptions{
+	return config.AutoscalingOptions{
 		CloudConfig: *cloudConfig,
 		CloudProviderName: *cloudProviderFlag,
 		NodeGroupAutoDiscovery: *nodeGroupAutoDiscoveryFlag,
@@ -456,8 +455,8 @@ func minMaxFlagString(min, max int64) string {
 	return fmt.Sprintf("%v:%v", min, max)
 }
 
-func parseMultipleGpuLimits(flags MultiStringFlag) ([]static.GpuLimits, error) {
-	parsedFlags := make([]static.GpuLimits, 0, len(flags))
+func parseMultipleGpuLimits(flags MultiStringFlag) ([]config.GpuLimits, error) {
+	parsedFlags := make([]config.GpuLimits, 0, len(flags))
 	for _, flag := range flags {
 		parsedFlag, err := parseSingleGpuLimit(flag)
 		if err != nil {
@@ -468,30 +467,30 @@ func parseMultipleGpuLimits(flags MultiStringFlag) ([]static.GpuLimits, error) {
 	return parsedFlags, nil
 }
 
-func parseSingleGpuLimit(config string) (static.GpuLimits, error) {
-	parts := strings.Split(config, ":")
+func parseSingleGpuLimit(limits string) (config.GpuLimits, error) {
+	parts := strings.Split(limits, ":")
 	if len(parts) != 3 {
-		return static.GpuLimits{}, fmt.Errorf("Incorrect gpu limit specification: %v", config)
+		return config.GpuLimits{}, fmt.Errorf("Incorrect gpu limit specification: %v", limits)
 	}
 	gpuType := parts[0]
 	minVal, err := strconv.ParseInt(parts[1], 10, 64)
 	if err != nil {
-		return static.GpuLimits{}, fmt.Errorf("Incorrect gpu limit - min is not integer: %v", config)
+		return config.GpuLimits{}, fmt.Errorf("Incorrect gpu limit - min is not integer: %v", limits)
 	}
 	maxVal, err := strconv.ParseInt(parts[2], 10, 64)
 	if err != nil {
-		return static.GpuLimits{}, fmt.Errorf("Incorrect gpu limit - max is not integer: %v", config)
+		return config.GpuLimits{}, fmt.Errorf("Incorrect gpu limit - max is not integer: %v", limits)
 	}
 	if minVal < 0 {
-		return static.GpuLimits{}, fmt.Errorf("Incorrect gpu limit - min is less than 0; %v", config)
+		return config.GpuLimits{}, fmt.Errorf("Incorrect gpu limit - min is less than 0; %v", limits)
 	}
 	if maxVal < 0 {
-		return static.GpuLimits{}, fmt.Errorf("Incorrect gpu limit - max is less than 0; %v", config)
+		return config.GpuLimits{}, fmt.Errorf("Incorrect gpu limit - max is less than 0; %v", limits)
 	}
 	if minVal > maxVal {
-		return static.GpuLimits{}, fmt.Errorf("Incorrect gpu limit - min is greater than max; %v", config)
+		return config.GpuLimits{}, fmt.Errorf("Incorrect gpu limit - min is greater than max; %v", limits)
 	}
-	parsedGpuLimits := static.GpuLimits{
+	parsedGpuLimits := config.GpuLimits{
 		GpuType: gpuType,
 		Min: minVal,
 		Max: maxVal,
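For context on the renamed parsing helpers above: each GPU limit is supplied as a colon-separated triple <gpu-type>:<min>:<max>, where both bounds are non-negative integers and min must not exceed max. An illustrative call (mirroring the "gpu:1:10" test case later in this commit; this snippet is not part of the commit):

	limits, err := parseSingleGpuLimit("gpu:1:10")
	// err == nil; limits == config.GpuLimits{GpuType: "gpu", Min: 1, Max: 10}
	_, err = parseSingleGpuLimit("gpu:10:1")
	// err != nil: min is greater than max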

View File

@@ -19,15 +19,16 @@ package main
 import (
 	"testing"
 
+	"k8s.io/autoscaler/cluster-autoscaler/config"
+
 	"github.com/stretchr/testify/assert"
-	"k8s.io/autoscaler/cluster-autoscaler/config/static"
 )
 
 func TestParseSingleGpuLimit(t *testing.T) {
 	type testcase struct {
 		input string
 		expectError bool
-		expectedLimits static.GpuLimits
+		expectedLimits config.GpuLimits
 		expectedErrorMessage string
 	}
@@ -35,7 +36,7 @@ func TestParseSingleGpuLimit(t *testing.T) {
 		{
 			input: "gpu:1:10",
 			expectError: false,
-			expectedLimits: static.GpuLimits{
+			expectedLimits: config.GpuLimits{
 				GpuType: "gpu",
 				Min: 1,
 				Max: 10,

View File

@@ -24,7 +24,7 @@ import (
 	"github.com/stretchr/testify/assert"
 	testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test"
 	"k8s.io/autoscaler/cluster-autoscaler/clusterstate/utils"
-	"k8s.io/autoscaler/cluster-autoscaler/config/static"
+	"k8s.io/autoscaler/cluster-autoscaler/config"
 	"k8s.io/autoscaler/cluster-autoscaler/context"
 	kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
 	. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
@@ -56,7 +56,7 @@ func TestAutoprovisioningNodeGroupManager(t *testing.T) {
 	provider := testprovider.NewTestAutoprovisioningCloudProvider(nil, nil,
 		func(string) error { return tc.createNodeGroupErr }, nil, nil, nil)
 	context := &context.AutoscalingContext{
-		AutoscalingOptions: static.AutoscalingOptions{
+		AutoscalingOptions: config.AutoscalingOptions{
 			NodeAutoprovisioningEnabled: true,
 		},
 		CloudProvider: provider,
@@ -107,7 +107,7 @@ func TestRemoveUnneededNodeGroups(t *testing.T) {
 	fakeRecorder := kube_util.CreateEventRecorder(fakeClient)
 	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", fakeRecorder, false)
 	context := &context.AutoscalingContext{
-		AutoscalingOptions: static.AutoscalingOptions{
+		AutoscalingOptions: config.AutoscalingOptions{
 			NodeAutoprovisioningEnabled: true,
 		},
 		CloudProvider: provider,

View File

@@ -21,7 +21,7 @@ import (
 	apiv1 "k8s.io/api/core/v1"
 	testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test"
-	"k8s.io/autoscaler/cluster-autoscaler/config/static"
+	"k8s.io/autoscaler/cluster-autoscaler/config"
 	"k8s.io/autoscaler/cluster-autoscaler/context"
 	. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
 	schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
@@ -47,7 +47,7 @@ func TestAutoprovisioningNGLProcessor(t *testing.T) {
 	provider.AddNodeGroup("ng1", 1, 5, 3)
 	context := &context.AutoscalingContext{
-		AutoscalingOptions: static.AutoscalingOptions{
+		AutoscalingOptions: config.AutoscalingOptions{
 			MaxAutoprovisionedNodeGroupCount: 1,
 			NodeAutoprovisioningEnabled: true,
 		},
@@ -85,7 +85,7 @@ func TestAutoprovisioningNGLProcessorTooMany(t *testing.T) {
 	provider.AddAutoprovisionedNodeGroup("autoprovisioned-X1", 0, 1000, 0, "X1")
 	context := &context.AutoscalingContext{
-		AutoscalingOptions: static.AutoscalingOptions{
+		AutoscalingOptions: config.AutoscalingOptions{
 			MaxAutoprovisionedNodeGroupCount: 1,
 			NodeAutoprovisioningEnabled: true,
 		},