Log event on scale-up timeout

Maciej Pytel 2017-09-01 14:19:14 +02:00
parent a86268f114
commit a440d92a60
6 changed files with 82 additions and 38 deletions
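This diff threads a *utils.LogEventRecorder into ClusterStateRegistry so that scale-up timeouts surface as Kubernetes events instead of only glog warnings. For orientation, here is a minimal sketch of the new constructor wiring from a caller's point of view, assembled from the test setup repeated throughout the diff (the fake clientset, the "kube-system" namespace, and the recorder buffer size are copied from those tests, not prescribed by the commit):

package main

import (
	"fmt"

	testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test"
	"k8s.io/autoscaler/cluster-autoscaler/clusterstate"
	"k8s.io/autoscaler/cluster-autoscaler/clusterstate/utils"
	"k8s.io/client-go/kubernetes/fake"
	kube_record "k8s.io/client-go/tools/record"
)

func main() {
	// A test cloud provider with a single node group, as in the tests below.
	provider := testprovider.NewTestCloudProvider(nil, nil)
	provider.AddNodeGroup("ng1", 1, 10, 1)

	// The LogEventRecorder is now a constructor argument, so the registry
	// can emit the new ScaleUpTimedOut warning event itself.
	fakeClient := &fake.Clientset{}
	logRecorder, err := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
	if err != nil {
		fmt.Println(err)
		return
	}

	registry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{
		MaxTotalUnreadyPercentage: 10,
		OkTotalUnreadyCount:       1,
	}, logRecorder)
	_ = registry
}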

diff --git a/cluster-autoscaler/clusterstate/clusterstate.go b/cluster-autoscaler/clusterstate/clusterstate.go

@@ -24,6 +24,7 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 	"k8s.io/autoscaler/cluster-autoscaler/clusterstate/api"
+	"k8s.io/autoscaler/cluster-autoscaler/clusterstate/utils"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/deletetaint"
 	kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
@@ -128,10 +129,11 @@ type ClusterStateRegistry struct {
 	nodeGroupBackoffInfo    map[string]scaleUpBackoff
 	lastStatus              *api.ClusterAutoscalerStatus
 	lastScaleDownUpdateTime time.Time
+	logRecorder             *utils.LogEventRecorder
 }

 // NewClusterStateRegistry creates new ClusterStateRegistry.
-func NewClusterStateRegistry(cloudProvider cloudprovider.CloudProvider, config ClusterStateRegistryConfig) *ClusterStateRegistry {
+func NewClusterStateRegistry(cloudProvider cloudprovider.CloudProvider, config ClusterStateRegistryConfig, logRecorder *utils.LogEventRecorder) *ClusterStateRegistry {
 	emptyStatus := &api.ClusterAutoscalerStatus{
 		ClusterwideConditions: make([]api.ClusterAutoscalerCondition, 0),
 		NodeGroupStatuses:     make([]api.NodeGroupStatus, 0),
@@ -149,6 +151,7 @@ func NewClusterStateRegistry(cloudProvider cloudprovider.CloudProvider, config C
 		candidatesForScaleDown: make(map[string][]string),
 		nodeGroupBackoffInfo:   make(map[string]scaleUpBackoff),
 		lastStatus:             emptyStatus,
+		logRecorder:            logRecorder,
 	}
 }
@@ -202,6 +205,9 @@ func (csr *ClusterStateRegistry) updateScaleRequests(currentTime time.Time) {
 		if !csr.IsNodeGroupScalingUp(sur.NodeGroupName) {
 			glog.Warningf("Scale-up timed out for node group %v after %v",
 				sur.NodeGroupName, currentTime.Sub(sur.Time))
+			csr.logRecorder.Eventf(apiv1.EventTypeWarning, "ScaleUpTimedOut",
+				"Nodes added to group %s failed to register within %v",
+				sur.NodeGroupName, currentTime.Sub(sur.Time))
 			csr.backoffNodeGroup(sur.NodeGroupName, currentTime)
 		}
 	}

diff --git a/cluster-autoscaler/clusterstate/clusterstate_test.go b/cluster-autoscaler/clusterstate/clusterstate_test.go

@@ -24,7 +24,10 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test"
 	"k8s.io/autoscaler/cluster-autoscaler/clusterstate/api"
+	"k8s.io/autoscaler/cluster-autoscaler/clusterstate/utils"
 	. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
+	"k8s.io/client-go/kubernetes/fake"
+	kube_record "k8s.io/client-go/tools/record"

 	"github.com/stretchr/testify/assert"
 )
@@ -45,10 +48,12 @@ func TestOKWithScaleUp(t *testing.T) {
 	provider.AddNode("ng2", ng2_1)
 	assert.NotNil(t, provider)

+	fakeClient := &fake.Clientset{}
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
 	clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
-	})
+	}, fakeLogRecorder)
 	clusterstate.RegisterScaleUp(&ScaleUpRequest{
 		NodeGroupName: "ng1",
 		Increase:      4,
@@ -88,10 +93,12 @@ func TestEmptyOK(t *testing.T) {
 	provider.AddNodeGroup("ng1", 0, 10, 0)
 	assert.NotNil(t, provider)

+	fakeClient := &fake.Clientset{}
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
 	clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
-	})
+	}, fakeLogRecorder)
 	err := clusterstate.UpdateNodes([]*apiv1.Node{}, now.Add(-5*time.Second))
 	assert.NoError(t, err)
 	assert.True(t, clusterstate.IsClusterHealthy())
@@ -128,10 +135,12 @@ func TestOKOneUnreadyNode(t *testing.T) {
 	provider.AddNode("ng2", ng2_1)
 	assert.NotNil(t, provider)

+	fakeClient := &fake.Clientset{}
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
 	clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
-	})
+	}, fakeLogRecorder)
 	err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng2_1}, now)
 	assert.NoError(t, err)
 	assert.True(t, clusterstate.IsClusterHealthy())
@@ -163,10 +172,12 @@ func TestNodeWithoutNodeGroupDontCrash(t *testing.T) {
 	provider := testprovider.NewTestCloudProvider(nil, nil)
 	provider.AddNode("no_ng", noNgNode)

+	fakeClient := &fake.Clientset{}
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
 	clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
-	})
+	}, fakeLogRecorder)
 	err := clusterstate.UpdateNodes([]*apiv1.Node{noNgNode}, now)
 	assert.NoError(t, err)
 	clusterstate.UpdateScaleDownCandidates([]*apiv1.Node{noNgNode}, now)
@@ -187,10 +198,12 @@ func TestOKOneUnreadyNodeWithScaleDownCandidate(t *testing.T) {
 	provider.AddNode("ng2", ng2_1)
 	assert.NotNil(t, provider)

+	fakeClient := &fake.Clientset{}
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
 	clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
-	})
+	}, fakeLogRecorder)

 	err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng2_1}, now)
 	clusterstate.UpdateScaleDownCandidates([]*apiv1.Node{ng1_1}, now)
@@ -248,10 +261,13 @@ func TestMissingNodes(t *testing.T) {
 	provider.AddNode("ng1", ng1_1)
 	provider.AddNode("ng2", ng2_1)
 	assert.NotNil(t, provider)

+	fakeClient := &fake.Clientset{}
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
 	clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
-	})
+	}, fakeLogRecorder)
 	err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng2_1}, now)
 	assert.NoError(t, err)
 	assert.True(t, clusterstate.IsClusterHealthy())
@@ -287,10 +303,12 @@ func TestToManyUnready(t *testing.T) {
 	provider.AddNode("ng2", ng2_1)
 	assert.NotNil(t, provider)

+	fakeClient := &fake.Clientset{}
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
 	clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
-	})
+	}, fakeLogRecorder)
 	err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng2_1}, now)
 	assert.NoError(t, err)
 	assert.False(t, clusterstate.IsClusterHealthy())
@@ -308,10 +326,12 @@ func TestExpiredScaleUp(t *testing.T) {
 	provider.AddNode("ng1", ng1_1)
 	assert.NotNil(t, provider)

+	fakeClient := &fake.Clientset{}
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
 	clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
-	})
+	}, fakeLogRecorder)
 	clusterstate.RegisterScaleUp(&ScaleUpRequest{
 		NodeGroupName: "ng1",
 		Increase:      4,
@@ -331,10 +351,12 @@ func TestRegisterScaleDown(t *testing.T) {
 	provider.AddNode("ng1", ng1_1)
 	assert.NotNil(t, provider)

+	fakeClient := &fake.Clientset{}
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
 	clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
-	})
+	}, fakeLogRecorder)

 	now := time.Now()
@@ -381,10 +403,12 @@ func TestUpcomingNodes(t *testing.T) {
 	provider.AddNode("ng4", ng4_1)
 	assert.NotNil(t, provider)

+	fakeClient := &fake.Clientset{}
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
 	clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
-	})
+	}, fakeLogRecorder)

 	err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng2_1, ng3_1, ng4_1}, now)
 	assert.NoError(t, err)
@@ -401,10 +425,12 @@ func TestIncorrectSize(t *testing.T) {
 	provider.AddNodeGroup("ng1", 1, 10, 5)
 	provider.AddNode("ng1", ng1_1)
 	assert.NotNil(t, provider)

+	fakeClient := &fake.Clientset{}
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
 	clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
-	})
+	}, fakeLogRecorder)
 	now := time.Now()
 	clusterstate.UpdateNodes([]*apiv1.Node{ng1_1}, now.Add(-5*time.Minute))
 	incorrect := clusterstate.incorrectNodeGroupSizes["ng1"]
@@ -435,10 +461,12 @@ func TestUnregisteredNodes(t *testing.T) {
 	provider.AddNode("ng1", ng1_1)
 	provider.AddNode("ng1", ng1_2)

+	fakeClient := &fake.Clientset{}
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
 	clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
-	})
+	}, fakeLogRecorder)

 	err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1}, time.Now().Add(-time.Minute))
 	assert.NoError(t, err)
@@ -572,10 +600,12 @@ func TestScaleUpBackoff(t *testing.T) {
 	provider.AddNode("ng1", ng1_3)
 	assert.NotNil(t, provider)

+	fakeClient := &fake.Clientset{}
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
 	clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
-	})
+	}, fakeLogRecorder)

 	// Fail a scale-up, node group should be still healthy, but should backoff from scale-ups
 	clusterstate.RegisterScaleUp(&ScaleUpRequest{

diff --git a/cluster-autoscaler/core/autoscaling_context.go b/cluster-autoscaler/core/autoscaling_context.go

@@ -131,7 +131,7 @@ func NewAutoscalingContext(options AutoscalingOptions, predicateChecker *simulat
 		MaxTotalUnreadyPercentage: options.MaxTotalUnreadyPercentage,
 		OkTotalUnreadyCount:       options.OkTotalUnreadyCount,
 	}
-	clusterStateRegistry := clusterstate.NewClusterStateRegistry(cloudProvider, clusterStateConfig)
+	clusterStateRegistry := clusterstate.NewClusterStateRegistry(cloudProvider, clusterStateConfig, logEventRecorder)
 	autoscalingContext := AutoscalingContext{
 		AutoscalingOptions: options,

diff --git a/cluster-autoscaler/core/scale_down_test.go b/cluster-autoscaler/core/scale_down_test.go

@@ -94,7 +94,7 @@ func TestFindUnneededNodes(t *testing.T) {
 		AutoscalingOptions: AutoscalingOptions{
 			ScaleDownUtilizationThreshold: 0.35,
 		},
-		ClusterStateRegistry: clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}),
+		ClusterStateRegistry: clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, fakeLogRecorder),
 		PredicateChecker:     simulator.NewTestPredicateChecker(),
 		LogRecorder:          fakeLogRecorder,
 	}
@@ -166,7 +166,7 @@ func TestFindUnneededMaxCandidates(t *testing.T) {
 			ScaleDownUtilizationThreshold:    0.35,
 			ScaleDownNonEmptyCandidatesCount: numCandidates,
 		},
-		ClusterStateRegistry: clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}),
+		ClusterStateRegistry: clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, fakeLogRecorder),
 		PredicateChecker:     simulator.NewTestPredicateChecker(),
 		LogRecorder:          fakeLogRecorder,
 	}
@@ -377,7 +377,7 @@ func TestScaleDown(t *testing.T) {
 		CloudProvider:        provider,
 		ClientSet:            fakeClient,
 		Recorder:             fakeRecorder,
-		ClusterStateRegistry: clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}),
+		ClusterStateRegistry: clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, fakeLogRecorder),
 		LogRecorder:          fakeLogRecorder,
 	}
 	scaleDown := NewScaleDown(context)
@@ -470,7 +470,7 @@ func TestScaleDownEmptyMultipleNodeGroups(t *testing.T) {
 		CloudProvider:        provider,
 		ClientSet:            fakeClient,
 		Recorder:             fakeRecorder,
-		ClusterStateRegistry: clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}),
+		ClusterStateRegistry: clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, fakeLogRecorder),
 		LogRecorder:          fakeLogRecorder,
 	}
 	scaleDown := NewScaleDown(context)
@@ -540,7 +540,7 @@ func TestScaleDownEmptySingleNodeGroup(t *testing.T) {
 		CloudProvider:        provider,
 		ClientSet:            fakeClient,
 		Recorder:             fakeRecorder,
-		ClusterStateRegistry: clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}),
+		ClusterStateRegistry: clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, fakeLogRecorder),
 		LogRecorder:          fakeLogRecorder,
 	}
 	scaleDown := NewScaleDown(context)
@@ -603,7 +603,7 @@ func TestNoScaleDownUnready(t *testing.T) {
 		CloudProvider:        provider,
 		ClientSet:            fakeClient,
 		Recorder:             fakeRecorder,
-		ClusterStateRegistry: clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}),
+		ClusterStateRegistry: clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, fakeLogRecorder),
 		LogRecorder:          fakeLogRecorder,
 	}
@@ -711,7 +711,7 @@ func TestScaleDownNoMove(t *testing.T) {
 		CloudProvider:        provider,
 		ClientSet:            fakeClient,
 		Recorder:             fakeRecorder,
-		ClusterStateRegistry: clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}),
+		ClusterStateRegistry: clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, fakeLogRecorder),
 		LogRecorder:          fakeLogRecorder,
 	}
 	scaleDown := NewScaleDown(context)

diff --git a/cluster-autoscaler/core/scale_up_test.go b/cluster-autoscaler/core/scale_up_test.go

@@ -77,10 +77,11 @@ func TestScaleUpOK(t *testing.T) {
 	provider.AddNode("ng2", n2)
 	assert.NotNil(t, provider)

-	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{})
-	clusterState.UpdateNodes([]*apiv1.Node{n1, n2}, time.Now())
 	fakeRecorder := kube_record.NewFakeRecorder(5)
 	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
+	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, fakeLogRecorder)
+	clusterState.UpdateNodes([]*apiv1.Node{n1, n2}, time.Now())

 	context := &AutoscalingContext{
 		AutoscalingOptions: AutoscalingOptions{
 			EstimatorName: estimator.BinpackingEstimatorName,
@@ -147,7 +148,9 @@ func TestScaleUpNodeComingNoScale(t *testing.T) {
 	provider.AddNode("ng1", n1)
 	provider.AddNode("ng2", n2)

-	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{})
+	fakeRecorder := kube_util.CreateEventRecorder(fakeClient)
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", fakeRecorder, false)
+	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, fakeLogRecorder)
 	clusterState.RegisterScaleUp(&clusterstate.ScaleUpRequest{
 		NodeGroupName: "ng2",
 		Increase:      1,
@@ -156,8 +159,6 @@
 	})
 	clusterState.UpdateNodes([]*apiv1.Node{n1, n2}, time.Now())

-	fakeRecorder := kube_util.CreateEventRecorder(fakeClient)
-	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", fakeRecorder, false)
 	context := &AutoscalingContext{
 		AutoscalingOptions: AutoscalingOptions{
 			EstimatorName: estimator.BinpackingEstimatorName,
@@ -212,7 +213,9 @@ func TestScaleUpNodeComingHasScale(t *testing.T) {
 	provider.AddNode("ng1", n1)
 	provider.AddNode("ng2", n2)

-	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{})
+	fakeRecorder := kube_util.CreateEventRecorder(fakeClient)
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", fakeRecorder, false)
+	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, fakeLogRecorder)
 	clusterState.RegisterScaleUp(&clusterstate.ScaleUpRequest{
 		NodeGroupName: "ng2",
 		Increase:      1,
@@ -221,8 +224,6 @@
 	})
 	clusterState.UpdateNodes([]*apiv1.Node{n1, n2}, time.Now())

-	fakeRecorder := kube_util.CreateEventRecorder(fakeClient)
-	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", fakeRecorder, false)
 	context := &AutoscalingContext{
 		AutoscalingOptions: AutoscalingOptions{
 			EstimatorName: estimator.BinpackingEstimatorName,
@@ -277,10 +278,10 @@ func TestScaleUpUnhealthy(t *testing.T) {
 	provider.AddNode("ng1", n1)
 	provider.AddNode("ng2", n2)

-	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{})
-	clusterState.UpdateNodes([]*apiv1.Node{n1, n2}, time.Now())
 	fakeRecorder := kube_util.CreateEventRecorder(fakeClient)
 	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", fakeRecorder, false)
+	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, fakeLogRecorder)
+	clusterState.UpdateNodes([]*apiv1.Node{n1, n2}, time.Now())
 	context := &AutoscalingContext{
 		AutoscalingOptions: AutoscalingOptions{
 			EstimatorName: estimator.BinpackingEstimatorName,
@@ -326,10 +327,10 @@ func TestScaleUpNoHelp(t *testing.T) {
 	provider.AddNode("ng1", n1)
 	assert.NotNil(t, provider)

-	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{})
-	clusterState.UpdateNodes([]*apiv1.Node{n1}, time.Now())
 	fakeRecorder := kube_record.NewFakeRecorder(5)
 	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
+	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, fakeLogRecorder)
+	clusterState.UpdateNodes([]*apiv1.Node{n1}, time.Now())
 	context := &AutoscalingContext{
 		AutoscalingOptions: AutoscalingOptions{
 			EstimatorName: estimator.BinpackingEstimatorName,
@@ -404,10 +405,10 @@ func TestScaleUpBalanceGroups(t *testing.T) {
 		return true, &apiv1.PodList{Items: []apiv1.Pod{*(podMap[matches[0]])}}, nil
 	})

-	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{})
-	clusterState.UpdateNodes(nodes, time.Now())
 	fakeRecorder := kube_record.NewFakeRecorder(5)
 	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
+	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, fakeLogRecorder)
+	clusterState.UpdateNodes(nodes, time.Now())
 	context := &AutoscalingContext{
 		AutoscalingOptions: AutoscalingOptions{
 			EstimatorName: estimator.BinpackingEstimatorName,

diff --git a/cluster-autoscaler/core/utils_test.go b/cluster-autoscaler/core/utils_test.go

@@ -23,11 +23,14 @@ import (
 	testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test"
 	"k8s.io/autoscaler/cluster-autoscaler/clusterstate"
+	"k8s.io/autoscaler/cluster-autoscaler/clusterstate/utils"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator"
 	. "k8s.io/autoscaler/cluster-autoscaler/utils/test"

 	apiv1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes/fake"
+	kube_record "k8s.io/client-go/tools/record"
 	"k8s.io/kubernetes/pkg/api/testapi"
 	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
@@ -152,10 +155,12 @@ func TestRemoveOldUnregisteredNodes(t *testing.T) {
 	provider.AddNode("ng1", ng1_1)
 	provider.AddNode("ng1", ng1_2)

+	fakeClient := &fake.Clientset{}
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
 	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
-	})
+	}, fakeLogRecorder)

 	err := clusterState.UpdateNodes([]*apiv1.Node{ng1_1}, now.Add(-time.Hour))
 	assert.NoError(t, err)
@@ -229,10 +234,12 @@ func TestRemoveFixNodeTargetSize(t *testing.T) {
 	provider.AddNodeGroup("ng1", 1, 10, 3)
 	provider.AddNode("ng1", ng1_1)

+	fakeClient := &fake.Clientset{}
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
 	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
-	})
+	}, fakeLogRecorder)

 	err := clusterState.UpdateNodes([]*apiv1.Node{ng1_1}, now.Add(-time.Hour))
 	assert.NoError(t, err)