diff --git a/cluster-autoscaler/clusterstate/clusterstate.go b/cluster-autoscaler/clusterstate/clusterstate.go
index a2461fc92f..bb3cb2d12b 100644
--- a/cluster-autoscaler/clusterstate/clusterstate.go
+++ b/cluster-autoscaler/clusterstate/clusterstate.go
@@ -24,6 +24,7 @@ import (
 
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 	"k8s.io/autoscaler/cluster-autoscaler/clusterstate/api"
+	"k8s.io/autoscaler/cluster-autoscaler/clusterstate/utils"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/deletetaint"
 	kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
 
@@ -128,10 +129,11 @@ type ClusterStateRegistry struct {
 	nodeGroupBackoffInfo    map[string]scaleUpBackoff
 	lastStatus              *api.ClusterAutoscalerStatus
 	lastScaleDownUpdateTime time.Time
+	logRecorder             *utils.LogEventRecorder
 }
 
 // NewClusterStateRegistry creates new ClusterStateRegistry.
-func NewClusterStateRegistry(cloudProvider cloudprovider.CloudProvider, config ClusterStateRegistryConfig) *ClusterStateRegistry {
+func NewClusterStateRegistry(cloudProvider cloudprovider.CloudProvider, config ClusterStateRegistryConfig, logRecorder *utils.LogEventRecorder) *ClusterStateRegistry {
 	emptyStatus := &api.ClusterAutoscalerStatus{
 		ClusterwideConditions: make([]api.ClusterAutoscalerCondition, 0),
 		NodeGroupStatuses:     make([]api.NodeGroupStatus, 0),
@@ -149,6 +151,7 @@ func NewClusterStateRegistry(cloudProvider cloudprovider.CloudProvider, config C
 		candidatesForScaleDown: make(map[string][]string),
 		nodeGroupBackoffInfo:   make(map[string]scaleUpBackoff),
 		lastStatus:             emptyStatus,
+		logRecorder:            logRecorder,
 	}
 }
 
@@ -202,6 +205,9 @@ func (csr *ClusterStateRegistry) updateScaleRequests(currentTime time.Time) {
 		if !csr.IsNodeGroupScalingUp(sur.NodeGroupName) {
 			glog.Warningf("Scale-up timed out for node group %v after %v",
 				sur.NodeGroupName, currentTime.Sub(sur.Time))
+			csr.logRecorder.Eventf(apiv1.EventTypeWarning, "ScaleUpTimedOut",
+				"Nodes added to group %s failed to register within %v",
+				sur.NodeGroupName, currentTime.Sub(sur.Time))
 			csr.backoffNodeGroup(sur.NodeGroupName, currentTime)
 		}
 	}
diff --git a/cluster-autoscaler/clusterstate/clusterstate_test.go b/cluster-autoscaler/clusterstate/clusterstate_test.go
index a9ccc99b70..3a65da8ab3 100644
--- a/cluster-autoscaler/clusterstate/clusterstate_test.go
+++ b/cluster-autoscaler/clusterstate/clusterstate_test.go
@@ -24,7 +24,10 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test"
 	"k8s.io/autoscaler/cluster-autoscaler/clusterstate/api"
+	"k8s.io/autoscaler/cluster-autoscaler/clusterstate/utils"
 	. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
+	"k8s.io/client-go/kubernetes/fake"
+	kube_record "k8s.io/client-go/tools/record"
 
 	"github.com/stretchr/testify/assert"
 )
@@ -45,10 +48,12 @@ func TestOKWithScaleUp(t *testing.T) {
 	provider.AddNode("ng2", ng2_1)
 	assert.NotNil(t, provider)
 
+	fakeClient := &fake.Clientset{}
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
 	clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
-	})
+	}, fakeLogRecorder)
 	clusterstate.RegisterScaleUp(&ScaleUpRequest{
 		NodeGroupName: "ng1",
 		Increase:      4,
@@ -88,10 +93,12 @@ func TestEmptyOK(t *testing.T) {
 	provider.AddNodeGroup("ng1", 0, 10, 0)
 	assert.NotNil(t, provider)
 
+	fakeClient := &fake.Clientset{}
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
 	clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
-	})
+	}, fakeLogRecorder)
 	err := clusterstate.UpdateNodes([]*apiv1.Node{}, now.Add(-5*time.Second))
 	assert.NoError(t, err)
 	assert.True(t, clusterstate.IsClusterHealthy())
@@ -128,10 +135,12 @@ func TestOKOneUnreadyNode(t *testing.T) {
 	provider.AddNode("ng2", ng2_1)
 	assert.NotNil(t, provider)
 
+	fakeClient := &fake.Clientset{}
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
 	clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
-	})
+	}, fakeLogRecorder)
 	err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng2_1}, now)
 	assert.NoError(t, err)
 	assert.True(t, clusterstate.IsClusterHealthy())
@@ -163,10 +172,12 @@ func TestNodeWithoutNodeGroupDontCrash(t *testing.T) {
 	provider := testprovider.NewTestCloudProvider(nil, nil)
 	provider.AddNode("no_ng", noNgNode)
 
+	fakeClient := &fake.Clientset{}
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
 	clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
-	})
+	}, fakeLogRecorder)
 	err := clusterstate.UpdateNodes([]*apiv1.Node{noNgNode}, now)
 	assert.NoError(t, err)
 	clusterstate.UpdateScaleDownCandidates([]*apiv1.Node{noNgNode}, now)
@@ -187,10 +198,12 @@ func TestOKOneUnreadyNodeWithScaleDownCandidate(t *testing.T) {
 	provider.AddNode("ng2", ng2_1)
 	assert.NotNil(t, provider)
 
+	fakeClient := &fake.Clientset{}
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
 	clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
-	})
+	}, fakeLogRecorder)
 	err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng2_1}, now)
 	clusterstate.UpdateScaleDownCandidates([]*apiv1.Node{ng1_1}, now)
 
@@ -248,10 +261,13 @@ func TestMissingNodes(t *testing.T) {
 	provider.AddNode("ng1", ng1_1)
 	provider.AddNode("ng2", ng2_1)
 	assert.NotNil(t, provider)
+
+	fakeClient := &fake.Clientset{}
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
 	clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
-	})
+	}, fakeLogRecorder)
 	err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng2_1}, now)
 	assert.NoError(t, err)
 	assert.True(t, clusterstate.IsClusterHealthy())
@@ -287,10 +303,12 @@ func TestToManyUnready(t *testing.T) {
 	provider.AddNode("ng2", ng2_1)
 	assert.NotNil(t, provider)
 
+	fakeClient := &fake.Clientset{}
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
 	clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
-	})
+	}, fakeLogRecorder)
 	err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng2_1}, now)
 	assert.NoError(t, err)
 	assert.False(t, clusterstate.IsClusterHealthy())
@@ -308,10 +326,12 @@ func TestExpiredScaleUp(t *testing.T) {
 	provider.AddNode("ng1", ng1_1)
 	assert.NotNil(t, provider)
 
+	fakeClient := &fake.Clientset{}
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
 	clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
-	})
+	}, fakeLogRecorder)
 	clusterstate.RegisterScaleUp(&ScaleUpRequest{
 		NodeGroupName: "ng1",
 		Increase:      4,
@@ -331,10 +351,12 @@ func TestRegisterScaleDown(t *testing.T) {
 	provider.AddNode("ng1", ng1_1)
 	assert.NotNil(t, provider)
 
+	fakeClient := &fake.Clientset{}
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
 	clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
-	})
+	}, fakeLogRecorder)
 
 	now := time.Now()
 
@@ -381,10 +403,12 @@ func TestUpcomingNodes(t *testing.T) {
 	provider.AddNode("ng4", ng4_1)
 	assert.NotNil(t, provider)
 
+	fakeClient := &fake.Clientset{}
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
 	clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
-	})
+	}, fakeLogRecorder)
 	err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng2_1, ng3_1, ng4_1}, now)
 	assert.NoError(t, err)
 
@@ -401,10 +425,12 @@ func TestIncorrectSize(t *testing.T) {
 	provider.AddNodeGroup("ng1", 1, 10, 5)
 	provider.AddNode("ng1", ng1_1)
 	assert.NotNil(t, provider)
+	fakeClient := &fake.Clientset{}
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
 	clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
-	})
+	}, fakeLogRecorder)
 	now := time.Now()
 	clusterstate.UpdateNodes([]*apiv1.Node{ng1_1}, now.Add(-5*time.Minute))
 	incorrect := clusterstate.incorrectNodeGroupSizes["ng1"]
@@ -435,10 +461,12 @@ func TestUnregisteredNodes(t *testing.T) {
 	provider.AddNode("ng1", ng1_1)
 	provider.AddNode("ng1", ng1_2)
 
+	fakeClient := &fake.Clientset{}
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
 	clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
-	})
+	}, fakeLogRecorder)
 	err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1}, time.Now().Add(-time.Minute))
 	assert.NoError(t, err)
 
@@ -572,10 +600,12 @@ func TestScaleUpBackoff(t *testing.T) {
 	provider.AddNode("ng1", ng1_3)
 	assert.NotNil(t, provider)
 
+	fakeClient := &fake.Clientset{}
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
 	clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
-	})
+	}, fakeLogRecorder)
 
 	// Fail a scale-up, node group should be still healthy, but should backoff from scale-ups
 	clusterstate.RegisterScaleUp(&ScaleUpRequest{
diff --git a/cluster-autoscaler/core/autoscaling_context.go b/cluster-autoscaler/core/autoscaling_context.go
index 7210cfabd6..8a937ffca2 100644
--- a/cluster-autoscaler/core/autoscaling_context.go
+++ b/cluster-autoscaler/core/autoscaling_context.go
@@ -131,7 +131,7 @@ func NewAutoscalingContext(options AutoscalingOptions, predicateChecker *simulat
 		MaxTotalUnreadyPercentage: options.MaxTotalUnreadyPercentage,
 		OkTotalUnreadyCount:       options.OkTotalUnreadyCount,
 	}
-	clusterStateRegistry := clusterstate.NewClusterStateRegistry(cloudProvider, clusterStateConfig)
+	clusterStateRegistry := clusterstate.NewClusterStateRegistry(cloudProvider, clusterStateConfig, logEventRecorder)
 
 	autoscalingContext := AutoscalingContext{
 		AutoscalingOptions: options,
diff --git a/cluster-autoscaler/core/scale_down_test.go b/cluster-autoscaler/core/scale_down_test.go
index 1b314d9398..45f9702857 100644
--- a/cluster-autoscaler/core/scale_down_test.go
+++ b/cluster-autoscaler/core/scale_down_test.go
@@ -94,7 +94,7 @@ func TestFindUnneededNodes(t *testing.T) {
 		AutoscalingOptions: AutoscalingOptions{
 			ScaleDownUtilizationThreshold: 0.35,
 		},
-		ClusterStateRegistry: clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}),
+		ClusterStateRegistry: clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, fakeLogRecorder),
 		PredicateChecker:     simulator.NewTestPredicateChecker(),
 		LogRecorder:          fakeLogRecorder,
 	}
@@ -166,7 +166,7 @@ func TestFindUnneededMaxCandidates(t *testing.T) {
 			ScaleDownUtilizationThreshold:    0.35,
 			ScaleDownNonEmptyCandidatesCount: numCandidates,
 		},
-		ClusterStateRegistry: clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}),
+		ClusterStateRegistry: clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, fakeLogRecorder),
 		PredicateChecker:     simulator.NewTestPredicateChecker(),
 		LogRecorder:          fakeLogRecorder,
 	}
@@ -377,7 +377,7 @@ func TestScaleDown(t *testing.T) {
 		CloudProvider:        provider,
 		ClientSet:            fakeClient,
 		Recorder:             fakeRecorder,
-		ClusterStateRegistry: clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}),
+		ClusterStateRegistry: clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, fakeLogRecorder),
 		LogRecorder:          fakeLogRecorder,
 	}
 	scaleDown := NewScaleDown(context)
@@ -470,7 +470,7 @@ func TestScaleDownEmptyMultipleNodeGroups(t *testing.T) {
 		CloudProvider:        provider,
 		ClientSet:            fakeClient,
 		Recorder:             fakeRecorder,
-		ClusterStateRegistry: clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}),
+		ClusterStateRegistry: clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, fakeLogRecorder),
 		LogRecorder:          fakeLogRecorder,
 	}
 	scaleDown := NewScaleDown(context)
@@ -540,7 +540,7 @@ func TestScaleDownEmptySingleNodeGroup(t *testing.T) {
 		CloudProvider:        provider,
 		ClientSet:            fakeClient,
 		Recorder:             fakeRecorder,
-		ClusterStateRegistry: clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}),
+		ClusterStateRegistry: clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, fakeLogRecorder),
 		LogRecorder:          fakeLogRecorder,
 	}
 	scaleDown := NewScaleDown(context)
@@ -603,7 +603,7 @@ func TestNoScaleDownUnready(t *testing.T) {
 		CloudProvider:        provider,
 		ClientSet:            fakeClient,
 		Recorder:             fakeRecorder,
-		ClusterStateRegistry: clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}),
+		ClusterStateRegistry: clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, fakeLogRecorder),
 		LogRecorder:          fakeLogRecorder,
 	}
 
@@ -711,7 +711,7 @@ func TestScaleDownNoMove(t *testing.T) {
 		CloudProvider:        provider,
 		ClientSet:            fakeClient,
 		Recorder:             fakeRecorder,
-		ClusterStateRegistry: clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}),
+		ClusterStateRegistry: clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, fakeLogRecorder),
 		LogRecorder:          fakeLogRecorder,
 	}
 	scaleDown := NewScaleDown(context)
diff --git a/cluster-autoscaler/core/scale_up_test.go b/cluster-autoscaler/core/scale_up_test.go
index 1449e1d65c..3b81cfb2d3 100644
--- a/cluster-autoscaler/core/scale_up_test.go
+++ b/cluster-autoscaler/core/scale_up_test.go
@@ -77,10 +77,11 @@ func TestScaleUpOK(t *testing.T) {
 	provider.AddNode("ng2", n2)
 	assert.NotNil(t, provider)
 
-	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{})
-	clusterState.UpdateNodes([]*apiv1.Node{n1, n2}, time.Now())
 	fakeRecorder := kube_record.NewFakeRecorder(5)
 	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
+	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, fakeLogRecorder)
+	clusterState.UpdateNodes([]*apiv1.Node{n1, n2}, time.Now())
+
 	context := &AutoscalingContext{
 		AutoscalingOptions: AutoscalingOptions{
 			EstimatorName: estimator.BinpackingEstimatorName,
@@ -147,7 +148,9 @@ func TestScaleUpNodeComingNoScale(t *testing.T) {
 	provider.AddNode("ng1", n1)
 	provider.AddNode("ng2", n2)
 
-	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{})
+	fakeRecorder := kube_util.CreateEventRecorder(fakeClient)
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", fakeRecorder, false)
+	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, fakeLogRecorder)
 	clusterState.RegisterScaleUp(&clusterstate.ScaleUpRequest{
 		NodeGroupName: "ng2",
 		Increase:      1,
@@ -156,8 +159,6 @@
 	})
 	clusterState.UpdateNodes([]*apiv1.Node{n1, n2}, time.Now())
 
-	fakeRecorder := kube_util.CreateEventRecorder(fakeClient)
-	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", fakeRecorder, false)
 	context := &AutoscalingContext{
 		AutoscalingOptions: AutoscalingOptions{
 			EstimatorName: estimator.BinpackingEstimatorName,
@@ -212,7 +213,9 @@ func TestScaleUpNodeComingHasScale(t *testing.T) {
 	provider.AddNode("ng1", n1)
 	provider.AddNode("ng2", n2)
 
-	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{})
+	fakeRecorder := kube_util.CreateEventRecorder(fakeClient)
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", fakeRecorder, false)
+	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, fakeLogRecorder)
 	clusterState.RegisterScaleUp(&clusterstate.ScaleUpRequest{
 		NodeGroupName: "ng2",
 		Increase:      1,
@@ -221,8 +224,6 @@
 	})
 	clusterState.UpdateNodes([]*apiv1.Node{n1, n2}, time.Now())
 
-	fakeRecorder := kube_util.CreateEventRecorder(fakeClient)
-	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", fakeRecorder, false)
 	context := &AutoscalingContext{
 		AutoscalingOptions: AutoscalingOptions{
 			EstimatorName: estimator.BinpackingEstimatorName,
@@ -277,10 +278,10 @@ func TestScaleUpUnhealthy(t *testing.T) {
 	provider.AddNode("ng1", n1)
 	provider.AddNode("ng2", n2)
 
-	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{})
-	clusterState.UpdateNodes([]*apiv1.Node{n1, n2}, time.Now())
 	fakeRecorder := kube_util.CreateEventRecorder(fakeClient)
 	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", fakeRecorder, false)
+	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, fakeLogRecorder)
+	clusterState.UpdateNodes([]*apiv1.Node{n1, n2}, time.Now())
 	context := &AutoscalingContext{
 		AutoscalingOptions: AutoscalingOptions{
 			EstimatorName: estimator.BinpackingEstimatorName,
@@ -326,10 +327,10 @@ func TestScaleUpNoHelp(t *testing.T) {
 	provider.AddNode("ng1", n1)
 	assert.NotNil(t, provider)
 
-	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{})
-	clusterState.UpdateNodes([]*apiv1.Node{n1}, time.Now())
 	fakeRecorder := kube_record.NewFakeRecorder(5)
 	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
+	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, fakeLogRecorder)
+	clusterState.UpdateNodes([]*apiv1.Node{n1}, time.Now())
 	context := &AutoscalingContext{
 		AutoscalingOptions: AutoscalingOptions{
 			EstimatorName: estimator.BinpackingEstimatorName,
@@ -404,10 +405,10 @@ func TestScaleUpBalanceGroups(t *testing.T) {
 		return true, &apiv1.PodList{Items: []apiv1.Pod{*(podMap[matches[0]])}}, nil
 	})
 
-	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{})
-	clusterState.UpdateNodes(nodes, time.Now())
 	fakeRecorder := kube_record.NewFakeRecorder(5)
 	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
+	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, fakeLogRecorder)
+	clusterState.UpdateNodes(nodes, time.Now())
 	context := &AutoscalingContext{
 		AutoscalingOptions: AutoscalingOptions{
 			EstimatorName: estimator.BinpackingEstimatorName,
diff --git a/cluster-autoscaler/core/utils_test.go b/cluster-autoscaler/core/utils_test.go
index 1ea7205b1f..b209888a9c 100644
--- a/cluster-autoscaler/core/utils_test.go
+++ b/cluster-autoscaler/core/utils_test.go
@@ -23,11 +23,14 @@ import (
 	testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test"
 	"k8s.io/autoscaler/cluster-autoscaler/clusterstate"
+	"k8s.io/autoscaler/cluster-autoscaler/clusterstate/utils"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator"
 	. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
 
 	apiv1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes/fake"
+	kube_record "k8s.io/client-go/tools/record"
 
 	"k8s.io/kubernetes/pkg/api/testapi"
 	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
@@ -152,10 +155,12 @@ func TestRemoveOldUnregisteredNodes(t *testing.T) {
 	provider.AddNode("ng1", ng1_1)
 	provider.AddNode("ng1", ng1_2)
 
+	fakeClient := &fake.Clientset{}
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
 	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
-	})
+	}, fakeLogRecorder)
 	err := clusterState.UpdateNodes([]*apiv1.Node{ng1_1}, now.Add(-time.Hour))
 	assert.NoError(t, err)
 
@@ -229,10 +234,12 @@ func TestRemoveFixNodeTargetSize(t *testing.T) {
 	provider.AddNodeGroup("ng1", 1, 10, 3)
 	provider.AddNode("ng1", ng1_1)
 
+	fakeClient := &fake.Clientset{}
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
 	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
-	})
+	}, fakeLogRecorder)
 	err := clusterState.UpdateNodes([]*apiv1.Node{ng1_1}, now.Add(-time.Hour))
 	assert.NoError(t, err)