Merge pull request #119 from MaciekPytel/status_configmap_improvement
Status configmap fixes
commit 8d59afa484
@@ -32,8 +32,6 @@ import (
 )
 
 const (
-	// StatusConfigMapNamespace is the namespace where ConfigMap with status is stored.
-	StatusConfigMapNamespace = "kube-system"
 	// StatusConfigMapName is the name of ConfigMap with status.
 	StatusConfigMapName = "cluster-autoscaler-status"
 	// ConfigMapLastUpdatedKey is the name of annotation informing about status ConfigMap last update.
@@ -64,11 +62,11 @@ func (ler *LogEventRecorder) Eventf(eventtype, reason, message string, args ...i
 // NewStatusMapRecorder creates a LogEventRecorder creating events on status configmap.
 // If the configmap doesn't exist it will be created (with 'Initializing' status).
 // If active == false the map will not be created and no events will be recorded.
-func NewStatusMapRecorder(kubeClient kube_client.Interface, recorder record.EventRecorder, active bool) (*LogEventRecorder, error) {
+func NewStatusMapRecorder(kubeClient kube_client.Interface, namespace string, recorder record.EventRecorder, active bool) (*LogEventRecorder, error) {
 	var mapObj runtime.Object
 	var err error
 	if active {
-		mapObj, err = WriteStatusConfigMap(kubeClient, "Initializing", nil)
+		mapObj, err = WriteStatusConfigMap(kubeClient, namespace, "Initializing", nil)
 		if err != nil {
 			return nil, errors.New("Failed to init status ConfigMap")
 		}
	}
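Aside: a sketch of constructing the recorder with the new namespace argument and emitting an event through it. This is illustrative, not part of the diff — the import path of the patched package is an assumption, and record.NewFakeRecorder merely stands in for a real event sink:

```go
package main

import (
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/tools/record"

	// Assumed import path for the package patched above.
	"k8s.io/autoscaler/cluster-autoscaler/clusterstate/utils"
)

func main() {
	client := fake.NewSimpleClientset()
	eventRecorder := record.NewFakeRecorder(10)

	// active == true: the "Initializing" status ConfigMap is created up front,
	// in the namespace passed explicitly since this change.
	logRecorder, err := utils.NewStatusMapRecorder(client, "kube-system", eventRecorder, true)
	if err != nil {
		panic(err)
	}
	// Eventf mirrors the signature visible in the hunk header above.
	logRecorder.Eventf("Normal", "ScaledUpGroup", "scale-up example: %d -> %d", 1, 2)
}
```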
@@ -83,22 +81,25 @@ func NewStatusMapRecorder(kubeClient kube_client.Interface, recorder record.Even
 // WriteStatusConfigMap writes updates status ConfigMap with a given message or creates a new
 // ConfigMap if it doesn't exist. If logRecorder is passed and configmap update is successfull
 // logRecorder's internal reference will be updated.
-func WriteStatusConfigMap(kubeClient kube_client.Interface, msg string, logRecorder *LogEventRecorder) (*apiv1.ConfigMap, error) {
+func WriteStatusConfigMap(kubeClient kube_client.Interface, namespace string, msg string, logRecorder *LogEventRecorder) (*apiv1.ConfigMap, error) {
 	statusUpdateTime := time.Now()
 	statusMsg := fmt.Sprintf("Cluster-autoscaler status at %v:\n%v", statusUpdateTime, msg)
 	var configMap *apiv1.ConfigMap
 	var getStatusError, writeStatusError error
 	var errMsg string
-	maps := kubeClient.CoreV1().ConfigMaps(StatusConfigMapNamespace)
+	maps := kubeClient.CoreV1().ConfigMaps(namespace)
 	configMap, getStatusError = maps.Get(StatusConfigMapName, metav1.GetOptions{})
 	if getStatusError == nil {
 		configMap.Data["status"] = statusMsg
+		if configMap.ObjectMeta.Annotations == nil {
+			configMap.ObjectMeta.Annotations = make(map[string]string)
+		}
 		configMap.ObjectMeta.Annotations[ConfigMapLastUpdatedKey] = fmt.Sprintf("%v", statusUpdateTime)
 		configMap, writeStatusError = maps.Update(configMap)
 	} else if kube_errors.IsNotFound(getStatusError) {
 		configMap = &apiv1.ConfigMap{
 			ObjectMeta: metav1.ObjectMeta{
-				Namespace: StatusConfigMapNamespace,
+				Namespace: namespace,
 				Name:      StatusConfigMapName,
 				Annotations: map[string]string{
 					ConfigMapLastUpdatedKey: fmt.Sprintf("%v", statusUpdateTime),
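Aside: the nil check added above matters because a ConfigMap fetched from the API server can legitimately carry a nil Annotations map, and assigning into a nil map panics in Go. A minimal standalone sketch of the failure mode and the guard (plain Go, not autoscaler code):

```go
package main

import "fmt"

func main() {
	var annotations map[string]string // nil, like ObjectMeta.Annotations on an object created without any
	// annotations["example-key"] = "value" // would panic: assignment to entry in nil map
	if annotations == nil {
		annotations = make(map[string]string)
	}
	annotations["example-key"] = "value" // safe after the guard
	fmt.Println(annotations)
}
```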
@@ -110,10 +111,10 @@ func WriteStatusConfigMap(kubeClient kube_client.Interface, msg string, logRecor
 		}
 		configMap, writeStatusError = maps.Create(configMap)
 	} else {
-		errMsg = "Failed to retrieve status configmap for update"
+		errMsg = fmt.Sprintf("Failed to retrieve status configmap for update: %v", getStatusError)
 	}
 	if writeStatusError != nil {
-		errMsg = "Failed to write status configmap"
+		errMsg = fmt.Sprintf("Failed to write status configmap: %v", writeStatusError)
 	}
 	if errMsg != "" {
 		glog.Error(errMsg)
@@ -129,8 +130,8 @@ func WriteStatusConfigMap(kubeClient kube_client.Interface, msg string, logRecor
 }
 
 // DeleteStatusConfigMap deletes status configmap
-func DeleteStatusConfigMap(kubeClient kube_client.Interface) error {
-	maps := kubeClient.CoreV1().ConfigMaps(StatusConfigMapNamespace)
+func DeleteStatusConfigMap(kubeClient kube_client.Interface, namespace string) error {
+	maps := kubeClient.CoreV1().ConfigMaps(namespace)
 	err := maps.Delete(StatusConfigMapName, &metav1.DeleteOptions{})
 	if err != nil {
 		glog.Error("Failed to delete status configmap")
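For orientation, a sketch of how a caller drives the status helpers after this change. It runs against the client-go fake, so no cluster is needed; the import path of the patched package is an assumption, while the signatures match the diff above:

```go
package main

import (
	"fmt"

	"k8s.io/client-go/kubernetes/fake"

	// Assumed import path for the package patched above.
	"k8s.io/autoscaler/cluster-autoscaler/clusterstate/utils"
)

func main() {
	client := fake.NewSimpleClientset()

	// The namespace is now an explicit argument instead of the removed
	// StatusConfigMapNamespace constant.
	cm, err := utils.WriteStatusConfigMap(client, "kube-system", "Initializing", nil)
	if err != nil {
		fmt.Println("write failed:", err)
		return
	}
	fmt.Println("status:", cm.Data["status"])

	// Cleanup uses the matching namespace-aware signature.
	if err := utils.DeleteStatusConfigMap(client, "kube-system"); err != nil {
		fmt.Println("delete failed:", err)
	}
}
```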
@@ -33,6 +33,7 @@ import (
 type testInfo struct {
 	client       *fake.Clientset
 	configMap    *apiv1.ConfigMap
+	namespace    string
 	getError     error
 	getCalled    bool
 	updateCalled bool
@@ -41,16 +42,17 @@ type testInfo struct {
 }
 
 func setUpTest(t *testing.T) *testInfo {
+	namespace := "kube-system"
 	result := testInfo{
 		client: &fake.Clientset{},
 		configMap: &apiv1.ConfigMap{
 			ObjectMeta: metav1.ObjectMeta{
-				Namespace: StatusConfigMapNamespace,
+				Namespace: namespace,
 				Name:      StatusConfigMapName,
-				Annotations: map[string]string{},
 			},
 			Data: map[string]string{},
 		},
+		namespace:    namespace,
 		getCalled:    false,
 		updateCalled: false,
 		createCalled: false,
@@ -58,7 +60,7 @@ func setUpTest(t *testing.T) *testInfo {
 	}
 	result.client.Fake.AddReactor("get", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
 		get := action.(core.GetAction)
-		assert.Equal(result.t, StatusConfigMapNamespace, get.GetNamespace())
+		assert.Equal(result.t, namespace, get.GetNamespace())
 		assert.Equal(result.t, StatusConfigMapName, get.GetName())
 		result.getCalled = true
 		if result.getError != nil {
@@ -68,13 +70,13 @@ func setUpTest(t *testing.T) *testInfo {
 	})
 	result.client.Fake.AddReactor("update", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
 		update := action.(core.UpdateAction)
-		assert.Equal(result.t, StatusConfigMapNamespace, update.GetNamespace())
+		assert.Equal(result.t, namespace, update.GetNamespace())
 		result.updateCalled = true
 		return true, result.configMap, nil
 	})
 	result.client.Fake.AddReactor("create", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
 		create := action.(core.CreateAction)
-		assert.Equal(result.t, StatusConfigMapNamespace, create.GetNamespace())
+		assert.Equal(result.t, namespace, create.GetNamespace())
 		configMap := create.GetObject().(*apiv1.ConfigMap)
 		assert.Equal(result.t, StatusConfigMapName, configMap.ObjectMeta.Name)
 		result.createCalled = true
@@ -85,7 +87,7 @@ func setUpTest(t *testing.T) *testInfo {
 
 func TestWriteStatusConfigMapExisting(t *testing.T) {
 	ti := setUpTest(t)
-	result, err := WriteStatusConfigMap(ti.client, "TEST_MSG", nil)
+	result, err := WriteStatusConfigMap(ti.client, ti.namespace, "TEST_MSG", nil)
 	assert.Equal(t, ti.configMap, result)
 	assert.Contains(t, result.Data["status"], "TEST_MSG")
 	assert.Contains(t, result.ObjectMeta.Annotations, ConfigMapLastUpdatedKey)
@@ -98,7 +100,7 @@ func TestWriteStatusConfigMapExisting(t *testing.T) {
 func TestWriteStatusConfigMapCreate(t *testing.T) {
 	ti := setUpTest(t)
 	ti.getError = kube_errors.NewNotFound(apiv1.Resource("configmap"), "nope, not found")
-	result, err := WriteStatusConfigMap(ti.client, "TEST_MSG", nil)
+	result, err := WriteStatusConfigMap(ti.client, ti.namespace, "TEST_MSG", nil)
 	assert.Contains(t, result.Data["status"], "TEST_MSG")
 	assert.Contains(t, result.ObjectMeta.Annotations, ConfigMapLastUpdatedKey)
 	assert.Nil(t, err)
@@ -110,8 +112,9 @@ func TestWriteStatusConfigMapCreate(t *testing.T) {
 func TestWriteStatusConfigMapError(t *testing.T) {
 	ti := setUpTest(t)
 	ti.getError = errors.New("stuff bad")
-	result, err := WriteStatusConfigMap(ti.client, "TEST_MSG", nil)
+	result, err := WriteStatusConfigMap(ti.client, ti.namespace, "TEST_MSG", nil)
 	assert.NotNil(t, err)
+	assert.Contains(t, err.Error(), "stuff bad")
 	assert.Nil(t, result)
 	assert.True(t, ti.getCalled)
 	assert.False(t, ti.updateCalled)
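The new assertion above checks that the underlying Get error now surfaces in the returned error, which the fmt.Sprintf changes earlier in this PR make possible. Separately, these tests lean on client-go's fake reactors; a minimal self-contained sketch of that technique (illustrative names, and client-go method signatures of the same vintage as this diff, i.e. without context.Context):

```go
package main

import (
	"errors"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"
)

func main() {
	client := &fake.Clientset{}
	// A reactor intercepts matching verb/resource pairs; returning
	// handled == true short-circuits the default object tracker.
	client.Fake.AddReactor("get", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
		return true, nil, errors.New("stuff bad")
	})
	_, err := client.CoreV1().ConfigMaps("kube-system").Get("cluster-autoscaler-status", metav1.GetOptions{})
	fmt.Println(err) // prints: stuff bad
}
```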
@@ -105,6 +105,8 @@ type AutoscalingOptions struct {
 	WriteStatusConfigMap bool
 	// BalanceSimilarNodeGroups enables logic that identifies node groups with similar machines and tries to balance node count between them.
 	BalanceSimilarNodeGroups bool
+	// ConfigNamespace is the namespace cluster-autoscaler is running in and all related configmaps live in
+	ConfigNamespace string
 }
 
 // NewAutoscalingContext returns an autoscaling context from all the necessary parameters passed via arguments
@@ -68,7 +68,7 @@ func TestFindUnneededNodes(t *testing.T) {
 
 	fakeClient := &fake.Clientset{}
 	fakeRecorder := kube_util.CreateEventRecorder(fakeClient)
-	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, fakeRecorder, false)
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", fakeRecorder, false)
 
 	provider := testprovider.NewTestCloudProvider(nil, nil)
 	provider.AddNodeGroup("ng1", 1, 10, 2)
@@ -273,7 +273,7 @@ func TestScaleDown(t *testing.T) {
 	assert.NotNil(t, provider)
 
 	fakeRecorder := kube_util.CreateEventRecorder(fakeClient)
-	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, fakeRecorder, false)
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", fakeRecorder, false)
 	context := &AutoscalingContext{
 		AutoscalingOptions: AutoscalingOptions{
 			ScaleDownUtilizationThreshold: 0.5,
@@ -331,7 +331,7 @@ func TestNoScaleDownUnready(t *testing.T) {
 	provider.AddNode("ng1", n2)
 
 	fakeRecorder := kube_util.CreateEventRecorder(fakeClient)
-	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, fakeRecorder, false)
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", fakeRecorder, false)
 	context := &AutoscalingContext{
 		AutoscalingOptions: AutoscalingOptions{
 			ScaleDownUtilizationThreshold: 0.5,
@@ -435,7 +435,7 @@ func TestScaleDownNoMove(t *testing.T) {
 	assert.NotNil(t, provider)
 
 	fakeRecorder := kube_util.CreateEventRecorder(fakeClient)
-	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, fakeRecorder, false)
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", fakeRecorder, false)
 	context := &AutoscalingContext{
 		AutoscalingOptions: AutoscalingOptions{
 			ScaleDownUtilizationThreshold: 0.5,
@@ -80,7 +80,7 @@ func TestScaleUpOK(t *testing.T) {
 	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{})
 	clusterState.UpdateNodes([]*apiv1.Node{n1, n2}, time.Now())
 	fakeRecorder := kube_record.NewFakeRecorder(5)
-	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, kube_record.NewFakeRecorder(5), false)
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
 	context := &AutoscalingContext{
 		AutoscalingOptions: AutoscalingOptions{
 			EstimatorName: estimator.BinpackingEstimatorName,
@@ -157,7 +157,7 @@ func TestScaleUpNodeComingNoScale(t *testing.T) {
 	clusterState.UpdateNodes([]*apiv1.Node{n1, n2}, time.Now())
 
 	fakeRecorder := kube_util.CreateEventRecorder(fakeClient)
-	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, fakeRecorder, false)
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", fakeRecorder, false)
 	context := &AutoscalingContext{
 		AutoscalingOptions: AutoscalingOptions{
 			EstimatorName: estimator.BinpackingEstimatorName,
@@ -222,7 +222,7 @@ func TestScaleUpNodeComingHasScale(t *testing.T) {
 	clusterState.UpdateNodes([]*apiv1.Node{n1, n2}, time.Now())
 
 	fakeRecorder := kube_util.CreateEventRecorder(fakeClient)
-	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, fakeRecorder, false)
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", fakeRecorder, false)
 	context := &AutoscalingContext{
 		AutoscalingOptions: AutoscalingOptions{
 			EstimatorName: estimator.BinpackingEstimatorName,
@@ -280,7 +280,7 @@ func TestScaleUpUnhealthy(t *testing.T) {
 	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{})
 	clusterState.UpdateNodes([]*apiv1.Node{n1, n2}, time.Now())
 	fakeRecorder := kube_util.CreateEventRecorder(fakeClient)
-	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, fakeRecorder, false)
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", fakeRecorder, false)
 	context := &AutoscalingContext{
 		AutoscalingOptions: AutoscalingOptions{
 			EstimatorName: estimator.BinpackingEstimatorName,
@@ -329,7 +329,7 @@ func TestScaleUpNoHelp(t *testing.T) {
 	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{})
 	clusterState.UpdateNodes([]*apiv1.Node{n1}, time.Now())
 	fakeRecorder := kube_record.NewFakeRecorder(5)
-	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, kube_record.NewFakeRecorder(5), false)
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
 	context := &AutoscalingContext{
 		AutoscalingOptions: AutoscalingOptions{
 			EstimatorName: estimator.BinpackingEstimatorName,
@@ -407,7 +407,7 @@ func TestScaleUpBalanceGroups(t *testing.T) {
 	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{})
 	clusterState.UpdateNodes(nodes, time.Now())
 	fakeRecorder := kube_record.NewFakeRecorder(5)
-	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, kube_record.NewFakeRecorder(5), false)
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
 	context := &AutoscalingContext{
 		AutoscalingOptions: AutoscalingOptions{
 			EstimatorName: estimator.BinpackingEstimatorName,
@@ -44,12 +44,12 @@ type StaticAutoscaler struct {
 // NewStaticAutoscaler creates an instance of Autoscaler filled with provided parameters
 func NewStaticAutoscaler(opts AutoscalingOptions, predicateChecker *simulator.PredicateChecker,
 	kubeClient kube_client.Interface, kubeEventRecorder kube_record.EventRecorder, listerRegistry kube_util.ListerRegistry) (*StaticAutoscaler, errors.AutoscalerError) {
-	logRecorder, err := utils.NewStatusMapRecorder(kubeClient, kubeEventRecorder, opts.WriteStatusConfigMap)
+	logRecorder, err := utils.NewStatusMapRecorder(kubeClient, opts.ConfigNamespace, kubeEventRecorder, opts.WriteStatusConfigMap)
 	if err != nil {
 		glog.Error("Failed to initialize status configmap, unable to write status events")
 		// Get a dummy, so we can at least safely call the methods
 		// TODO(maciekpytel): recover from this after successfull status configmap update?
-		logRecorder, _ = utils.NewStatusMapRecorder(kubeClient, kubeEventRecorder, false)
+		logRecorder, _ = utils.NewStatusMapRecorder(kubeClient, opts.ConfigNamespace, kubeEventRecorder, false)
 	}
 	autoscalingContext, errctx := NewAutoscalingContext(opts, predicateChecker, kubeClient, kubeEventRecorder, logRecorder, listerRegistry)
 	if errctx != nil {
@@ -120,7 +120,8 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) errors.AutoscalerError
 	defer func() {
 		if autoscalingContext.WriteStatusConfigMap {
 			status := a.ClusterStateRegistry.GetStatus(time.Now())
-			utils.WriteStatusConfigMap(autoscalingContext.ClientSet, status.GetReadableString(), a.AutoscalingContext.LogRecorder)
+			utils.WriteStatusConfigMap(autoscalingContext.ClientSet, autoscalingContext.ConfigNamespace,
+				status.GetReadableString(), a.AutoscalingContext.LogRecorder)
 		}
 	}()
 	if !a.ClusterStateRegistry.IsClusterHealthy() {
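The deferred write above guarantees the status ConfigMap reflects the outcome of every RunOnce iteration, including early returns. A generic, runnable sketch of that defer pattern (plain Go, not autoscaler code):

```go
package main

import "fmt"

func runOnce(writeStatus bool, publish func(string)) error {
	defer func() {
		if writeStatus {
			publish("status at loop exit") // runs on every return path, early or not
		}
	}()
	// ... the autoscaling iteration itself would run here ...
	return nil
}

func main() {
	_ = runOnce(true, func(msg string) { fmt.Println(msg) })
}
```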
@@ -306,5 +307,5 @@ func (a *StaticAutoscaler) ExitCleanUp() {
 	if !a.AutoscalingContext.WriteStatusConfigMap {
 		return
 	}
-	utils.DeleteStatusConfigMap(a.AutoscalingContext.ClientSet)
+	utils.DeleteStatusConfigMap(a.AutoscalingContext.ClientSet, a.AutoscalingContext.ConfigNamespace)
 }
@@ -128,6 +128,7 @@ func createAutoscalerOptions() core.AutoscalerOptions {
 		VerifyUnschedulablePods:  *verifyUnschedulablePods,
 		WriteStatusConfigMap:     *writeStatusConfigMapFlag,
 		BalanceSimilarNodeGroups: *balanceSimilarNodeGroupsFlag,
+		ConfigNamespace:          *namespace,
 	}
 
 	configFetcherOpts := dynamic.ConfigFetcherOptions{
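The *namespace flag dereferenced here is defined elsewhere in the file and is not part of this diff. A hypothetical sketch of such wiring, where the flag name, default, and usage text are all assumptions:

```go
package main

import (
	"flag"
	"fmt"
)

// Hypothetical stand-in for the flag consumed as *namespace above.
var namespace = flag.String("namespace", "kube-system",
	"Namespace in which cluster-autoscaler runs and keeps its status ConfigMap")

func main() {
	flag.Parse()
	fmt.Println("ConfigNamespace will be:", *namespace)
}
```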