Make status configmap respect namespace parameter

Maciej Pytel 2017-06-14 13:06:29 +02:00
parent 7c5679baaf
commit fe514ed75d
7 changed files with 35 additions and 30 deletions

View File

@@ -32,8 +32,6 @@ import (
)
const (
// StatusConfigMapNamespace is the namespace where ConfigMap with status is stored.
StatusConfigMapNamespace = "kube-system"
// StatusConfigMapName is the name of ConfigMap with status.
StatusConfigMapName = "cluster-autoscaler-status"
// ConfigMapLastUpdatedKey is the name of annotation informing about status ConfigMap last update.
@@ -64,11 +62,11 @@ func (ler *LogEventRecorder) Eventf(eventtype, reason, message string, args ...i
// NewStatusMapRecorder creates a LogEventRecorder creating events on status configmap.
// If the configmap doesn't exist it will be created (with 'Initializing' status).
// If active == false the map will not be created and no events will be recorded.
func NewStatusMapRecorder(kubeClient kube_client.Interface, recorder record.EventRecorder, active bool) (*LogEventRecorder, error) {
func NewStatusMapRecorder(kubeClient kube_client.Interface, namespace string, recorder record.EventRecorder, active bool) (*LogEventRecorder, error) {
var mapObj runtime.Object
var err error
if active {
mapObj, err = WriteStatusConfigMap(kubeClient, "Initializing", nil)
mapObj, err = WriteStatusConfigMap(kubeClient, namespace, "Initializing", nil)
if err != nil {
return nil, errors.New("Failed to init status ConfigMap")
}
@@ -83,13 +81,13 @@ func NewStatusMapRecorder(kubeClient kube_client.Interface, recorder record.Even
// WriteStatusConfigMap updates the status ConfigMap with a given message or creates a new
// ConfigMap if it doesn't exist. If logRecorder is passed and the ConfigMap update is successful,
// logRecorder's internal reference will be updated.
func WriteStatusConfigMap(kubeClient kube_client.Interface, msg string, logRecorder *LogEventRecorder) (*apiv1.ConfigMap, error) {
func WriteStatusConfigMap(kubeClient kube_client.Interface, namespace string, msg string, logRecorder *LogEventRecorder) (*apiv1.ConfigMap, error) {
statusUpdateTime := time.Now()
statusMsg := fmt.Sprintf("Cluster-autoscaler status at %v:\n%v", statusUpdateTime, msg)
var configMap *apiv1.ConfigMap
var getStatusError, writeStatusError error
var errMsg string
maps := kubeClient.CoreV1().ConfigMaps(StatusConfigMapNamespace)
maps := kubeClient.CoreV1().ConfigMaps(namespace)
configMap, getStatusError = maps.Get(StatusConfigMapName, metav1.GetOptions{})
if getStatusError == nil {
configMap.Data["status"] = statusMsg
@@ -101,7 +99,7 @@ func WriteStatusConfigMap(kubeClient kube_client.Interface, msg string, logRecor
} else if kube_errors.IsNotFound(getStatusError) {
configMap = &apiv1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: StatusConfigMapNamespace,
Namespace: namespace,
Name: StatusConfigMapName,
Annotations: map[string]string{
ConfigMapLastUpdatedKey: fmt.Sprintf("%v", statusUpdateTime),
@@ -132,8 +130,8 @@ func WriteStatusConfigMap(kubeClient kube_client.Interface, msg string, logRecor
}
// DeleteStatusConfigMap deletes status configmap
func DeleteStatusConfigMap(kubeClient kube_client.Interface) error {
maps := kubeClient.CoreV1().ConfigMaps(StatusConfigMapNamespace)
func DeleteStatusConfigMap(kubeClient kube_client.Interface, namespace string) error {
maps := kubeClient.CoreV1().ConfigMaps(namespace)
err := maps.Delete(StatusConfigMapName, &metav1.DeleteOptions{})
if err != nil {
glog.Error("Failed to delete status configmap")

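Taken together, the hunks above thread an explicit namespace argument through WriteStatusConfigMap, NewStatusMapRecorder and DeleteStatusConfigMap instead of relying on the removed StatusConfigMapNamespace constant. A minimal caller-side sketch, assuming the package is imported as utils and that kubeClient and glog are already in scope (the namespace value is illustrative):

namespace := "kube-system" // illustrative; the real value comes from configuration
if _, err := utils.WriteStatusConfigMap(kubeClient, namespace, "Initializing", nil); err != nil {
    glog.Errorf("Failed to write status ConfigMap: %v", err)
}
// ...later, on shutdown, remove the ConfigMap from the same namespace:
if err := utils.DeleteStatusConfigMap(kubeClient, namespace); err != nil {
    glog.Errorf("Failed to delete status ConfigMap: %v", err)
}
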
View File

@@ -33,6 +33,7 @@ import (
type testInfo struct {
client *fake.Clientset
configMap *apiv1.ConfigMap
namespace string
getError error
getCalled bool
updateCalled bool
@@ -41,15 +42,17 @@ type testInfo struct {
}
func setUpTest(t *testing.T) *testInfo {
namespace := "kube-system"
result := testInfo{
client: &fake.Clientset{},
configMap: &apiv1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: StatusConfigMapNamespace,
Namespace: namespace,
Name: StatusConfigMapName,
},
Data: map[string]string{},
},
namespace: namespace,
getCalled: false,
updateCalled: false,
createCalled: false,
@@ -57,7 +60,7 @@ func setUpTest(t *testing.T) *testInfo {
}
result.client.Fake.AddReactor("get", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
get := action.(core.GetAction)
assert.Equal(result.t, StatusConfigMapNamespace, get.GetNamespace())
assert.Equal(result.t, namespace, get.GetNamespace())
assert.Equal(result.t, StatusConfigMapName, get.GetName())
result.getCalled = true
if result.getError != nil {
@@ -67,13 +70,13 @@ func setUpTest(t *testing.T) *testInfo {
})
result.client.Fake.AddReactor("update", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
update := action.(core.UpdateAction)
assert.Equal(result.t, StatusConfigMapNamespace, update.GetNamespace())
assert.Equal(result.t, namespace, update.GetNamespace())
result.updateCalled = true
return true, result.configMap, nil
})
result.client.Fake.AddReactor("create", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
create := action.(core.CreateAction)
assert.Equal(result.t, StatusConfigMapNamespace, create.GetNamespace())
assert.Equal(result.t, namespace, create.GetNamespace())
configMap := create.GetObject().(*apiv1.ConfigMap)
assert.Equal(result.t, StatusConfigMapName, configMap.ObjectMeta.Name)
result.createCalled = true
@@ -84,7 +87,7 @@ func setUpTest(t *testing.T) *testInfo {
func TestWriteStatusConfigMapExisting(t *testing.T) {
ti := setUpTest(t)
result, err := WriteStatusConfigMap(ti.client, "TEST_MSG", nil)
result, err := WriteStatusConfigMap(ti.client, ti.namespace, "TEST_MSG", nil)
assert.Equal(t, ti.configMap, result)
assert.Contains(t, result.Data["status"], "TEST_MSG")
assert.Contains(t, result.ObjectMeta.Annotations, ConfigMapLastUpdatedKey)
@@ -97,7 +100,7 @@ func TestWriteStatusConfigMapExisting(t *testing.T) {
func TestWriteStatusConfigMapCreate(t *testing.T) {
ti := setUpTest(t)
ti.getError = kube_errors.NewNotFound(apiv1.Resource("configmap"), "nope, not found")
result, err := WriteStatusConfigMap(ti.client, "TEST_MSG", nil)
result, err := WriteStatusConfigMap(ti.client, ti.namespace, "TEST_MSG", nil)
assert.Contains(t, result.Data["status"], "TEST_MSG")
assert.Contains(t, result.ObjectMeta.Annotations, ConfigMapLastUpdatedKey)
assert.Nil(t, err)
@@ -109,7 +112,7 @@ func TestWriteStatusConfigMapCreate(t *testing.T) {
func TestWriteStatusConfigMapError(t *testing.T) {
ti := setUpTest(t)
ti.getError = errors.New("stuff bad")
result, err := WriteStatusConfigMap(ti.client, "TEST_MSG", nil)
result, err := WriteStatusConfigMap(ti.client, ti.namespace, "TEST_MSG", nil)
assert.NotNil(t, err)
assert.Contains(t, err.Error(), "stuff bad")
assert.Nil(t, result)

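The commit does not add a test for the delete path; a hedged sketch of one, reusing setUpTest and the imports already present in this file (the reactor and test name are hypothetical, not part of the diff):

func TestDeleteStatusConfigMap(t *testing.T) {
    ti := setUpTest(t)
    deleteCalled := false
    // Register a delete reactor analogous to the get/update/create ones above.
    ti.client.Fake.AddReactor("delete", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
        del := action.(core.DeleteAction)
        assert.Equal(t, ti.namespace, del.GetNamespace())
        assert.Equal(t, StatusConfigMapName, del.GetName())
        deleteCalled = true
        return true, nil, nil
    })
    err := DeleteStatusConfigMap(ti.client, ti.namespace)
    assert.Nil(t, err)
    assert.True(t, deleteCalled)
}
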
View File

@@ -105,6 +105,8 @@ type AutoscalingOptions struct {
WriteStatusConfigMap bool
// BalanceSimilarNodeGroups enables logic that identifies node groups with similar machines and tries to balance node count between them.
BalanceSimilarNodeGroups bool
// ConfigNamespace is the namespace cluster-autoscaler is running in and in which all related ConfigMaps live
ConfigNamespace string
}
// NewAutoscalingContext returns an autoscaling context from all the necessary parameters passed via arguments

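For context, a minimal sketch of populating the new field next to its neighbours (the values are illustrative, not taken from the commit):

opts := AutoscalingOptions{
    WriteStatusConfigMap:     true,
    BalanceSimilarNodeGroups: false,
    // Namespace the autoscaler runs in; the status ConfigMap is read from and written to it.
    ConfigNamespace: "kube-system",
}
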
View File

@@ -68,7 +68,7 @@ func TestFindUnneededNodes(t *testing.T) {
fakeClient := &fake.Clientset{}
fakeRecorder := kube_util.CreateEventRecorder(fakeClient)
fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, fakeRecorder, false)
fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", fakeRecorder, false)
provider := testprovider.NewTestCloudProvider(nil, nil)
provider.AddNodeGroup("ng1", 1, 10, 2)
@@ -273,7 +273,7 @@ func TestScaleDown(t *testing.T) {
assert.NotNil(t, provider)
fakeRecorder := kube_util.CreateEventRecorder(fakeClient)
fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, fakeRecorder, false)
fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", fakeRecorder, false)
context := &AutoscalingContext{
AutoscalingOptions: AutoscalingOptions{
ScaleDownUtilizationThreshold: 0.5,
@@ -331,7 +331,7 @@ func TestNoScaleDownUnready(t *testing.T) {
provider.AddNode("ng1", n2)
fakeRecorder := kube_util.CreateEventRecorder(fakeClient)
fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, fakeRecorder, false)
fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", fakeRecorder, false)
context := &AutoscalingContext{
AutoscalingOptions: AutoscalingOptions{
ScaleDownUtilizationThreshold: 0.5,
@@ -435,7 +435,7 @@ func TestScaleDownNoMove(t *testing.T) {
assert.NotNil(t, provider)
fakeRecorder := kube_util.CreateEventRecorder(fakeClient)
fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, fakeRecorder, false)
fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", fakeRecorder, false)
context := &AutoscalingContext{
AutoscalingOptions: AutoscalingOptions{
ScaleDownUtilizationThreshold: 0.5,

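Every call site in this file (and in the scale-up tests below) now repeats the "kube-system" literal; a small helper, hypothetical and not part of the commit, could centralise the pattern:

// newFakeLogRecorder wraps the recorder setup repeated at the call sites above.
// The signature assumes the kube_client, kube_util and utils import aliases used in these tests.
func newFakeLogRecorder(fakeClient kube_client.Interface) *utils.LogEventRecorder {
    fakeRecorder := kube_util.CreateEventRecorder(fakeClient)
    fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", fakeRecorder, false)
    return fakeLogRecorder
}
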
View File

@@ -80,7 +80,7 @@ func TestScaleUpOK(t *testing.T) {
clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{})
clusterState.UpdateNodes([]*apiv1.Node{n1, n2}, time.Now())
fakeRecorder := kube_record.NewFakeRecorder(5)
fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, kube_record.NewFakeRecorder(5), false)
fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
context := &AutoscalingContext{
AutoscalingOptions: AutoscalingOptions{
EstimatorName: estimator.BinpackingEstimatorName,
@@ -157,7 +157,7 @@ func TestScaleUpNodeComingNoScale(t *testing.T) {
clusterState.UpdateNodes([]*apiv1.Node{n1, n2}, time.Now())
fakeRecorder := kube_util.CreateEventRecorder(fakeClient)
fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, fakeRecorder, false)
fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", fakeRecorder, false)
context := &AutoscalingContext{
AutoscalingOptions: AutoscalingOptions{
EstimatorName: estimator.BinpackingEstimatorName,
@@ -222,7 +222,7 @@ func TestScaleUpNodeComingHasScale(t *testing.T) {
clusterState.UpdateNodes([]*apiv1.Node{n1, n2}, time.Now())
fakeRecorder := kube_util.CreateEventRecorder(fakeClient)
fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, fakeRecorder, false)
fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", fakeRecorder, false)
context := &AutoscalingContext{
AutoscalingOptions: AutoscalingOptions{
EstimatorName: estimator.BinpackingEstimatorName,
@@ -280,7 +280,7 @@ func TestScaleUpUnhealthy(t *testing.T) {
clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{})
clusterState.UpdateNodes([]*apiv1.Node{n1, n2}, time.Now())
fakeRecorder := kube_util.CreateEventRecorder(fakeClient)
fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, fakeRecorder, false)
fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", fakeRecorder, false)
context := &AutoscalingContext{
AutoscalingOptions: AutoscalingOptions{
EstimatorName: estimator.BinpackingEstimatorName,
@@ -329,7 +329,7 @@ func TestScaleUpNoHelp(t *testing.T) {
clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{})
clusterState.UpdateNodes([]*apiv1.Node{n1}, time.Now())
fakeRecorder := kube_record.NewFakeRecorder(5)
fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, kube_record.NewFakeRecorder(5), false)
fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
context := &AutoscalingContext{
AutoscalingOptions: AutoscalingOptions{
EstimatorName: estimator.BinpackingEstimatorName,
@@ -407,7 +407,7 @@ func TestScaleUpBalanceGroups(t *testing.T) {
clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{})
clusterState.UpdateNodes(nodes, time.Now())
fakeRecorder := kube_record.NewFakeRecorder(5)
fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, kube_record.NewFakeRecorder(5), false)
fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
context := &AutoscalingContext{
AutoscalingOptions: AutoscalingOptions{
EstimatorName: estimator.BinpackingEstimatorName,

View File

@@ -44,12 +44,12 @@ type StaticAutoscaler struct {
// NewStaticAutoscaler creates an instance of Autoscaler filled with provided parameters
func NewStaticAutoscaler(opts AutoscalingOptions, predicateChecker *simulator.PredicateChecker,
kubeClient kube_client.Interface, kubeEventRecorder kube_record.EventRecorder, listerRegistry kube_util.ListerRegistry) (*StaticAutoscaler, errors.AutoscalerError) {
logRecorder, err := utils.NewStatusMapRecorder(kubeClient, kubeEventRecorder, opts.WriteStatusConfigMap)
logRecorder, err := utils.NewStatusMapRecorder(kubeClient, opts.ConfigNamespace, kubeEventRecorder, opts.WriteStatusConfigMap)
if err != nil {
glog.Error("Failed to initialize status configmap, unable to write status events")
// Get a dummy, so we can at least safely call the methods
// TODO(maciekpytel): recover from this after successful status configmap update?
logRecorder, _ = utils.NewStatusMapRecorder(kubeClient, kubeEventRecorder, false)
logRecorder, _ = utils.NewStatusMapRecorder(kubeClient, opts.ConfigNamespace, kubeEventRecorder, false)
}
autoscalingContext, errctx := NewAutoscalingContext(opts, predicateChecker, kubeClient, kubeEventRecorder, logRecorder, listerRegistry)
if errctx != nil {
@@ -120,7 +120,8 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) errors.AutoscalerError
defer func() {
if autoscalingContext.WriteStatusConfigMap {
status := a.ClusterStateRegistry.GetStatus(time.Now())
utils.WriteStatusConfigMap(autoscalingContext.ClientSet, status.GetReadableString(), a.AutoscalingContext.LogRecorder)
utils.WriteStatusConfigMap(autoscalingContext.ClientSet, autoscalingContext.ConfigNamespace,
status.GetReadableString(), a.AutoscalingContext.LogRecorder)
}
}()
if !a.ClusterStateRegistry.IsClusterHealthy() {
@@ -306,5 +307,5 @@ func (a *StaticAutoscaler) ExitCleanUp() {
if !a.AutoscalingContext.WriteStatusConfigMap {
return
}
utils.DeleteStatusConfigMap(a.AutoscalingContext.ClientSet)
utils.DeleteStatusConfigMap(a.AutoscalingContext.ClientSet, a.AutoscalingContext.ConfigNamespace)
}

View File

@@ -128,6 +128,7 @@ func createAutoscalerOptions() core.AutoscalerOptions {
VerifyUnschedulablePods: *verifyUnschedulablePods,
WriteStatusConfigMap: *writeStatusConfigMapFlag,
BalanceSimilarNodeGroups: *balanceSimilarNodeGroupsFlag,
ConfigNamespace: *namespace,
}
configFetcherOpts := dynamic.ConfigFetcherOptions{
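
The *namespace dereferenced above is a command-line flag defined elsewhere in main.go and not shown in this diff; presumably something along these lines, with the exact default and help text possibly differing:

namespace = flag.String("namespace", "kube-system", "Namespace in which cluster-autoscaler runs and in which its status ConfigMap is stored")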