Cluster-Autoscaler: update status configmap on errors

Previously the status configmap was only updated after successfully
completing the main loop, meaning the status wouldn't get updated
unless the cluster was healthy.
Maciej Pytel 2017-03-15 13:15:38 +01:00
parent 41d3292456
commit c71668a8d8
1 changed file with 7 additions and 4 deletions

@@ -102,6 +102,13 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) {
 	}
 	a.ClusterStateRegistry.UpdateNodes(allNodes, currentTime)
+	// Update status information when the loop is done (regardless of reason)
+	defer func() {
+		if autoscalingContext.WriteStatusConfigMap {
+			status := a.ClusterStateRegistry.GetStatus(time.Now())
+			utils.WriteStatusConfigMap(autoscalingContext.ClientSet, status.GetReadableString(), a.AutoscalingContext.LogRecorder)
+		}
+	}()
 	if !a.ClusterStateRegistry.IsClusterHealthy() {
 		glog.Warningf("Cluster is not ready for autoscaling: %v", err)
 		return
 	}
@@ -262,10 +269,6 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) {
 			}
 		}
 	}
-	if autoscalingContext.WriteStatusConfigMap {
-		status := a.ClusterStateRegistry.GetStatus(time.Now())
-		utils.WriteStatusConfigMap(autoscalingContext.ClientSet, status.GetReadableString(), a.AutoscalingContext.LogRecorder)
-	}
 }
 
 // ExitCleanUp removes status configmap.
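
For illustration, here is a minimal, self-contained sketch of the pattern this commit introduces: registering the status write in a defer so it runs on every exit path, including the early return taken when the cluster is unhealthy. The names below (runOnce, writeStatus) are hypothetical stand-ins, not the actual cluster-autoscaler API.

package main

import "fmt"

// writeStatus is a hypothetical stand-in for utils.WriteStatusConfigMap.
func writeStatus(status string) {
	fmt.Println("status configmap updated:", status)
}

// runOnce mirrors the control flow of StaticAutoscaler.RunOnce in miniature.
func runOnce(healthy bool) {
	// The deferred closure fires on every return path, so the status is
	// written even when the function bails out early on an unhealthy cluster.
	defer func() {
		writeStatus("latest cluster state")
	}()

	if !healthy {
		fmt.Println("cluster is not ready for autoscaling")
		return // status is still written, thanks to the defer above
	}
	fmt.Println("main loop completed")
}

func main() {
	runOnce(false) // early-return path: status is still updated
	runOnce(true)  // happy path
}

Note that the defer wraps the write in a closure rather than deferring the call directly: Go evaluates a deferred call's arguments at the point the defer statement executes, so the closure is what lets the real change fetch a fresh status via GetStatus(time.Now()) at loop exit rather than at registration time.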