Cluster-Autoscaler: Delete status configmap on exit
parent 35b0103b36
commit d0196c9e1b
@@ -40,6 +40,8 @@ type Autoscaler interface {
 	RunOnce(currentTime time.Time)
 	// CleanUp represents a clean-up required before the first invocation of RunOnce
 	CleanUp()
+	// ExitCleanUp is a clean-up performed just before process termination.
+	ExitCleanUp()
 }
 
 // NewAutoscaler creates an autoscaler of an appropriate type according to the parameters
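For context, a minimal sketch of how a caller is expected to drive this lifecycle: CleanUp runs once before the first RunOnce, RunOnce runs on every loop iteration, and ExitCleanUp runs just before the process terminates. The noopAutoscaler type and the driver loop below are illustrative only, not part of this commit:

package main

import (
	"fmt"
	"time"
)

// Autoscaler mirrors the interface extended by this commit.
type Autoscaler interface {
	RunOnce(currentTime time.Time)
	CleanUp()
	ExitCleanUp()
}

// noopAutoscaler is a stand-in implementation used only for this sketch.
type noopAutoscaler struct{}

func (noopAutoscaler) RunOnce(t time.Time) { fmt.Println("iteration at", t) }
func (noopAutoscaler) CleanUp()            { fmt.Println("one-time cleanup before first RunOnce") }
func (noopAutoscaler) ExitCleanUp()        { fmt.Println("cleanup just before termination") }

func main() {
	var a Autoscaler = noopAutoscaler{}
	a.CleanUp() // once, before the control loop starts
	for i := 0; i < 3; i++ {
		a.RunOnce(time.Now())
	}
	a.ExitCleanUp() // just before exiting
}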
@@ -52,6 +52,11 @@ func (a *DynamicAutoscaler) CleanUp() {
 	a.autoscaler.CleanUp()
 }
 
+// ExitCleanUp cleans up after the autoscaler, so that no mess remains after process termination.
+func (a *DynamicAutoscaler) ExitCleanUp() {
+	a.autoscaler.ExitCleanUp()
+}
+
 // RunOnce represents a single iteration of a dynamic autoscaler inside the CA's control-loop
 func (a *DynamicAutoscaler) RunOnce(currentTime time.Time) {
 	reconfigureStart := time.Now()
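DynamicAutoscaler adds no exit logic of its own: like CleanUp above, ExitCleanUp is forwarded to the wrapped autoscaler, so the dynamic wrapper stays a thin reconfiguration layer and exit-time cleanup remains the concern of the underlying implementation.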
@@ -35,6 +35,10 @@ func (m *AutoscalerMock) CleanUp() {
 	m.Called()
 }
 
+func (m *AutoscalerMock) ExitCleanUp() {
+	m.Called()
+}
+
 type ConfigFetcherMock struct {
 	mock.Mock
 }
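A hedged sketch of how the new mock method could be exercised, assuming the stretchr/testify mock package that AutoscalerMock already embeds; the test itself is illustrative and not part of this commit:

func TestExitCleanUpIsCalled(t *testing.T) {
	autoscaler := &AutoscalerMock{}
	autoscaler.On("ExitCleanUp").Once() // expect exactly one call

	autoscaler.ExitCleanUp() // m.Called() records the invocation

	autoscaler.AssertExpectations(t) // fails the test if the expectation was not met
}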
@@ -263,6 +263,16 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) {
 	a.writeStatusConfigMap()
 }
 
+// ExitCleanUp removes the status configmap.
+func (a *StaticAutoscaler) ExitCleanUp() {
+	maps := a.AutoscalingContext.ClientSet.CoreV1().ConfigMaps(StatusConfigMapNamespace)
+	err := maps.Delete(StatusConfigMapName, &apiv1.DeleteOptions{})
+	if err != nil {
+		// Nothing else we can do at this point
+		glog.Errorf("Failed to delete status configmap: %v", err)
+	}
+}
+
 func (a *StaticAutoscaler) writeStatusConfigMap() {
 	statusUpdateTime := time.Now()
 	status := a.ClusterStateRegistry.GetStatus(statusUpdateTime)
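The delete here is deliberately best-effort: at exit time there is no meaningful recovery, so the error is only logged. A possible refinement, sketched below under the assumption that k8s.io/apimachinery/pkg/api/errors is importable in this file, would be to tolerate a configmap that was never written:

// Sketch, not part of this commit. Assumes the import:
//   kube_errors "k8s.io/apimachinery/pkg/api/errors"
func (a *StaticAutoscaler) ExitCleanUp() {
	maps := a.AutoscalingContext.ClientSet.CoreV1().ConfigMaps(StatusConfigMapNamespace)
	err := maps.Delete(StatusConfigMapName, &apiv1.DeleteOptions{})
	if err != nil && !kube_errors.IsNotFound(err) {
		// A missing configmap is fine; anything else we can only log.
		glog.Errorf("Failed to delete status configmap: %v", err)
	}
}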
@@ -21,7 +21,9 @@ import (
 	"net/http"
 	"net/url"
 	"os"
+	"os/signal"
 	"strings"
+	"syscall"
 	"time"
 
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -142,6 +144,21 @@ func createKubeClient() kube_client.Interface {
 	return kube_client.NewForConfigOrDie(kubeConfig)
 }
 
+func registerSignalHandlers(autoscaler core.Autoscaler) {
+	sigs := make(chan os.Signal, 1)
+	signal.Notify(sigs, os.Interrupt, os.Kill, syscall.SIGTERM, syscall.SIGQUIT)
+	glog.Info("Registered cleanup signal handler")
+
+	go func() {
+		<-sigs
+		glog.Info("Received signal, attempting cleanup")
+		autoscaler.ExitCleanUp()
+		glog.Info("Cleaned up, exiting...")
+		glog.Flush()
+		os.Exit(0)
+	}()
+}
+
 // In order to meet the interface criteria for LeaderElectionConfig we need to
 // take a stop channel as an argument. However, since we are committing suicide
 // after losing mastership we can safely ignore it.
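Two details of this handler are worth noting. signal.Notify never blocks when delivering a signal, so the channel must be buffered (a capacity of 1 suffices for a one-shot handler), and SIGKILL (os.Kill) cannot actually be intercepted on Unix, so listing it is harmless at best. Because os.Exit skips deferred functions, glog.Flush() is called explicitly before exiting. A self-contained sketch of the same pattern, illustrative only:

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	sigs := make(chan os.Signal, 1) // buffered: signal delivery must not block
	signal.Notify(sigs, os.Interrupt, syscall.SIGTERM)

	fmt.Println("waiting for SIGINT/SIGTERM...")
	<-sigs // block until a signal arrives

	fmt.Println("cleaning up before exit")
	os.Exit(0) // skips deferred functions, so flush logs first if needed
}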
@@ -158,6 +175,7 @@ func run(_ <-chan struct{}) {
 	autoscaler := core.NewAutoscaler(opts, predicateChecker, kubeClient, kubeEventRecorder, listerRegistry)
 
 	autoscaler.CleanUp()
+	registerSignalHandlers(autoscaler)
 
 	for {
 		select {
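The handler is registered right after the one-time CleanUp and before the control loop starts, so from the first iteration onward a SIGINT, SIGTERM, or SIGQUIT triggers ExitCleanUp, and with it the deletion of the status configmap, before the process exits.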