Merge pull request #230 from mountkin/master

restart the event monitor when an unhealthy node comes back
Victor Vieux 2015-01-19 14:37:51 -08:00
commit 08a17cd2e6
1 changed file with 26 additions and 0 deletions

@@ -95,6 +95,7 @@ func (n *Node) connectClient(client dockerclient.Client) error {
	// Start monitoring events from the Node.
	n.client.StartMonitorEvents(n.handler)
	n.emitCustomEvent("node_connect")

	return nil
}
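
For context, n.handler above is the node's engine-event callback. A minimal sketch of its expected shape, assuming the dockerclient Callback signature of this era of the library (the body is illustrative, not this commit's code):

	// Sketch only: the callback StartMonitorEvents invokes once per engine
	// event. The real handler in node.go does more (e.g. refreshing state);
	// this stand-in just forwards the event to the registered event handler.
	func (n *Node) handler(ev *dockerclient.Event, args ...interface{}) {
		if n.eventHandler == nil {
			return
		}
		n.eventHandler.Handle(&Event{Event: *ev, Node: n})
	}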
@@ -259,17 +260,42 @@ func (n *Node) refreshLoop() {
		}
		if err != nil {
			if n.healthy {
				n.emitCustomEvent("node_disconnect")
			}
			n.healthy = false
			log.Errorf("[%s/%s] Flagging node as dead. Updated state failed: %v", n.ID, n.Name, err)
		} else {
			if !n.healthy {
				log.Infof("[%s/%s] Node came back to life. Hooray!", n.ID, n.Name)
				n.client.StopAllMonitorEvents()
				n.client.StartMonitorEvents(n.handler)
				n.emitCustomEvent("node_reconnect")
				if err := n.updateSpecs(); err != nil {
					log.Errorf("[%s/%s] Update node specs failed: %v", n.ID, n.Name, err)
				}
			}
			n.healthy = true
		}
	}
}

func (n *Node) emitCustomEvent(event string) {
	// If there is no event handler registered, abort right now.
	if n.eventHandler == nil {
		return
	}

	ev := &Event{
		Event: dockerclient.Event{
			Status: event,
			From:   "swarm",
			Time:   time.Now().Unix(),
		},
		Node: n,
	}

	n.eventHandler.Handle(ev)
}

// Return the sum of memory reserved by containers.
func (n *Node) ReservedMemory() int64 {
	var r int64 = 0
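
Events forged by emitCustomEvent carry From set to "swarm", while events relayed from a Docker engine carry an image name there, so a consumer can tell the two apart. A minimal sketch of such a consumer, assuming the cluster package's Event type and an EventHandler interface whose Handle returns an error (swarmEventLogger is a hypothetical name, not part of this commit):

	// Sketch only: an EventHandler that reacts to the custom events this
	// commit emits (node_connect, node_disconnect, node_reconnect) and
	// ignores ordinary engine events.
	type swarmEventLogger struct{}

	func (l *swarmEventLogger) Handle(ev *Event) error {
		if ev.From == "swarm" {
			log.Infof("[%s/%s] cluster event: %s", ev.Node.ID, ev.Node.Name, ev.Status)
		}
		return nil
	}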