implement `--until` flag for swarm api

- mostly copied the idea from base docker
- refactored the locking in event handling
- integration test for the `--until` flag

Signed-off-by: Morgan Bauer <mbauer@us.ibm.com>
Morgan Bauer 2015-09-09 12:58:24 -07:00
parent e9cfa55e92
commit b1d3b625de
3 changed files with 71 additions and 14 deletions


@@ -5,6 +5,7 @@ import (
"io"
"net/http"
"sync"
"time"
"github.com/docker/swarm/cluster"
)
@@ -34,12 +35,34 @@ func (eh *eventsHandler) Add(remoteAddr string, w io.Writer) {
}
// Wait waits on a signal from the remote address.
func (eh *eventsHandler) Wait(remoteAddr string) {
<-eh.cs[remoteAddr]
func (eh *eventsHandler) Wait(remoteAddr string, until int64) {
timer := time.NewTimer(0)
timer.Stop()
if until > 0 {
dur := time.Unix(until, 0).Sub(time.Now())
timer = time.NewTimer(dur)
}
select {
case <-eh.cs[remoteAddr]:
case <-timer.C: // `--until` timeout
close(eh.cs[remoteAddr])
}
eh.cleanupHandler(remoteAddr)
}
func (eh *eventsHandler) cleanupHandler(remoteAddr string) {
eh.Lock()
// the maps are expected to have the same keys
delete(eh.cs, remoteAddr)
delete(eh.ws, remoteAddr)
eh.Unlock()
}
// Handle writes information about a cluster event to each remote address in the cluster that has been added to the events handler.
// After a successful write to a remote address, the associated channel is closed and the address is removed from the events handler.
// After an unsuccessful write to a remote address, the associated channel is closed and the address is removed from the events handler.
func (eh *eventsHandler) Handle(e *cluster.Event) error {
eh.RLock()
@@ -66,24 +89,16 @@ func (eh *eventsHandler) Handle(e *cluster.Event) error {
if f, ok := w.(http.Flusher); ok {
f.Flush()
}
}
eh.RUnlock()
if len(failed) > 0 {
eh.Lock()
for _, key := range failed {
if ch, ok := eh.cs[key]; ok {
close(ch)
// the maps are expected to have the same keys
delete(eh.cs, key)
delete(eh.ws, key)
}
}
eh.Unlock()
}
return nil
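
The heart of the change is the timer-bounded wait above: when `until` is set, Wait blocks on whichever comes first, the per-client channel or the deadline, and then tears the handler down. A minimal standalone sketch of the same idea, using a nil timeout channel instead of the commit's stopped timer (names here are illustrative, not the swarm types):

package main

import (
	"fmt"
	"time"
)

// waitUntil blocks until done is signalled or, when until > 0, until the
// Unix timestamp `until` passes. It reports whether the deadline fired.
func waitUntil(done <-chan struct{}, until int64) bool {
	// A nil channel never delivers, so with no deadline the select below
	// simply blocks on done, matching the old Wait behaviour.
	var deadline <-chan time.Time
	if until > 0 {
		deadline = time.After(time.Unix(until, 0).Sub(time.Now()))
	}
	select {
	case <-done:
		return false
	case <-deadline:
		return true
	}
}

func main() {
	done := make(chan struct{})
	// A deadline one second in the past expires immediately.
	fmt.Println(waitUntil(done, time.Now().Add(-time.Second).Unix())) // prints: true
}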


@@ -485,6 +485,21 @@ func postImagesLoad(c *context, w http.ResponseWriter, r *http.Request) {
// GET /events
func getEvents(c *context, w http.ResponseWriter, r *http.Request) {
if err := r.ParseForm(); err != nil {
httpError(w, err.Error(), 400)
return
}
var until int64 = -1
if r.Form.Get("until") != "" {
u, err := strconv.ParseInt(r.Form.Get("until"), 10, 64)
if err != nil {
httpError(w, err.Error(), 400)
return
}
until = u
}
c.eventsHandler.Add(r.RemoteAddr, w)
w.Header().Set("Content-Type", "application/json")
@@ -493,7 +508,7 @@ func getEvents(c *context, w http.ResponseWriter, r *http.Request) {
f.Flush()
}
c.eventsHandler.Wait(r.RemoteAddr)
c.eventsHandler.Wait(r.RemoteAddr, until)
}
// POST /containers/{name:.*}/exec
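
On the wire, `until` is just a Unix timestamp in the query string of GET /events, as in the Docker remote API; the handler above parses it with strconv.ParseInt and falls back to -1 (no deadline) when the parameter is absent. A hedged client-side sketch (the manager address and plain-HTTP transport are assumptions for illustration):

package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
	"time"
)

func main() {
	// Assumed swarm manager address; adjust for your environment.
	manager := "localhost:2375"

	// Ask only for events until one second ago; Wait times out immediately
	// and the manager closes the stream without delivering anything.
	until := time.Now().Add(-time.Second).Unix()

	resp, err := http.Get(fmt.Sprintf("http://%s/events?until=%d", manager, until))
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	defer resp.Body.Close()
	io.Copy(os.Stdout, resp.Body) // expect no events, then EOF
}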


@@ -17,7 +17,7 @@ function teardown() {
local events_pid="$!"
# This should emit 3 events: create, start, die.
docker_swarm run --name test_container -e constraint:node==node-0 busybox true
docker_swarm run -d --name test_container -e constraint:node==node-0 busybox true
# events might take a little bit to show up, wait until we get the last one.
retry 5 0.5 grep -q "die" "$log_file"
@@ -25,7 +25,10 @@ function teardown() {
# clean up `docker events`
kill "$events_pid"
# verify
# verify size
[[ $(wc -l < ${log_file}) == 3 ]]
# verify content
run cat "$log_file"
[ "$status" -eq 0 ]
[[ "${output}" == *"node:node-0"* ]]
@@ -36,3 +39,27 @@ function teardown() {
# after ok, remove the log file
rm -f "$log_file"
}
@test "docker events until" {
# should produce no output because the --until timestamp is already in the past
start_docker_with_busybox 2
swarm_manage
# start events, report real time events to $log_file
local log_file=$(mktemp)
ONE_SECOND_IN_THE_PAST=$(($(date +%s) - 1))
docker_swarm events --until ${ONE_SECOND_IN_THE_PAST} > "$log_file"
# This should emit 3 events: create, start, die.
docker_swarm run --name test_container -e constraint:node==node-0 busybox true
# do not need to kill events, it's already dead
# verify size
[[ $(wc -l < ${log_file}) == 0 ]]
# no content, so nothing else to verify
# after ok, remove the log file
rm -f "$log_file"
}