diff --git a/daemon/cluster/cluster.go b/daemon/cluster/cluster.go
index b8e56432a3..cdfe0b41ba 100644
--- a/daemon/cluster/cluster.go
+++ b/daemon/cluster/cluster.go
@@ -111,7 +111,7 @@ func New(config Config) (*Cluster, error) {
 	select {
 	case <-time.After(swarmConnectTimeout):
 		logrus.Errorf("swarm component could not be started before timeout was reached")
-	case <-n.Ready(context.Background()):
+	case <-n.Ready():
 	case <-ctx.Done():
 	}
 	if ctx.Err() != nil {
@@ -213,7 +213,7 @@ func (c *Cluster) startNewNode(forceNewCluster bool, listenAddr, joinAddr, secre
 
 	go func() {
 		select {
-		case <-node.Ready(context.Background()):
+		case <-node.Ready():
 			c.Lock()
 			c.reconnectDelay = initialReconnectDelay
 			c.Unlock()
@@ -273,7 +273,7 @@ func (c *Cluster) Init(req types.InitRequest) (string, error) {
 	c.Unlock()
 
 	select {
-	case <-n.Ready(context.Background()):
+	case <-n.Ready():
 		if err := initAcceptancePolicy(n, req.Spec.AcceptancePolicy); err != nil {
 			return "", err
 		}
@@ -319,7 +319,7 @@ func (c *Cluster) Join(req types.JoinRequest) error {
 			return fmt.Errorf("Timeout reached before node was joined. Your cluster settings may be preventing this node from automatically joining. To accept this node into cluster run `docker node accept %v` in an existing cluster manager", nodeid)
 		}
 		return ErrSwarmJoinTimeoutReached
-	case <-n.Ready(context.Background()):
+	case <-n.Ready():
 		go c.reconnectOnFailure(ctx)
 		return nil
 	case <-ctx.Done():
diff --git a/daemon/cluster/executor/container/adapter.go b/daemon/cluster/executor/container/adapter.go
index 32768ace8e..c46bfab05b 100644
--- a/daemon/cluster/executor/container/adapter.go
+++ b/daemon/cluster/executor/container/adapter.go
@@ -39,7 +39,7 @@ func newContainerAdapter(b executorpkg.Backend, task *api.Task) (*containerAdapt
 
 func (c *containerAdapter) pullImage(ctx context.Context) error {
 	// if the image needs to be pulled, the auth config will be retrieved and updated
-	encodedAuthConfig := c.container.task.ServiceAnnotations.Labels[fmt.Sprintf("%v.registryauth", systemLabelPrefix)]
+	encodedAuthConfig := c.container.spec().RegistryAuth
 
 	authConfig := &types.AuthConfig{}
 	if encodedAuthConfig != "" {
diff --git a/daemon/cluster/executor/container/controller.go b/daemon/cluster/executor/container/controller.go
index 7cb11132ea..efdd12ac3b 100644
--- a/daemon/cluster/executor/container/controller.go
+++ b/daemon/cluster/executor/container/controller.go
@@ -2,13 +2,13 @@ package container
 
 import (
 	"fmt"
-	"strings"
 
 	executorpkg "github.com/docker/docker/daemon/cluster/executor"
 	"github.com/docker/engine-api/types"
 	"github.com/docker/swarmkit/agent/exec"
 	"github.com/docker/swarmkit/api"
 	"github.com/docker/swarmkit/log"
+	"github.com/pkg/errors"
 	"golang.org/x/net/context"
 )
 
@@ -84,31 +84,32 @@ func (r *controller) Prepare(ctx context.Context) error {
 		return err
 	}
 
-	for {
-		if err := r.checkClosed(); err != nil {
-			return err
-		}
-
-		if err := r.adapter.create(ctx, r.backend); err != nil {
-			if isContainerCreateNameConflict(err) {
-				if _, err := r.adapter.inspect(ctx); err != nil {
-					return err
-				}
+	if err := r.adapter.pullImage(ctx); err != nil {
+		// NOTE(stevvooe): We always try to pull the image to make sure we have
+		// the most up to date version. This will return an error, but we only
+		// log it. If the image truly doesn't exist, the create below will
+		// error out.
+		//
+		// This gives us some nice behavior where we use up to date versions of
+		// mutable tags, but will still run if the old image is available but a
+		// registry is down.
+		//
+		// If you don't want this behavior, lock down your image to an
+		// immutable tag or digest.
+		log.G(ctx).WithError(err).Error("pulling image failed")
+	}
 
-				// container is already created. success!
-				return exec.ErrTaskPrepared
-			}
-
-			if !strings.Contains(err.Error(), "No such image") { // todo: better error detection
-				return err
-			}
-			if err := r.adapter.pullImage(ctx); err != nil {
+	if err := r.adapter.create(ctx, r.backend); err != nil {
+		if isContainerCreateNameConflict(err) {
+			if _, err := r.adapter.inspect(ctx); err != nil {
 				return err
 			}
-			continue // retry to create the container
+
+			// container is already created. success!
+			return exec.ErrTaskPrepared
 		}
-
-		break
+
+		return err
 	}
 
 	return nil
@@ -135,7 +136,7 @@ func (r *controller) Start(ctx context.Context) error {
 	}
 
 	if err := r.adapter.start(ctx); err != nil {
-		return err
+		return errors.Wrap(err, "starting container failed")
 	}
 
 	return nil