From 394ae84a495846c259944d9bdf28ad147b6bfe27 Mon Sep 17 00:00:00 2001 From: Andrea Luzzardi Date: Mon, 5 Oct 2015 17:05:14 -0700 Subject: [PATCH 1/5] node: Use cluster.Containers instead of duplicating code Signed-off-by: Andrea Luzzardi --- scheduler/node/node.go | 24 ++---------------------- 1 file changed, 2 insertions(+), 22 deletions(-) diff --git a/scheduler/node/node.go b/scheduler/node/node.go index c33877a72d..d8166c9392 100644 --- a/scheduler/node/node.go +++ b/scheduler/node/node.go @@ -2,7 +2,6 @@ package node import ( "errors" - "strings" "github.com/docker/swarm/cluster" ) @@ -14,7 +13,7 @@ type Node struct { Addr string Name string Labels map[string]string - Containers []*cluster.Container + Containers cluster.Containers Images []*cluster.Image UsedMemory int64 @@ -45,26 +44,7 @@ func NewNode(e *cluster.Engine) *Node { // Container returns the container with IDOrName in the engine. func (n *Node) Container(IDOrName string) *cluster.Container { - // Abort immediately if the name is empty. - if len(IDOrName) == 0 { - return nil - } - - for _, container := range n.Containers { - // Match ID prefix. - if strings.HasPrefix(container.Id, IDOrName) { - return container - } - - // Match name, /name or engine/name. - for _, name := range container.Names { - if name == IDOrName || name == "/"+IDOrName || container.Engine.ID+name == IDOrName || container.Engine.Name+name == IDOrName { - return container - } - } - } - - return nil + return n.Containers.Get(IDOrName) } // AddContainer injects a container into the internal state. From c64ae5168aa84e461ceab722a084b246963340a1 Mon Sep 17 00:00:00 2001 From: Andrea Luzzardi Date: Mon, 5 Oct 2015 17:06:45 -0700 Subject: [PATCH 2/5] Parallel scheduling support for Swarm driver. Signed-off-by: Andrea Luzzardi --- cluster/swarm/cluster.go | 76 ++++++++++++++++++++------- scheduler/filter/dependency_test.go | 79 ++++++++++++++++++++--------- 2 files changed, 113 insertions(+), 42 deletions(-) diff --git a/cluster/swarm/cluster.go b/cluster/swarm/cluster.go index b41fb405df..c6cfd8cf16 100644 --- a/cluster/swarm/cluster.go +++ b/cluster/swarm/cluster.go @@ -20,14 +20,31 @@ import ( "github.com/samalba/dockerclient" ) +type pendingContainer struct { + Config *cluster.ContainerConfig + Name string + Engine *cluster.Engine +} + +func (p *pendingContainer) ToContainer() *cluster.Container { + return &cluster.Container{ + Container: dockerclient.Container{ + Names: []string{"/" + p.Name}, + }, + Config: p.Config, + Engine: p.Engine, + } +} + // Cluster is exported type Cluster struct { sync.RWMutex - eventHandler cluster.EventHandler - engines map[string]*cluster.Engine - scheduler *scheduler.Scheduler - discovery discovery.Discovery + eventHandler cluster.EventHandler + engines map[string]*cluster.Engine + scheduler *scheduler.Scheduler + discovery discovery.Discovery + pendingContainers map[string]*pendingContainer overcommitRatio float64 TLSConfig *tls.Config @@ -38,11 +55,12 @@ func NewCluster(scheduler *scheduler.Scheduler, TLSConfig *tls.Config, discovery log.WithFields(log.Fields{"name": "swarm"}).Debug("Initializing cluster") cluster := &Cluster{ - engines: make(map[string]*cluster.Engine), - scheduler: scheduler, - TLSConfig: TLSConfig, - discovery: discovery, - overcommitRatio: 0.05, + engines: make(map[string]*cluster.Engine), + scheduler: scheduler, + TLSConfig: TLSConfig, + discovery: discovery, + pendingContainers: make(map[string]*pendingContainer), + overcommitRatio: 0.05, } if val, ok := options.Float("swarm.overcommit", ""); ok { @@ 
-102,15 +120,16 @@ func (c *Cluster) CreateContainer(config *cluster.ContainerConfig, name string) func (c *Cluster) createContainer(config *cluster.ContainerConfig, name string, withSoftImageAffinity bool) (*cluster.Container, error) { c.scheduler.Lock() - defer c.scheduler.Unlock() // Ensure the name is available if cID := c.getIDFromName(name); cID != "" { + c.scheduler.Unlock() return nil, fmt.Errorf("Conflict, The name %s is already assigned to %s. You have to delete (or rename) that container to be able to assign %s to a container again.", name, cID, name) } // Associate a Swarm ID to the container we are creating. - config.SetSwarmID(c.generateUniqueID()) + swarmID := c.generateUniqueID() + config.SetSwarmID(swarmID) configTemp := config if withSoftImageAffinity { @@ -119,15 +138,30 @@ func (c *Cluster) createContainer(config *cluster.ContainerConfig, name string, n, err := c.scheduler.SelectNodeForContainer(c.listNodes(), configTemp) if err != nil { + c.scheduler.Unlock() return nil, err } - - if nn, ok := c.engines[n.ID]; ok { - container, err := nn.Create(config, name, true) - return container, err + engine, ok := c.engines[n.ID] + if !ok { + c.scheduler.Unlock() + return nil, nil } - return nil, nil + c.pendingContainers[swarmID] = &pendingContainer{ + Name: name, + Config: config, + Engine: engine, + } + + c.scheduler.Unlock() + + container, err := engine.Create(config, name, true) + + c.scheduler.Lock() + delete(c.pendingContainers, swarmID) + c.scheduler.Unlock() + + return container, err } // RemoveContainer aka Remove a container from the cluster. Containers should @@ -571,8 +605,14 @@ func (c *Cluster) listNodes() []*node.Node { defer c.RUnlock() out := make([]*node.Node, 0, len(c.engines)) - for _, n := range c.engines { - out = append(out, node.NewNode(n)) + for _, e := range c.engines { + node := node.NewNode(e) + for _, c := range c.pendingContainers { + if c.Engine.ID == e.ID && node.Container(c.Config.SwarmID()) == nil { + node.AddContainer(c.ToContainer()) + } + } + out = append(out, node) } return out diff --git a/scheduler/filter/dependency_test.go b/scheduler/filter/dependency_test.go index cefe201bca..db3244b9f9 100644 --- a/scheduler/filter/dependency_test.go +++ b/scheduler/filter/dependency_test.go @@ -14,24 +14,33 @@ func TestDependencyFilterSimple(t *testing.T) { f = DependencyFilter{} nodes = []*node.Node{ { - ID: "node-0-id", - Name: "node-0-name", - Addr: "node-0", - Containers: []*cluster.Container{{Container: dockerclient.Container{Id: "c0"}}}, + ID: "node-0-id", + Name: "node-0-name", + Addr: "node-0", + Containers: []*cluster.Container{{ + Container: dockerclient.Container{Id: "c0"}, + Config: &cluster.ContainerConfig{}, + }}, }, { - ID: "node-1-id", - Name: "node-1-name", - Addr: "node-1", - Containers: []*cluster.Container{{Container: dockerclient.Container{Id: "c1"}}}, + ID: "node-1-id", + Name: "node-1-name", + Addr: "node-1", + Containers: []*cluster.Container{{ + Container: dockerclient.Container{Id: "c1"}, + Config: &cluster.ContainerConfig{}, + }}, }, { - ID: "node-2-id", - Name: "node-2-name", - Addr: "node-2", - Containers: []*cluster.Container{{Container: dockerclient.Container{Id: "c2"}}}, + ID: "node-2-id", + Name: "node-2-name", + Addr: "node-2", + Containers: []*cluster.Container{{ + Container: dockerclient.Container{Id: "c2"}, + Config: &cluster.ContainerConfig{}, + }}, }, } result []*node.Node @@ -109,17 +118,28 @@ func TestDependencyFilterMulti(t *testing.T) { Name: "node-0-name", Addr: "node-0", Containers: []*cluster.Container{ - 
{Container: dockerclient.Container{Id: "c0"}}, - {Container: dockerclient.Container{Id: "c1"}}, + { + Container: dockerclient.Container{Id: "c0"}, + Config: &cluster.ContainerConfig{}, + }, + { + Container: dockerclient.Container{Id: "c1"}, + Config: &cluster.ContainerConfig{}, + }, }, }, // nodes[1] has c2 { - ID: "node-1-id", - Name: "node-1-name", - Addr: "node-1", - Containers: []*cluster.Container{{Container: dockerclient.Container{Id: "c2"}}}, + ID: "node-1-id", + Name: "node-1-name", + Addr: "node-1", + Containers: []*cluster.Container{ + { + Container: dockerclient.Container{Id: "c2"}, + Config: &cluster.ContainerConfig{}, + }, + }, }, // nodes[2] has nothing @@ -179,17 +199,28 @@ func TestDependencyFilterChaining(t *testing.T) { Name: "node-0-name", Addr: "node-0", Containers: []*cluster.Container{ - {Container: dockerclient.Container{Id: "c0"}}, - {Container: dockerclient.Container{Id: "c1"}}, + { + Container: dockerclient.Container{Id: "c0"}, + Config: &cluster.ContainerConfig{}, + }, + { + Container: dockerclient.Container{Id: "c1"}, + Config: &cluster.ContainerConfig{}, + }, }, }, // nodes[1] has c2 { - ID: "node-1-id", - Name: "node-1-name", - Addr: "node-1", - Containers: []*cluster.Container{{Container: dockerclient.Container{Id: "c2"}}}, + ID: "node-1-id", + Name: "node-1-name", + Addr: "node-1", + Containers: []*cluster.Container{ + { + Container: dockerclient.Container{Id: "c2"}, + Config: &cluster.ContainerConfig{}, + }, + }, }, // nodes[2] has nothing From 91279c8256f0a5d851ddcc8a3be5e66bc547d043 Mon Sep 17 00:00:00 2001 From: Andrea Luzzardi Date: Tue, 6 Oct 2015 16:33:17 -0700 Subject: [PATCH 3/5] cluster: Check name uniqueness among pending containers. Signed-off-by: Andrea Luzzardi --- cluster/swarm/cluster.go | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/cluster/swarm/cluster.go b/cluster/swarm/cluster.go index c6cfd8cf16..498a80a4ca 100644 --- a/cluster/swarm/cluster.go +++ b/cluster/swarm/cluster.go @@ -122,9 +122,9 @@ func (c *Cluster) createContainer(config *cluster.ContainerConfig, name string, c.scheduler.Lock() // Ensure the name is available - if cID := c.getIDFromName(name); cID != "" { + if !c.checkNameUniqueness(name) { c.scheduler.Unlock() - return nil, fmt.Errorf("Conflict, The name %s is already assigned to %s. You have to delete (or rename) that container to be able to assign %s to a container again.", name, cID, name) + return nil, fmt.Errorf("Conflict, The name %s is already assigned. You have to delete (or rename) that container to be able to assign %s to a container again.", name, name) } // Associate a Swarm ID to the container we are creating. @@ -532,10 +532,10 @@ func (c *Cluster) Containers() cluster.Containers { return out } -func (c *Cluster) getIDFromName(name string) string { +func (c *Cluster) checkNameUniqueness(name string) bool { // Abort immediately if the name is empty. if len(name) == 0 { - return "" + return true } c.RLock() @@ -544,12 +544,20 @@ func (c *Cluster) getIDFromName(name string) string { for _, c := range e.Containers() { for _, cname := range c.Names { if cname == name || cname == "/"+name { - return c.Id + return false } } } } - return "" + + // check pending containers. 
+ for _, c := range c.pendingContainers { + if c.Name == name { + return false + } + } + + return true } // Container returns the container with IDOrName in the cluster @@ -693,8 +701,8 @@ func (c *Cluster) RenameContainer(container *cluster.Container, newName string) defer c.RUnlock() // check new name whether available - if cID := c.getIDFromName(newName); cID != "" { - return fmt.Errorf("Conflict, The name %s is already assigned to %s. You have to delete (or rename) that container to be able to assign %s to a container again.", newName, cID, newName) + if !c.checkNameUniqueness(newName) { + return fmt.Errorf("Conflict, The name %s is already assigned. You have to delete (or rename) that container to be able to assign %s to a container again.", newName, newName) } // call engine rename From 24394612f53c5a3a3abc20958856b12b7285adb7 Mon Sep 17 00:00:00 2001 From: Andrea Luzzardi Date: Wed, 7 Oct 2015 13:07:57 -0700 Subject: [PATCH 4/5] cluster: Don't lock the scheduler when removing a container. Signed-off-by: Andrea Luzzardi --- cluster/swarm/cluster.go | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/cluster/swarm/cluster.go b/cluster/swarm/cluster.go index 498a80a4ca..7441ce0d86 100644 --- a/cluster/swarm/cluster.go +++ b/cluster/swarm/cluster.go @@ -164,14 +164,9 @@ func (c *Cluster) createContainer(config *cluster.ContainerConfig, name string, return container, err } -// RemoveContainer aka Remove a container from the cluster. Containers should -// always be destroyed through the scheduler to guarantee atomicity. +// RemoveContainer aka Remove a container from the cluster. func (c *Cluster) RemoveContainer(container *cluster.Container, force, volumes bool) error { - c.scheduler.Lock() - defer c.scheduler.Unlock() - - err := container.Engine.RemoveContainer(container, force, volumes) - return err + return container.Engine.RemoveContainer(container, force, volumes) } func (c *Cluster) getEngineByAddr(addr string) *cluster.Engine { From 7c0539c6505b4d17026689f52e2ce9dc8d41d2ed Mon Sep 17 00:00:00 2001 From: Andrea Luzzardi Date: Thu, 8 Oct 2015 16:21:09 -0700 Subject: [PATCH 5/5] cluster: Fix name setting of pending containers. Signed-off-by: Andrea Luzzardi --- cluster/swarm/cluster.go | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/cluster/swarm/cluster.go b/cluster/swarm/cluster.go index 7441ce0d86..011f918633 100644 --- a/cluster/swarm/cluster.go +++ b/cluster/swarm/cluster.go @@ -27,13 +27,17 @@ type pendingContainer struct { } func (p *pendingContainer) ToContainer() *cluster.Container { - return &cluster.Container{ - Container: dockerclient.Container{ - Names: []string{"/" + p.Name}, - }, - Config: p.Config, - Engine: p.Engine, + container := &cluster.Container{ + Container: dockerclient.Container{}, + Config: p.Config, + Engine: p.Engine, } + + if p.Name != "" { + container.Container.Names = []string{"/" + p.Name} + } + + return container } // Cluster is exported @@ -124,7 +128,7 @@ func (c *Cluster) createContainer(config *cluster.ContainerConfig, name string, // Ensure the name is available if !c.checkNameUniqueness(name) { c.scheduler.Unlock() - return nil, fmt.Errorf("Conflict, The name %s is already assigned. You have to delete (or rename) that container to be able to assign %s to a container again.", name, name) + return nil, fmt.Errorf("Conflict: The name %s is already assigned. 
You have to delete (or rename) that container to be able to assign %s to a container again.", name, name) } // Associate a Swarm ID to the container we are creating. @@ -697,7 +701,7 @@ func (c *Cluster) RenameContainer(container *cluster.Container, newName string) // check new name whether available if !c.checkNameUniqueness(newName) { - return fmt.Errorf("Conflict, The name %s is already assigned. You have to delete (or rename) that container to be able to assign %s to a container again.", newName, newName) + return fmt.Errorf("Conflict: The name %s is already assigned. You have to delete (or rename) that container to be able to assign %s to a container again.", newName, newName) } // call engine rename
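
For reference, here is a minimal, self-contained sketch (separate from the patches above) of the reservation pattern that patch 2 introduces in createContainer: hold the scheduler lock only long enough to check name uniqueness and record a pending entry, release it for the slow engine call so other creations can be scheduled in parallel, then drop the entry once the engine answers. The scheduler and reservation types, the slowCreate callback, and all names below are simplified stand-ins for illustration, not the actual Swarm types.

package main

import (
	"fmt"
	"sync"
)

// reservation plays the role of pendingContainer: a placeholder that keeps
// the scheduler aware of names/resources already promised to an in-flight create.
type reservation struct {
	name   string
	engine string
}

type scheduler struct {
	mu      sync.Mutex
	pending map[string]reservation // keyed by a generated ID, like the Swarm ID
}

func newScheduler() *scheduler {
	return &scheduler{pending: make(map[string]reservation)}
}

// create holds the lock only while checking uniqueness and recording the
// reservation; the slow engine call runs without the scheduler lock held.
func (s *scheduler) create(id, name, engine string, slowCreate func() error) error {
	s.mu.Lock()
	for _, r := range s.pending {
		if name != "" && r.name == name {
			s.mu.Unlock()
			return fmt.Errorf("conflict: the name %s is already assigned", name)
		}
	}
	s.pending[id] = reservation{name: name, engine: engine}
	s.mu.Unlock()

	// Stands in for engine.Create(config, name, true) in the real code.
	err := slowCreate()

	// Success or failure, the reservation is no longer needed: on success the
	// real container is now visible on the engine itself.
	s.mu.Lock()
	delete(s.pending, id)
	s.mu.Unlock()

	return err
}

func main() {
	s := newScheduler()
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		i := i
		wg.Add(1)
		go func() {
			defer wg.Done()
			name := fmt.Sprintf("web-%d", i)
			err := s.create(fmt.Sprintf("id-%d", i), name, "engine-0", func() error {
				return nil // pretend the engine created the container
			})
			fmt.Println(name, err)
		}()
	}
	wg.Wait()
}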