nodes -> engines

Signed-off-by: Victor Vieux <victorvieux@gmail.com>

commit 853aaf6027 (parent 0c1782c7c0)
```diff
@@ -14,10 +14,10 @@ import (
 )
 
 const (
-	// Force-refresh the state of the node this oftee.
+	// Force-refresh the state of the engine this often.
	stateRefreshPeriod = 30 * time.Second
 
-	// Timeout for requests sent out to the node.
+	// Timeout for requests sent out to the engine.
	requestTimeout = 10 * time.Second
 )
```
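Aside: a minimal sketch of how a per-request timeout like `requestTimeout` is commonly enforced in Go, here via net/http's client-level timeout. The endpoint URL is a placeholder; this illustrates the pattern only, not the engine's actual request path.

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

const requestTimeout = 10 * time.Second

func main() {
	// Requests that take longer than requestTimeout are aborted by the client.
	client := &http.Client{Timeout: requestTimeout}
	resp, err := client.Get("http://example.com/") // placeholder endpoint
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```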
```diff
@@ -400,7 +400,7 @@ func (e *Engine) Destroy(container *Container, force bool) error {
	return nil
 }
 
-// Pull an image on the node
+// Pull an image on the engine
 func (e *Engine) Pull(image string) error {
	if !strings.Contains(image, ":") {
		image = image + ":latest"
```
```diff
@@ -1,21 +1,21 @@
 package cluster
 
-// EngineSorter implements the Sort interface to sort Cluster.Node.
+// EngineSorter implements the Sort interface to sort Cluster.Engine.
 // It is not guaranteed to be a stable sort.
 type EngineSorter []*Engine
 
-// Len returns the number of nodes to be sorted.
+// Len returns the number of engines to be sorted.
 func (s EngineSorter) Len() int {
	return len(s)
 }
 
-// Swap exchanges the node elements with indices i and j.
+// Swap exchanges the engine elements with indices i and j.
 func (s EngineSorter) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
 }
 
-// Less reports whether the node with index i should sort before the node with index j.
-// Nodes are sorted chronologically by name.
+// Less reports whether the engine with index i should sort before the engine with index j.
+// Engines are sorted chronologically by name.
 func (s EngineSorter) Less(i, j int) bool {
	return s[i].Name < s[j].Name
 }
```
```diff
@@ -7,12 +7,12 @@ import (
	"github.com/stretchr/testify/assert"
 )
 
-func TestNodeSorter(t *testing.T) {
-	nodes := []*Engine{{Name: "name1"}, {Name: "name3"}, {Name: "name2"}}
+func TestEngineSorter(t *testing.T) {
+	engines := []*Engine{{Name: "name1"}, {Name: "name3"}, {Name: "name2"}}
 
-	sort.Sort(EngineSorter(nodes))
+	sort.Sort(EngineSorter(engines))
 
-	assert.Equal(t, nodes[0].Name, "name1")
-	assert.Equal(t, nodes[1].Name, "name2")
-	assert.Equal(t, nodes[2].Name, "name3")
+	assert.Equal(t, engines[0].Name, "name1")
+	assert.Equal(t, engines[1].Name, "name2")
+	assert.Equal(t, engines[2].Name, "name3")
 }
```
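For context, the sorter above is a standard `sort.Interface` implementation. Below is a self-contained sketch of the same pattern; the `Engine` and `engineSorter` types here are illustrative stand-ins, not the package's own definitions.

```go
package main

import (
	"fmt"
	"sort"
)

// Engine is an illustrative stand-in for the cluster.Engine type.
type Engine struct {
	Name string
}

// engineSorter mirrors the EngineSorter pattern above: a slice type that
// satisfies sort.Interface by comparing engine names.
type engineSorter []*Engine

func (s engineSorter) Len() int           { return len(s) }
func (s engineSorter) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s engineSorter) Less(i, j int) bool { return s[i].Name < s[j].Name }

func main() {
	engines := []*Engine{{Name: "name3"}, {Name: "name1"}, {Name: "name2"}}
	sort.Sort(engineSorter(engines))
	for _, e := range engines {
		fmt.Println(e.Name) // prints name1, name2, name3
	}
}
```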
```diff
@@ -20,7 +20,7 @@ type Cluster struct {
	sync.RWMutex
 
	eventHandler cluster.EventHandler
-	nodes        map[string]*cluster.Engine
+	engines      map[string]*cluster.Engine
	scheduler    *scheduler.Scheduler
	options      *cluster.Options
	store        *state.Store
```
```diff
@@ -32,7 +32,7 @@ func NewCluster(scheduler *scheduler.Scheduler, store *state.Store, eventhandler
 
	cluster := &Cluster{
		eventHandler: eventhandler,
-		nodes:        make(map[string]*cluster.Engine),
+		engines:      make(map[string]*cluster.Engine),
		scheduler:    scheduler,
		options:      options,
		store:        store,
```
```diff
@@ -76,7 +76,7 @@ func (c *Cluster) CreateContainer(config *dockerclient.ContainerConfig, name str
		return nil, err
	}
 
-	if nn, ok := c.nodes[n.ID]; ok {
+	if nn, ok := c.engines[n.ID]; ok {
		container, err := nn.Create(config, name, true)
		if err != nil {
			return nil, err
```
```diff
@@ -113,29 +113,29 @@ func (c *Cluster) RemoveContainer(container *cluster.Container, force bool) erro
	return nil
 }
 
-// Entries are Docker Nodes
+// Entries are Docker Engines
 func (c *Cluster) newEntries(entries []*discovery.Entry) {
	for _, entry := range entries {
		go func(m *discovery.Entry) {
-			if !c.hasNode(m.String()) {
-				n := cluster.NewEngine(m.String(), c.options.OvercommitRatio)
-				if err := n.Connect(c.options.TLSConfig); err != nil {
+			if !c.hasEngine(m.String()) {
+				engine := cluster.NewEngine(m.String(), c.options.OvercommitRatio)
+				if err := engine.Connect(c.options.TLSConfig); err != nil {
					log.Error(err)
					return
				}
				c.Lock()
 
-				if old, exists := c.nodes[n.ID]; exists {
+				if old, exists := c.engines[engine.ID]; exists {
					c.Unlock()
-					if old.IP != n.IP {
-						log.Errorf("ID duplicated. %s shared by %s and %s", n.ID, old.IP, n.IP)
+					if old.IP != engine.IP {
+						log.Errorf("ID duplicated. %s shared by %s and %s", engine.ID, old.IP, engine.IP)
					} else {
-						log.Errorf("node %q is already registered", n.ID)
+						log.Errorf("node %q is already registered", engine.ID)
					}
					return
				}
-				c.nodes[n.ID] = n
-				if err := n.Events(c); err != nil {
+				c.engines[engine.ID] = engine
+				if err := engine.Events(c); err != nil {
					log.Error(err)
					c.Unlock()
					return
```
```diff
@@ -147,9 +147,9 @@ func (c *Cluster) newEntries(entries []*discovery.Entry) {
	}
 }
 
-func (c *Cluster) hasNode(addr string) bool {
-	for _, node := range c.nodes {
-		if node.Addr == addr {
+func (c *Cluster) hasEngine(addr string) bool {
+	for _, engine := range c.engines {
+		if engine.Addr == addr {
			return true
		}
	}
```
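The registration logic in `newEntries` boils down to a map keyed by engine ID, guarded by the cluster lock, with a duplicate-ID check. A simplified, self-contained sketch of that check (the types here are illustrative, not the swarm package's):

```go
package main

import (
	"fmt"
	"sync"
)

type engine struct {
	ID, IP string
}

type registry struct {
	sync.Mutex
	engines map[string]*engine
}

// register adds an engine unless its ID is already taken, mirroring the
// duplicate handling in newEntries above.
func (r *registry) register(e *engine) error {
	r.Lock()
	defer r.Unlock()
	if old, exists := r.engines[e.ID]; exists {
		if old.IP != e.IP {
			return fmt.Errorf("ID duplicated. %s shared by %s and %s", e.ID, old.IP, e.IP)
		}
		return fmt.Errorf("engine %q is already registered", e.ID)
	}
	r.engines[e.ID] = e
	return nil
}

func main() {
	r := &registry{engines: make(map[string]*engine)}
	fmt.Println(r.register(&engine{ID: "a", IP: "10.0.0.1"})) // <nil>
	fmt.Println(r.register(&engine{ID: "a", IP: "10.0.0.2"})) // ID duplicated...
}
```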
```diff
@@ -162,7 +162,7 @@ func (c *Cluster) Images() []*cluster.Image {
	defer c.RUnlock()
 
	out := []*cluster.Image{}
-	for _, n := range c.nodes {
+	for _, n := range c.engines {
		out = append(out, n.Images()...)
	}
 
```
```diff
@@ -178,7 +178,7 @@ func (c *Cluster) Image(IDOrName string) *cluster.Image {
 
	c.RLock()
	defer c.RUnlock()
-	for _, n := range c.nodes {
+	for _, n := range c.engines {
		if image := n.Image(IDOrName); image != nil {
			return image
		}
```
```diff
@@ -196,9 +196,9 @@ func (c *Cluster) RemoveImage(image *cluster.Image) ([]*dockerclient.ImageDelete
 
 // Pull is exported
 func (c *Cluster) Pull(name string, callback func(what, status string)) {
-	size := len(c.nodes)
+	size := len(c.engines)
	done := make(chan bool, size)
-	for _, n := range c.nodes {
+	for _, n := range c.engines {
		go func(nn *cluster.Engine) {
			if callback != nil {
				callback(nn.Name, "")
```
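`Pull` fans out one goroutine per engine and waits on a buffered channel. A stripped-down sketch of that fan-out pattern, with `pull` standing in for `Engine.Pull`:

```go
package main

import "fmt"

// pullAll runs pull on every engine concurrently, then blocks until all
// goroutines have signalled completion on the buffered done channel.
func pullAll(engines []string, pull func(engine string) error) {
	done := make(chan bool, len(engines))
	for _, e := range engines {
		go func(e string) {
			if err := pull(e); err != nil {
				fmt.Println(e, "failed:", err)
			}
			done <- true
		}(e)
	}
	for range engines {
		<-done
	}
}

func main() {
	pullAll([]string{"engine-1", "engine-2"}, func(e string) error {
		fmt.Println("pulling on", e)
		return nil
	})
}
```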
```diff
@@ -225,7 +225,7 @@ func (c *Cluster) Containers() []*cluster.Container {
	defer c.RUnlock()
 
	out := []*cluster.Container{}
-	for _, n := range c.nodes {
+	for _, n := range c.engines {
		out = append(out, n.Containers()...)
	}
 
```
```diff
@@ -241,7 +241,7 @@ func (c *Cluster) Container(IDOrName string) *cluster.Container {
 
	c.RLock()
	defer c.RUnlock()
-	for _, n := range c.nodes {
+	for _, n := range c.engines {
		if container := n.Container(IDOrName); container != nil {
			return container
		}
```
```diff
@@ -250,13 +250,13 @@ func (c *Cluster) Container(IDOrName string) *cluster.Container {
	return nil
 }
 
-// listNodes returns all the nodes in the cluster.
+// listNodes returns all the engines in the cluster.
 func (c *Cluster) listNodes() []*node.Node {
	c.RLock()
	defer c.RUnlock()
 
	out := []*node.Node{}
-	for _, n := range c.nodes {
+	for _, n := range c.engines {
		out = append(out, node.NewNode(n))
	}
 
```
```diff
@@ -269,7 +269,7 @@ func (c *Cluster) listEngines() []*cluster.Engine {
	defer c.RUnlock()
 
	out := []*cluster.Engine{}
-	for _, n := range c.nodes {
+	for _, n := range c.engines {
		out = append(out, n)
	}
	return out
```
```diff
@@ -280,17 +280,17 @@ func (c *Cluster) Info() [][2]string {
	info := [][2]string{
		{"\bStrategy", c.scheduler.Strategy()},
		{"\bFilters", c.scheduler.Filters()},
-		{"\bNodes", fmt.Sprintf("%d", len(c.nodes))},
+		{"\bNodes", fmt.Sprintf("%d", len(c.engines))},
	}
 
-	nodes := c.listEngines()
-	sort.Sort(cluster.EngineSorter(nodes))
+	engines := c.listEngines()
+	sort.Sort(cluster.EngineSorter(engines))
 
-	for _, node := range c.nodes {
-		info = append(info, [2]string{node.Name, node.Addr})
-		info = append(info, [2]string{" └ Containers", fmt.Sprintf("%d", len(node.Containers()))})
-		info = append(info, [2]string{" └ Reserved CPUs", fmt.Sprintf("%d / %d", node.UsedCpus(), node.TotalCpus())})
-		info = append(info, [2]string{" └ Reserved Memory", fmt.Sprintf("%s / %s", units.BytesSize(float64(node.UsedMemory())), units.BytesSize(float64(node.TotalMemory())))})
+	for _, engine := range engines {
+		info = append(info, [2]string{engine.Name, engine.Addr})
+		info = append(info, [2]string{" └ Containers", fmt.Sprintf("%d", len(engine.Containers()))})
+		info = append(info, [2]string{" └ Reserved CPUs", fmt.Sprintf("%d / %d", engine.UsedCpus(), engine.TotalCpus())})
+		info = append(info, [2]string{" └ Reserved Memory", fmt.Sprintf("%s / %s", units.BytesSize(float64(engine.UsedMemory())), units.BytesSize(float64(engine.TotalMemory())))})
	}
 
	return info
```
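`Info` formats reserved memory with `units.BytesSize` from github.com/docker/go-units, which renders a byte count as a human-readable string in binary units. A small sketch, assuming the go-units module is available:

```go
package main

import (
	"fmt"

	"github.com/docker/go-units"
)

func main() {
	// BytesSize takes a float64 byte count and picks a binary unit.
	used, total := float64(512*1024*1024), float64(2*1024*1024*1024)
	fmt.Printf("%s / %s\n", units.BytesSize(used), units.BytesSize(total)) // 512MiB / 2GiB
}
```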
```diff
@@ -8,29 +8,29 @@ import (
	"github.com/stretchr/testify/assert"
 )
 
-func createNode(t *testing.T, ID string, containers ...dockerclient.Container) *cluster.Engine {
-	node := cluster.NewEngine(ID, 0)
-	node.Name = ID
-	node.ID = ID
+func createEngine(t *testing.T, ID string, containers ...dockerclient.Container) *cluster.Engine {
+	engine := cluster.NewEngine(ID, 0)
+	engine.Name = ID
+	engine.ID = ID
 
	for _, container := range containers {
-		node.AddContainer(&cluster.Container{Container: container, Engine: node})
+		engine.AddContainer(&cluster.Container{Container: container, Engine: engine})
	}
 
-	return node
+	return engine
 }
 
 func TestContainerLookup(t *testing.T) {
	c := &Cluster{
-		nodes: make(map[string]*cluster.Engine),
+		engines: make(map[string]*cluster.Engine),
	}
	container := dockerclient.Container{
		Id:    "container-id",
		Names: []string{"/container-name1", "/container-name2"},
	}
 
-	n := createNode(t, "test-node", container)
-	c.nodes[n.ID] = n
+	n := createEngine(t, "test-engine", container)
+	c.engines[n.ID] = n
 
	// Invalid lookup
	assert.Nil(t, c.Container("invalid-id"))
```
```diff
@@ -42,7 +42,7 @@ func TestContainerLookup(t *testing.T) {
	// Container name lookup.
	assert.NotNil(t, c.Container("container-name1"))
	assert.NotNil(t, c.Container("container-name2"))
-	// Container node/name matching.
-	assert.NotNil(t, c.Container("test-node/container-name1"))
-	assert.NotNil(t, c.Container("test-node/container-name2"))
+	// Container engine/name matching.
+	assert.NotNil(t, c.Container("test-engine/container-name1"))
+	assert.NotNil(t, c.Container("test-engine/container-name2"))
 }
```
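The test exercises lookup by plain container name and by the `engine-name/container-name` form. A self-contained sketch of that matching rule (the `container` type and `lookup` helper here are illustrative, not the package's API):

```go
package main

import "fmt"

// container is an illustrative stand-in for the cluster's container record.
type container struct {
	engine string
	names  []string
}

// lookup resolves either "container-name" or "engine-name/container-name",
// mirroring the matching the test above exercises.
func lookup(containers []container, idOrName string) *container {
	for i, c := range containers {
		for _, name := range c.names {
			if name == idOrName || c.engine+"/"+name == idOrName {
				return &containers[i]
			}
		}
	}
	return nil
}

func main() {
	cs := []container{{engine: "test-engine", names: []string{"container-name1"}}}
	fmt.Println(lookup(cs, "container-name1") != nil)             // true
	fmt.Println(lookup(cs, "test-engine/container-name1") != nil) // true
	fmt.Println(lookup(cs, "invalid-id") != nil)                  // false
}
```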