mirror of https://github.com/docker/docs.git
Cleanup: Make swarm Node and Cluster functions private
Signed-off-by: Victor Vieux <vieux@docker.com>
This commit is contained in:
parent a25d0adf19
commit eccaf6e5eb
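The renames below rely on Go's visibility rule: an identifier is exported from its package only when it begins with an uppercase letter, so lowercasing Create to create, Destroy to destroy, and so on removes these methods from the package's public surface without changing any behavior. A minimal sketch of the rule follows; the node type and method bodies here are illustrative stand-ins, not the swarm sources:

    package swarm

    // node is a hypothetical stand-in for the swarm node type.
    type node struct {
        id string
    }

    // ID begins with an uppercase letter, so it is exported:
    // code outside package swarm may call n.ID().
    func (n *node) ID() string { return n.id }

    // create begins with a lowercase letter, so it is unexported:
    // only code inside package swarm (including its _test.go files)
    // can call it.
    func (n *node) create(name string) error {
        // ... create a container on this node ...
        return nil
    }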
@@ -74,7 +74,7 @@ func (c *Cluster) CreateContainer(config *dockerclient.ContainerConfig, name str
 	}
 
 	if nn, ok := n.(*node); ok {
-		container, err := nn.Create(config, name, true)
+		container, err := nn.create(config, name, true)
 		if err != nil {
 			return nil, err
 		}

@@ -97,7 +97,7 @@ func (c *Cluster) RemoveContainer(container *cluster.Container, force bool) erro
 	defer c.Unlock()
 
 	if n, ok := container.Node.(*node); ok {
-		if err := n.Destroy(container, force); err != nil {
+		if err := n.destroy(container, force); err != nil {
 			return err
 		}
 	}

@@ -118,7 +118,7 @@ func (c *Cluster) newEntries(entries []*discovery.Entry) {
 		go func(m *discovery.Entry) {
 			if c.getNode(m.String()) == nil {
 				n := NewNode(m.String(), c.options.OvercommitRatio)
-				if err := n.Connect(c.options.TLSConfig); err != nil {
+				if err := n.connect(c.options.TLSConfig); err != nil {
 					log.Error(err)
 					return
 				}

@@ -134,7 +134,7 @@ func (c *Cluster) newEntries(entries []*discovery.Entry) {
 					return
 				}
 				c.nodes[n.id] = n
-				if err := n.Events(c); err != nil {
+				if err := n.events(c); err != nil {
 					log.Error(err)
 					c.Unlock()
 					return

@@ -14,7 +14,7 @@ func createNode(t *testing.T, ID string, containers ...dockerclient.Container) *
 	node.id = ID
 
 	for _, container := range containers {
-		node.AddContainer(&cluster.Container{Container: container, Node: node})
+		node.addContainer(&cluster.Container{Container: container, Node: node})
 	}
 
 	return node

@@ -76,7 +76,7 @@ func (n *node) Labels() map[string]string {
 
 // Connect will initialize a connection to the Docker daemon running on the
 // host, gather machine specs (memory, cpu, ...) and monitor state changes.
-func (n *node) Connect(config *tls.Config) error {
+func (n *node) connect(config *tls.Config) error {
 	host, _, err := net.SplitHostPort(n.addr)
 	if err != nil {
 		return err

@@ -126,8 +126,8 @@ func (n *node) connectClient(client dockerclient.Client) error {
 	return nil
 }
 
-// IsConnected returns true if the engine is connected to a remote docker API
-func (n *node) IsConnected() bool {
+// isConnected returns true if the engine is connected to a remote docker API
+func (n *node) isConnected() bool {
 	return n.client != nil
 }
 

@@ -350,7 +350,7 @@ func (n *node) TotalCpus() int64 {
 	return n.Cpus + (n.Cpus * n.overcommitRatio / 100)
 }
 
-func (n *node) Create(config *dockerclient.ContainerConfig, name string, pullImage bool) (*cluster.Container, error) {
+func (n *node) create(config *dockerclient.ContainerConfig, name string, pullImage bool) (*cluster.Container, error) {
 	var (
 		err error
 		id  string

@@ -368,7 +368,7 @@ func (n *node) Create(config *dockerclient.ContainerConfig, name string, pullIma
 			return nil, err
 		}
 		// Otherwise, try to pull the image...
-		if err = n.Pull(config.Image); err != nil {
+		if err = n.pull(config.Image); err != nil {
 			return nil, err
 		}
 		// ...And try again.

@@ -388,7 +388,7 @@ func (n *node) Create(config *dockerclient.ContainerConfig, name string, pullIma
 }
 
 // Destroy and remove a container from the node.
-func (n *node) Destroy(container *cluster.Container, force bool) error {
+func (n *node) destroy(container *cluster.Container, force bool) error {
 	if err := n.client.RemoveContainer(container.Id, force, true); err != nil {
 		return err
 	}

@@ -402,7 +402,7 @@ func (n *node) Destroy(container *cluster.Container, force bool) error {
 	return nil
 }
 
-func (n *node) Pull(image string) error {
+func (n *node) pull(image string) error {
 	if err := n.client.PullImage(image, nil); err != nil {
 		return err
 	}

@@ -410,7 +410,7 @@ func (n *node) Pull(image string) error {
 }
 
 // Register an event handler.
-func (n *node) Events(h cluster.EventHandler) error {
+func (n *node) events(h cluster.EventHandler) error {
 	if n.eventHandler != nil {
 		return errors.New("event handler already set")
 	}

@@ -519,7 +519,7 @@ func (n *node) handler(ev *dockerclient.Event, _ chan error, args ...interface{}
 }
 
 // Inject a container into the internal state.
-func (n *node) AddContainer(container *cluster.Container) error {
+func (n *node) addContainer(container *cluster.Container) error {
 	n.Lock()
 	defer n.Unlock()
 

@@ -531,7 +531,7 @@ func (n *node) AddContainer(container *cluster.Container) error {
 }
 
 // Inject an image into the internal state.
-func (n *node) AddImage(image *cluster.Image) {
+func (n *node) addImage(image *cluster.Image) {
 	n.Lock()
 	defer n.Unlock()
 

@@ -539,7 +539,7 @@ func (n *node) AddImage(image *cluster.Image) {
 }
 
 // Remove a container from the internal test.
-func (n *node) RemoveContainer(container *cluster.Container) error {
+func (n *node) removeContainer(container *cluster.Container) error {
 	n.Lock()
 	defer n.Unlock()
 

@@ -551,7 +551,7 @@ func (n *node) RemoveContainer(container *cluster.Container) error {
 }
 
 // Wipes the internal container state.
-func (n *node) CleanupContainers() {
+func (n *node) cleanupContainers() {
 	n.Lock()
 	n.containers = make(map[string]*cluster.Container)
 	n.Unlock()

@@ -27,15 +27,15 @@ var (
 
 func TestNodeConnectionFailure(t *testing.T) {
 	node := NewNode("test", 0)
-	assert.False(t, node.IsConnected())
+	assert.False(t, node.isConnected())
 
 	// Always fail.
 	client := mockclient.NewMockClient()
 	client.On("Info").Return(&dockerclient.Info{}, errors.New("fail"))
 
-	// Connect() should fail and IsConnected() return false.
+	// Connect() should fail and isConnected() return false.
 	assert.Error(t, node.connectClient(client))
-	assert.False(t, node.IsConnected())
+	assert.False(t, node.isConnected())
 
 	client.Mock.AssertExpectations(t)
 }

@@ -46,14 +46,14 @@ func TestOutdatedNode(t *testing.T) {
 	client.On("Info").Return(&dockerclient.Info{}, nil)
 
 	assert.Error(t, node.connectClient(client))
-	assert.False(t, node.IsConnected())
+	assert.False(t, node.isConnected())
 
 	client.Mock.AssertExpectations(t)
 }
 
 func TestNodeCpusMemory(t *testing.T) {
 	node := NewNode("test", 0)
-	assert.False(t, node.IsConnected())
+	assert.False(t, node.isConnected())
 
 	client := mockclient.NewMockClient()
 	client.On("Info").Return(mockInfo, nil)

@@ -62,7 +62,7 @@ func TestNodeCpusMemory(t *testing.T) {
 	client.On("StartMonitorEvents", mock.Anything, mock.Anything, mock.Anything).Return()
 
 	assert.NoError(t, node.connectClient(client))
-	assert.True(t, node.IsConnected())
+	assert.True(t, node.isConnected())
 	assert.True(t, node.IsHealthy())
 
 	assert.Equal(t, node.UsedCpus(), 0)

@@ -73,7 +73,7 @@ func TestNodeCpusMemory(t *testing.T) {
 
 func TestNodeSpecs(t *testing.T) {
 	node := NewNode("test", 0)
-	assert.False(t, node.IsConnected())
+	assert.False(t, node.isConnected())
 
 	client := mockclient.NewMockClient()
 	client.On("Info").Return(mockInfo, nil)

@@ -82,7 +82,7 @@ func TestNodeSpecs(t *testing.T) {
 	client.On("StartMonitorEvents", mock.Anything, mock.Anything, mock.Anything).Return()
 
 	assert.NoError(t, node.connectClient(client))
-	assert.True(t, node.IsConnected())
+	assert.True(t, node.isConnected())
 	assert.True(t, node.IsHealthy())
 
 	assert.Equal(t, node.Cpus, mockInfo.NCPU)

@@ -98,7 +98,7 @@ func TestNodeSpecs(t *testing.T) {
 
 func TestNodeState(t *testing.T) {
 	node := NewNode("test", 0)
-	assert.False(t, node.IsConnected())
+	assert.False(t, node.isConnected())
 
 	client := mockclient.NewMockClient()
 	client.On("Info").Return(mockInfo, nil)

@@ -112,7 +112,7 @@ func TestNodeState(t *testing.T) {
 	client.On("InspectContainer", "two").Return(&dockerclient.ContainerInfo{Config: &dockerclient.ContainerConfig{CpuShares: 100}}, nil).Once()
 
 	assert.NoError(t, node.connectClient(client))
-	assert.True(t, node.IsConnected())
+	assert.True(t, node.isConnected())
 
 	// The node should only have a single container at this point.
 	containers := node.Containers()

@@ -137,7 +137,7 @@ func TestNodeState(t *testing.T) {
 
 func TestNodeContainerLookup(t *testing.T) {
 	node := NewNode("test-node", 0)
-	assert.False(t, node.IsConnected())
+	assert.False(t, node.isConnected())
 
 	client := mockclient.NewMockClient()
 	client.On("Info").Return(mockInfo, nil)

@@ -148,7 +148,7 @@ func TestNodeContainerLookup(t *testing.T) {
 	client.On("InspectContainer", "container-id").Return(&dockerclient.ContainerInfo{Config: &dockerclient.ContainerConfig{CpuShares: 100}}, nil).Once()
 
 	assert.NoError(t, node.connectClient(client))
-	assert.True(t, node.IsConnected())
+	assert.True(t, node.isConnected())
 
 	// Invalid lookup
 	assert.Nil(t, node.Container("invalid-id"))

@@ -184,7 +184,7 @@ func TestCreateContainer(t *testing.T) {
 	client.On("ListContainers", true, false, "").Return([]dockerclient.Container{}, nil).Once()
 	client.On("ListImages").Return([]*dockerclient.Image{}, nil).Once()
 	assert.NoError(t, node.connectClient(client))
-	assert.True(t, node.IsConnected())
+	assert.True(t, node.isConnected())
 
 	mockConfig := *config
 	mockConfig.CpuShares = config.CpuShares * mockInfo.NCPU

@@ -196,7 +196,7 @@ func TestCreateContainer(t *testing.T) {
 	client.On("ListContainers", true, false, fmt.Sprintf(`{"id":[%q]}`, id)).Return([]dockerclient.Container{{Id: id}}, nil).Once()
 	client.On("ListImages").Return([]*dockerclient.Image{}, nil).Once()
 	client.On("InspectContainer", id).Return(&dockerclient.ContainerInfo{Config: config}, nil).Once()
-	container, err := node.Create(config, name, false)
+	container, err := node.create(config, name, false)
 	assert.Nil(t, err)
 	assert.Equal(t, container.Id, id)
 	assert.Len(t, node.Containers(), 1)

@@ -205,7 +205,7 @@ func TestCreateContainer(t *testing.T) {
 	name = "test2"
 	mockConfig.CpuShares = config.CpuShares * mockInfo.NCPU
 	client.On("CreateContainer", &mockConfig, name).Return("", dockerclient.ErrNotFound).Once()
-	container, err = node.Create(config, name, false)
+	container, err = node.create(config, name, false)
 	assert.Equal(t, err, dockerclient.ErrNotFound)
 	assert.Nil(t, container)
 

@@ -219,7 +219,7 @@ func TestCreateContainer(t *testing.T) {
 	client.On("ListContainers", true, false, fmt.Sprintf(`{"id":[%q]}`, id)).Return([]dockerclient.Container{{Id: id}}, nil).Once()
 	client.On("ListImages").Return([]*dockerclient.Image{}, nil).Once()
 	client.On("InspectContainer", id).Return(&dockerclient.ContainerInfo{Config: config}, nil).Once()
-	container, err = node.Create(config, name, true)
+	container, err = node.create(config, name, true)
 	assert.Nil(t, err)
 	assert.Equal(t, container.Id, id)
 	assert.Len(t, node.Containers(), 2)
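Note that the test hunks above keep calling the renamed methods directly: Go test files that declare the same package share its namespace, so unexported identifiers remain reachable from them and no test needed an accessor. A self-contained sketch of that property; the stub type is hypothetical, not the swarm code:

    package swarm

    import "testing"

    // stub mirrors the shape of the unexported API under test.
    type stub struct {
        client interface{}
    }

    // isConnected is unexported, yet the test below can call it
    // because both live in package swarm.
    func (s *stub) isConnected() bool { return s.client != nil }

    func TestStubIsConnected(t *testing.T) {
        s := &stub{}
        if s.isConnected() {
            t.Fatal("expected a fresh stub to be disconnected")
        }
    }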