Merge pull request #23361 from docker/swarm

Add dependency to docker/swarmkit
Tibor Vass 2016-06-14 00:55:55 -07:00 committed by GitHub
commit a1e319e847
600 changed files with 131497 additions and 1499 deletions

@@ -0,0 +1,70 @@
package idresolver
import (
"fmt"
"golang.org/x/net/context"
"github.com/docker/engine-api/client"
"github.com/docker/engine-api/types/swarm"
)
// IDResolver provides ID to Name resolution.
type IDResolver struct {
client client.APIClient
noResolve bool
cache map[string]string
}
// New creates a new IDResolver.
func New(client client.APIClient, noResolve bool) *IDResolver {
return &IDResolver{
client: client,
noResolve: noResolve,
cache: make(map[string]string),
}
}
func (r *IDResolver) get(ctx context.Context, t interface{}, id string) (string, error) {
switch t.(type) {
case swarm.Node:
node, err := r.client.NodeInspect(ctx, id)
if err != nil {
return id, nil
}
if node.Spec.Annotations.Name != "" {
return node.Spec.Annotations.Name, nil
}
if node.Description.Hostname != "" {
return node.Description.Hostname, nil
}
return id, nil
case swarm.Service:
service, err := r.client.ServiceInspect(ctx, id)
if err != nil {
return id, nil
}
return service.Spec.Annotations.Name, nil
default:
return "", fmt.Errorf("unsupported type")
}
}
// Resolve will attempt to resolve an ID to a Name by querying the manager.
// Results are stored into a cache.
// If the `-n` flag is used on the command line, resolution is disabled.
func (r *IDResolver) Resolve(ctx context.Context, t interface{}, id string) (string, error) {
if r.noResolve {
return id, nil
}
if name, ok := r.cache[id]; ok {
return name, nil
}
name, err := r.get(ctx, t, id)
if err != nil {
return "", err
}
r.cache[id] = name
return name, nil
}

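The new idresolver package gives the task-listing commands a cached ID-to-name lookup, and lookup failures deliberately fall back to the raw ID. A minimal usage sketch (the node ID and variable names are illustrative; it assumes a reachable swarm manager):

package main // sketch

import (
	"fmt"

	"golang.org/x/net/context"

	"github.com/docker/docker/api/client/idresolver"
	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types/swarm"
)

func main() {
	// Assumption: DOCKER_HOST et al. point at a swarm manager.
	apiClient, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	// noResolve=false enables resolution; repeated lookups hit the cache.
	resolver := idresolver.New(apiClient, false)
	// The empty swarm.Node{} value only selects the resolution strategy.
	name, err := resolver.Resolve(context.Background(), swarm.Node{}, "hypothetical-node-id")
	if err != nil {
		panic(err)
	}
	fmt.Println(name)
}
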
@@ -10,6 +10,7 @@ import (
"github.com/docker/docker/pkg/ioutils"
flag "github.com/docker/docker/pkg/mflag"
"github.com/docker/docker/utils"
"github.com/docker/engine-api/types/swarm"
"github.com/docker/go-units"
)
@@ -68,6 +69,21 @@ func (cli *DockerCli) CmdInfo(args ...string) error {
fmt.Fprintf(cli.out, "\n")
}
fmt.Fprintf(cli.out, "Swarm: %v\n", info.Swarm.LocalNodeState)
if info.Swarm.LocalNodeState != swarm.LocalNodeStateInactive {
fmt.Fprintf(cli.out, " NodeID: %s\n", info.Swarm.NodeID)
if info.Swarm.Error != "" {
fmt.Fprintf(cli.out, " Error: %v\n", info.Swarm.Error)
}
if info.Swarm.ControlAvailable {
fmt.Fprintf(cli.out, " IsManager: Yes\n")
fmt.Fprintf(cli.out, " Managers: %d\n", info.Swarm.Managers)
fmt.Fprintf(cli.out, " Nodes: %d\n", info.Swarm.Nodes)
ioutils.FprintfIfNotEmpty(cli.out, " CACertHash: %s\n", info.Swarm.CACertHash)
} else {
fmt.Fprintf(cli.out, " IsManager: No\n")
}
}
ioutils.FprintfIfNotEmpty(cli.out, "Kernel Version: %s\n", info.KernelVersion)
ioutils.FprintfIfNotEmpty(cli.out, "Operating System: %s\n", info.OperatingSystem)
ioutils.FprintfIfNotEmpty(cli.out, "OSType: %s\n", info.OSType)

@@ -11,19 +11,19 @@ import (
"github.com/docker/engine-api/client"
)
// CmdInspect displays low-level information on one or more containers or images.
// CmdInspect displays low-level information on one or more containers, images or tasks.
//
// Usage: docker inspect [OPTIONS] CONTAINER|IMAGE [CONTAINER|IMAGE...]
// Usage: docker inspect [OPTIONS] CONTAINER|IMAGE|TASK [CONTAINER|IMAGE|TASK...]
func (cli *DockerCli) CmdInspect(args ...string) error {
cmd := Cli.Subcmd("inspect", []string{"CONTAINER|IMAGE [CONTAINER|IMAGE...]"}, Cli.DockerCommands["inspect"].Description, true)
cmd := Cli.Subcmd("inspect", []string{"CONTAINER|IMAGE|TASK [CONTAINER|IMAGE|TASK...]"}, Cli.DockerCommands["inspect"].Description, true)
tmplStr := cmd.String([]string{"f", "-format"}, "", "Format the output using the given go template")
inspectType := cmd.String([]string{"-type"}, "", "Return JSON for specified type, (e.g image or container)")
inspectType := cmd.String([]string{"-type"}, "", "Return JSON for specified type (e.g. image, container or task)")
size := cmd.Bool([]string{"s", "-size"}, false, "Display total file sizes if the type is container")
cmd.Require(flag.Min, 1)
cmd.ParseFlags(args, true)
if *inspectType != "" && *inspectType != "container" && *inspectType != "image" {
if *inspectType != "" && *inspectType != "container" && *inspectType != "image" && *inspectType != "task" {
return fmt.Errorf("%q is not a valid value for --type", *inspectType)
}
@@ -35,6 +35,11 @@ func (cli *DockerCli) CmdInspect(args ...string) error {
elementSearcher = cli.inspectContainers(ctx, *size)
case "image":
elementSearcher = cli.inspectImages(ctx, *size)
case "task":
if *size {
fmt.Fprintln(cli.err, "WARNING: --size ignored for tasks")
}
elementSearcher = cli.inspectTasks(ctx)
default:
elementSearcher = cli.inspectAll(ctx, *size)
}
@@ -54,6 +59,12 @@ func (cli *DockerCli) inspectImages(ctx context.Context, getSize bool) inspect.G
}
}
func (cli *DockerCli) inspectTasks(ctx context.Context) inspect.GetRefFunc {
return func(ref string) (interface{}, []byte, error) {
return cli.client.TaskInspectWithRaw(ctx, ref)
}
}
func (cli *DockerCli) inspectAll(ctx context.Context, getSize bool) inspect.GetRefFunc {
return func(ref string) (interface{}, []byte, error) {
c, rawContainer, err := cli.client.ContainerInspectWithRaw(ctx, ref, getSize)
@@ -63,7 +74,15 @@ func (cli *DockerCli) inspectAll(ctx context.Context, getSize bool) inspect.GetR
i, rawImage, err := cli.client.ImageInspectWithRaw(ctx, ref, getSize)
if err != nil {
if client.IsErrImageNotFound(err) {
return nil, nil, fmt.Errorf("Error: No such image or container: %s", ref)
// Search for a task with that ID if an image doesn't exist.
t, rawTask, err := cli.client.TaskInspectWithRaw(ctx, ref)
if err != nil {
return nil, nil, fmt.Errorf("Error: No such image, container or task: %s", ref)
}
if getSize {
fmt.Fprintln(cli.err, "WARNING: --size ignored for tasks")
}
return t, rawTask, nil
}
return nil, nil, err
}

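With the fallback above, `docker inspect` now tries containers, then images, then tasks. The task branch rests on the new TaskInspectWithRaw call, which can also be used directly (the task ID is a placeholder):

package main // sketch

import (
	"fmt"

	"golang.org/x/net/context"

	"github.com/docker/engine-api/client"
)

func main() {
	apiClient, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	// Returns the decoded task plus the raw JSON the CLI uses for --format.
	task, raw, err := apiClient.TaskInspectWithRaw(context.Background(), "hypothetical-task-id")
	if err != nil {
		panic(err)
	}
	fmt.Println(task.ID, len(raw))
}
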
@@ -71,7 +71,7 @@ func runList(dockerCli *client.DockerCli, opts listOptions) error {
w := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 3, ' ', 0)
if !opts.quiet {
fmt.Fprintf(w, "NETWORK ID\tNAME\tDRIVER")
fmt.Fprintf(w, "NETWORK ID\tNAME\tDRIVER\tSCOPE")
fmt.Fprintf(w, "\n")
}
@@ -79,6 +79,8 @@ func runList(dockerCli *client.DockerCli, opts listOptions) error {
for _, networkResource := range networkResources {
ID := networkResource.ID
netName := networkResource.Name
driver := networkResource.Driver
scope := networkResource.Scope
if !opts.noTrunc {
ID = stringid.TruncateID(ID)
}
@@ -86,11 +88,11 @@ func runList(dockerCli *client.DockerCli, opts listOptions) error {
fmt.Fprintln(w, ID)
continue
}
driver := networkResource.Driver
fmt.Fprintf(w, "%s\t%s\t%s\t",
fmt.Fprintf(w, "%s\t%s\t%s\t%s\t",
ID,
netName,
driver)
driver,
scope)
fmt.Fprint(w, "\n")
}
w.Flush()

api/client/node/accept.go (new file, 40 lines)

@@ -0,0 +1,40 @@
package node
import (
"fmt"
"github.com/docker/docker/api/client"
"github.com/docker/docker/cli"
"github.com/docker/engine-api/types/swarm"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
func newAcceptCommand(dockerCli *client.DockerCli) *cobra.Command {
var flags *pflag.FlagSet
cmd := &cobra.Command{
Use: "accept NODE [NODE...]",
Short: "Accept a node in the swarm",
Args: cli.RequiresMinArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
return runAccept(dockerCli, flags, args)
},
}
flags = cmd.Flags()
return cmd
}
func runAccept(dockerCli *client.DockerCli, flags *pflag.FlagSet, args []string) error {
for _, id := range args {
if err := runUpdate(dockerCli, id, func(node *swarm.Node) {
node.Spec.Membership = swarm.NodeMembershipAccepted
}); err != nil {
return err
}
fmt.Println(id, "attempting to accept a node in the swarm.")
}
return nil
}

api/client/node/cmd.go (new file, 49 lines)

@@ -0,0 +1,49 @@
package node
import (
"fmt"
"golang.org/x/net/context"
"github.com/spf13/cobra"
"github.com/docker/docker/api/client"
"github.com/docker/docker/cli"
apiclient "github.com/docker/engine-api/client"
)
// NewNodeCommand returns a cobra command for `node` subcommands
func NewNodeCommand(dockerCli *client.DockerCli) *cobra.Command {
cmd := &cobra.Command{
Use: "node",
Short: "Manage docker swarm nodes",
Args: cli.NoArgs,
Run: func(cmd *cobra.Command, args []string) {
fmt.Fprintf(dockerCli.Err(), "\n"+cmd.UsageString())
},
}
cmd.AddCommand(
newAcceptCommand(dockerCli),
newDemoteCommand(dockerCli),
newInspectCommand(dockerCli),
newListCommand(dockerCli),
newPromoteCommand(dockerCli),
newRemoveCommand(dockerCli),
newTasksCommand(dockerCli),
newUpdateCommand(dockerCli),
)
return cmd
}
func nodeReference(client apiclient.APIClient, ctx context.Context, ref string) (string, error) {
// The special value "self" for a node reference is mapped to the current
// node, hence the node ID is retrieved using the `/info` endpoint.
if ref == "self" {
info, err := client.Info(ctx)
if err != nil {
return "", err
}
return info.Swarm.NodeID, nil
}
return ref, nil
}

api/client/node/demote.go (new file, 40 lines)

@@ -0,0 +1,40 @@
package node
import (
"fmt"
"github.com/docker/docker/api/client"
"github.com/docker/docker/cli"
"github.com/docker/engine-api/types/swarm"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
func newDemoteCommand(dockerCli *client.DockerCli) *cobra.Command {
var flags *pflag.FlagSet
cmd := &cobra.Command{
Use: "demote NODE [NODE...]",
Short: "Demote a node from manager in the swarm",
Args: cli.RequiresMinArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
return runDemote(dockerCli, flags, args)
},
}
flags = cmd.Flags()
return cmd
}
func runDemote(dockerCli *client.DockerCli, flags *pflag.FlagSet, args []string) error {
for _, id := range args {
if err := runUpdate(dockerCli, id, func(node *swarm.Node) {
node.Spec.Role = swarm.NodeRoleWorker
}); err != nil {
return err
}
fmt.Println(id, "attempting to demote a manager in the swarm.")
}
return nil
}

api/client/node/inspect.go (new file, 141 lines)

@@ -0,0 +1,141 @@
package node
import (
"fmt"
"io"
"sort"
"strings"
"github.com/docker/docker/api/client"
"github.com/docker/docker/api/client/inspect"
"github.com/docker/docker/cli"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/engine-api/types/swarm"
"github.com/docker/go-units"
"github.com/spf13/cobra"
"golang.org/x/net/context"
)
type inspectOptions struct {
nodeIds []string
format string
pretty bool
}
func newInspectCommand(dockerCli *client.DockerCli) *cobra.Command {
var opts inspectOptions
cmd := &cobra.Command{
Use: "inspect [OPTIONS] self|NODE [NODE...]",
Short: "Inspect a node in the swarm",
Args: cli.RequiresMinArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
opts.nodeIds = args
return runInspect(dockerCli, opts)
},
}
flags := cmd.Flags()
flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given go template")
flags.BoolVarP(&opts.pretty, "pretty", "p", false, "Print the information in a human friendly format.")
return cmd
}
func runInspect(dockerCli *client.DockerCli, opts inspectOptions) error {
client := dockerCli.Client()
ctx := context.Background()
getRef := func(ref string) (interface{}, []byte, error) {
nodeRef, err := nodeReference(client, ctx, ref)
if err != nil {
return nil, nil, err
}
node, err := client.NodeInspect(ctx, nodeRef)
return node, nil, err
}
if !opts.pretty {
return inspect.Inspect(dockerCli.Out(), opts.nodeIds, opts.format, getRef)
}
return printHumanFriendly(dockerCli.Out(), opts.nodeIds, getRef)
}
func printHumanFriendly(out io.Writer, refs []string, getRef inspect.GetRefFunc) error {
for idx, ref := range refs {
obj, _, err := getRef(ref)
if err != nil {
return err
}
printNode(out, obj.(swarm.Node))
// TODO: better way to do this?
// print extra space between objects, but not after the last one
if idx+1 != len(refs) {
fmt.Fprintf(out, "\n\n")
}
}
return nil
}
// TODO: use a template
func printNode(out io.Writer, node swarm.Node) {
fmt.Fprintf(out, "ID:\t\t\t%s\n", node.ID)
ioutils.FprintfIfNotEmpty(out, "Name:\t\t\t%s\n", node.Spec.Name)
if node.Spec.Labels != nil {
fmt.Fprintln(out, "Labels:")
for k, v := range node.Spec.Labels {
fmt.Fprintf(out, " - %s = %s\n", k, v)
}
}
ioutils.FprintfIfNotEmpty(out, "Hostname:\t\t%s\n", node.Description.Hostname)
fmt.Fprintln(out, "Status:")
fmt.Fprintf(out, " State:\t\t\t%s\n", client.PrettyPrint(node.Status.State))
ioutils.FprintfIfNotEmpty(out, " Message:\t\t%s\n", client.PrettyPrint(node.Status.Message))
fmt.Fprintf(out, " Availability:\t\t%s\n", client.PrettyPrint(node.Spec.Availability))
if node.ManagerStatus != nil {
fmt.Fprintln(out, "Manager Status:")
fmt.Fprintf(out, " Address:\t\t%s\n", node.ManagerStatus.Addr)
fmt.Fprintf(out, " Raft status:\t\t%s\n", client.PrettyPrint(node.ManagerStatus.Reachability))
leader := "No"
if node.ManagerStatus.Leader {
leader = "Yes"
}
fmt.Fprintf(out, " Leader:\t\t%s\n", leader)
}
fmt.Fprintln(out, "Platform:")
fmt.Fprintf(out, " Operating System:\t%s\n", node.Description.Platform.OS)
fmt.Fprintf(out, " Architecture:\t\t%s\n", node.Description.Platform.Architecture)
fmt.Fprintln(out, "Resources:")
fmt.Fprintf(out, " CPUs:\t\t\t%d\n", node.Description.Resources.NanoCPUs/1e9)
fmt.Fprintf(out, " Memory:\t\t%s\n", units.BytesSize(float64(node.Description.Resources.MemoryBytes)))
var pluginTypes []string
pluginNamesByType := map[string][]string{}
for _, p := range node.Description.Engine.Plugins {
// append to pluginTypes only if not done previously
if _, ok := pluginNamesByType[p.Type]; !ok {
pluginTypes = append(pluginTypes, p.Type)
}
pluginNamesByType[p.Type] = append(pluginNamesByType[p.Type], p.Name)
}
if len(pluginTypes) > 0 {
fmt.Fprintln(out, "Plugins:")
sort.Strings(pluginTypes) // ensure stable output
for _, pluginType := range pluginTypes {
fmt.Fprintf(out, " %s:\t\t%s\n", pluginType, strings.Join(pluginNamesByType[pluginType], ", "))
}
}
fmt.Fprintf(out, "Engine Version:\t\t%s\n", node.Description.Engine.EngineVersion)
if len(node.Description.Engine.Labels) != 0 {
fmt.Fprintln(out, "Engine Labels:")
for k, v := range node.Description.Engine.Labels {
fmt.Fprintf(out, " - %s = %s", k, v)
}
}
}

api/client/node/list.go (new file, 119 lines)

@@ -0,0 +1,119 @@
package node
import (
"fmt"
"io"
"text/tabwriter"
"golang.org/x/net/context"
"github.com/docker/docker/api/client"
"github.com/docker/docker/cli"
"github.com/docker/docker/opts"
"github.com/docker/engine-api/types"
"github.com/docker/engine-api/types/swarm"
"github.com/spf13/cobra"
)
const (
listItemFmt = "%s\t%s\t%s\t%s\t%s\t%s\t%s\n"
)
type listOptions struct {
quiet bool
filter opts.FilterOpt
}
func newListCommand(dockerCli *client.DockerCli) *cobra.Command {
opts := listOptions{filter: opts.NewFilterOpt()}
cmd := &cobra.Command{
Use: "ls",
Aliases: []string{"list"},
Short: "List nodes in the swarm",
Args: cli.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
return runList(dockerCli, opts)
},
}
flags := cmd.Flags()
flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display IDs")
flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided")
return cmd
}
func runList(dockerCli *client.DockerCli, opts listOptions) error {
client := dockerCli.Client()
ctx := context.Background()
nodes, err := client.NodeList(
ctx,
types.NodeListOptions{Filter: opts.filter.Value()})
if err != nil {
return err
}
info, err := client.Info(ctx)
if err != nil {
return err
}
out := dockerCli.Out()
if opts.quiet {
printQuiet(out, nodes)
} else {
printTable(out, nodes, info)
}
return nil
}
func printTable(out io.Writer, nodes []swarm.Node, info types.Info) {
writer := tabwriter.NewWriter(out, 0, 4, 2, ' ', 0)
// Ignore flushing errors
defer writer.Flush()
fmt.Fprintf(writer, listItemFmt, "ID", "NAME", "MEMBERSHIP", "STATUS", "AVAILABILITY", "MANAGER STATUS", "LEADER")
for _, node := range nodes {
name := node.Spec.Name
availability := string(node.Spec.Availability)
membership := string(node.Spec.Membership)
if name == "" {
name = node.Description.Hostname
}
leader := ""
if node.ManagerStatus != nil && node.ManagerStatus.Leader {
leader = "Yes"
}
reachability := ""
if node.ManagerStatus != nil {
reachability = string(node.ManagerStatus.Reachability)
}
ID := node.ID
if node.ID == info.Swarm.NodeID {
ID = ID + " *"
}
fmt.Fprintf(
writer,
listItemFmt,
ID,
name,
client.PrettyPrint(membership),
client.PrettyPrint(string(node.Status.State)),
client.PrettyPrint(availability),
client.PrettyPrint(reachability),
leader)
}
}
func printQuiet(out io.Writer, nodes []swarm.Node) {
for _, node := range nodes {
fmt.Fprintln(out, node.ID)
}
}

api/client/node/opts.go (new file, 50 lines)

@@ -0,0 +1,50 @@
package node
import (
"fmt"
"strings"
"github.com/docker/engine-api/types/swarm"
)
type nodeOptions struct {
role string
membership string
availability string
}
func (opts *nodeOptions) ToNodeSpec() (swarm.NodeSpec, error) {
var spec swarm.NodeSpec
switch swarm.NodeRole(strings.ToLower(opts.role)) {
case swarm.NodeRoleWorker:
spec.Role = swarm.NodeRoleWorker
case swarm.NodeRoleManager:
spec.Role = swarm.NodeRoleManager
case "":
default:
return swarm.NodeSpec{}, fmt.Errorf("invalid role %q, only worker and manager are supported", opts.role)
}
switch swarm.NodeMembership(strings.ToLower(opts.membership)) {
case swarm.NodeMembershipAccepted:
spec.Membership = swarm.NodeMembershipAccepted
case "":
default:
return swarm.NodeSpec{}, fmt.Errorf("invalid membership %q, only accepted is supported", opts.membership)
}
switch swarm.NodeAvailability(strings.ToLower(opts.availability)) {
case swarm.NodeAvailabilityActive:
spec.Availability = swarm.NodeAvailabilityActive
case swarm.NodeAvailabilityPause:
spec.Availability = swarm.NodeAvailabilityPause
case swarm.NodeAvailabilityDrain:
spec.Availability = swarm.NodeAvailabilityDrain
case "":
default:
return swarm.NodeSpec{}, fmt.Errorf("invalid availability %q, only active, pause and drain are supported", opts.availability)
}
return spec, nil
}

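ToNodeSpec centralizes validation of the --role, --membership, and --availability strings, so accept, promote, demote, and update all reject bad values the same way. A test-style sketch of that behavior, as if written inside the node package (values are examples):

package node // sketch: illustrative only

import (
	"testing"

	"github.com/docker/engine-api/types/swarm"
)

func TestToNodeSpecSketch(t *testing.T) {
	good := nodeOptions{role: "manager", availability: "drain"}
	spec, err := good.ToNodeSpec()
	if err != nil || spec.Role != swarm.NodeRoleManager || spec.Availability != swarm.NodeAvailabilityDrain {
		t.Fatalf("unexpected spec %+v, err %v", spec, err)
	}
	// Unknown strings are rejected up front instead of being sent to the daemon.
	bad := nodeOptions{role: "admin"}
	if _, err := bad.ToNodeSpec(); err == nil {
		t.Fatal("expected an error for an invalid role")
	}
}
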
@@ -0,0 +1,40 @@
package node
import (
"fmt"
"github.com/docker/docker/api/client"
"github.com/docker/docker/cli"
"github.com/docker/engine-api/types/swarm"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
func newPromoteCommand(dockerCli *client.DockerCli) *cobra.Command {
var flags *pflag.FlagSet
cmd := &cobra.Command{
Use: "promote NODE [NODE...]",
Short: "Promote a node to a manager in the swarm",
Args: cli.RequiresMinArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
return runPromote(dockerCli, flags, args)
},
}
flags = cmd.Flags()
return cmd
}
func runPromote(dockerCli *client.DockerCli, flags *pflag.FlagSet, args []string) error {
for _, id := range args {
if err := runUpdate(dockerCli, id, func(node *swarm.Node) {
node.Spec.Role = swarm.NodeRoleManager
}); err != nil {
return err
}
fmt.Println(id, "attempting to promote a node to a manager in the swarm.")
}
return nil
}

api/client/node/remove.go (new file, 36 lines)

@@ -0,0 +1,36 @@
package node
import (
"fmt"
"golang.org/x/net/context"
"github.com/docker/docker/api/client"
"github.com/docker/docker/cli"
"github.com/spf13/cobra"
)
func newRemoveCommand(dockerCli *client.DockerCli) *cobra.Command {
return &cobra.Command{
Use: "rm NODE [NODE...]",
Aliases: []string{"remove"},
Short: "Remove a node from the swarm",
Args: cli.RequiresMinArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
return runRemove(dockerCli, args)
},
}
}
func runRemove(dockerCli *client.DockerCli, args []string) error {
client := dockerCli.Client()
ctx := context.Background()
for _, nodeID := range args {
err := client.NodeRemove(ctx, nodeID)
if err != nil {
return err
}
fmt.Fprintf(dockerCli.Out(), "%s\n", nodeID)
}
return nil
}

api/client/node/tasks.go (new file, 72 lines)

@@ -0,0 +1,72 @@
package node
import (
"golang.org/x/net/context"
"github.com/docker/docker/api/client"
"github.com/docker/docker/api/client/idresolver"
"github.com/docker/docker/api/client/task"
"github.com/docker/docker/cli"
"github.com/docker/docker/opts"
"github.com/docker/engine-api/types"
"github.com/docker/engine-api/types/swarm"
"github.com/spf13/cobra"
)
type tasksOptions struct {
nodeID string
all bool
noResolve bool
filter opts.FilterOpt
}
func newTasksCommand(dockerCli *client.DockerCli) *cobra.Command {
opts := tasksOptions{filter: opts.NewFilterOpt()}
cmd := &cobra.Command{
Use: "tasks [OPTIONS] self|NODE",
Short: "List tasks running on a node",
Args: cli.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
opts.nodeID = args[0]
return runTasks(dockerCli, opts)
},
}
flags := cmd.Flags()
flags.BoolVarP(&opts.all, "all", "a", false, "Display all instances")
flags.BoolVarP(&opts.noResolve, "no-resolve", "n", false, "Do not map IDs to Names")
flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided")
return cmd
}
func runTasks(dockerCli *client.DockerCli, opts tasksOptions) error {
client := dockerCli.Client()
ctx := context.Background()
nodeRef, err := nodeReference(client, ctx, opts.nodeID)
if err != nil {
return err
}
node, err := client.NodeInspect(ctx, nodeRef)
if err != nil {
return err
}
filter := opts.filter.Value()
filter.Add("node", node.ID)
if !opts.all {
filter.Add("desired_state", string(swarm.TaskStateRunning))
filter.Add("desired_state", string(swarm.TaskStateAccepted))
}
tasks, err := client.TaskList(
ctx,
types.TaskListOptions{Filter: filter})
if err != nil {
return err
}
return task.Print(dockerCli, ctx, tasks, idresolver.New(client, opts.noResolve))
}

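Both tasks commands reduce to a TaskList call whose filter pins the parent object and, unless --all is given, the desired states. The same query built directly against the client (the node ID is a placeholder):

package main // sketch

import (
	"fmt"

	"golang.org/x/net/context"

	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types"
	"github.com/docker/engine-api/types/filters"
	"github.com/docker/engine-api/types/swarm"
)

func main() {
	apiClient, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	// Mirror `docker node tasks` without --all: running and accepted tasks only.
	filter := filters.NewArgs()
	filter.Add("node", "hypothetical-node-id")
	filter.Add("desired_state", string(swarm.TaskStateRunning))
	filter.Add("desired_state", string(swarm.TaskStateAccepted))
	tasks, err := apiClient.TaskList(context.Background(), types.TaskListOptions{Filter: filter})
	if err != nil {
		panic(err)
	}
	for _, t := range tasks {
		fmt.Println(t.ID, t.Status.State)
	}
}
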
api/client/node/update.go (new file, 100 lines)

@@ -0,0 +1,100 @@
package node
import (
"fmt"
"github.com/docker/docker/api/client"
"github.com/docker/docker/cli"
runconfigopts "github.com/docker/docker/runconfig/opts"
"github.com/docker/engine-api/types/swarm"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"golang.org/x/net/context"
)
func newUpdateCommand(dockerCli *client.DockerCli) *cobra.Command {
var opts nodeOptions
var flags *pflag.FlagSet
cmd := &cobra.Command{
Use: "update [OPTIONS] NODE",
Short: "Update a node",
Args: cli.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
return runUpdate(dockerCli, args[0], mergeNodeUpdate(flags))
},
}
flags = cmd.Flags()
flags.StringVar(&opts.role, "role", "", "Role of the node (worker/manager)")
flags.StringVar(&opts.membership, "membership", "", "Membership of the node (accepted/rejected)")
flags.StringVar(&opts.availability, "availability", "", "Availability of the node (active/pause/drain)")
return cmd
}
func runUpdate(dockerCli *client.DockerCli, nodeID string, mergeNode func(node *swarm.Node)) error {
client := dockerCli.Client()
ctx := context.Background()
node, err := client.NodeInspect(ctx, nodeID)
if err != nil {
return err
}
mergeNode(&node)
err = client.NodeUpdate(ctx, nodeID, node.Version, node.Spec)
if err != nil {
return err
}
fmt.Fprintf(dockerCli.Out(), "%s\n", nodeID)
return nil
}
func mergeNodeUpdate(flags *pflag.FlagSet) func(*swarm.Node) {
return func(node *swarm.Node) {
mergeString := func(flag string, field *string) {
if flags.Changed(flag) {
*field, _ = flags.GetString(flag)
}
}
mergeRole := func(flag string, field *swarm.NodeRole) {
if flags.Changed(flag) {
str, _ := flags.GetString(flag)
*field = swarm.NodeRole(str)
}
}
mergeMembership := func(flag string, field *swarm.NodeMembership) {
if flags.Changed(flag) {
str, _ := flags.GetString(flag)
*field = swarm.NodeMembership(str)
}
}
mergeAvailability := func(flag string, field *swarm.NodeAvailability) {
if flags.Changed(flag) {
str, _ := flags.GetString(flag)
*field = swarm.NodeAvailability(str)
}
}
mergeLabels := func(flag string, field *map[string]string) {
if flags.Changed(flag) {
values, _ := flags.GetStringSlice(flag)
for key, value := range runconfigopts.ConvertKVStringsToMap(values) {
(*field)[key] = value
}
}
}
spec := &node.Spec
mergeString("name", &spec.Name)
// TODO: setting labels is not working
mergeLabels("label", &spec.Labels)
mergeRole("role", &spec.Role)
mergeMembership("membership", &spec.Membership)
mergeAvailability("availability", &spec.Availability)
}
}

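runUpdate is a read-modify-write cycle: inspect the node, mutate the spec, then write it back guarded by the version that was read, so concurrent updates fail instead of being clobbered. The same pattern standalone (the drain mutation and node ID are illustrative):

package main // sketch

import (
	"golang.org/x/net/context"

	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types/swarm"
)

// drainNode uses the same inspect/mutate/update cycle as the node commands.
func drainNode(ctx context.Context, apiClient client.APIClient, nodeID string) error {
	node, err := apiClient.NodeInspect(ctx, nodeID)
	if err != nil {
		return err
	}
	node.Spec.Availability = swarm.NodeAvailabilityDrain
	// node.Version guards against racing with another update.
	return apiClient.NodeUpdate(ctx, nodeID, node.Version, node.Spec)
}

func main() {
	apiClient, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	if err := drainNode(context.Background(), apiClient, "hypothetical-node-id"); err != nil {
		panic(err)
	}
}
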
api/client/service/cmd.go (new file, 32 lines)

@@ -0,0 +1,32 @@
package service
import (
"fmt"
"github.com/spf13/cobra"
"github.com/docker/docker/api/client"
"github.com/docker/docker/cli"
)
// NewServiceCommand returns a cobra command for `service` subcommands
func NewServiceCommand(dockerCli *client.DockerCli) *cobra.Command {
cmd := &cobra.Command{
Use: "service",
Short: "Manage docker services",
Args: cli.NoArgs,
Run: func(cmd *cobra.Command, args []string) {
fmt.Fprintf(dockerCli.Err(), "\n"+cmd.UsageString())
},
}
cmd.AddCommand(
newCreateCommand(dockerCli),
newInspectCommand(dockerCli),
newTasksCommand(dockerCli),
newListCommand(dockerCli),
newRemoveCommand(dockerCli),
newScaleCommand(dockerCli),
newUpdateCommand(dockerCli),
)
return cmd
}

@@ -0,0 +1,47 @@
package service
import (
"fmt"
"github.com/docker/docker/api/client"
"github.com/docker/docker/cli"
"github.com/spf13/cobra"
"golang.org/x/net/context"
)
func newCreateCommand(dockerCli *client.DockerCli) *cobra.Command {
opts := newServiceOptions()
cmd := &cobra.Command{
Use: "create [OPTIONS] IMAGE [COMMAND] [ARG...]",
Short: "Create a new service",
Args: cli.RequiresMinArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
opts.image = args[0]
if len(args) > 1 {
opts.args = args[1:]
}
return runCreate(dockerCli, opts)
},
}
addServiceFlags(cmd, opts)
cmd.Flags().SetInterspersed(false)
return cmd
}
func runCreate(dockerCli *client.DockerCli, opts *serviceOptions) error {
client := dockerCli.Client()
service, err := opts.ToService()
if err != nil {
return err
}
response, err := client.ServiceCreate(context.Background(), service)
if err != nil {
return err
}
fmt.Fprintf(dockerCli.Out(), "%s\n", response.ID)
return nil
}

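service create is a thin wrapper: the flags build a swarm.ServiceSpec and the client sends it in one ServiceCreate call. A hand-built equivalent of `docker service create --name web --replicas 2 nginx` (names and values are examples):

package main // sketch

import (
	"fmt"

	"golang.org/x/net/context"

	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types/swarm"
)

func main() {
	apiClient, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	replicas := uint64(2)
	spec := swarm.ServiceSpec{
		Annotations: swarm.Annotations{Name: "web"},
		TaskTemplate: swarm.TaskSpec{
			ContainerSpec: swarm.ContainerSpec{Image: "nginx"},
		},
		Mode: swarm.ServiceMode{
			Replicated: &swarm.ReplicatedService{Replicas: &replicas},
		},
	}
	response, err := apiClient.ServiceCreate(context.Background(), spec)
	if err != nil {
		panic(err)
	}
	fmt.Println(response.ID)
}
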
@@ -0,0 +1,127 @@
package service
import (
"fmt"
"io"
"strings"
"golang.org/x/net/context"
"github.com/docker/docker/api/client"
"github.com/docker/docker/api/client/inspect"
"github.com/docker/docker/cli"
"github.com/docker/docker/pkg/ioutils"
apiclient "github.com/docker/engine-api/client"
"github.com/docker/engine-api/types/swarm"
"github.com/spf13/cobra"
)
type inspectOptions struct {
refs []string
format string
pretty bool
}
func newInspectCommand(dockerCli *client.DockerCli) *cobra.Command {
var opts inspectOptions
cmd := &cobra.Command{
Use: "inspect [OPTIONS] SERVICE [SERVICE...]",
Short: "Inspect a service",
Args: cli.RequiresMinArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
opts.refs = args
if opts.pretty && len(opts.format) > 0 {
return fmt.Errorf("--format is incompatible with human friendly format")
}
return runInspect(dockerCli, opts)
},
}
flags := cmd.Flags()
flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given go template")
flags.BoolVarP(&opts.pretty, "pretty", "p", false, "Print the information in a human friendly format.")
return cmd
}
func runInspect(dockerCli *client.DockerCli, opts inspectOptions) error {
client := dockerCli.Client()
ctx := context.Background()
getRef := func(ref string) (interface{}, []byte, error) {
service, err := client.ServiceInspect(ctx, ref)
if err == nil || !apiclient.IsErrServiceNotFound(err) {
return service, nil, err
}
return nil, nil, fmt.Errorf("Error: no such service: %s", ref)
}
if !opts.pretty {
return inspect.Inspect(dockerCli.Out(), opts.refs, opts.format, getRef)
}
return printHumanFriendly(dockerCli.Out(), opts.refs, getRef)
}
func printHumanFriendly(out io.Writer, refs []string, getRef inspect.GetRefFunc) error {
for idx, ref := range refs {
obj, _, err := getRef(ref)
if err != nil {
return err
}
printService(out, obj.(swarm.Service))
// TODO: better way to do this?
// print extra space between objects, but not after the last one
if idx+1 != len(refs) {
fmt.Fprintf(out, "\n\n")
}
}
return nil
}
// TODO: use a template
func printService(out io.Writer, service swarm.Service) {
fmt.Fprintf(out, "ID:\t\t%s\n", service.ID)
fmt.Fprintf(out, "Name:\t\t%s\n", service.Spec.Name)
if service.Spec.Labels != nil {
fmt.Fprintln(out, "Labels:")
for k, v := range service.Spec.Labels {
fmt.Fprintf(out, " - %s=%s\n", k, v)
}
}
if service.Spec.Mode.Global != nil {
fmt.Fprintln(out, "Mode:\t\tGLOBAL")
} else {
fmt.Fprintln(out, "Mode:\t\tREPLICATED")
if service.Spec.Mode.Replicated.Replicas != nil {
fmt.Fprintf(out, " Replicas:\t\t%d\n", *service.Spec.Mode.Replicated.Replicas)
}
}
fmt.Fprintln(out, "Placement:")
fmt.Fprintln(out, " Strategy:\tSPREAD")
fmt.Fprintf(out, "UpateConfig:\n")
fmt.Fprintf(out, " Parallelism:\t%d\n", service.Spec.UpdateConfig.Parallelism)
if service.Spec.UpdateConfig.Delay.Nanoseconds() > 0 {
fmt.Fprintf(out, " Delay:\t\t%s\n", service.Spec.UpdateConfig.Delay)
}
fmt.Fprintf(out, "ContainerSpec:\n")
printContainerSpec(out, service.Spec.TaskTemplate.ContainerSpec)
}
func printContainerSpec(out io.Writer, containerSpec swarm.ContainerSpec) {
fmt.Fprintf(out, " Image:\t\t%s\n", containerSpec.Image)
if len(containerSpec.Command) > 0 {
fmt.Fprintf(out, " Command:\t%s\n", strings.Join(containerSpec.Command, " "))
}
if len(containerSpec.Args) > 0 {
fmt.Fprintf(out, " Args:\t%s\n", strings.Join(containerSpec.Args, " "))
}
if len(containerSpec.Env) > 0 {
fmt.Fprintf(out, " Env:\t\t%s\n", strings.Join(containerSpec.Env, " "))
}
ioutils.FprintfIfNotEmpty(out, " Dir\t\t%s\n", containerSpec.Dir)
ioutils.FprintfIfNotEmpty(out, " User\t\t%s\n", containerSpec.User)
}

@@ -0,0 +1,97 @@
package service
import (
"fmt"
"io"
"strings"
"text/tabwriter"
"golang.org/x/net/context"
"github.com/docker/docker/api/client"
"github.com/docker/docker/cli"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/engine-api/types"
"github.com/docker/engine-api/types/swarm"
"github.com/spf13/cobra"
)
const (
listItemFmt = "%s\t%s\t%s\t%s\t%s\n"
)
type listOptions struct {
quiet bool
filter opts.FilterOpt
}
func newListCommand(dockerCli *client.DockerCli) *cobra.Command {
opts := listOptions{filter: opts.NewFilterOpt()}
cmd := &cobra.Command{
Use: "ls",
Aliases: []string{"list"},
Short: "List services",
Args: cli.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
return runList(dockerCli, opts)
},
}
flags := cmd.Flags()
flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display IDs")
flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided")
return cmd
}
func runList(dockerCli *client.DockerCli, opts listOptions) error {
client := dockerCli.Client()
services, err := client.ServiceList(
context.Background(),
types.ServiceListOptions{Filter: opts.filter.Value()})
if err != nil {
return err
}
out := dockerCli.Out()
if opts.quiet {
printQuiet(out, services)
} else {
printTable(out, services)
}
return nil
}
func printTable(out io.Writer, services []swarm.Service) {
writer := tabwriter.NewWriter(out, 0, 4, 2, ' ', 0)
// Ignore flushing errors
defer writer.Flush()
fmt.Fprintf(writer, listItemFmt, "ID", "NAME", "SCALE", "IMAGE", "COMMAND")
for _, service := range services {
scale := ""
if service.Spec.Mode.Replicated != nil && service.Spec.Mode.Replicated.Replicas != nil {
scale = fmt.Sprintf("%d", *service.Spec.Mode.Replicated.Replicas)
} else if service.Spec.Mode.Global != nil {
scale = "global"
}
fmt.Fprintf(
writer,
listItemFmt,
stringid.TruncateID(service.ID),
service.Spec.Name,
scale,
service.Spec.TaskTemplate.ContainerSpec.Image,
strings.Join(service.Spec.TaskTemplate.ContainerSpec.Args, " "))
}
}
func printQuiet(out io.Writer, services []swarm.Service) {
for _, service := range services {
fmt.Fprintln(out, service.ID)
}
}

api/client/service/opts.go (new file, 462 lines)

@@ -0,0 +1,462 @@
package service
import (
"encoding/csv"
"fmt"
"math/big"
"strconv"
"strings"
"time"
"github.com/docker/docker/opts"
runconfigopts "github.com/docker/docker/runconfig/opts"
"github.com/docker/engine-api/types/swarm"
"github.com/docker/go-connections/nat"
units "github.com/docker/go-units"
"github.com/spf13/cobra"
)
var (
// DefaultReplicas is the default replicas to use for a replicated service
DefaultReplicas uint64 = 1
)
type int64Value interface {
Value() int64
}
type memBytes int64
func (m *memBytes) String() string {
return strconv.FormatInt(m.Value(), 10)
}
func (m *memBytes) Set(value string) error {
val, err := units.RAMInBytes(value)
*m = memBytes(val)
return err
}
func (m *memBytes) Type() string {
return "MemoryBytes"
}
func (m *memBytes) Value() int64 {
return int64(*m)
}
type nanoCPUs int64
func (c *nanoCPUs) String() string {
return strconv.FormatInt(c.Value(), 10)
}
func (c *nanoCPUs) Set(value string) error {
cpu, ok := new(big.Rat).SetString(value)
if !ok {
return fmt.Errorf("Failed to parse %v as a rational number", value)
}
nano := cpu.Mul(cpu, big.NewRat(1e9, 1))
if !nano.IsInt() {
return fmt.Errorf("value is too precise")
}
*c = nanoCPUs(nano.Num().Int64())
return nil
}
func (c *nanoCPUs) Type() string {
return "NanoCPUs"
}
func (c *nanoCPUs) Value() int64 {
return int64(*c)
}
// DurationOpt is an option type for time.Duration that uses a pointer. This
// allows us to get nil values outside, instead of defaulting to 0
type DurationOpt struct {
value *time.Duration
}
// Set a new value on the option
func (d *DurationOpt) Set(s string) error {
v, err := time.ParseDuration(s)
d.value = &v
return err
}
// Type returns the type of this option
func (d *DurationOpt) Type() string {
return "duration-ptr"
}
// String returns a string repr of this option
func (d *DurationOpt) String() string {
if d.value != nil {
return d.value.String()
}
return "none"
}
// Value returns the time.Duration
func (d *DurationOpt) Value() *time.Duration {
return d.value
}
// Uint64Opt represents a uint64.
type Uint64Opt struct {
value *uint64
}
// Set a new value on the option
func (i *Uint64Opt) Set(s string) error {
v, err := strconv.ParseUint(s, 0, 64)
i.value = &v
return err
}
// Type returns the type of this option
func (i *Uint64Opt) Type() string {
return "uint64-ptr"
}
// String returns a string repr of this option
func (i *Uint64Opt) String() string {
if i.value != nil {
return fmt.Sprintf("%v", *i.value)
}
return "none"
}
// Value returns the uint64
func (i *Uint64Opt) Value() *uint64 {
return i.value
}
// MountOpt is a Value type for parsing mounts
type MountOpt struct {
values []swarm.Mount
}
// Set a new mount value
func (m *MountOpt) Set(value string) error {
csvReader := csv.NewReader(strings.NewReader(value))
fields, err := csvReader.Read()
if err != nil {
return err
}
mount := swarm.Mount{}
volumeOptions := func() *swarm.VolumeOptions {
if mount.VolumeOptions == nil {
mount.VolumeOptions = &swarm.VolumeOptions{
Labels: make(map[string]string),
}
}
return mount.VolumeOptions
}
setValueOnMap := func(target map[string]string, value string) {
parts := strings.SplitN(value, "=", 2)
if len(parts) == 1 {
target[value] = ""
} else {
target[parts[0]] = parts[1]
}
}
for _, field := range fields {
parts := strings.SplitN(field, "=", 2)
if len(parts) == 1 && strings.ToLower(parts[0]) == "writable" {
mount.Writable = true
continue
}
if len(parts) != 2 {
return fmt.Errorf("invald field '%s' must be a key=value pair", field)
}
key, value := parts[0], parts[1]
switch strings.ToLower(key) {
case "type":
mount.Type = swarm.MountType(strings.ToUpper(value))
case "source":
mount.Source = value
case "target":
mount.Target = value
case "writable":
mount.Writable, err = strconv.ParseBool(value)
if err != nil {
return fmt.Errorf("invald value for writable: %s", err.Error())
}
case "bind-propagation":
mount.BindOptions.Propagation = swarm.MountPropagation(strings.ToUpper(value))
case "volume-populate":
volumeOptions().Populate, err = strconv.ParseBool(value)
if err != nil {
return fmt.Errorf("invald value for populate: %s", err.Error())
}
case "volume-label":
setValueOnMap(volumeOptions().Labels, value)
case "volume-driver":
volumeOptions().DriverConfig.Name = value
case "volume-driver-opt":
if volumeOptions().DriverConfig.Options == nil {
volumeOptions().DriverConfig.Options = make(map[string]string)
}
setValueOnMap(volumeOptions().DriverConfig.Options, value)
default:
return fmt.Errorf("unexpected key '%s' in '%s'", key, value)
}
}
if mount.Type == "" {
return fmt.Errorf("type is required")
}
if mount.Target == "" {
return fmt.Errorf("target is required")
}
m.values = append(m.values, mount)
return nil
}
// Type returns the type of this option
func (m *MountOpt) Type() string {
return "mount"
}
// String returns a string repr of this option
func (m *MountOpt) String() string {
mounts := []string{}
for _, mount := range m.values {
mounts = append(mounts, fmt.Sprintf("%v", mount))
}
return strings.Join(mounts, ", ")
}
// Value returns the mounts
func (m *MountOpt) Value() []swarm.Mount {
return m.values
}
type updateOptions struct {
parallelism uint64
delay time.Duration
}
type resourceOptions struct {
limitCPU nanoCPUs
limitMemBytes memBytes
resCPU nanoCPUs
resMemBytes memBytes
}
func (r *resourceOptions) ToResourceRequirements() *swarm.ResourceRequirements {
return &swarm.ResourceRequirements{
Limits: &swarm.Resources{
NanoCPUs: r.limitCPU.Value(),
MemoryBytes: r.limitMemBytes.Value(),
},
Reservations: &swarm.Resources{
NanoCPUs: r.resCPU.Value(),
MemoryBytes: r.resMemBytes.Value(),
},
}
}
type restartPolicyOptions struct {
condition string
delay DurationOpt
maxAttempts Uint64Opt
window DurationOpt
}
func (r *restartPolicyOptions) ToRestartPolicy() *swarm.RestartPolicy {
return &swarm.RestartPolicy{
Condition: swarm.RestartPolicyCondition(r.condition),
Delay: r.delay.Value(),
MaxAttempts: r.maxAttempts.Value(),
Window: r.window.Value(),
}
}
func convertNetworks(networks []string) []swarm.NetworkAttachmentConfig {
nets := []swarm.NetworkAttachmentConfig{}
for _, network := range networks {
nets = append(nets, swarm.NetworkAttachmentConfig{Target: network})
}
return nets
}
type endpointOptions struct {
mode string
ports opts.ListOpts
}
func (e *endpointOptions) ToEndpointSpec() *swarm.EndpointSpec {
portConfigs := []swarm.PortConfig{}
// We can ignore errors because the format was already validated by ValidatePort
ports, portBindings, _ := nat.ParsePortSpecs(e.ports.GetAll())
for port := range ports {
portConfigs = append(portConfigs, convertPortToPortConfig(port, portBindings)...)
}
return &swarm.EndpointSpec{
Mode: swarm.ResolutionMode(e.mode),
Ports: portConfigs,
}
}
func convertPortToPortConfig(
port nat.Port,
portBindings map[nat.Port][]nat.PortBinding,
) []swarm.PortConfig {
ports := []swarm.PortConfig{}
for _, binding := range portBindings[port] {
hostPort, _ := strconv.ParseUint(binding.HostPort, 10, 16)
ports = append(ports, swarm.PortConfig{
//TODO Name: ?
Protocol: swarm.PortConfigProtocol(strings.ToLower(port.Proto())),
TargetPort: uint32(port.Int()),
PublishedPort: uint32(hostPort),
})
}
return ports
}
// ValidatePort validates a string is in the expected format for a port definition
func ValidatePort(value string) (string, error) {
portMappings, err := nat.ParsePortSpec(value)
for _, portMapping := range portMappings {
if portMapping.Binding.HostIP != "" {
return "", fmt.Errorf("HostIP is not supported by a service.")
}
}
return value, err
}
type serviceOptions struct {
name string
labels opts.ListOpts
image string
command []string
args []string
env opts.ListOpts
workdir string
user string
mounts MountOpt
resources resourceOptions
stopGrace DurationOpt
replicas Uint64Opt
mode string
restartPolicy restartPolicyOptions
constraints []string
update updateOptions
networks []string
endpoint endpointOptions
}
func newServiceOptions() *serviceOptions {
return &serviceOptions{
labels: opts.NewListOpts(runconfigopts.ValidateEnv),
env: opts.NewListOpts(runconfigopts.ValidateEnv),
endpoint: endpointOptions{
ports: opts.NewListOpts(ValidatePort),
},
}
}
func (opts *serviceOptions) ToService() (swarm.ServiceSpec, error) {
var service swarm.ServiceSpec
service = swarm.ServiceSpec{
Annotations: swarm.Annotations{
Name: opts.name,
Labels: runconfigopts.ConvertKVStringsToMap(opts.labels.GetAll()),
},
TaskTemplate: swarm.TaskSpec{
ContainerSpec: swarm.ContainerSpec{
Image: opts.image,
Command: opts.command,
Args: opts.args,
Env: opts.env.GetAll(),
Dir: opts.workdir,
User: opts.user,
Mounts: opts.mounts.Value(),
StopGracePeriod: opts.stopGrace.Value(),
},
Resources: opts.resources.ToResourceRequirements(),
RestartPolicy: opts.restartPolicy.ToRestartPolicy(),
Placement: &swarm.Placement{
Constraints: opts.constraints,
},
},
Mode: swarm.ServiceMode{},
UpdateConfig: &swarm.UpdateConfig{
Parallelism: opts.update.parallelism,
Delay: opts.update.delay,
},
Networks: convertNetworks(opts.networks),
EndpointSpec: opts.endpoint.ToEndpointSpec(),
}
switch opts.mode {
case "global":
if opts.replicas.Value() != nil {
return service, fmt.Errorf("replicas can only be used with replicated mode")
}
service.Mode.Global = &swarm.GlobalService{}
case "replicated":
service.Mode.Replicated = &swarm.ReplicatedService{
Replicas: opts.replicas.Value(),
}
default:
return service, fmt.Errorf("Unknown mode: %s", opts.mode)
}
return service, nil
}
// addServiceFlags adds all flags that are common to both `create` and `update`.
// Any flags that are not common are added separately in the individual command.
func addServiceFlags(cmd *cobra.Command, opts *serviceOptions) {
flags := cmd.Flags()
flags.StringVar(&opts.name, "name", "", "Service name")
flags.VarP(&opts.labels, "label", "l", "Service labels")
flags.VarP(&opts.env, "env", "e", "Set environment variables")
flags.StringVarP(&opts.workdir, "workdir", "w", "", "Working directory inside the container")
flags.StringVarP(&opts.user, "user", "u", "", "Username or UID")
flags.VarP(&opts.mounts, "mount", "m", "Attach a mount to the service")
flags.Var(&opts.resources.limitCPU, "limit-cpu", "Limit CPUs")
flags.Var(&opts.resources.limitMemBytes, "limit-memory", "Limit Memory")
flags.Var(&opts.resources.resCPU, "reserve-cpu", "Reserve CPUs")
flags.Var(&opts.resources.resMemBytes, "reserve-memory", "Reserve Memory")
flags.Var(&opts.stopGrace, "stop-grace-period", "Time to wait before force killing a container")
flags.StringVar(&opts.mode, "mode", "replicated", "Service mode (replicated or global)")
flags.Var(&opts.replicas, "replicas", "Number of tasks")
flags.StringVar(&opts.restartPolicy.condition, "restart-condition", "", "Restart when condition is met (none, on_failure, or any)")
flags.Var(&opts.restartPolicy.delay, "restart-delay", "Delay between restart attempts")
flags.Var(&opts.restartPolicy.maxAttempts, "restart-max-attempts", "Maximum number of restarts before giving up")
flags.Var(&opts.restartPolicy.window, "restart-window", "Window used to evalulate the restart policy")
flags.StringSliceVar(&opts.constraints, "constraint", []string{}, "Placement constraints")
flags.Uint64Var(&opts.update.parallelism, "update-parallelism", 1, "Maximum number of tasks updated simultaneously")
flags.DurationVar(&opts.update.delay, "update-delay", time.Duration(0), "Delay between updates")
flags.StringSliceVar(&opts.networks, "network", []string{}, "Network attachments")
flags.StringVar(&opts.endpoint.mode, "endpoint-mode", "", "Endpoint mode(Valid values: VIP, DNSRR)")
flags.VarP(&opts.endpoint.ports, "publish", "p", "Publish a port as a node port")
}

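The custom pflag Values above carry most of the flag-parsing weight: nanoCPUs converts a decimal CPU count into nano-CPUs via big.Rat, and MountOpt decodes one CSV-encoded --mount argument per Set call. A quick sketch of both, as if inside the service package (all values are examples):

package service // sketch: illustrative only

import "fmt"

func exampleOptValues() error {
	var cpus nanoCPUs
	if err := cpus.Set("1.5"); err != nil {
		return err
	}
	fmt.Println(cpus.Value()) // 1500000000, i.e. 1.5 CPUs in nano-CPUs

	var mounts MountOpt
	if err := mounts.Set("type=volume,source=data,target=/data,volume-label=tier=backend,writable"); err != nil {
		return err
	}
	// The parsed swarm.Mount has Type VOLUME, Writable true, and the volume label set.
	fmt.Printf("%+v\n", mounts.Value()[0])
	return nil
}
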
@@ -0,0 +1,47 @@
package service
import (
"fmt"
"strings"
"github.com/docker/docker/api/client"
"github.com/docker/docker/cli"
"github.com/spf13/cobra"
"golang.org/x/net/context"
)
func newRemoveCommand(dockerCli *client.DockerCli) *cobra.Command {
cmd := &cobra.Command{
Use: "rm [OPTIONS] SERVICE",
Aliases: []string{"remove"},
Short: "Remove a service",
Args: cli.RequiresMinArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
return runRemove(dockerCli, args)
},
}
cmd.Flags()
return cmd
}
func runRemove(dockerCli *client.DockerCli, sids []string) error {
client := dockerCli.Client()
ctx := context.Background()
var errs []string
for _, sid := range sids {
err := client.ServiceRemove(ctx, sid)
if err != nil {
errs = append(errs, err.Error())
continue
}
fmt.Fprintf(dockerCli.Out(), "%s\n", sid)
}
if len(errs) > 0 {
return fmt.Errorf(strings.Join(errs, "\n"))
}
return nil
}

@@ -0,0 +1,86 @@
package service
import (
"fmt"
"strconv"
"strings"
"golang.org/x/net/context"
"github.com/docker/docker/api/client"
"github.com/docker/docker/cli"
"github.com/spf13/cobra"
)
func newScaleCommand(dockerCli *client.DockerCli) *cobra.Command {
return &cobra.Command{
Use: "scale SERVICE=SCALE [SERVICE=SCALE...]",
Short: "Scale one or multiple services",
Args: scaleArgs,
RunE: func(cmd *cobra.Command, args []string) error {
return runScale(dockerCli, args)
},
}
}
func scaleArgs(cmd *cobra.Command, args []string) error {
if err := cli.RequiresMinArgs(1)(cmd, args); err != nil {
return err
}
for _, arg := range args {
if parts := strings.SplitN(arg, "=", 2); len(parts) != 2 {
return fmt.Errorf(
"Invalid scale specifier '%s'.\nSee '%s --help'.\n\nUsage: %s\n\n%s",
arg,
cmd.CommandPath(),
cmd.UseLine(),
cmd.Short,
)
}
}
return nil
}
func runScale(dockerCli *client.DockerCli, args []string) error {
var errors []string
for _, arg := range args {
parts := strings.SplitN(arg, "=", 2)
serviceID, scale := parts[0], parts[1]
if err := runServiceScale(dockerCli, serviceID, scale); err != nil {
errors = append(errors, fmt.Sprintf("%s: %s", serviceID, err.Error()))
}
}
if len(errors) == 0 {
return nil
}
return fmt.Errorf(strings.Join(errors, "\n"))
}
func runServiceScale(dockerCli *client.DockerCli, serviceID string, scale string) error {
client := dockerCli.Client()
ctx := context.Background()
service, err := client.ServiceInspect(ctx, serviceID)
if err != nil {
return err
}
serviceMode := &service.Spec.Mode
if serviceMode.Replicated == nil {
return fmt.Errorf("scale can only be used with replicated mode")
}
uintScale, err := strconv.ParseUint(scale, 10, 64)
if err != nil {
return fmt.Errorf("invalid replicas value %s: %s", scale, err.Error())
}
serviceMode.Replicated.Replicas = &uintScale
err = client.ServiceUpdate(ctx, service.ID, service.Version, service.Spec)
if err != nil {
return err
}
fmt.Fprintf(dockerCli.Out(), "%s scaled to %s\n", serviceID, scale)
return nil
}

@@ -0,0 +1,65 @@
package service
import (
"golang.org/x/net/context"
"github.com/docker/docker/api/client"
"github.com/docker/docker/api/client/idresolver"
"github.com/docker/docker/api/client/task"
"github.com/docker/docker/cli"
"github.com/docker/docker/opts"
"github.com/docker/engine-api/types"
"github.com/docker/engine-api/types/swarm"
"github.com/spf13/cobra"
)
type tasksOptions struct {
serviceID string
all bool
noResolve bool
filter opts.FilterOpt
}
func newTasksCommand(dockerCli *client.DockerCli) *cobra.Command {
opts := tasksOptions{filter: opts.NewFilterOpt()}
cmd := &cobra.Command{
Use: "tasks [OPTIONS] SERVICE",
Short: "List the tasks of a service",
Args: cli.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
opts.serviceID = args[0]
return runTasks(dockerCli, opts)
},
}
flags := cmd.Flags()
flags.BoolVarP(&opts.all, "all", "a", false, "Display all tasks")
flags.BoolVarP(&opts.noResolve, "no-resolve", "n", false, "Do not map IDs to Names")
flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided")
return cmd
}
func runTasks(dockerCli *client.DockerCli, opts tasksOptions) error {
client := dockerCli.Client()
ctx := context.Background()
service, err := client.ServiceInspect(ctx, opts.serviceID)
if err != nil {
return err
}
filter := opts.filter.Value()
filter.Add("service", service.ID)
if !opts.all && !filter.Include("desired_state") {
filter.Add("desired_state", string(swarm.TaskStateRunning))
filter.Add("desired_state", string(swarm.TaskStateAccepted))
}
tasks, err := client.TaskList(ctx, types.TaskListOptions{Filter: filter})
if err != nil {
return err
}
return task.Print(dockerCli, ctx, tasks, idresolver.New(client, opts.noResolve))
}

@@ -0,0 +1,244 @@
package service
import (
"fmt"
"time"
"golang.org/x/net/context"
"github.com/docker/docker/api/client"
"github.com/docker/docker/cli"
"github.com/docker/docker/opts"
runconfigopts "github.com/docker/docker/runconfig/opts"
"github.com/docker/engine-api/types/swarm"
"github.com/docker/go-connections/nat"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
func newUpdateCommand(dockerCli *client.DockerCli) *cobra.Command {
opts := newServiceOptions()
var flags *pflag.FlagSet
cmd := &cobra.Command{
Use: "update [OPTIONS] SERVICE",
Short: "Update a service",
Args: cli.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
return runUpdate(dockerCli, flags, args[0])
},
}
flags = cmd.Flags()
flags.String("image", "", "Service image tag")
flags.StringSlice("command", []string{}, "Service command")
flags.StringSlice("arg", []string{}, "Service command args")
addServiceFlags(cmd, opts)
return cmd
}
func runUpdate(dockerCli *client.DockerCli, flags *pflag.FlagSet, serviceID string) error {
client := dockerCli.Client()
ctx := context.Background()
service, err := client.ServiceInspect(ctx, serviceID)
if err != nil {
return err
}
err = mergeService(&service.Spec, flags)
if err != nil {
return err
}
err = client.ServiceUpdate(ctx, service.ID, service.Version, service.Spec)
if err != nil {
return err
}
fmt.Fprintf(dockerCli.Out(), "%s\n", serviceID)
return nil
}
func mergeService(spec *swarm.ServiceSpec, flags *pflag.FlagSet) error {
mergeString := func(flag string, field *string) {
if flags.Changed(flag) {
*field, _ = flags.GetString(flag)
}
}
mergeListOpts := func(flag string, field *[]string) {
if flags.Changed(flag) {
value := flags.Lookup(flag).Value.(*opts.ListOpts)
*field = value.GetAll()
}
}
mergeSlice := func(flag string, field *[]string) {
if flags.Changed(flag) {
*field, _ = flags.GetStringSlice(flag)
}
}
mergeInt64Value := func(flag string, field *int64) {
if flags.Changed(flag) {
*field = flags.Lookup(flag).Value.(int64Value).Value()
}
}
mergeDuration := func(flag string, field *time.Duration) {
if flags.Changed(flag) {
*field, _ = flags.GetDuration(flag)
}
}
mergeDurationOpt := func(flag string, field *time.Duration) {
if flags.Changed(flag) {
*field = *flags.Lookup(flag).Value.(*DurationOpt).Value()
}
}
mergeUint64 := func(flag string, field *uint64) {
if flags.Changed(flag) {
*field, _ = flags.GetUint64(flag)
}
}
mergeUint64Opt := func(flag string, field *uint64) {
if flags.Changed(flag) {
*field = *flags.Lookup(flag).Value.(*Uint64Opt).Value()
}
}
cspec := &spec.TaskTemplate.ContainerSpec
task := &spec.TaskTemplate
mergeString("name", &spec.Name)
mergeLabels(flags, &spec.Labels)
mergeString("image", &cspec.Image)
mergeSlice("command", &cspec.Command)
mergeSlice("arg", &cspec.Command)
mergeListOpts("env", &cspec.Env)
mergeString("workdir", &cspec.Dir)
mergeString("user", &cspec.User)
mergeMounts(flags, &cspec.Mounts)
mergeInt64Value("limit-cpu", &task.Resources.Limits.NanoCPUs)
mergeInt64Value("limit-memory", &task.Resources.Limits.MemoryBytes)
mergeInt64Value("reserve-cpu", &task.Resources.Reservations.NanoCPUs)
mergeInt64Value("reserve-memory", &task.Resources.Reservations.MemoryBytes)
mergeDurationOpt("stop-grace-period", cspec.StopGracePeriod)
if flags.Changed("restart-policy-condition") {
value, _ := flags.GetString("restart-policy-condition")
task.RestartPolicy.Condition = swarm.RestartPolicyCondition(value)
}
mergeDurationOpt("restart-policy-delay", task.RestartPolicy.Delay)
mergeUint64Opt("restart-policy-max-attempts", task.RestartPolicy.MaxAttempts)
mergeDurationOpt("restart-policy-window", task.RestartPolicy.Window)
mergeSlice("constraint", &task.Placement.Constraints)
if err := mergeMode(flags, &spec.Mode); err != nil {
return err
}
mergeUint64("updateconfig-parallelism", &spec.UpdateConfig.Parallelism)
mergeDuration("updateconfig-delay", &spec.UpdateConfig.Delay)
mergeNetworks(flags, &spec.Networks)
if flags.Changed("endpoint-mode") {
value, _ := flags.GetString("endpoint-mode")
spec.EndpointSpec.Mode = swarm.ResolutionMode(value)
}
mergePorts(flags, &spec.EndpointSpec.Ports)
return nil
}
func mergeLabels(flags *pflag.FlagSet, field *map[string]string) {
if !flags.Changed("label") {
return
}
if *field == nil {
*field = make(map[string]string)
}
values := flags.Lookup("label").Value.(*opts.ListOpts).GetAll()
for key, value := range runconfigopts.ConvertKVStringsToMap(values) {
(*field)[key] = value
}
}
// TODO: should this override by destination path, or does swarm handle that?
func mergeMounts(flags *pflag.FlagSet, mounts *[]swarm.Mount) {
if !flags.Changed("mount") {
return
}
values := flags.Lookup("mount").Value.(*MountOpt).Value()
*mounts = append(*mounts, values...)
}
// TODO: should this override by name, or does swarm handle that?
func mergePorts(flags *pflag.FlagSet, portConfig *[]swarm.PortConfig) {
if !flags.Changed("ports") {
return
}
values := flags.Lookup("ports").Value.(*opts.ListOpts).GetAll()
ports, portBindings, _ := nat.ParsePortSpecs(values)
for port := range ports {
*portConfig = append(*portConfig, convertPortToPortConfig(port, portBindings)...)
}
}
func mergeNetworks(flags *pflag.FlagSet, attachments *[]swarm.NetworkAttachmentConfig) {
if !flags.Changed("network") {
return
}
networks, _ := flags.GetStringSlice("network")
for _, network := range networks {
*attachments = append(*attachments, swarm.NetworkAttachmentConfig{Target: network})
}
}
func mergeMode(flags *pflag.FlagSet, serviceMode *swarm.ServiceMode) error {
if !flags.Changed("mode") && !flags.Changed("scale") {
return nil
}
var mode string
if flags.Changed("mode") {
mode, _ = flags.GetString("mode")
}
if !(mode == "replicated" || serviceMode.Replicated != nil) && flags.Changed("replicas") {
return fmt.Errorf("replicas can only be used with replicated mode")
}
if mode == "global" {
serviceMode.Replicated = nil
serviceMode.Global = &swarm.GlobalService{}
return nil
}
if flags.Changed("replicas") {
replicas := flags.Lookup("replicas").Value.(*Uint64Opt).Value()
serviceMode.Replicated = &swarm.ReplicatedService{Replicas: replicas}
serviceMode.Global = nil
return nil
}
if mode == "replicated" {
if serviceMode.Replicated != nil {
return nil
}
serviceMode.Replicated = &swarm.ReplicatedService{Replicas: &DefaultReplicas}
serviceMode.Global = nil
}
return nil
}

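mergeMode encodes the mode/replicas interplay on update: --mode global clears any replicated configuration, --replicas forces replicated mode, and switching to replicated without a count falls back to DefaultReplicas. A test-style sketch of the global case (the flag wiring below mirrors addServiceFlags):

package service // sketch: illustrative only

import (
	"testing"

	"github.com/docker/engine-api/types/swarm"
	"github.com/spf13/pflag"
)

func TestMergeModeSketch(t *testing.T) {
	var replicas Uint64Opt
	flags := pflag.NewFlagSet("sketch", pflag.ContinueOnError)
	flags.String("mode", "replicated", "")
	flags.Var(&replicas, "replicas", "")
	if err := flags.Parse([]string{"--mode=global"}); err != nil {
		t.Fatal(err)
	}
	mode := swarm.ServiceMode{Replicated: &swarm.ReplicatedService{}}
	if err := mergeMode(flags, &mode); err != nil {
		t.Fatal(err)
	}
	if mode.Global == nil || mode.Replicated != nil {
		t.Fatalf("expected global mode, got %+v", mode)
	}
}
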
api/client/swarm/cmd.go (new file, 30 lines)

@@ -0,0 +1,30 @@
package swarm
import (
"fmt"
"github.com/spf13/cobra"
"github.com/docker/docker/api/client"
"github.com/docker/docker/cli"
)
// NewSwarmCommand returns a cobra command for `swarm` subcommands
func NewSwarmCommand(dockerCli *client.DockerCli) *cobra.Command {
cmd := &cobra.Command{
Use: "swarm",
Short: "Manage docker swarm",
Args: cli.NoArgs,
Run: func(cmd *cobra.Command, args []string) {
fmt.Fprintf(dockerCli.Err(), "\n"+cmd.UsageString())
},
}
cmd.AddCommand(
newInitCommand(dockerCli),
newJoinCommand(dockerCli),
newUpdateCommand(dockerCli),
newLeaveCommand(dockerCli),
newInspectCommand(dockerCli),
)
return cmd
}

api/client/swarm/init.go (new file, 61 lines)

@ -0,0 +1,61 @@
package swarm
import (
"fmt"
"golang.org/x/net/context"
"github.com/docker/docker/api/client"
"github.com/docker/docker/cli"
"github.com/docker/engine-api/types/swarm"
"github.com/spf13/cobra"
)
type initOptions struct {
listenAddr NodeAddrOption
autoAccept AutoAcceptOption
forceNewCluster bool
secret string
}
func newInitCommand(dockerCli *client.DockerCli) *cobra.Command {
opts := initOptions{
listenAddr: NewNodeAddrOption(),
autoAccept: NewAutoAcceptOption(),
}
cmd := &cobra.Command{
Use: "init",
Short: "Initialize a Swarm.",
Args: cli.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
return runInit(dockerCli, opts)
},
}
flags := cmd.Flags()
flags.Var(&opts.listenAddr, "listen-addr", "Listen address")
flags.Var(&opts.autoAccept, "auto-accept", "Auto acceptance policy (worker, manager, or none)")
flags.StringVar(&opts.secret, "secret", "", "Set secret value needed to accept nodes into cluster")
flags.BoolVar(&opts.forceNewCluster, "force-new-cluster", false, "Force create a new cluster from current state.")
return cmd
}
func runInit(dockerCli *client.DockerCli, opts initOptions) error {
client := dockerCli.Client()
ctx := context.Background()
req := swarm.InitRequest{
ListenAddr: opts.listenAddr.String(),
ForceNewCluster: opts.forceNewCluster,
}
req.Spec.AcceptancePolicy.Policies = opts.autoAccept.Policies(opts.secret)
nodeID, err := client.SwarmInit(ctx, req)
if err != nil {
return err
}
fmt.Printf("Swarm initialized: current node (%s) is now a manager.\n", nodeID)
return nil
}

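swarm init maps directly onto the new SwarmInit API call, which returns the node ID of the fresh manager. A minimal programmatic equivalent (the listen address is an example; the command additionally always fills in the acceptance policy from --auto-accept):

package main // sketch

import (
	"fmt"

	"golang.org/x/net/context"

	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types/swarm"
)

func main() {
	apiClient, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	nodeID, err := apiClient.SwarmInit(context.Background(), swarm.InitRequest{
		ListenAddr: "0.0.0.0:2377", // example address
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("Swarm initialized: current node (%s) is now a manager.\n", nodeID)
}
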
@@ -0,0 +1,56 @@
package swarm
import (
"golang.org/x/net/context"
"github.com/docker/docker/api/client"
"github.com/docker/docker/api/client/inspect"
"github.com/docker/docker/cli"
"github.com/spf13/cobra"
)
type inspectOptions struct {
format string
// pretty bool
}
func newInspectCommand(dockerCli *client.DockerCli) *cobra.Command {
var opts inspectOptions
cmd := &cobra.Command{
Use: "inspect [OPTIONS]",
Short: "Inspect the Swarm",
Args: cli.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
// if opts.pretty && len(opts.format) > 0 {
// return fmt.Errorf("--format is incompatible with human friendly format")
// }
return runInspect(dockerCli, opts)
},
}
flags := cmd.Flags()
flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given go template")
//flags.BoolVarP(&opts.pretty, "pretty", "h", false, "Print the information in a human friendly format.")
return cmd
}
func runInspect(dockerCli *client.DockerCli, opts inspectOptions) error {
client := dockerCli.Client()
ctx := context.Background()
swarm, err := client.SwarmInspect(ctx)
if err != nil {
return err
}
getRef := func(_ string) (interface{}, []byte, error) {
return swarm, nil, nil
}
// if !opts.pretty {
return inspect.Inspect(dockerCli.Out(), []string{""}, opts.format, getRef)
// }
//return printHumanFriendly(dockerCli.Out(), opts.refs, getRef)
}

65
api/client/swarm/join.go Normal file
View File

@ -0,0 +1,65 @@
package swarm
import (
"fmt"
"github.com/docker/docker/api/client"
"github.com/docker/docker/cli"
"github.com/docker/engine-api/types/swarm"
"github.com/spf13/cobra"
"golang.org/x/net/context"
)
type joinOptions struct {
remote string
listenAddr NodeAddrOption
manager bool
secret string
CACertHash string
}
func newJoinCommand(dockerCli *client.DockerCli) *cobra.Command {
opts := joinOptions{
listenAddr: NodeAddrOption{addr: defaultListenAddr},
}
cmd := &cobra.Command{
Use: "join [OPTIONS] HOST:PORT",
Short: "Join a Swarm as a node and/or manager.",
Args: cli.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
opts.remote = args[0]
return runJoin(dockerCli, opts)
},
}
flags := cmd.Flags()
flags.Var(&opts.listenAddr, "listen-addr", "Listen address")
flags.BoolVar(&opts.manager, "manager", false, "Try joining as a manager.")
flags.StringVar(&opts.secret, "secret", "", "Secret for node acceptance")
flags.StringVar(&opts.CACertHash, "ca-hash", "", "Hash of the Root Certificate Authority certificate used for trusted join")
return cmd
}
func runJoin(dockerCli *client.DockerCli, opts joinOptions) error {
client := dockerCli.Client()
ctx := context.Background()
req := swarm.JoinRequest{
Manager: opts.manager,
Secret: opts.secret,
ListenAddr: opts.listenAddr.String(),
RemoteAddrs: []string{opts.remote},
CACertHash: opts.CACertHash,
}
err := client.SwarmJoin(ctx, req)
if err != nil {
return err
}
if opts.manager {
fmt.Fprintln(dockerCli.Out(), "This node joined a Swarm as a manager.")
} else {
fmt.Fprintln(dockerCli.Out(), "This node joined a Swarm as a worker.")
}
return nil
}

44
api/client/swarm/leave.go Normal file
View File

@ -0,0 +1,44 @@
package swarm
import (
"fmt"
"golang.org/x/net/context"
"github.com/docker/docker/api/client"
"github.com/docker/docker/cli"
"github.com/spf13/cobra"
)
type leaveOptions struct {
force bool
}
func newLeaveCommand(dockerCli *client.DockerCli) *cobra.Command {
opts := leaveOptions{}
cmd := &cobra.Command{
Use: "leave",
Short: "Leave a Swarm.",
Args: cli.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
return runLeave(dockerCli, opts)
},
}
flags := cmd.Flags()
flags.BoolVar(&opts.force, "force", false, "Force leave ignoring warnings.")
return cmd
}
func runLeave(dockerCli *client.DockerCli, opts leaveOptions) error {
client := dockerCli.Client()
ctx := context.Background()
if err := client.SwarmLeave(ctx, opts.force); err != nil {
return err
}
fmt.Fprintln(dockerCli.Out(), "Node left the default swarm.")
return nil
}

120
api/client/swarm/opts.go Normal file
View File

@ -0,0 +1,120 @@
package swarm
import (
"fmt"
"strings"
"github.com/docker/engine-api/types/swarm"
)
const (
defaultListenAddr = "0.0.0.0:2377"
// WORKER is the role name for worker nodes
WORKER = "WORKER"
// MANAGER is the role name for manager nodes
MANAGER = "MANAGER"
)
var (
defaultPolicies = []swarm.Policy{
{Role: WORKER, Autoaccept: true},
{Role: MANAGER, Autoaccept: false},
}
)
// NodeAddrOption is a pflag.Value for listen and remote addresses
type NodeAddrOption struct {
addr string
}
// String returns the string representation of this flag
func (a *NodeAddrOption) String() string {
return a.addr
}
// Set the value for this flag
func (a *NodeAddrOption) Set(value string) error {
if !strings.Contains(value, ":") {
return fmt.Errorf("Invalud url, a host and port are required")
}
parts := strings.Split(value, ":")
if len(parts) != 2 {
return fmt.Errorf("Invalud url, too many colons")
}
a.addr = value
return nil
}
// Type returns the type of this flag
func (a *NodeAddrOption) Type() string {
return "node-addr"
}
// NewNodeAddrOption returns a new node address option
func NewNodeAddrOption() NodeAddrOption {
return NodeAddrOption{addr: defaultListenAddr}
}
// AutoAcceptOption is a value type for auto-accept policy
type AutoAcceptOption struct {
values map[string]bool
}
// String returns a string representation of this option
func (o *AutoAcceptOption) String() string {
keys := []string{}
for key := range o.values {
keys = append(keys, key)
}
return strings.Join(keys, " ")
}
// Set sets a new value on this option
func (o *AutoAcceptOption) Set(value string) error {
value = strings.ToUpper(value)
switch value {
case "", "NONE":
if accept, ok := o.values[WORKER]; ok && accept {
return fmt.Errorf("value NONE is incompatible with %s", WORKER)
}
if accept, ok := o.values[MANAGER]; ok && accept {
return fmt.Errorf("value NONE is incompatible with %s", MANAGER)
}
o.values[WORKER] = false
o.values[MANAGER] = false
case WORKER, MANAGER:
if accept, ok := o.values[value]; ok && !accept {
return fmt.Errorf("value NONE is incompatible with %s", value)
}
o.values[value] = true
default:
return fmt.Errorf("must be one of %s, %s, NONE", WORKER, MANAGER)
}
return nil
}
// Type returns the type of this option
func (o *AutoAcceptOption) Type() string {
return "auto-accept"
}
// Policies returns a representation of this option for the API
func (o *AutoAcceptOption) Policies(secret string) []swarm.Policy {
policies := []swarm.Policy{}
for _, p := range defaultPolicies {
if len(o.values) != 0 {
p.Autoaccept = o.values[string(p.Role)]
}
p.Secret = secret
policies = append(policies, p)
}
return policies
}
// NewAutoAcceptOption returns a new auto-accept option
func NewAutoAcceptOption() AutoAcceptOption {
return AutoAcceptOption{values: make(map[string]bool)}
}
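
As a quick illustration of the Set/Policies semantics above, a hedged sketch placed in the same package (fmt is already imported; the secret and the function itself are made up):

	// exampleAutoAccept is illustrative only and not part of the diff.
	func exampleAutoAccept() {
		opt := NewAutoAcceptOption()
		_ = opt.Set("worker") // WORKER becomes auto-accepted
		if err := opt.Set("none"); err != nil {
			fmt.Println(err) // NONE conflicts with the WORKER set above
		}
		for _, p := range opt.Policies("s3cret") {
			// prints role=WORKER autoaccept=true, then role=MANAGER autoaccept=false
			fmt.Printf("role=%s autoaccept=%v\n", p.Role, p.Autoaccept)
		}
	}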

View File

@ -0,0 +1,93 @@
package swarm
import (
"fmt"
"golang.org/x/net/context"
"github.com/docker/docker/api/client"
"github.com/docker/docker/cli"
"github.com/docker/engine-api/types/swarm"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
type updateOptions struct {
autoAccept AutoAcceptOption
secret string
taskHistoryLimit int64
heartbeatPeriod uint64
}
func newUpdateCommand(dockerCli *client.DockerCli) *cobra.Command {
opts := updateOptions{autoAccept: NewAutoAcceptOption()}
var flags *pflag.FlagSet
cmd := &cobra.Command{
Use: "update",
Short: "update the Swarm.",
Args: cli.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
return runUpdate(dockerCli, flags, opts)
},
}
flags = cmd.Flags()
flags.Var(&opts.autoAccept, "auto-accept", "Auto acceptance policy (worker, manager or none)")
flags.StringVar(&opts.secret, "secret", "", "Set secret value needed to accept nodes into cluster")
flags.Int64Var(&opts.taskHistoryLimit, "task-history-limit", 10, "Task history retention limit")
flags.Uint64Var(&opts.heartbeatPeriod, "dispatcher-heartbeat-period", 5000000000, "Dispatcher heartbeat period")
return cmd
}
func runUpdate(dockerCli *client.DockerCli, flags *pflag.FlagSet, opts updateOptions) error {
client := dockerCli.Client()
ctx := context.Background()
swarm, err := client.SwarmInspect(ctx)
if err != nil {
return err
}
err = mergeSwarm(&swarm, flags)
if err != nil {
return err
}
err = client.SwarmUpdate(ctx, swarm.Version, swarm.Spec)
if err != nil {
return err
}
fmt.Println("Swarm updated.")
return nil
}
func mergeSwarm(swarm *swarm.Swarm, flags *pflag.FlagSet) error {
spec := &swarm.Spec
if flags.Changed("auto-accept") {
value := flags.Lookup("auto-accept").Value.(*AutoAcceptOption)
if len(spec.AcceptancePolicy.Policies) > 0 {
spec.AcceptancePolicy.Policies = value.Policies(spec.AcceptancePolicy.Policies[0].Secret)
} else {
spec.AcceptancePolicy.Policies = value.Policies("")
}
}
if flags.Changed("secret") {
secret, _ := flags.GetString("secret")
for i := range spec.AcceptancePolicy.Policies {
// assign through the index: ranging by value would update a copy and silently drop the secret
spec.AcceptancePolicy.Policies[i].Secret = secret
}
}
if flags.Changed("task-history-limit") {
spec.Orchestration.TaskHistoryRetentionLimit, _ = flags.GetInt64("task-history-limit")
}
if flags.Changed("dispatcher-heartbeat-period") {
spec.Dispatcher.HeartbeatPeriod, _ = flags.GetUint64("dispatcher-heartbeat-period")
}
return nil
}

20
api/client/tag.go Normal file
View File

@ -0,0 +1,20 @@
package client
import (
"golang.org/x/net/context"
Cli "github.com/docker/docker/cli"
flag "github.com/docker/docker/pkg/mflag"
)
// CmdTag tags an image into a repository.
//
// Usage: docker tag [OPTIONS] IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG]
func (cli *DockerCli) CmdTag(args ...string) error {
cmd := Cli.Subcmd("tag", []string{"IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG]"}, Cli.DockerCommands["tag"].Description, true)
cmd.Require(flag.Exact, 2)
cmd.ParseFlags(args, true)
return cli.client.ImageTag(context.Background(), cmd.Arg(0), cmd.Arg(1))
}

79
api/client/task/print.go Normal file
View File

@ -0,0 +1,79 @@
package task
import (
"fmt"
"sort"
"strings"
"text/tabwriter"
"time"
"golang.org/x/net/context"
"github.com/docker/docker/api/client"
"github.com/docker/docker/api/client/idresolver"
"github.com/docker/engine-api/types/swarm"
"github.com/docker/go-units"
)
const (
psTaskItemFmt = "%s\t%s\t%s\t%s\t%s %s\t%s\t%s\n"
)
type tasksBySlot []swarm.Task
func (t tasksBySlot) Len() int {
return len(t)
}
func (t tasksBySlot) Swap(i, j int) {
t[i], t[j] = t[j], t[i]
}
func (t tasksBySlot) Less(i, j int) bool {
// Sort by slot.
if t[i].Slot != t[j].Slot {
return t[i].Slot < t[j].Slot
}
// If same slot, sort by most recent.
return t[j].Meta.CreatedAt.Before(t[i].CreatedAt)
}
// Print prints task information in a table format
func Print(dockerCli *client.DockerCli, ctx context.Context, tasks []swarm.Task, resolver *idresolver.IDResolver) error {
sort.Stable(tasksBySlot(tasks))
writer := tabwriter.NewWriter(dockerCli.Out(), 0, 4, 2, ' ', 0)
// Ignore flushing errors
defer writer.Flush()
fmt.Fprintln(writer, strings.Join([]string{"ID", "NAME", "SERVICE", "IMAGE", "LAST STATE", "DESIRED STATE", "NODE"}, "\t"))
for _, task := range tasks {
serviceValue, err := resolver.Resolve(ctx, swarm.Service{}, task.ServiceID)
if err != nil {
return err
}
nodeValue, err := resolver.Resolve(ctx, swarm.Node{}, task.NodeID)
if err != nil {
return err
}
name := serviceValue
if task.Slot > 0 {
name = fmt.Sprintf("%s.%d", name, task.Slot)
}
fmt.Fprintf(
writer,
psTaskItemFmt,
task.ID,
name,
serviceValue,
task.Spec.ContainerSpec.Image,
client.PrettyPrint(task.Status.State),
units.HumanDuration(time.Since(task.Status.Timestamp)),
client.PrettyPrint(task.DesiredState),
nodeValue,
)
}
return nil
}
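
To make the ordering concrete, a small hypothetical sketch in the same package (sort, time, and the swarm types are already imported; the IDs and timestamps are invented):

	// exampleOrdering shows slots ascending and, within a slot, newest first.
	func exampleOrdering() []swarm.Task {
		now := time.Now()
		tasks := []swarm.Task{
			{ID: "web.2", Slot: 2},
			{ID: "web.1-old", Slot: 1, Meta: swarm.Meta{CreatedAt: now.Add(-time.Minute)}},
			{ID: "web.1-new", Slot: 1, Meta: swarm.Meta{CreatedAt: now}},
		}
		sort.Stable(tasksBySlot(tasks))
		return tasks // order: web.1-new, web.1-old, web.2
	}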

View File

@ -8,6 +8,7 @@ import (
gosignal "os/signal"
"path/filepath"
"runtime"
"strings"
"time"
"golang.org/x/net/context"
@ -163,3 +164,27 @@ func (cli *DockerCli) ForwardAllSignals(ctx context.Context, cid string) chan os
}()
return sigc
}
// capitalizeFirst capitalizes the first character of a string and lowercases the rest.
func capitalizeFirst(s string) string {
switch len(s) {
case 0:
return s
case 1:
return strings.ToUpper(s)
default:
return strings.ToUpper(string(s[0])) + strings.ToLower(s[1:])
}
}
// PrettyPrint formats an arbitrary value for human-readable output by capitalizing its first letter.
func PrettyPrint(i interface{}) string {
switch t := i.(type) {
case nil:
return "None"
case string:
return capitalizeFirst(t)
default:
return capitalizeFirst(fmt.Sprintf("%s", t))
}
}
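
A couple of hypothetical calls, for illustration (same package; fmt is already imported):

	// examplePrettyPrint is illustrative only.
	func examplePrettyPrint() {
		fmt.Println(PrettyPrint(nil))       // None
		fmt.Println(PrettyPrint("running")) // Running
		fmt.Println(PrettyPrint("RUNNING")) // Running
	}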

View File

@ -8,6 +8,7 @@ import (
"github.com/docker/engine-api/types"
"github.com/docker/engine-api/types/versions"
"github.com/gorilla/mux"
"google.golang.org/grpc"
)
// httpStatusError is an interface
@ -58,6 +59,7 @@ func GetHTTPErrorStatusCode(err error) int {
"wrong login/password": http.StatusUnauthorized,
"unauthorized": http.StatusUnauthorized,
"hasn't been activated": http.StatusForbidden,
"this node": http.StatusNotAcceptable,
} {
if strings.Contains(errStr, keyword) {
statusCode = status
@ -85,7 +87,7 @@ func MakeErrorHandler(err error) http.HandlerFunc {
}
WriteJSON(w, statusCode, response)
} else {
http.Error(w, err.Error(), statusCode)
http.Error(w, grpc.ErrorDesc(err), statusCode)
}
}
}

View File

@ -2,7 +2,6 @@ package network
import (
"github.com/docker/engine-api/types"
"github.com/docker/engine-api/types/filters"
"github.com/docker/engine-api/types/network"
"github.com/docker/libnetwork"
)
@ -13,7 +12,7 @@ type Backend interface {
FindNetwork(idName string) (libnetwork.Network, error)
GetNetworkByName(idName string) (libnetwork.Network, error)
GetNetworksByID(partialID string) []libnetwork.Network
FilterNetworks(netFilters filters.Args) ([]libnetwork.Network, error)
GetNetworks() []libnetwork.Network
CreateNetwork(nc types.NetworkCreateRequest) (*types.NetworkCreateResponse, error)
ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error
DisconnectContainerFromNetwork(containerName string, network libnetwork.Network, force bool) error

View File

@ -0,0 +1,98 @@
package network
import (
"fmt"
"github.com/docker/docker/runconfig"
"github.com/docker/engine-api/types"
"github.com/docker/engine-api/types/filters"
)
type filterHandler func([]types.NetworkResource, string) ([]types.NetworkResource, error)
var (
// AcceptedFilters is the set of filter keys accepted for validation
AcceptedFilters = map[string]bool{
"driver": true,
"type": true,
"name": true,
"id": true,
"label": true,
}
)
func filterNetworkByType(nws []types.NetworkResource, netType string) (retNws []types.NetworkResource, err error) {
switch netType {
case "builtin":
for _, nw := range nws {
if runconfig.IsPreDefinedNetwork(nw.Name) {
retNws = append(retNws, nw)
}
}
case "custom":
for _, nw := range nws {
if !runconfig.IsPreDefinedNetwork(nw.Name) {
retNws = append(retNws, nw)
}
}
default:
return nil, fmt.Errorf("Invalid filter: 'type'='%s'", netType)
}
return retNws, nil
}
// filterNetworks filters the network list according to the user-specified filter
// and returns the matching networks
func filterNetworks(nws []types.NetworkResource, filter filters.Args) ([]types.NetworkResource, error) {
// if filter is empty, return original network list
if filter.Len() == 0 {
return nws, nil
}
if err := filter.Validate(AcceptedFilters); err != nil {
return nil, err
}
var displayNet []types.NetworkResource
for _, nw := range nws {
if filter.Include("driver") {
if !filter.ExactMatch("driver", nw.Driver) {
continue
}
}
if filter.Include("name") {
if !filter.Match("name", nw.Name) {
continue
}
}
if filter.Include("id") {
if !filter.Match("id", nw.ID) {
continue
}
}
if filter.Include("label") {
if !filter.MatchKVList("label", nw.Labels) {
continue
}
}
displayNet = append(displayNet, nw)
}
if filter.Include("type") {
var typeNet []types.NetworkResource
errFilter := filter.WalkValues("type", func(fval string) error {
passList, err := filterNetworkByType(displayNet, fval)
if err != nil {
return err
}
typeNet = append(typeNet, passList...)
return nil
})
if errFilter != nil {
return nil, errFilter
}
displayNet = typeNet
}
return displayNet, nil
}
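
For example, a hedged sketch of driving filterNetworks with the engine-api filters package (same package, which already imports types and filters; the networks and the helper are invented):

	// exampleFilter keeps only user-defined overlay networks.
	func exampleFilter() ([]types.NetworkResource, error) {
		args := filters.NewArgs()
		args.Add("driver", "overlay")
		args.Add("type", "custom")
		nws := []types.NetworkResource{
			{Name: "bridge", Driver: "bridge"},   // builtin, dropped by both filters
			{Name: "app-net", Driver: "overlay"}, // custom overlay, kept
		}
		return filterNetworks(nws, args)
	}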

View File

@ -1,17 +1,22 @@
package network
import "github.com/docker/docker/api/server/router"
import (
"github.com/docker/docker/api/server/router"
"github.com/docker/docker/daemon/cluster"
)
// networkRouter is a router to talk with the network controller
type networkRouter struct {
backend Backend
routes []router.Route
backend Backend
clusterProvider *cluster.Cluster
routes []router.Route
}
// NewRouter initializes a new network router
func NewRouter(b Backend) router.Router {
func NewRouter(b Backend, c *cluster.Cluster) router.Router {
r := &networkRouter{
backend: b,
backend: b,
clusterProvider: c,
}
r.initRoutes()
return r

View File

@ -24,17 +24,30 @@ func (n *networkRouter) getNetworksList(ctx context.Context, w http.ResponseWrit
return err
}
list := []*types.NetworkResource{}
list := []types.NetworkResource{}
nwList, err := n.backend.FilterNetworks(netFilters)
if nr, err := n.clusterProvider.GetNetworks(); err == nil {
for _, nw := range nr {
list = append(list, nw)
}
}
// Combine the network list returned by the Docker daemon if it is not
// already returned by the cluster manager
SKIP:
for _, nw := range n.backend.GetNetworks() {
for _, nl := range list {
if nl.ID == nw.ID() {
continue SKIP
}
}
list = append(list, *n.buildNetworkResource(nw))
}
list, err = filterNetworks(list, netFilters)
if err != nil {
return err
}
for _, nw := range nwList {
list = append(list, buildNetworkResource(nw))
}
return httputils.WriteJSON(w, http.StatusOK, list)
}
@ -45,9 +58,12 @@ func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r
nw, err := n.backend.FindNetwork(vars["id"])
if err != nil {
if nr, err := n.clusterProvider.GetNetwork(vars["id"]); err == nil {
return httputils.WriteJSON(w, http.StatusOK, nr)
}
return err
}
return httputils.WriteJSON(w, http.StatusOK, buildNetworkResource(nw))
return httputils.WriteJSON(w, http.StatusOK, n.buildNetworkResource(nw))
}
func (n *networkRouter) postNetworkCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
@ -67,7 +83,14 @@ func (n *networkRouter) postNetworkCreate(ctx context.Context, w http.ResponseWr
nw, err := n.backend.CreateNetwork(create)
if err != nil {
return err
if _, ok := err.(libnetwork.ManagerRedirectError); !ok {
return err
}
id, err := n.clusterProvider.CreateNetwork(create)
if err != nil {
return err
}
nw = &types.NetworkCreateResponse{ID: id}
}
return httputils.WriteJSON(w, http.StatusCreated, nw)
@ -121,6 +144,9 @@ func (n *networkRouter) deleteNetwork(ctx context.Context, w http.ResponseWriter
if err := httputils.ParseForm(r); err != nil {
return err
}
if _, err := n.clusterProvider.GetNetwork(vars["id"]); err == nil {
return n.clusterProvider.RemoveNetwork(vars["id"])
}
if err := n.backend.DeleteNetwork(vars["id"]); err != nil {
return err
}
@ -128,7 +154,7 @@ func (n *networkRouter) deleteNetwork(ctx context.Context, w http.ResponseWriter
return nil
}
func buildNetworkResource(nw libnetwork.Network) *types.NetworkResource {
func (n *networkRouter) buildNetworkResource(nw libnetwork.Network) *types.NetworkResource {
r := &types.NetworkResource{}
if nw == nil {
return r
@ -138,6 +164,13 @@ func buildNetworkResource(nw libnetwork.Network) *types.NetworkResource {
r.Name = nw.Name()
r.ID = nw.ID()
r.Scope = info.Scope()
if n.clusterProvider.IsManager() {
if _, err := n.clusterProvider.GetNetwork(nw.Name()); err == nil {
r.Scope = "swarm"
}
} else if info.Dynamic() {
r.Scope = "swarm"
}
r.Driver = nw.Type()
r.EnableIPv6 = info.IPv6Enabled()
r.Internal = info.Internal()

View File

@ -0,0 +1,26 @@
package swarm
import (
basictypes "github.com/docker/engine-api/types"
types "github.com/docker/engine-api/types/swarm"
)
// Backend abstracts a swarm manager.
type Backend interface {
Init(req types.InitRequest) (string, error)
Join(req types.JoinRequest) error
Leave(force bool) error
Inspect() (types.Swarm, error)
Update(uint64, types.Spec) error
GetServices(basictypes.ServiceListOptions) ([]types.Service, error)
GetService(string) (types.Service, error)
CreateService(types.ServiceSpec) (string, error)
UpdateService(string, uint64, types.ServiceSpec) error
RemoveService(string) error
GetNodes(basictypes.NodeListOptions) ([]types.Node, error)
GetNode(string) (types.Node, error)
UpdateNode(string, uint64, types.NodeSpec) error
RemoveNode(string) error
GetTasks(basictypes.TaskListOptions) ([]types.Task, error)
GetTask(string) (types.Task, error)
}

View File

@ -0,0 +1,44 @@
package swarm
import "github.com/docker/docker/api/server/router"
// swarmRouter is a router to talk with the swarm controller
type swarmRouter struct {
backend Backend
routes []router.Route
}
// NewRouter initializes a new swarm router
func NewRouter(b Backend) router.Router {
r := &swarmRouter{
backend: b,
}
r.initRoutes()
return r
}
// Routes returns the available routes to the swarm controller
func (sr *swarmRouter) Routes() []router.Route {
return sr.routes
}
func (sr *swarmRouter) initRoutes() {
sr.routes = []router.Route{
router.NewPostRoute("/swarm/init", sr.initCluster),
router.NewPostRoute("/swarm/join", sr.joinCluster),
router.NewPostRoute("/swarm/leave", sr.leaveCluster),
router.NewGetRoute("/swarm", sr.inspectCluster),
router.NewPostRoute("/swarm/update", sr.updateCluster),
router.NewGetRoute("/services", sr.getServices),
router.NewGetRoute("/services/{id:.*}", sr.getService),
router.NewPostRoute("/services/create", sr.createService),
router.NewPostRoute("/services/{id:.*}/update", sr.updateService),
router.NewDeleteRoute("/services/{id:.*}", sr.removeService),
router.NewGetRoute("/nodes", sr.getNodes),
router.NewGetRoute("/nodes/{id:.*}", sr.getNode),
router.NewDeleteRoute("/nodes/{id:.*}", sr.removeNode),
router.NewPostRoute("/nodes/{id:.*}/update", sr.updateNode),
router.NewGetRoute("/tasks", sr.getTasks),
router.NewGetRoute("/tasks/{id:.*}", sr.getTask),
}
}
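
For orientation, a hedged sketch of exercising two of these routes through the engine-api client; the client construction is elided, extra imports of golang.org/x/net/context and github.com/docker/engine-api/client are assumed, and the method names follow the client calls used elsewhere in this diff:

	// sketchSwarmRoutes round-trips GET /swarm and POST /swarm/update.
	func sketchSwarmRoutes(ctx context.Context, cli client.APIClient) error {
		sw, err := cli.SwarmInspect(ctx) // GET /swarm
		if err != nil {
			return err
		}
		return cli.SwarmUpdate(ctx, sw.Version, sw.Spec) // POST /swarm/update
	}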

View File

@ -0,0 +1,229 @@
package swarm
import (
"encoding/json"
"fmt"
"net/http"
"strconv"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/api/server/httputils"
basictypes "github.com/docker/engine-api/types"
"github.com/docker/engine-api/types/filters"
types "github.com/docker/engine-api/types/swarm"
"golang.org/x/net/context"
)
func (sr *swarmRouter) initCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var req types.InitRequest
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
return err
}
nodeID, err := sr.backend.Init(req)
if err != nil {
logrus.Errorf("Error initializing swarm: %v", err)
return err
}
return httputils.WriteJSON(w, http.StatusOK, nodeID)
}
func (sr *swarmRouter) joinCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var req types.JoinRequest
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
return err
}
return sr.backend.Join(req)
}
func (sr *swarmRouter) leaveCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
force := httputils.BoolValue(r, "force")
return sr.backend.Leave(force)
}
func (sr *swarmRouter) inspectCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
swarm, err := sr.backend.Inspect()
if err != nil {
logrus.Errorf("Error getting swarm: %v", err)
return err
}
return httputils.WriteJSON(w, http.StatusOK, swarm)
}
func (sr *swarmRouter) updateCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var swarm types.Spec
if err := json.NewDecoder(r.Body).Decode(&swarm); err != nil {
return err
}
rawVersion := r.URL.Query().Get("version")
version, err := strconv.ParseUint(rawVersion, 10, 64)
if err != nil {
return fmt.Errorf("Invalid swarm version '%s': %s", rawVersion, err.Error())
}
if err := sr.backend.Update(version, swarm); err != nil {
logrus.Errorf("Error configuring swarm: %v", err)
return err
}
return nil
}
func (sr *swarmRouter) getServices(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
filter, err := filters.FromParam(r.Form.Get("filters"))
if err != nil {
return err
}
services, err := sr.backend.GetServices(basictypes.ServiceListOptions{Filter: filter})
if err != nil {
logrus.Errorf("Error getting services: %v", err)
return err
}
return httputils.WriteJSON(w, http.StatusOK, services)
}
func (sr *swarmRouter) getService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
service, err := sr.backend.GetService(vars["id"])
if err != nil {
logrus.Errorf("Error getting service %s: %v", vars["id"], err)
return err
}
return httputils.WriteJSON(w, http.StatusOK, service)
}
func (sr *swarmRouter) createService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var service types.ServiceSpec
if err := json.NewDecoder(r.Body).Decode(&service); err != nil {
return err
}
id, err := sr.backend.CreateService(service)
if err != nil {
logrus.Errorf("Error reating service %s: %v", id, err)
return err
}
return httputils.WriteJSON(w, http.StatusCreated, &basictypes.ServiceCreateResponse{
ID: id,
})
}
func (sr *swarmRouter) updateService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var service types.ServiceSpec
if err := json.NewDecoder(r.Body).Decode(&service); err != nil {
return err
}
rawVersion := r.URL.Query().Get("version")
version, err := strconv.ParseUint(rawVersion, 10, 64)
if err != nil {
return fmt.Errorf("Invalid service version '%s': %s", rawVersion, err.Error())
}
if err := sr.backend.UpdateService(vars["id"], version, service); err != nil {
logrus.Errorf("Error updating service %s: %v", vars["id"], err)
return err
}
return nil
}
func (sr *swarmRouter) removeService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := sr.backend.RemoveService(vars["id"]); err != nil {
logrus.Errorf("Error removing service %s: %v", vars["id"], err)
return err
}
return nil
}
func (sr *swarmRouter) getNodes(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
filter, err := filters.FromParam(r.Form.Get("filters"))
if err != nil {
return err
}
nodes, err := sr.backend.GetNodes(basictypes.NodeListOptions{Filter: filter})
if err != nil {
logrus.Errorf("Error getting nodes: %v", err)
return err
}
return httputils.WriteJSON(w, http.StatusOK, nodes)
}
func (sr *swarmRouter) getNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
node, err := sr.backend.GetNode(vars["id"])
if err != nil {
logrus.Errorf("Error getting node %s: %v", vars["id"], err)
return err
}
return httputils.WriteJSON(w, http.StatusOK, node)
}
func (sr *swarmRouter) updateNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var node types.NodeSpec
if err := json.NewDecoder(r.Body).Decode(&node); err != nil {
return err
}
rawVersion := r.URL.Query().Get("version")
version, err := strconv.ParseUint(rawVersion, 10, 64)
if err != nil {
return fmt.Errorf("Invalid node version '%s': %s", rawVersion, err.Error())
}
if err := sr.backend.UpdateNode(vars["id"], version, node); err != nil {
logrus.Errorf("Error updating node %s: %v", vars["id"], err)
return err
}
return nil
}
func (sr *swarmRouter) removeNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := sr.backend.RemoveNode(vars["id"]); err != nil {
logrus.Errorf("Error removing node %s: %v", vars["id"], err)
return err
}
return nil
}
func (sr *swarmRouter) getTasks(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
filter, err := filters.FromParam(r.Form.Get("filters"))
if err != nil {
return err
}
tasks, err := sr.backend.GetTasks(basictypes.TaskListOptions{Filter: filter})
if err != nil {
logrus.Errorf("Error getting tasks: %v", err)
return err
}
return httputils.WriteJSON(w, http.StatusOK, tasks)
}
func (sr *swarmRouter) getTask(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
task, err := sr.backend.GetTask(vars["id"])
if err != nil {
logrus.Errorf("Error getting task %s: %v", vars["id"], err)
return err
}
return httputils.WriteJSON(w, http.StatusOK, task)
}

View File

@ -1,18 +1,23 @@
package system
import "github.com/docker/docker/api/server/router"
import (
"github.com/docker/docker/api/server/router"
"github.com/docker/docker/daemon/cluster"
)
// systemRouter provides information about the Docker system overall.
// It gathers information about host, daemon and container events.
type systemRouter struct {
backend Backend
routes []router.Route
backend Backend
clusterProvider *cluster.Cluster
routes []router.Route
}
// NewRouter initializes a new system router
func NewRouter(b Backend) router.Router {
func NewRouter(b Backend, c *cluster.Cluster) router.Router {
r := &systemRouter{
backend: b,
backend: b,
clusterProvider: c,
}
r.routes = []router.Route{

View File

@ -33,6 +33,9 @@ func (s *systemRouter) getInfo(ctx context.Context, w http.ResponseWriter, r *ht
if err != nil {
return err
}
if s.clusterProvider != nil {
info.Swarm = s.clusterProvider.Info()
}
return httputils.WriteJSON(w, http.StatusOK, info)
}

View File

@ -5,7 +5,10 @@ import (
"github.com/docker/docker/api/client/container"
"github.com/docker/docker/api/client/image"
"github.com/docker/docker/api/client/network"
"github.com/docker/docker/api/client/node"
"github.com/docker/docker/api/client/registry"
"github.com/docker/docker/api/client/service"
"github.com/docker/docker/api/client/swarm"
"github.com/docker/docker/api/client/system"
"github.com/docker/docker/api/client/volume"
"github.com/docker/docker/cli"
@ -36,6 +39,9 @@ func NewCobraAdaptor(clientFlags *cliflags.ClientFlags) CobraAdaptor {
rootCmd.SetFlagErrorFunc(cli.FlagErrorFunc)
rootCmd.SetOutput(stdout)
rootCmd.AddCommand(
node.NewNodeCommand(dockerCli),
service.NewServiceCommand(dockerCli),
swarm.NewSwarmCommand(dockerCli),
container.NewAttachCommand(dockerCli),
container.NewCommitCommand(dockerCli),
container.NewCreateCommand(dockerCli),

View File

@ -11,7 +11,7 @@ var DockerCommandUsage = []Command{
{"cp", "Copy files/folders between a container and the local filesystem"},
{"exec", "Run a command in a running container"},
{"info", "Display system-wide information"},
{"inspect", "Return low-level information on a container or image"},
{"inspect", "Return low-level information on a container, image or task"},
{"update", "Update configuration of one or more containers"},
}

View File

@ -20,12 +20,14 @@ import (
"github.com/docker/docker/api/server/router/container"
"github.com/docker/docker/api/server/router/image"
"github.com/docker/docker/api/server/router/network"
swarmrouter "github.com/docker/docker/api/server/router/swarm"
systemrouter "github.com/docker/docker/api/server/router/system"
"github.com/docker/docker/api/server/router/volume"
"github.com/docker/docker/builder/dockerfile"
cliflags "github.com/docker/docker/cli/flags"
"github.com/docker/docker/cliconfig"
"github.com/docker/docker/daemon"
"github.com/docker/docker/daemon/cluster"
"github.com/docker/docker/daemon/logger"
"github.com/docker/docker/dockerversion"
"github.com/docker/docker/libcontainerd"
@ -208,6 +210,7 @@ func (cli *DaemonCli) start() (err error) {
}
api := apiserver.New(serverConfig)
cli.api = api
for i := 0; i < len(cli.Config.Hosts); i++ {
var err error
@ -264,6 +267,17 @@ func (cli *DaemonCli) start() (err error) {
return fmt.Errorf("Error starting daemon: %v", err)
}
name, _ := os.Hostname()
c, err := cluster.New(cluster.Config{
Root: cli.Config.Root,
Name: name,
Backend: d,
})
if err != nil {
logrus.Fatalf("Error creating cluster component: %v", err)
}
logrus.Info("Daemon has completed initialization")
logrus.WithFields(logrus.Fields{
@ -273,7 +287,7 @@ func (cli *DaemonCli) start() (err error) {
}).Info("Docker daemon")
cli.initMiddlewares(api, serverConfig)
initRouter(api, d)
initRouter(api, d, c)
cli.d = d
cli.setupConfigReloadTrap()
@ -290,6 +304,7 @@ func (cli *DaemonCli) start() (err error) {
// Daemon is fully initialized and handling API traffic
// Wait for serve API to complete
errAPI := <-serveAPIWait
c.Cleanup()
shutdownDaemon(d, 15)
containerdRemote.Cleanup()
if errAPI != nil {
@ -385,18 +400,19 @@ func loadDaemonCliConfig(config *daemon.Config, flags *flag.FlagSet, commonConfi
return config, nil
}
func initRouter(s *apiserver.Server, d *daemon.Daemon) {
func initRouter(s *apiserver.Server, d *daemon.Daemon, c *cluster.Cluster) {
decoder := runconfig.ContainerDecoder{}
routers := []router.Router{
container.NewRouter(d, decoder),
image.NewRouter(d, decoder),
systemrouter.NewRouter(d),
systemrouter.NewRouter(d, c),
volume.NewRouter(d),
build.NewRouter(dockerfile.NewBuildManager(d)),
swarmrouter.NewRouter(c),
}
if d.NetworkControllerEnabled() {
routers = append(routers, network.NewRouter(d))
routers = append(routers, network.NewRouter(d, c))
}
s.InitRouter(utils.IsDebugEnabled(), routers...)

View File

@ -66,6 +66,7 @@ type CommonContainer struct {
RWLayer layer.RWLayer `json:"-"`
ID string
Created time.Time
Managed bool
Path string
Args []string
Config *containertypes.Config
@ -790,7 +791,7 @@ func (container *Container) BuildCreateEndpointOptions(n libnetwork.Network, epC
ipam := epConfig.IPAMConfig
if ipam != nil && (ipam.IPv4Address != "" || ipam.IPv6Address != "") {
createOptions = append(createOptions,
libnetwork.CreateOptionIpam(net.ParseIP(ipam.IPv4Address), net.ParseIP(ipam.IPv6Address), nil))
libnetwork.CreateOptionIpam(net.ParseIP(ipam.IPv4Address), net.ParseIP(ipam.IPv6Address), nil, nil))
}
for _, alias := range epConfig.Aliases {
@ -798,6 +799,27 @@ func (container *Container) BuildCreateEndpointOptions(n libnetwork.Network, epC
}
}
if container.NetworkSettings.Service != nil {
svcCfg := container.NetworkSettings.Service
var vip string
if svcCfg.VirtualAddresses[n.ID()] != nil {
vip = svcCfg.VirtualAddresses[n.ID()].IPv4
}
var portConfigs []*libnetwork.PortConfig
for _, portConfig := range svcCfg.ExposedPorts {
portConfigs = append(portConfigs, &libnetwork.PortConfig{
Name: portConfig.Name,
Protocol: libnetwork.PortConfig_Protocol(portConfig.Protocol),
TargetPort: portConfig.TargetPort,
PublishedPort: portConfig.PublishedPort,
})
}
createOptions = append(createOptions, libnetwork.CreateOptionService(svcCfg.Name, svcCfg.ID, net.ParseIP(vip), portConfigs))
}
if !containertypes.NetworkMode(n.Name()).IsUserDefined() {
createOptions = append(createOptions, libnetwork.CreateOptionDisableResolution())
}

View File

@ -5,6 +5,8 @@ import (
"sync"
"time"
"golang.org/x/net/context"
"github.com/docker/go-units"
)
@ -139,6 +141,32 @@ func (s *State) WaitStop(timeout time.Duration) (int, error) {
return s.getExitCode(), nil
}
// WaitWithContext waits for the container to stop. The given context can be
// used to cancel the wait; on cancellation the returned channel is closed
// without sending an exit code.
func (s *State) WaitWithContext(ctx context.Context) <-chan int {
// todo(tonistiigi): make other wait functions use this
c := make(chan int)
go func() {
s.Lock()
if !s.Running {
exitCode := s.ExitCode
s.Unlock()
c <- exitCode
close(c)
return
}
waitChan := s.waitChan
s.Unlock()
select {
case <-waitChan:
c <- s.getExitCode()
case <-ctx.Done():
}
close(c)
}()
return c
}
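
A hedged usage sketch (imports for fmt and time are assumed alongside the x/net context package already in this file): because cancellation closes the channel without sending, the ok flag distinguishes a real exit code from a canceled wait.

	// waitExample blocks until the container exits or ten seconds pass.
	func waitExample(ctx context.Context, s *State) {
		ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
		defer cancel()
		if code, ok := <-s.WaitWithContext(ctx); ok {
			fmt.Println("container exited with code", code)
		} else {
			fmt.Println("wait canceled:", ctx.Err())
		}
	}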
// IsRunning returns whether the running flag is set. Used by Container to check whether a container is running.
func (s *State) IsRunning() bool {
s.Lock()

1056
daemon/cluster/cluster.go Normal file

File diff suppressed because it is too large

View File

@ -0,0 +1,116 @@
package convert
import (
"fmt"
"strings"
types "github.com/docker/engine-api/types/swarm"
swarmapi "github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/protobuf/ptypes"
)
func containerSpecFromGRPC(c *swarmapi.ContainerSpec) types.ContainerSpec {
containerSpec := types.ContainerSpec{
Image: c.Image,
Labels: c.Labels,
Command: c.Command,
Args: c.Args,
Env: c.Env,
Dir: c.Dir,
User: c.User,
}
// Mounts
for _, m := range c.Mounts {
mount := types.Mount{
Target: m.Target,
Source: m.Source,
Type: types.MountType(strings.ToLower(swarmapi.Mount_MountType_name[int32(m.Type)])),
Writable: m.Writable,
}
if m.BindOptions != nil {
mount.BindOptions = &types.BindOptions{
Propagation: types.MountPropagation(strings.ToLower(swarmapi.Mount_BindOptions_MountPropagation_name[int32(m.BindOptions.Propagation)])),
}
}
if m.VolumeOptions != nil {
mount.VolumeOptions = &types.VolumeOptions{
Populate: m.VolumeOptions.Populate,
Labels: m.VolumeOptions.Labels,
}
if m.VolumeOptions.DriverConfig != nil {
mount.VolumeOptions.DriverConfig = &types.Driver{
Name: m.VolumeOptions.DriverConfig.Name,
Options: m.VolumeOptions.DriverConfig.Options,
}
}
}
containerSpec.Mounts = append(containerSpec.Mounts, mount)
}
if c.StopGracePeriod != nil {
grace, _ := ptypes.Duration(c.StopGracePeriod)
containerSpec.StopGracePeriod = &grace
}
return containerSpec
}
func containerToGRPC(c types.ContainerSpec) (*swarmapi.ContainerSpec, error) {
containerSpec := &swarmapi.ContainerSpec{
Image: c.Image,
Labels: c.Labels,
Command: c.Command,
Args: c.Args,
Env: c.Env,
Dir: c.Dir,
User: c.User,
}
if c.StopGracePeriod != nil {
containerSpec.StopGracePeriod = ptypes.DurationProto(*c.StopGracePeriod)
}
// Mounts
for _, m := range c.Mounts {
mount := swarmapi.Mount{
Target: m.Target,
Source: m.Source,
Writable: m.Writable,
}
if mountType, ok := swarmapi.Mount_MountType_value[strings.ToUpper(string(m.Type))]; ok {
mount.Type = swarmapi.Mount_MountType(mountType)
} else if string(m.Type) != "" {
return nil, fmt.Errorf("invalid MountType: %q", m.Type)
}
if m.BindOptions != nil {
if mountPropagation, ok := swarmapi.Mount_BindOptions_MountPropagation_value[strings.ToUpper(string(m.BindOptions.Propagation))]; ok {
mount.BindOptions = &swarmapi.Mount_BindOptions{Propagation: swarmapi.Mount_BindOptions_MountPropagation(mountPropagation)}
} else if string(m.BindOptions.Propagation) != "" {
return nil, fmt.Errorf("invalid MountPropagation: %q", m.BindOptions.Propagation)
}
}
if m.VolumeOptions != nil {
mount.VolumeOptions = &swarmapi.Mount_VolumeOptions{
Populate: m.VolumeOptions.Populate,
Labels: m.VolumeOptions.Labels,
}
if m.VolumeOptions.DriverConfig != nil {
mount.VolumeOptions.DriverConfig = &swarmapi.Driver{
Name: m.VolumeOptions.DriverConfig.Name,
Options: m.VolumeOptions.DriverConfig.Options,
}
}
}
containerSpec.Mounts = append(containerSpec.Mounts, mount)
}
return containerSpec, nil
}
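
As a quick check of the symmetry between the two converters above, a hypothetical round trip in the same package (the image, labels, and function are invented):

	// exampleRoundTrip converts an engine-api container spec to GRPC and back.
	func exampleRoundTrip() (types.ContainerSpec, error) {
		in := types.ContainerSpec{
			Image:   "redis:3.2",
			Command: []string{"redis-server"},
			Labels:  map[string]string{"tier": "cache"},
		}
		grpcSpec, err := containerToGRPC(in)
		if err != nil {
			return types.ContainerSpec{}, err
		}
		return containerSpecFromGRPC(grpcSpec), nil
	}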

View File

@ -0,0 +1,194 @@
package convert
import (
"strings"
basictypes "github.com/docker/engine-api/types"
networktypes "github.com/docker/engine-api/types/network"
types "github.com/docker/engine-api/types/swarm"
swarmapi "github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/protobuf/ptypes"
)
func networkAttachmentFromGRPC(na *swarmapi.NetworkAttachment) types.NetworkAttachment {
if na != nil {
return types.NetworkAttachment{
Network: networkFromGRPC(na.Network),
Addresses: na.Addresses,
}
}
return types.NetworkAttachment{}
}
func networkFromGRPC(n *swarmapi.Network) types.Network {
if n != nil {
network := types.Network{
ID: n.ID,
Spec: types.NetworkSpec{
IPv6Enabled: n.Spec.Ipv6Enabled,
Internal: n.Spec.Internal,
IPAMOptions: ipamFromGRPC(n.Spec.IPAM),
},
IPAMOptions: ipamFromGRPC(n.IPAM),
}
// Meta
network.Version.Index = n.Meta.Version.Index
network.CreatedAt, _ = ptypes.Timestamp(n.Meta.CreatedAt)
network.UpdatedAt, _ = ptypes.Timestamp(n.Meta.UpdatedAt)
//Annotations
network.Spec.Name = n.Spec.Annotations.Name
network.Spec.Labels = n.Spec.Annotations.Labels
//DriverConfiguration
if n.Spec.DriverConfig != nil {
network.Spec.DriverConfiguration = &types.Driver{
Name: n.Spec.DriverConfig.Name,
Options: n.Spec.DriverConfig.Options,
}
}
//DriverState
if n.DriverState != nil {
network.DriverState = types.Driver{
Name: n.DriverState.Name,
Options: n.DriverState.Options,
}
}
return network
}
return types.Network{}
}
func ipamFromGRPC(i *swarmapi.IPAMOptions) *types.IPAMOptions {
var ipam *types.IPAMOptions
if i != nil {
ipam = &types.IPAMOptions{}
if i.Driver != nil {
ipam.Driver.Name = i.Driver.Name
ipam.Driver.Options = i.Driver.Options
}
for _, config := range i.Configs {
ipam.Configs = append(ipam.Configs, types.IPAMConfig{
Subnet: config.Subnet,
Range: config.Range,
Gateway: config.Gateway,
})
}
}
return ipam
}
func endpointSpecFromGRPC(es *swarmapi.EndpointSpec) *types.EndpointSpec {
var endpointSpec *types.EndpointSpec
if es != nil {
endpointSpec = &types.EndpointSpec{}
endpointSpec.Mode = types.ResolutionMode(strings.ToLower(es.Mode.String()))
for _, portState := range es.Ports {
endpointSpec.Ports = append(endpointSpec.Ports, types.PortConfig{
Name: portState.Name,
Protocol: types.PortConfigProtocol(strings.ToLower(swarmapi.PortConfig_Protocol_name[int32(portState.Protocol)])),
TargetPort: portState.TargetPort,
PublishedPort: portState.PublishedPort,
})
}
}
return endpointSpec
}
func endpointFromGRPC(e *swarmapi.Endpoint) types.Endpoint {
endpoint := types.Endpoint{}
if e != nil {
if espec := endpointSpecFromGRPC(e.Spec); espec != nil {
endpoint.Spec = *espec
}
for _, portState := range e.Ports {
endpoint.Ports = append(endpoint.Ports, types.PortConfig{
Name: portState.Name,
Protocol: types.PortConfigProtocol(strings.ToLower(swarmapi.PortConfig_Protocol_name[int32(portState.Protocol)])),
TargetPort: portState.TargetPort,
PublishedPort: portState.PublishedPort,
})
}
for _, v := range e.VirtualIPs {
endpoint.VirtualIPs = append(endpoint.VirtualIPs, types.EndpointVirtualIP{
NetworkID: v.NetworkID,
Addr: v.Addr})
}
}
return endpoint
}
// BasicNetworkFromGRPC converts a grpc Network to a NetworkResource.
func BasicNetworkFromGRPC(n swarmapi.Network) basictypes.NetworkResource {
spec := n.Spec
var ipam networktypes.IPAM
if spec.IPAM != nil {
if spec.IPAM.Driver != nil {
ipam.Driver = spec.IPAM.Driver.Name
ipam.Options = spec.IPAM.Driver.Options
}
ipam.Config = make([]networktypes.IPAMConfig, 0, len(spec.IPAM.Configs))
for _, ic := range spec.IPAM.Configs {
ipamConfig := networktypes.IPAMConfig{
Subnet: ic.Subnet,
IPRange: ic.Range,
Gateway: ic.Gateway,
AuxAddress: ic.Reserved,
}
ipam.Config = append(ipam.Config, ipamConfig)
}
}
return basictypes.NetworkResource{
ID: n.ID,
Name: n.Spec.Annotations.Name,
Scope: "swarm",
Driver: n.DriverState.Name,
EnableIPv6: spec.Ipv6Enabled,
IPAM: ipam,
Internal: spec.Internal,
Options: n.DriverState.Options,
Labels: n.Spec.Annotations.Labels,
}
}
// BasicNetworkCreateToGRPC converts a NetworkCreateRequest to a grpc NetworkSpec.
func BasicNetworkCreateToGRPC(create basictypes.NetworkCreateRequest) swarmapi.NetworkSpec {
ns := swarmapi.NetworkSpec{
Annotations: swarmapi.Annotations{
Name: create.Name,
Labels: create.Labels,
},
DriverConfig: &swarmapi.Driver{
Name: create.Driver,
Options: create.Options,
},
Ipv6Enabled: create.EnableIPv6,
Internal: create.Internal,
IPAM: &swarmapi.IPAMOptions{
Driver: &swarmapi.Driver{
Name: create.IPAM.Driver,
Options: create.IPAM.Options,
},
},
}
ipamSpec := make([]*swarmapi.IPAMConfig, 0, len(create.IPAM.Config))
for _, ipamConfig := range create.IPAM.Config {
ipamSpec = append(ipamSpec, &swarmapi.IPAMConfig{
Subnet: ipamConfig.Subnet,
Range: ipamConfig.IPRange,
Gateway: ipamConfig.Gateway,
})
}
ns.IPAM.Configs = ipamSpec
return ns
}

View File

@ -0,0 +1,95 @@
package convert
import (
"fmt"
"strings"
types "github.com/docker/engine-api/types/swarm"
swarmapi "github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/protobuf/ptypes"
)
// NodeFromGRPC converts a grpc Node to a Node.
func NodeFromGRPC(n swarmapi.Node) types.Node {
node := types.Node{
ID: n.ID,
Spec: types.NodeSpec{
Role: types.NodeRole(strings.ToLower(n.Spec.Role.String())),
Membership: types.NodeMembership(strings.ToLower(n.Spec.Membership.String())),
Availability: types.NodeAvailability(strings.ToLower(n.Spec.Availability.String())),
},
Status: types.NodeStatus{
State: types.NodeState(strings.ToLower(n.Status.State.String())),
Message: n.Status.Message,
},
}
// Meta
node.Version.Index = n.Meta.Version.Index
node.CreatedAt, _ = ptypes.Timestamp(n.Meta.CreatedAt)
node.UpdatedAt, _ = ptypes.Timestamp(n.Meta.UpdatedAt)
//Annotations
node.Spec.Name = n.Spec.Annotations.Name
node.Spec.Labels = n.Spec.Annotations.Labels
//Description
if n.Description != nil {
node.Description.Hostname = n.Description.Hostname
if n.Description.Platform != nil {
node.Description.Platform.Architecture = n.Description.Platform.Architecture
node.Description.Platform.OS = n.Description.Platform.OS
}
if n.Description.Resources != nil {
node.Description.Resources.NanoCPUs = n.Description.Resources.NanoCPUs
node.Description.Resources.MemoryBytes = n.Description.Resources.MemoryBytes
}
if n.Description.Engine != nil {
node.Description.Engine.EngineVersion = n.Description.Engine.EngineVersion
node.Description.Engine.Labels = n.Description.Engine.Labels
for _, plugin := range n.Description.Engine.Plugins {
node.Description.Engine.Plugins = append(node.Description.Engine.Plugins, types.PluginDescription{Type: plugin.Type, Name: plugin.Name})
}
}
}
//Manager
if n.ManagerStatus != nil {
node.ManagerStatus = &types.ManagerStatus{
Leader: n.ManagerStatus.Raft.Status.Leader,
Reachability: types.Reachability(strings.ToLower(n.ManagerStatus.Raft.Status.Reachability.String())),
Addr: n.ManagerStatus.Raft.Addr,
}
}
return node
}
// NodeSpecToGRPC converts a NodeSpec to a grpc NodeSpec.
func NodeSpecToGRPC(s types.NodeSpec) (swarmapi.NodeSpec, error) {
spec := swarmapi.NodeSpec{
Annotations: swarmapi.Annotations{
Name: s.Name,
Labels: s.Labels,
},
}
if role, ok := swarmapi.NodeRole_value[strings.ToUpper(string(s.Role))]; ok {
spec.Role = swarmapi.NodeRole(role)
} else {
return swarmapi.NodeSpec{}, fmt.Errorf("invalid Role: %q", s.Role)
}
if membership, ok := swarmapi.NodeSpec_Membership_value[strings.ToUpper(string(s.Membership))]; ok {
spec.Membership = swarmapi.NodeSpec_Membership(membership)
} else {
return swarmapi.NodeSpec{}, fmt.Errorf("invalid Membership: %q", s.Membership)
}
if availability, ok := swarmapi.NodeSpec_Availability_value[strings.ToUpper(string(s.Availability))]; ok {
spec.Availability = swarmapi.NodeSpec_Availability(availability)
} else {
return swarmapi.NodeSpec{}, fmt.Errorf("invalid Availability: %q", s.Availability)
}
return spec, nil
}

View File

@ -0,0 +1,252 @@
package convert
import (
"fmt"
"strings"
"github.com/docker/docker/pkg/namesgenerator"
types "github.com/docker/engine-api/types/swarm"
swarmapi "github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/protobuf/ptypes"
)
// ServiceFromGRPC converts a grpc Service to a Service.
func ServiceFromGRPC(s swarmapi.Service) types.Service {
spec := s.Spec
containerConfig := spec.Task.Runtime.(*swarmapi.TaskSpec_Container).Container
networks := make([]types.NetworkAttachmentConfig, 0, len(spec.Networks))
for _, n := range spec.Networks {
networks = append(networks, types.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases})
}
service := types.Service{
ID: s.ID,
Spec: types.ServiceSpec{
TaskTemplate: types.TaskSpec{
ContainerSpec: containerSpecFromGRPC(containerConfig),
Resources: resourcesFromGRPC(s.Spec.Task.Resources),
RestartPolicy: restartPolicyFromGRPC(s.Spec.Task.Restart),
Placement: placementFromGRPC(s.Spec.Task.Placement),
},
Networks: networks,
EndpointSpec: endpointSpecFromGRPC(s.Spec.Endpoint),
},
Endpoint: endpointFromGRPC(s.Endpoint),
}
// Meta
service.Version.Index = s.Meta.Version.Index
service.CreatedAt, _ = ptypes.Timestamp(s.Meta.CreatedAt)
service.UpdatedAt, _ = ptypes.Timestamp(s.Meta.UpdatedAt)
// Annotations
service.Spec.Name = s.Spec.Annotations.Name
service.Spec.Labels = s.Spec.Annotations.Labels
// UpdateConfig
if s.Spec.Update != nil {
service.Spec.UpdateConfig = &types.UpdateConfig{
Parallelism: s.Spec.Update.Parallelism,
}
service.Spec.UpdateConfig.Delay, _ = ptypes.Duration(&s.Spec.Update.Delay)
}
//Mode
switch t := s.Spec.GetMode().(type) {
case *swarmapi.ServiceSpec_Global:
service.Spec.Mode.Global = &types.GlobalService{}
case *swarmapi.ServiceSpec_Replicated:
service.Spec.Mode.Replicated = &types.ReplicatedService{
Replicas: &t.Replicated.Replicas,
}
}
return service
}
// ServiceSpecToGRPC converts a ServiceSpec to a grpc ServiceSpec.
func ServiceSpecToGRPC(s types.ServiceSpec) (swarmapi.ServiceSpec, error) {
name := s.Name
if name == "" {
name = namesgenerator.GetRandomName(0)
}
networks := make([]*swarmapi.ServiceSpec_NetworkAttachmentConfig, 0, len(s.Networks))
for _, n := range s.Networks {
networks = append(networks, &swarmapi.ServiceSpec_NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases})
}
spec := swarmapi.ServiceSpec{
Annotations: swarmapi.Annotations{
Name: name,
Labels: s.Labels,
},
Task: swarmapi.TaskSpec{
Resources: resourcesToGRPC(s.TaskTemplate.Resources),
},
Networks: networks,
}
containerSpec, err := containerToGRPC(s.TaskTemplate.ContainerSpec)
if err != nil {
return swarmapi.ServiceSpec{}, err
}
spec.Task.Runtime = &swarmapi.TaskSpec_Container{Container: containerSpec}
restartPolicy, err := restartPolicyToGRPC(s.TaskTemplate.RestartPolicy)
if err != nil {
return swarmapi.ServiceSpec{}, err
}
spec.Task.Restart = restartPolicy
if s.TaskTemplate.Placement != nil {
spec.Task.Placement = &swarmapi.Placement{
Constraints: s.TaskTemplate.Placement.Constraints,
}
}
if s.UpdateConfig != nil {
spec.Update = &swarmapi.UpdateConfig{
Parallelism: s.UpdateConfig.Parallelism,
Delay: *ptypes.DurationProto(s.UpdateConfig.Delay),
}
}
if s.EndpointSpec != nil {
if s.EndpointSpec.Mode != "" &&
s.EndpointSpec.Mode != types.ResolutionModeVIP &&
s.EndpointSpec.Mode != types.ResolutionModeDNSRR {
return swarmapi.ServiceSpec{}, fmt.Errorf("invalid resolution mode: %q", s.EndpointSpec.Mode)
}
spec.Endpoint = &swarmapi.EndpointSpec{}
spec.Endpoint.Mode = swarmapi.EndpointSpec_ResolutionMode(swarmapi.EndpointSpec_ResolutionMode_value[strings.ToUpper(string(s.EndpointSpec.Mode))])
for _, portConfig := range s.EndpointSpec.Ports {
spec.Endpoint.Ports = append(spec.Endpoint.Ports, &swarmapi.PortConfig{
Name: portConfig.Name,
Protocol: swarmapi.PortConfig_Protocol(swarmapi.PortConfig_Protocol_value[strings.ToUpper(string(portConfig.Protocol))]),
TargetPort: portConfig.TargetPort,
PublishedPort: portConfig.PublishedPort,
})
}
}
//Mode
if s.Mode.Global != nil {
spec.Mode = &swarmapi.ServiceSpec_Global{
Global: &swarmapi.GlobalService{},
}
} else if s.Mode.Replicated != nil && s.Mode.Replicated.Replicas != nil {
spec.Mode = &swarmapi.ServiceSpec_Replicated{
Replicated: &swarmapi.ReplicatedService{Replicas: *s.Mode.Replicated.Replicas},
}
} else {
spec.Mode = &swarmapi.ServiceSpec_Replicated{
Replicated: &swarmapi.ReplicatedService{Replicas: 1},
}
}
return spec, nil
}
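
For illustration, a minimal hedged construction of a replicated spec and its conversion, in the same package; the service name, image, and function are invented:

	// exampleServiceSpec builds a two-replica nginx service spec.
	func exampleServiceSpec() (swarmapi.ServiceSpec, error) {
		replicas := uint64(2)
		spec := types.ServiceSpec{
			TaskTemplate: types.TaskSpec{
				ContainerSpec: types.ContainerSpec{Image: "nginx:alpine"},
			},
			Mode: types.ServiceMode{
				Replicated: &types.ReplicatedService{Replicas: &replicas},
			},
		}
		spec.Name = "web" // promoted from the embedded Annotations
		return ServiceSpecToGRPC(spec)
	}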
func resourcesFromGRPC(res *swarmapi.ResourceRequirements) *types.ResourceRequirements {
var resources *types.ResourceRequirements
if res != nil {
resources = &types.ResourceRequirements{}
if res.Limits != nil {
resources.Limits = &types.Resources{
NanoCPUs: res.Limits.NanoCPUs,
MemoryBytes: res.Limits.MemoryBytes,
}
}
if res.Reservations != nil {
resources.Reservations = &types.Resources{
NanoCPUs: res.Reservations.NanoCPUs,
MemoryBytes: res.Reservations.MemoryBytes,
}
}
}
return resources
}
func resourcesToGRPC(res *types.ResourceRequirements) *swarmapi.ResourceRequirements {
var reqs *swarmapi.ResourceRequirements
if res != nil {
reqs = &swarmapi.ResourceRequirements{}
if res.Limits != nil {
reqs.Limits = &swarmapi.Resources{
NanoCPUs: res.Limits.NanoCPUs,
MemoryBytes: res.Limits.MemoryBytes,
}
}
if res.Reservations != nil {
reqs.Reservations = &swarmapi.Resources{
NanoCPUs: res.Reservations.NanoCPUs,
MemoryBytes: res.Reservations.MemoryBytes,
}
}
}
return reqs
}
func restartPolicyFromGRPC(p *swarmapi.RestartPolicy) *types.RestartPolicy {
var rp *types.RestartPolicy
if p != nil {
rp = &types.RestartPolicy{}
rp.Condition = types.RestartPolicyCondition(strings.ToLower(p.Condition.String()))
if p.Delay != nil {
delay, _ := ptypes.Duration(p.Delay)
rp.Delay = &delay
}
if p.Window != nil {
window, _ := ptypes.Duration(p.Window)
rp.Window = &window
}
rp.MaxAttempts = &p.MaxAttempts
}
return rp
}
func restartPolicyToGRPC(p *types.RestartPolicy) (*swarmapi.RestartPolicy, error) {
var rp *swarmapi.RestartPolicy
if p != nil {
rp = &swarmapi.RestartPolicy{}
if condition, ok := swarmapi.RestartPolicy_RestartCondition_value[strings.ToUpper(string(p.Condition))]; ok {
rp.Condition = swarmapi.RestartPolicy_RestartCondition(condition)
} else if string(p.Condition) == "" {
rp.Condition = swarmapi.RestartOnAny
} else {
return nil, fmt.Errorf("invalid RestartCondition: %q", p.Condition)
}
if p.Delay != nil {
rp.Delay = ptypes.DurationProto(*p.Delay)
}
if p.Window != nil {
rp.Window = ptypes.DurationProto(*p.Window)
}
if p.MaxAttempts != nil {
rp.MaxAttempts = *p.MaxAttempts
}
}
return rp, nil
}
func placementFromGRPC(p *swarmapi.Placement) *types.Placement {
var r *types.Placement
if p != nil {
r = &types.Placement{}
r.Constraints = p.Constraints
}
return r
}

View File

@ -0,0 +1,116 @@
package convert
import (
"fmt"
"strings"
"golang.org/x/crypto/bcrypt"
types "github.com/docker/engine-api/types/swarm"
swarmapi "github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/protobuf/ptypes"
)
// SwarmFromGRPC converts a grpc Cluster to a Swarm.
func SwarmFromGRPC(c swarmapi.Cluster) types.Swarm {
swarm := types.Swarm{
ID: c.ID,
Spec: types.Spec{
Orchestration: types.OrchestrationConfig{
TaskHistoryRetentionLimit: c.Spec.Orchestration.TaskHistoryRetentionLimit,
},
Raft: types.RaftConfig{
SnapshotInterval: c.Spec.Raft.SnapshotInterval,
KeepOldSnapshots: c.Spec.Raft.KeepOldSnapshots,
LogEntriesForSlowFollowers: c.Spec.Raft.LogEntriesForSlowFollowers,
HeartbeatTick: c.Spec.Raft.HeartbeatTick,
ElectionTick: c.Spec.Raft.ElectionTick,
},
Dispatcher: types.DispatcherConfig{
HeartbeatPeriod: c.Spec.Dispatcher.HeartbeatPeriod,
},
},
}
swarm.Spec.CAConfig.NodeCertExpiry, _ = ptypes.Duration(c.Spec.CAConfig.NodeCertExpiry)
// Meta
swarm.Version.Index = c.Meta.Version.Index
swarm.CreatedAt, _ = ptypes.Timestamp(c.Meta.CreatedAt)
swarm.UpdatedAt, _ = ptypes.Timestamp(c.Meta.UpdatedAt)
// Annotations
swarm.Spec.Name = c.Spec.Annotations.Name
swarm.Spec.Labels = c.Spec.Annotations.Labels
for _, policy := range c.Spec.AcceptancePolicy.Policies {
p := types.Policy{
Role: types.NodeRole(strings.ToLower(policy.Role.String())),
Autoaccept: policy.Autoaccept,
}
if policy.Secret != nil {
p.Secret = string(policy.Secret.Data)
}
swarm.Spec.AcceptancePolicy.Policies = append(swarm.Spec.AcceptancePolicy.Policies, p)
}
return swarm
}
// SwarmSpecToGRPC converts a Spec to a grpc ClusterSpec.
func SwarmSpecToGRPC(s types.Spec) (swarmapi.ClusterSpec, error) {
spec := swarmapi.ClusterSpec{
Annotations: swarmapi.Annotations{
Name: s.Name,
Labels: s.Labels,
},
Orchestration: swarmapi.OrchestrationConfig{
TaskHistoryRetentionLimit: s.Orchestration.TaskHistoryRetentionLimit,
},
Raft: swarmapi.RaftConfig{
SnapshotInterval: s.Raft.SnapshotInterval,
KeepOldSnapshots: s.Raft.KeepOldSnapshots,
LogEntriesForSlowFollowers: s.Raft.LogEntriesForSlowFollowers,
HeartbeatTick: s.Raft.HeartbeatTick,
ElectionTick: s.Raft.ElectionTick,
},
Dispatcher: swarmapi.DispatcherConfig{
HeartbeatPeriod: s.Dispatcher.HeartbeatPeriod,
},
CAConfig: swarmapi.CAConfig{
NodeCertExpiry: ptypes.DurationProto(s.CAConfig.NodeCertExpiry),
},
}
if err := SwarmSpecUpdateAcceptancePolicy(&spec, s.AcceptancePolicy); err != nil {
return swarmapi.ClusterSpec{}, err
}
return spec, nil
}
// SwarmSpecUpdateAcceptancePolicy updates a grpc ClusterSpec using AcceptancePolicy.
func SwarmSpecUpdateAcceptancePolicy(spec *swarmapi.ClusterSpec, acceptancePolicy types.AcceptancePolicy) error {
spec.AcceptancePolicy.Policies = nil
for _, p := range acceptancePolicy.Policies {
role, ok := swarmapi.NodeRole_value[strings.ToUpper(string(p.Role))]
if !ok {
return fmt.Errorf("invalid Role: %q", p.Role)
}
policy := &swarmapi.AcceptancePolicy_RoleAdmissionPolicy{
Role: swarmapi.NodeRole(role),
Autoaccept: p.Autoaccept,
}
if p.Secret != "" {
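// a cost of 0 is below bcrypt.MinCost, so GenerateFromPassword falls back to bcrypt.DefaultCost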
hashPwd, _ := bcrypt.GenerateFromPassword([]byte(p.Secret), 0)
policy.Secret = &swarmapi.AcceptancePolicy_RoleAdmissionPolicy_HashedSecret{
Data: hashPwd,
Alg: "bcrypt",
}
}
spec.AcceptancePolicy.Policies = append(spec.AcceptancePolicy.Policies, policy)
}
return nil
}

View File

@ -0,0 +1,53 @@
package convert
import (
"strings"
types "github.com/docker/engine-api/types/swarm"
swarmapi "github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/protobuf/ptypes"
)
// TaskFromGRPC converts a grpc Task to a Task.
func TaskFromGRPC(t swarmapi.Task) types.Task {
containerConfig := t.Spec.Runtime.(*swarmapi.TaskSpec_Container).Container
containerStatus := t.Status.GetContainer()
task := types.Task{
ID: t.ID,
ServiceID: t.ServiceID,
Slot: int(t.Slot),
NodeID: t.NodeID,
Spec: types.TaskSpec{
ContainerSpec: containerSpecFromGRPC(containerConfig),
Resources: resourcesFromGRPC(t.Spec.Resources),
RestartPolicy: restartPolicyFromGRPC(t.Spec.Restart),
Placement: placementFromGRPC(t.Spec.Placement),
},
Status: types.TaskStatus{
State: types.TaskState(strings.ToLower(t.Status.State.String())),
Message: t.Status.Message,
Err: t.Status.Err,
},
DesiredState: types.TaskState(strings.ToLower(t.DesiredState.String())),
}
// Meta
task.Version.Index = t.Meta.Version.Index
task.CreatedAt, _ = ptypes.Timestamp(t.Meta.CreatedAt)
task.UpdatedAt, _ = ptypes.Timestamp(t.Meta.UpdatedAt)
task.Status.Timestamp, _ = ptypes.Timestamp(t.Status.Timestamp)
if containerStatus != nil {
task.Status.ContainerStatus.ContainerID = containerStatus.ContainerID
task.Status.ContainerStatus.PID = int(containerStatus.PID)
task.Status.ContainerStatus.ExitCode = int(containerStatus.ExitCode)
}
// NetworksAttachments
for _, na := range t.Networks {
task.NetworksAttachments = append(task.NetworksAttachments, networkAttachementFromGRPC(na))
}
return task
}

View File

@ -0,0 +1,35 @@
package executor
import (
"io"
clustertypes "github.com/docker/docker/daemon/cluster/provider"
"github.com/docker/engine-api/types"
"github.com/docker/engine-api/types/container"
"github.com/docker/engine-api/types/network"
"github.com/docker/libnetwork/cluster"
networktypes "github.com/docker/libnetwork/types"
"golang.org/x/net/context"
)
// Backend defines the executor component for a swarm agent.
type Backend interface {
CreateManagedNetwork(clustertypes.NetworkCreateRequest) error
DeleteManagedNetwork(name string) error
SetupIngress(req clustertypes.NetworkCreateRequest, nodeIP string) error
PullImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error
CreateManagedContainer(types.ContainerCreateConfig) (types.ContainerCreateResponse, error)
ContainerStart(name string, hostConfig *container.HostConfig) error
ContainerStop(name string, seconds int) error
ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error
UpdateContainerServiceConfig(containerName string, serviceConfig *clustertypes.ServiceConfig) error
ContainerInspectCurrent(name string, size bool) (*types.ContainerJSON, error)
ContainerWaitWithContext(ctx context.Context, name string) (<-chan int, error)
ContainerRm(name string, config *types.ContainerRmConfig) error
ContainerKill(name string, sig uint64) error
SystemInfo() (*types.Info, error)
VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error)
ListContainersForNode(nodeID string) []string
SetNetworkBootstrapKeys([]*networktypes.EncryptionKey) error
SetClusterProvider(provider cluster.Provider)
}
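A cheap way to keep an implementation honest against this interface is a compile-time assertion; a minimal sketch, assuming (as the rest of this change wires up) that the daemon's `*Daemon` type is the implementer:

```go
// Sketch, placed in package daemon: compilation fails if *Daemon ever
// stops satisfying executor.Backend (e.g. after a signature change).
var _ executor.Backend = (*Daemon)(nil)
```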

View File

@ -0,0 +1,229 @@
package container
import (
"encoding/base64"
"encoding/json"
"fmt"
"io"
"strings"
"syscall"
"github.com/Sirupsen/logrus"
executorpkg "github.com/docker/docker/daemon/cluster/executor"
"github.com/docker/engine-api/types"
"github.com/docker/libnetwork"
"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/log"
"golang.org/x/net/context"
)
// containerAdapter conducts remote operations for a container. Most calls
// are naked passthroughs to the backend API, seeded with information from
// containerConfig.
type containerAdapter struct {
backend executorpkg.Backend
container *containerConfig
}
func newContainerAdapter(b executorpkg.Backend, task *api.Task) (*containerAdapter, error) {
ctnr, err := newContainerConfig(task)
if err != nil {
return nil, err
}
return &containerAdapter{
container: ctnr,
backend: b,
}, nil
}
func (c *containerAdapter) pullImage(ctx context.Context) error {
// if the image needs to be pulled, the auth config will be retrieved and updated
encodedAuthConfig := c.container.task.ServiceAnnotations.Labels[fmt.Sprintf("%v.registryauth", systemLabelPrefix)]
authConfig := &types.AuthConfig{}
if encodedAuthConfig != "" {
if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuthConfig))).Decode(authConfig); err != nil {
logrus.Warnf("invalid authconfig: %v", err)
}
}
pr, pw := io.Pipe()
metaHeaders := map[string][]string{}
go func() {
err := c.backend.PullImage(ctx, c.container.image(), "", metaHeaders, authConfig, pw)
pw.CloseWithError(err)
}()
dec := json.NewDecoder(pr)
m := map[string]interface{}{}
for {
if err := dec.Decode(&m); err != nil {
if err == io.EOF {
break
}
return err
}
// TODO(stevvooe): Report this status somewhere.
logrus.Debugln("pull progress", m)
}
// if the final stream object contained an error, return it
if errMsg, ok := m["error"]; ok {
return fmt.Errorf("%v", errMsg)
}
return nil
}
func (c *containerAdapter) createNetworks(ctx context.Context) error {
for _, network := range c.container.networks() {
ncr, err := c.container.networkCreateRequest(network)
if err != nil {
return err
}
if err := c.backend.CreateManagedNetwork(ncr); err != nil { // todo name missing
if _, ok := err.(libnetwork.NetworkNameError); ok {
continue
}
return err
}
}
return nil
}
func (c *containerAdapter) removeNetworks(ctx context.Context) error {
for _, nid := range c.container.networks() {
if err := c.backend.DeleteManagedNetwork(nid); err != nil {
if _, ok := err.(*libnetwork.ActiveEndpointsError); ok {
continue
}
log.G(ctx).Errorf("network %s remove failed: %v", nid, err)
return err
}
}
return nil
}
func (c *containerAdapter) create(ctx context.Context, backend executorpkg.Backend) error {
var cr types.ContainerCreateResponse
var err error
if cr, err = backend.CreateManagedContainer(types.ContainerCreateConfig{
Name: c.container.name(),
Config: c.container.config(),
HostConfig: c.container.hostConfig(),
// Use the first network in container create
NetworkingConfig: c.container.createNetworkingConfig(),
}); err != nil {
return err
}
// The Docker daemon currently doesn't support multiple networks in container create
// Connect to all other networks
nc := c.container.connectNetworkingConfig()
if nc != nil {
for n, ep := range nc.EndpointsConfig {
logrus.Errorf("CONNECT %s : %v", n, ep.IPAMConfig.IPv4Address)
if err := backend.ConnectContainerToNetwork(cr.ID, n, ep); err != nil {
return err
}
}
}
if err := backend.UpdateContainerServiceConfig(cr.ID, c.container.serviceConfig()); err != nil {
return err
}
return nil
}
func (c *containerAdapter) start(ctx context.Context) error {
return c.backend.ContainerStart(c.container.name(), nil)
}
func (c *containerAdapter) inspect(ctx context.Context) (types.ContainerJSON, error) {
cs, err := c.backend.ContainerInspectCurrent(c.container.name(), false)
if ctx.Err() != nil {
return types.ContainerJSON{}, ctx.Err()
}
if err != nil {
return types.ContainerJSON{}, err
}
return *cs, nil
}
// wait returns a channel that delivers the container's exit code once the
// container stops. The wait can be abandoned by cancelling the context.
func (c *containerAdapter) wait(ctx context.Context) (<-chan int, error) {
return c.backend.ContainerWaitWithContext(ctx, c.container.name())
}
func (c *containerAdapter) shutdown(ctx context.Context) error {
// Default stop grace period to 10s.
stopgrace := 10
spec := c.container.spec()
if spec.StopGracePeriod != nil {
stopgrace = int(spec.StopGracePeriod.Seconds)
}
return c.backend.ContainerStop(c.container.name(), stopgrace)
}
func (c *containerAdapter) terminate(ctx context.Context) error {
return c.backend.ContainerKill(c.container.name(), uint64(syscall.SIGKILL))
}
func (c *containerAdapter) remove(ctx context.Context) error {
return c.backend.ContainerRm(c.container.name(), &types.ContainerRmConfig{
RemoveVolume: true,
ForceRemove: true,
})
}
func (c *containerAdapter) createVolumes(ctx context.Context, backend executorpkg.Backend) error {
// Create plugin volumes that are embedded inside a Mount
for _, mount := range c.container.task.Spec.GetContainer().Mounts {
if mount.Type != api.MountTypeVolume {
continue
}
if mount.VolumeOptions == nil {
continue
}
if mount.VolumeOptions.DriverConfig == nil {
continue
}
req := c.container.volumeCreateRequest(&mount)
// Check if this volume exists on the engine
if _, err := backend.VolumeCreate(req.Name, req.Driver, req.DriverOpts, req.Labels); err != nil {
// TODO(amitshukla): Today, volume create through the engine api does not return an error
// when the named volume with the same parameters already exists.
// It returns an error if the driver name is different - that is a valid error
return err
}
}
return nil
}
// todo: typed/wrapped errors
func isContainerCreateNameConflict(err error) bool {
return strings.Contains(err.Error(), "Conflict. The name")
}
func isUnknownContainer(err error) bool {
return strings.Contains(err.Error(), "No such container:")
}
func isStoppedContainer(err error) bool {
return strings.Contains(err.Error(), "is already stopped")
}

View File

@ -0,0 +1,415 @@
package container
import (
"errors"
"fmt"
"log"
"net"
"strings"
"time"
clustertypes "github.com/docker/docker/daemon/cluster/provider"
"github.com/docker/docker/reference"
"github.com/docker/engine-api/types"
enginecontainer "github.com/docker/engine-api/types/container"
"github.com/docker/engine-api/types/network"
"github.com/docker/swarmkit/agent/exec"
"github.com/docker/swarmkit/api"
)
const (
// Explicitly use the kernel's default setting for the CPU quota period of 100ms.
// https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt
cpuQuotaPeriod = 100 * time.Millisecond
// systemLabelPrefix represents the reserved namespace for system labels.
systemLabelPrefix = "com.docker.swarm"
)
// containerConfig converts task properties into docker container compatible
// components.
type containerConfig struct {
task *api.Task
networksAttachments map[string]*api.NetworkAttachment
}
// newContainerConfig returns a validated container config. No methods should
// return an error if this function returns without error.
func newContainerConfig(t *api.Task) (*containerConfig, error) {
var c containerConfig
return &c, c.setTask(t)
}
func (c *containerConfig) setTask(t *api.Task) error {
container := t.Spec.GetContainer()
if container == nil {
return exec.ErrRuntimeUnsupported
}
if container.Image == "" {
return ErrImageRequired
}
// index the networks by name
c.networksAttachments = make(map[string]*api.NetworkAttachment, len(t.Networks))
for _, attachment := range t.Networks {
c.networksAttachments[attachment.Network.Spec.Annotations.Name] = attachment
}
c.task = t
return nil
}
func (c *containerConfig) endpoint() *api.Endpoint {
return c.task.Endpoint
}
func (c *containerConfig) spec() *api.ContainerSpec {
return c.task.Spec.GetContainer()
}
func (c *containerConfig) name() string {
if c.task.Annotations.Name != "" {
// if set, use the container Annotations.Name field, set in the orchestrator.
return c.task.Annotations.Name
}
// fallback to service.slot.id.
return strings.Join([]string{c.task.ServiceAnnotations.Name, fmt.Sprint(c.task.Slot), c.task.ID}, ".")
}
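// For example, with service "web", slot 1 and task ID "8yv1cbtvc8xq" (all
// illustrative values), a task without an explicit name is named
// "web.1.8yv1cbtvc8xq".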
func (c *containerConfig) image() string {
raw := c.spec().Image
ref, err := reference.ParseNamed(raw)
if err != nil {
return raw
}
return reference.WithDefaultTag(ref).String()
}
func (c *containerConfig) volumes() map[string]struct{} {
r := make(map[string]struct{})
for _, mount := range c.spec().Mounts {
// pick off all the volume mounts.
if mount.Type != api.MountTypeVolume {
continue
}
r[fmt.Sprintf("%s:%s", mount.Target, getMountMask(&mount))] = struct{}{}
}
return r
}
func (c *containerConfig) config() *enginecontainer.Config {
config := &enginecontainer.Config{
Labels: c.labels(),
User: c.spec().User,
Env: c.spec().Env,
WorkingDir: c.spec().Dir,
Image: c.image(),
Volumes: c.volumes(),
}
if len(c.spec().Command) > 0 {
// If Command is provided, we replace the whole invocation with Command
// by replacing Entrypoint and specifying Cmd. Args is ignored in this
// case.
config.Entrypoint = append(config.Entrypoint, c.spec().Command[0])
config.Cmd = append(config.Cmd, c.spec().Command[1:]...)
} else if len(c.spec().Args) > 0 {
// In this case, we assume the image has an Entrypoint and Args
// specifies the arguments for that entrypoint.
config.Cmd = c.spec().Args
}
return config
}
func (c *containerConfig) labels() map[string]string {
var (
system = map[string]string{
"task": "", // mark as cluster task
"task.id": c.task.ID,
"task.name": fmt.Sprintf("%v.%v", c.task.ServiceAnnotations.Name, c.task.Slot),
"node.id": c.task.NodeID,
"service.id": c.task.ServiceID,
"service.name": c.task.ServiceAnnotations.Name,
}
labels = make(map[string]string)
)
// base labels are those defined in the spec.
for k, v := range c.spec().Labels {
labels[k] = v
}
// we then apply the overrides from the task, which may be set via the
// orchestrator.
for k, v := range c.task.Annotations.Labels {
labels[k] = v
}
// finally, we apply the system labels, which override all labels.
for k, v := range system {
labels[strings.Join([]string{systemLabelPrefix, k}, ".")] = v
}
return labels
}
func (c *containerConfig) bindMounts() []string {
var r []string
for _, val := range c.spec().Mounts {
mask := getMountMask(&val)
if val.Type == api.MountTypeBind {
r = append(r, fmt.Sprintf("%s:%s:%s", val.Source, val.Target, mask))
}
}
return r
}
func getMountMask(m *api.Mount) string {
maskOpts := []string{"ro"}
if m.Writable {
maskOpts[0] = "rw"
}
if m.BindOptions != nil {
switch m.BindOptions.Propagation {
case api.MountPropagationPrivate:
maskOpts = append(maskOpts, "private")
case api.MountPropagationRPrivate:
maskOpts = append(maskOpts, "rprivate")
case api.MountPropagationShared:
maskOpts = append(maskOpts, "shared")
case api.MountPropagationRShared:
maskOpts = append(maskOpts, "rshared")
case api.MountPropagationSlave:
maskOpts = append(maskOpts, "slave")
case api.MountPropagationRSlave:
maskOpts = append(maskOpts, "rslave")
}
}
if m.VolumeOptions != nil {
if !m.VolumeOptions.Populate {
maskOpts = append(maskOpts, "nocopy")
}
}
return strings.Join(maskOpts, ",")
}
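// Illustrative examples: a writable bind mount with rshared propagation
// yields "rw,rshared"; a read-only volume mount with Populate=false yields
// "ro,nocopy".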
func (c *containerConfig) hostConfig() *enginecontainer.HostConfig {
return &enginecontainer.HostConfig{
Resources: c.resources(),
Binds: c.bindMounts(),
}
}
// This handles the case of volumes that are defined inside a service Mount
func (c *containerConfig) volumeCreateRequest(mount *api.Mount) *types.VolumeCreateRequest {
var (
driverName string
driverOpts map[string]string
labels map[string]string
)
if mount.VolumeOptions != nil && mount.VolumeOptions.DriverConfig != nil {
driverName = mount.VolumeOptions.DriverConfig.Name
driverOpts = mount.VolumeOptions.DriverConfig.Options
labels = mount.VolumeOptions.Labels
}
if mount.VolumeOptions != nil {
return &types.VolumeCreateRequest{
Name: mount.Source,
Driver: driverName,
DriverOpts: driverOpts,
Labels: labels,
}
}
return nil
}
func (c *containerConfig) resources() enginecontainer.Resources {
resources := enginecontainer.Resources{}
// If no limits are specified let the engine use its defaults.
//
// TODO(aluzzardi): We might want to set some limits anyway otherwise
// "unlimited" tasks will step over the reservation of other tasks.
r := c.task.Spec.Resources
if r == nil || r.Limits == nil {
return resources
}
if r.Limits.MemoryBytes > 0 {
resources.Memory = r.Limits.MemoryBytes
}
if r.Limits.NanoCPUs > 0 {
// CPU Period must be set in microseconds.
resources.CPUPeriod = int64(cpuQuotaPeriod / time.Microsecond)
resources.CPUQuota = r.Limits.NanoCPUs * resources.CPUPeriod / 1e9
}
return resources
}
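// Worked example (illustrative): a limit of 1.5 CPUs arrives as
// NanoCPUs = 1500000000, so CPUPeriod = 100ms/1µs = 100000 and
// CPUQuota = 1500000000 * 100000 / 1e9 = 150000, i.e. 1.5 periods.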
// Docker daemon supports just 1 network during container create.
func (c *containerConfig) createNetworkingConfig() *network.NetworkingConfig {
var networks []*api.NetworkAttachment
if c.task.Spec.GetContainer() != nil {
networks = c.task.Networks
}
epConfig := make(map[string]*network.EndpointSettings)
if len(networks) > 0 {
epConfig[networks[0].Network.Spec.Annotations.Name] = getEndpointConfig(networks[0])
}
return &network.NetworkingConfig{EndpointsConfig: epConfig}
}
// TODO: Merge this function with createNetworkingConfig after daemon supports multiple networks in container create
func (c *containerConfig) connectNetworkingConfig() *network.NetworkingConfig {
var networks []*api.NetworkAttachment
if c.task.Spec.GetContainer() != nil {
networks = c.task.Networks
}
// First network is used during container create. Other networks are used in "docker network connect"
if len(networks) < 2 {
return nil
}
epConfig := make(map[string]*network.EndpointSettings)
for _, na := range networks[1:] {
epConfig[na.Network.Spec.Annotations.Name] = getEndpointConfig(na)
}
return &network.NetworkingConfig{EndpointsConfig: epConfig}
}
func getEndpointConfig(na *api.NetworkAttachment) *network.EndpointSettings {
var ipv4, ipv6 string
for _, addr := range na.Addresses {
ip, _, err := net.ParseCIDR(addr)
if err != nil {
continue
}
if ip.To4() != nil {
ipv4 = ip.String()
continue
}
if ip.To16() != nil {
ipv6 = ip.String()
}
}
return &network.EndpointSettings{
IPAMConfig: &network.EndpointIPAMConfig{
IPv4Address: ipv4,
IPv6Address: ipv6,
},
}
}
func (c *containerConfig) virtualIP(networkID string) string {
if c.task.Endpoint == nil {
return ""
}
for _, eVip := range c.task.Endpoint.VirtualIPs {
// We only support IPv4 VIPs for now.
if eVip.NetworkID == networkID {
vip, _, err := net.ParseCIDR(eVip.Addr)
if err != nil {
return ""
}
return vip.String()
}
}
return ""
}
func (c *containerConfig) serviceConfig() *clustertypes.ServiceConfig {
if len(c.task.Networks) == 0 {
return nil
}
log.Printf("Creating service config in agent for t = %+v", c.task)
svcCfg := &clustertypes.ServiceConfig{
Name: c.task.ServiceAnnotations.Name,
ID: c.task.ServiceID,
VirtualAddresses: make(map[string]*clustertypes.VirtualAddress),
}
for _, na := range c.task.Networks {
svcCfg.VirtualAddresses[na.Network.ID] = &clustertypes.VirtualAddress{
// We support only IPv4 virtual IP for now.
IPv4: c.virtualIP(na.Network.ID),
}
}
if c.task.Endpoint != nil {
for _, ePort := range c.task.Endpoint.Ports {
svcCfg.ExposedPorts = append(svcCfg.ExposedPorts, &clustertypes.PortConfig{
Name: ePort.Name,
Protocol: int32(ePort.Protocol),
TargetPort: ePort.TargetPort,
PublishedPort: ePort.PublishedPort,
})
}
}
return svcCfg
}
// networks returns a list of network names attached to the container. The
// returned names can be used to look up the corresponding network create
// options.
func (c *containerConfig) networks() []string {
var networks []string
for name := range c.networksAttachments {
networks = append(networks, name)
}
return networks
}
func (c *containerConfig) networkCreateRequest(name string) (clustertypes.NetworkCreateRequest, error) {
na, ok := c.networksAttachments[name]
if !ok {
return clustertypes.NetworkCreateRequest{}, errors.New("container: unknown network referenced")
}
options := types.NetworkCreate{
// ID: na.Network.ID,
Driver: na.Network.DriverState.Name,
IPAM: network.IPAM{
Driver: na.Network.IPAM.Driver.Name,
},
Options: na.Network.DriverState.Options,
CheckDuplicate: true,
}
for _, ic := range na.Network.IPAM.Configs {
c := network.IPAMConfig{
Subnet: ic.Subnet,
IPRange: ic.Range,
Gateway: ic.Gateway,
}
options.IPAM.Config = append(options.IPAM.Config, c)
}
return clustertypes.NetworkCreateRequest{na.Network.ID, types.NetworkCreateRequest{Name: name, NetworkCreate: options}}, nil
}

View File

@ -0,0 +1,305 @@
package container
import (
"errors"
"fmt"
"strings"
executorpkg "github.com/docker/docker/daemon/cluster/executor"
"github.com/docker/engine-api/types"
"github.com/docker/swarmkit/agent/exec"
"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/log"
"golang.org/x/net/context"
)
// controller implements agent.Controller against docker's API.
//
// Most operations against docker's API are done through the container name,
// which is unique to the task.
type controller struct {
backend executorpkg.Backend
task *api.Task
adapter *containerAdapter
closed chan struct{}
err error
}
var _ exec.Controller = &controller{}
// newController returns a dockerexec controller for the provided task.
func newController(b executorpkg.Backend, task *api.Task) (*controller, error) {
adapter, err := newContainerAdapter(b, task)
if err != nil {
return nil, err
}
return &controller{
backend: b,
task: task,
adapter: adapter,
closed: make(chan struct{}),
}, nil
}
func (r *controller) Task() (*api.Task, error) {
return r.task, nil
}
// ContainerStatus returns the container-specific status for the task.
func (r *controller) ContainerStatus(ctx context.Context) (*api.ContainerStatus, error) {
ctnr, err := r.adapter.inspect(ctx)
if err != nil {
if isUnknownContainer(err) {
return nil, nil
}
return nil, err
}
return parseContainerStatus(ctnr)
}
// Update takes a recent task update and applies it to the container.
func (r *controller) Update(ctx context.Context, t *api.Task) error {
log.G(ctx).Warnf("task updates not yet supported")
// TODO(stevvooe): While assignment of tasks is idempotent, we do allow
// updates of metadata, such as labelling, as well as any other properties
// that make sense.
return nil
}
// Prepare creates a container and ensures the image is pulled.
//
// If the container has already been created, exec.ErrTaskPrepared is returned.
func (r *controller) Prepare(ctx context.Context) error {
if err := r.checkClosed(); err != nil {
return err
}
// Make sure all the networks that the task needs are created.
if err := r.adapter.createNetworks(ctx); err != nil {
return err
}
// Make sure all the volumes that the task needs are created.
if err := r.adapter.createVolumes(ctx, r.backend); err != nil {
return err
}
for {
if err := r.checkClosed(); err != nil {
return err
}
if err := r.adapter.create(ctx, r.backend); err != nil {
if isContainerCreateNameConflict(err) {
if _, err := r.adapter.inspect(ctx); err != nil {
return err
}
// container is already created. success!
return exec.ErrTaskPrepared
}
if !strings.Contains(err.Error(), "No such image") { // todo: better error detection
return err
}
if err := r.adapter.pullImage(ctx); err != nil {
return err
}
continue // retry to create the container
}
break
}
return nil
}
// Start the container. An error will be returned if the container is already started.
func (r *controller) Start(ctx context.Context) error {
if err := r.checkClosed(); err != nil {
return err
}
ctnr, err := r.adapter.inspect(ctx)
if err != nil {
return err
}
// Detect whether the container has *ever* been started. If so, we don't
// issue the start.
//
// TODO(stevvooe): This is very racy. While reading inspect, another could
// start the process and we could end up starting it twice.
if ctnr.State.Status != "created" {
return exec.ErrTaskStarted
}
if err := r.adapter.start(ctx); err != nil {
return err
}
return nil
}
// Wait on the container to exit.
func (r *controller) Wait(pctx context.Context) error {
if err := r.checkClosed(); err != nil {
return err
}
ctx, cancel := context.WithCancel(pctx)
defer cancel()
c, err := r.adapter.wait(ctx)
if err != nil {
return err
}
<-c
if ctx.Err() != nil {
return ctx.Err()
}
ctnr, err := r.adapter.inspect(ctx)
if err != nil {
// TODO(stevvooe): Need to handle missing container here. It is likely
// that a Wait call with a not found error should result in no waiting
// and no error at all.
return err
}
if ctnr.State.ExitCode != 0 {
var cause error
if ctnr.State.Error != "" {
cause = errors.New(ctnr.State.Error)
}
cstatus, _ := parseContainerStatus(ctnr)
return &exitError{
code: ctnr.State.ExitCode,
cause: cause,
containerStatus: cstatus,
}
}
return nil
}
// Shutdown the container cleanly.
func (r *controller) Shutdown(ctx context.Context) error {
if err := r.checkClosed(); err != nil {
return err
}
if err := r.adapter.shutdown(ctx); err != nil {
if isUnknownContainer(err) || isStoppedContainer(err) {
return nil
}
return err
}
return nil
}
// Terminate the container, with force.
func (r *controller) Terminate(ctx context.Context) error {
if err := r.checkClosed(); err != nil {
return err
}
if err := r.adapter.terminate(ctx); err != nil {
if isUnknownContainer(err) {
return nil
}
return err
}
return nil
}
// Remove the container and its resources.
func (r *controller) Remove(ctx context.Context) error {
if err := r.checkClosed(); err != nil {
return err
}
// It may be necessary to shut down the task before removing it.
if err := r.Shutdown(ctx); err != nil {
if isUnknownContainer(err) {
return nil
}
// This may fail if the task was already shut down.
log.G(ctx).WithError(err).Debug("shutdown failed on removal")
}
// Try removing networks referenced in this task in case this
// task is the last one referencing it
if err := r.adapter.removeNetworks(ctx); err != nil {
if isUnknownContainer(err) {
return nil
}
return err
}
if err := r.adapter.remove(ctx); err != nil {
if isUnknownContainer(err) {
return nil
}
return err
}
return nil
}
// Close the runner and clean up any ephemeral resources.
func (r *controller) Close() error {
select {
case <-r.closed:
return r.err
default:
r.err = exec.ErrControllerClosed
close(r.closed)
}
return nil
}
func (r *controller) checkClosed() error {
select {
case <-r.closed:
return r.err
default:
return nil
}
}
func parseContainerStatus(ctnr types.ContainerJSON) (*api.ContainerStatus, error) {
status := &api.ContainerStatus{
ContainerID: ctnr.ID,
PID: int32(ctnr.State.Pid),
ExitCode: int32(ctnr.State.ExitCode),
}
return status, nil
}
type exitError struct {
code int
cause error
containerStatus *api.ContainerStatus
}
func (e *exitError) Error() string {
if e.cause != nil {
return fmt.Sprintf("task: non-zero exit (%v): %v", e.code, e.cause)
}
return fmt.Sprintf("task: non-zero exit (%v)", e.code)
}
func (e *exitError) ExitCode() int {
return int(e.containerStatus.ExitCode)
}
func (e *exitError) Cause() error {
return e.cause
}

View File

@ -0,0 +1,12 @@
package container
import "fmt"
var (
// ErrImageRequired returned if a task is missing the image definition.
ErrImageRequired = fmt.Errorf("dockerexec: image required")
// ErrContainerDestroyed returned when a container is prematurely destroyed
// during a wait call.
ErrContainerDestroyed = fmt.Errorf("dockerexec: container destroyed")
)

View File

@ -0,0 +1,139 @@
package container
import (
"strings"
executorpkg "github.com/docker/docker/daemon/cluster/executor"
clustertypes "github.com/docker/docker/daemon/cluster/provider"
"github.com/docker/engine-api/types"
"github.com/docker/engine-api/types/network"
networktypes "github.com/docker/libnetwork/types"
"github.com/docker/swarmkit/agent/exec"
"github.com/docker/swarmkit/api"
"golang.org/x/net/context"
)
type executor struct {
backend executorpkg.Backend
}
// NewExecutor returns an executor backed by the given Backend.
func NewExecutor(b executorpkg.Backend) exec.Executor {
return &executor{
backend: b,
}
}
// Describe returns the underlying node description from the docker client.
func (e *executor) Describe(ctx context.Context) (*api.NodeDescription, error) {
info, err := e.backend.SystemInfo()
if err != nil {
return nil, err
}
var plugins []api.PluginDescription
addPlugins := func(typ string, names []string) {
for _, name := range names {
plugins = append(plugins, api.PluginDescription{
Type: typ,
Name: name,
})
}
}
addPlugins("Volume", info.Plugins.Volume)
// Add builtin driver "overlay" (the only builtin multi-host driver) to
// the plugin list by default.
addPlugins("Network", append([]string{"overlay"}, info.Plugins.Network...))
addPlugins("Authorization", info.Plugins.Authorization)
// parse []string labels into a map[string]string
labels := map[string]string{}
for _, l := range info.Labels {
stringSlice := strings.SplitN(l, "=", 2)
// this will take the last value in the list for a given key
// ideally, one shouldn't assign multiple values to the same key
if len(stringSlice) > 1 {
labels[stringSlice[0]] = stringSlice[1]
}
}
description := &api.NodeDescription{
Hostname: info.Name,
Platform: &api.Platform{
Architecture: info.Architecture,
OS: info.OSType,
},
Engine: &api.EngineDescription{
EngineVersion: info.ServerVersion,
Labels: labels,
Plugins: plugins,
},
Resources: &api.Resources{
NanoCPUs: int64(info.NCPU) * 1e9,
MemoryBytes: info.MemTotal,
},
}
return description, nil
}
func (e *executor) Configure(ctx context.Context, node *api.Node) error {
na := node.Attachment
if na == nil {
return nil
}
options := types.NetworkCreate{
Driver: na.Network.DriverState.Name,
IPAM: network.IPAM{
Driver: na.Network.IPAM.Driver.Name,
},
Options: na.Network.DriverState.Options,
CheckDuplicate: true,
}
for _, ic := range na.Network.IPAM.Configs {
c := network.IPAMConfig{
Subnet: ic.Subnet,
IPRange: ic.Range,
Gateway: ic.Gateway,
}
options.IPAM.Config = append(options.IPAM.Config, c)
}
return e.backend.SetupIngress(clustertypes.NetworkCreateRequest{
na.Network.ID,
types.NetworkCreateRequest{
Name: na.Network.Spec.Annotations.Name,
NetworkCreate: options,
},
}, na.Addresses[0])
}
// Controller returns a docker container runner.
func (e *executor) Controller(t *api.Task) (exec.Controller, error) {
ctlr, err := newController(e.backend, t)
if err != nil {
return nil, err
}
return ctlr, nil
}
func (e *executor) SetNetworkBootstrapKeys(keys []*api.EncryptionKey) error {
nwKeys := []*networktypes.EncryptionKey{}
for _, key := range keys {
nwKey := &networktypes.EncryptionKey{
Subsystem: key.Subsystem,
Algorithm: int32(key.Algorithm),
Key: make([]byte, len(key.Key)),
LamportTime: key.LamportTime,
}
copy(nwKey.Key, key.Key)
nwKeys = append(nwKeys, nwKey)
}
e.backend.SetNetworkBootstrapKeys(nwKeys)
return nil
}

daemon/cluster/filters.go
View File

@ -0,0 +1,93 @@
package cluster
import (
"fmt"
"strings"
runconfigopts "github.com/docker/docker/runconfig/opts"
"github.com/docker/engine-api/types/filters"
swarmapi "github.com/docker/swarmkit/api"
)
func newListNodesFilters(filter filters.Args) (*swarmapi.ListNodesRequest_Filters, error) {
accepted := map[string]bool{
"name": true,
"id": true,
"label": true,
"role": true,
"membership": true,
}
if err := filter.Validate(accepted); err != nil {
return nil, err
}
f := &swarmapi.ListNodesRequest_Filters{
Names: filter.Get("name"),
IDPrefixes: filter.Get("id"),
Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")),
}
for _, r := range filter.Get("role") {
if role, ok := swarmapi.NodeRole_value[strings.ToUpper(r)]; ok {
f.Roles = append(f.Roles, swarmapi.NodeRole(role))
} else if r != "" {
return nil, fmt.Errorf("Invalid role filter: '%s'", r)
}
}
for _, a := range filter.Get("membership") {
if membership, ok := swarmapi.NodeSpec_Membership_value[strings.ToUpper(a)]; ok {
f.Memberships = append(f.Memberships, swarmapi.NodeSpec_Membership(membership))
} else if a != "" {
return nil, fmt.Errorf("Invalid membership filter: '%s'", a)
}
}
return f, nil
}
func newListServicesFilters(filter filters.Args) (*swarmapi.ListServicesRequest_Filters, error) {
accepted := map[string]bool{
"name": true,
"id": true,
"label": true,
}
if err := filter.Validate(accepted); err != nil {
return nil, err
}
return &swarmapi.ListServicesRequest_Filters{
Names: filter.Get("name"),
IDPrefixes: filter.Get("id"),
Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")),
}, nil
}
func newListTasksFilters(filter filters.Args) (*swarmapi.ListTasksRequest_Filters, error) {
accepted := map[string]bool{
"name": true,
"id": true,
"label": true,
"service": true,
"node": true,
"desired_state": true,
}
if err := filter.Validate(accepted); err != nil {
return nil, err
}
f := &swarmapi.ListTasksRequest_Filters{
Names: filter.Get("name"),
IDPrefixes: filter.Get("id"),
Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")),
ServiceIDs: filter.Get("service"),
NodeIDs: filter.Get("node"),
}
for _, s := range filter.Get("desired_state") {
if state, ok := swarmapi.TaskState_value[strings.ToUpper(s)]; ok {
f.DesiredStates = append(f.DesiredStates, swarmapi.TaskState(state))
} else if s != "" {
return nil, fmt.Errorf("Invalid desired_state filter: '%s'", s)
}
}
return f, nil
}
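A short usage sketch of the task-filter translation above (the filter values are illustrative):

```go
// Sketch: engine-api filter args translated into a swarmkit task filter.
args := filters.NewArgs()
args.Add("service", "web")
args.Add("desired_state", "running")
f, err := newListTasksFilters(args)
if err != nil {
	// an unknown state such as "sleeping" would be rejected here
}
_ = f // f.ServiceIDs contains "web"; f.DesiredStates contains the RUNNING state
```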

daemon/cluster/helpers.go
View File

@ -0,0 +1,108 @@
package cluster
import (
"fmt"
swarmapi "github.com/docker/swarmkit/api"
"golang.org/x/net/context"
)
func getSwarm(ctx context.Context, c swarmapi.ControlClient) (*swarmapi.Cluster, error) {
rl, err := c.ListClusters(ctx, &swarmapi.ListClustersRequest{})
if err != nil {
return nil, err
}
if len(rl.Clusters) == 0 {
return nil, fmt.Errorf("swarm not found")
}
// TODO: assume one cluster only
return rl.Clusters[0], nil
}
func getNode(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Node, error) {
// GetNode to match via full ID.
rg, err := c.GetNode(ctx, &swarmapi.GetNodeRequest{NodeID: input})
if err != nil {
// If any error (including NotFound), ListNodes to match via full name.
rl, err := c.ListNodes(ctx, &swarmapi.ListNodesRequest{Filters: &swarmapi.ListNodesRequest_Filters{Names: []string{input}}})
if err != nil || len(rl.Nodes) == 0 {
// If any error or 0 result, ListNodes to match via ID prefix.
rl, err = c.ListNodes(ctx, &swarmapi.ListNodesRequest{Filters: &swarmapi.ListNodesRequest_Filters{IDPrefixes: []string{input}}})
}
if err != nil {
return nil, err
}
if len(rl.Nodes) == 0 {
return nil, fmt.Errorf("node %s not found", input)
}
if l := len(rl.Nodes); l > 1 {
return nil, fmt.Errorf("node %s is ambigious (%d matches found)", input, l)
}
return rl.Nodes[0], nil
}
return rg.Node, nil
}
func getService(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Service, error) {
// GetService to match via full ID.
rg, err := c.GetService(ctx, &swarmapi.GetServiceRequest{ServiceID: input})
if err != nil {
// If any error (including NotFound), ListServices to match via full name.
rl, err := c.ListServices(ctx, &swarmapi.ListServicesRequest{Filters: &swarmapi.ListServicesRequest_Filters{Names: []string{input}}})
if err != nil || len(rl.Services) == 0 {
// If any error or 0 result, ListServices to match via ID prefix.
rl, err = c.ListServices(ctx, &swarmapi.ListServicesRequest{Filters: &swarmapi.ListServicesRequest_Filters{IDPrefixes: []string{input}}})
}
if err != nil {
return nil, err
}
if len(rl.Services) == 0 {
return nil, fmt.Errorf("service %s not found", input)
}
if l := len(rl.Services); l > 1 {
return nil, fmt.Errorf("service %s is ambigious (%d matches found)", input, l)
}
return rl.Services[0], nil
}
return rg.Service, nil
}
func getTask(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Task, error) {
// GetTask to match via full ID.
rg, err := c.GetTask(ctx, &swarmapi.GetTaskRequest{TaskID: input})
if err != nil {
// If any error (including NotFound), ListTasks to match via full name.
rl, err := c.ListTasks(ctx, &swarmapi.ListTasksRequest{Filters: &swarmapi.ListTasksRequest_Filters{Names: []string{input}}})
if err != nil || len(rl.Tasks) == 0 {
// If any error or 0 result, ListTasks to match via ID prefix.
rl, err = c.ListTasks(ctx, &swarmapi.ListTasksRequest{Filters: &swarmapi.ListTasksRequest_Filters{IDPrefixes: []string{input}}})
}
if err != nil {
return nil, err
}
if len(rl.Tasks) == 0 {
return nil, fmt.Errorf("task %s not found", input)
}
if l := len(rl.Tasks); l > 1 {
return nil, fmt.Errorf("task %s is ambigious (%d matches found)", input, l)
}
return rl.Tasks[0], nil
}
return rg.Task, nil
}

View File

@ -0,0 +1,36 @@
package provider
import "github.com/docker/engine-api/types"
// NetworkCreateRequest is a request when creating a network.
type NetworkCreateRequest struct {
ID string
types.NetworkCreateRequest
}
// NetworkCreateResponse is a response when creating a network.
type NetworkCreateResponse struct {
ID string `json:"Id"`
}
// VirtualAddress represents a virtual address.
type VirtualAddress struct {
IPv4 string
IPv6 string
}
// PortConfig represents a port configuration.
type PortConfig struct {
Name string
Protocol int32
TargetPort uint32
PublishedPort uint32
}
// ServiceConfig represents a service configuration.
type ServiceConfig struct {
ID string
Name string
VirtualAddresses map[string]*VirtualAddress
ExposedPorts []*PortConfig
}

View File

@ -101,7 +101,7 @@ func (daemon *Daemon) Register(c *container.Container) error {
return nil
}
func (daemon *Daemon) newContainer(name string, config *containertypes.Config, imgID image.ID) (*container.Container, error) {
func (daemon *Daemon) newContainer(name string, config *containertypes.Config, imgID image.ID, managed bool) (*container.Container, error) {
var (
id string
err error
@ -117,6 +117,7 @@ func (daemon *Daemon) newContainer(name string, config *containertypes.Config, i
base := daemon.newBaseContainer(id)
base.Created = time.Now().UTC()
base.Managed = managed
base.Path = entrypoint
base.Args = args //FIXME: de-duplicate from config
base.Config = config

View File

@ -324,6 +324,10 @@ func (daemon *Daemon) updateNetwork(container *container.Container) error {
return nil
}
func errClusterNetworkOnRun(n string) error {
return fmt.Errorf("swarm-scoped network (%s) is not compatible with `docker create` or `docker run`. This network can be only used docker service", n)
}
// updateContainerNetworkSettings update the network settings
func (daemon *Daemon) updateContainerNetworkSettings(container *container.Container, endpointsConfig map[string]*networktypes.EndpointSettings) error {
var (
@ -345,6 +349,9 @@ func (daemon *Daemon) updateContainerNetworkSettings(container *container.Contai
if err != nil {
return err
}
if !container.Managed && n.Info().Dynamic() {
return errClusterNetworkOnRun(networkName)
}
networkName = n.Name()
}
if container.NetworkSettings == nil {

View File

@ -19,8 +19,17 @@ import (
"github.com/opencontainers/runc/libcontainer/label"
)
// ContainerCreate creates a container.
// CreateManagedContainer creates a container that is managed by a Service
func (daemon *Daemon) CreateManagedContainer(params types.ContainerCreateConfig) (types.ContainerCreateResponse, error) {
return daemon.containerCreate(params, true)
}
// ContainerCreate creates a regular container
func (daemon *Daemon) ContainerCreate(params types.ContainerCreateConfig) (types.ContainerCreateResponse, error) {
return daemon.containerCreate(params, false)
}
func (daemon *Daemon) containerCreate(params types.ContainerCreateConfig, managed bool) (types.ContainerCreateResponse, error) {
if params.Config == nil {
return types.ContainerCreateResponse{}, fmt.Errorf("Config cannot be empty in order to create a container")
}
@ -43,7 +52,7 @@ func (daemon *Daemon) ContainerCreate(params types.ContainerCreateConfig) (types
return types.ContainerCreateResponse{Warnings: warnings}, err
}
container, err := daemon.create(params)
container, err := daemon.create(params, managed)
if err != nil {
return types.ContainerCreateResponse{Warnings: warnings}, daemon.imageNotExistToErrcode(err)
}
@ -52,7 +61,7 @@ func (daemon *Daemon) ContainerCreate(params types.ContainerCreateConfig) (types
}
// Create creates a new container from the given configuration with a given name.
func (daemon *Daemon) create(params types.ContainerCreateConfig) (retC *container.Container, retErr error) {
func (daemon *Daemon) create(params types.ContainerCreateConfig, managed bool) (retC *container.Container, retErr error) {
var (
container *container.Container
img *image.Image
@ -76,7 +85,7 @@ func (daemon *Daemon) create(params types.ContainerCreateConfig) (retC *containe
return nil, err
}
if container, err = daemon.newContainer(params.Name, params.Config, imgID); err != nil {
if container, err = daemon.newContainer(params.Name, params.Config, imgID, managed); err != nil {
return nil, err
}
defer func() {

View File

@ -28,6 +28,7 @@ import (
"github.com/docker/docker/daemon/exec"
"github.com/docker/engine-api/types"
containertypes "github.com/docker/engine-api/types/container"
"github.com/docker/libnetwork/cluster"
// register graph drivers
_ "github.com/docker/docker/daemon/graphdriver/register"
dmetadata "github.com/docker/docker/distribution/metadata"
@ -94,6 +95,7 @@ type Daemon struct {
containerd libcontainerd.Client
containerdRemote libcontainerd.Remote
defaultIsolation containertypes.Isolation // Default isolation mode on Windows
clusterProvider cluster.Provider
}
func (daemon *Daemon) restore() error {
@ -344,6 +346,12 @@ func (daemon *Daemon) registerLink(parent, child *container.Container, alias str
return nil
}
// SetClusterProvider sets a component for querying the current cluster state.
func (daemon *Daemon) SetClusterProvider(clusterProvider cluster.Provider) {
daemon.clusterProvider = clusterProvider
daemon.netController.SetClusterProvider(clusterProvider)
}
// NewDaemon sets up everything for the daemon to be able to service
// requests from the webserver.
func NewDaemon(config *Config, registryService registry.Service, containerdRemote libcontainerd.Remote) (daemon *Daemon, err error) {
@ -893,6 +901,10 @@ func (daemon *Daemon) reloadClusterDiscovery(config *Config) error {
return nil
}
if daemon.clusterProvider != nil {
return fmt.Errorf("--cluster-store and --cluster-advertise daemon configurations are incompatible with swarm mode")
}
// enable discovery for the first time if it was not previously enabled
if daemon.discoveryWatcher == nil {
discoveryWatcher, err := initDiscovery(newClusterStore, newAdvertise, config.ClusterOpts)

View File

@ -23,10 +23,12 @@ func (daemon *Daemon) ContainerInspect(name string, size bool, version string) (
case versions.Equal(version, "1.20"):
return daemon.containerInspect120(name)
}
return daemon.containerInspectCurrent(name, size)
return daemon.ContainerInspectCurrent(name, size)
}
func (daemon *Daemon) containerInspectCurrent(name string, size bool) (*types.ContainerJSON, error) {
// ContainerInspectCurrent returns low-level information about a
// container in the most recent API version.
func (daemon *Daemon) ContainerInspectCurrent(name string, size bool) (*types.ContainerJSON, error) {
container, err := daemon.GetContainer(name)
if err != nil {
return nil, err

View File

@ -28,7 +28,7 @@ func addMountPoints(container *container.Container) []types.MountPoint {
// containerInspectPre120 get containers for pre 1.20 APIs.
func (daemon *Daemon) containerInspectPre120(name string) (*types.ContainerJSON, error) {
return daemon.containerInspectCurrent(name, false)
return daemon.ContainerInspectCurrent(name, false)
}
func inspectExecProcessConfig(e *exec.Config) *backend.ExecProcessConfig {

View File

@ -91,6 +91,17 @@ func (daemon *Daemon) Containers(config *types.ContainerListOptions) ([]*types.C
return daemon.reduceContainers(config, daemon.transformContainer)
}
// ListContainersForNode returns the IDs of all containers that match the specified nodeID
func (daemon *Daemon) ListContainersForNode(nodeID string) []string {
var ids []string
for _, c := range daemon.List() {
if c.Config.Labels["com.docker.swarm.node.id"] == nodeID {
ids = append(ids, c.ID)
}
}
return ids
}
func (daemon *Daemon) filterByNameIDMatches(ctx *listContext) []*container.Container {
idSearch := false
names := ctx.filters.Get("name")

View File

@ -5,13 +5,14 @@ import (
"net"
"strings"
netsettings "github.com/docker/docker/daemon/network"
"github.com/Sirupsen/logrus"
clustertypes "github.com/docker/docker/daemon/cluster/provider"
"github.com/docker/docker/errors"
"github.com/docker/docker/runconfig"
"github.com/docker/engine-api/types"
"github.com/docker/engine-api/types/filters"
"github.com/docker/engine-api/types/network"
"github.com/docker/libnetwork"
networktypes "github.com/docker/libnetwork/types"
)
// NetworkControllerEnabled checks if the networking stack is enabled.
@ -92,9 +93,106 @@ func (daemon *Daemon) getAllNetworks() []libnetwork.Network {
return list
}
func isIngressNetwork(name string) bool {
return name == "ingress"
}
var ingressChan = make(chan struct{}, 1)
func ingressWait() func() {
ingressChan <- struct{}{}
return func() { <-ingressChan }
}
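// ingressWait is a one-slot semaphore: the send blocks while another ingress
// operation holds the slot, and the returned func releases it, enabling the
// `defer ingressWait()()` idiom used below.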
// SetupIngress sets up ingress networking.
func (daemon *Daemon) SetupIngress(create clustertypes.NetworkCreateRequest, nodeIP string) error {
ip, _, err := net.ParseCIDR(nodeIP)
if err != nil {
return err
}
go func() {
controller := daemon.netController
controller.AgentInitWait()
if n, err := daemon.GetNetworkByName(create.Name); err == nil && n != nil && n.ID() != create.ID {
if err := controller.SandboxDestroy("ingress-sbox"); err != nil {
logrus.Errorf("Failed to delete stale ingress sandbox: %v", err)
return
}
if err := n.Delete(); err != nil {
logrus.Errorf("Failed to delete stale ingress network %s: %v", n.ID(), err)
return
}
}
if _, err := daemon.createNetwork(create.NetworkCreateRequest, create.ID, true); err != nil {
// If the error is anything other than an already-exists
// error, log it and return.
if _, ok := err.(libnetwork.NetworkNameError); !ok {
logrus.Errorf("Failed creating ingress network: %v", err)
return
}
// Otherwise continue down the call to create or recreate sandbox.
}
n, err := daemon.GetNetworkByID(create.ID)
if err != nil {
logrus.Errorf("Failed getting ingress network by id after creating: %v", err)
return
}
sb, err := controller.NewSandbox("ingress-sbox", libnetwork.OptionIngress())
if err != nil {
logrus.Errorf("Failed creating ingress sanbox: %v", err)
return
}
ep, err := n.CreateEndpoint("ingress-endpoint", libnetwork.CreateOptionIpam(ip, nil, nil, nil))
if err != nil {
logrus.Errorf("Failed creating ingress endpoint: %v", err)
return
}
if err := ep.Join(sb, nil); err != nil {
logrus.Errorf("Failed joining ingress sandbox to ingress endpoint: %v", err)
}
}()
return nil
}
// SetNetworkBootstrapKeys sets the bootstrap keys.
func (daemon *Daemon) SetNetworkBootstrapKeys(keys []*networktypes.EncryptionKey) error {
return daemon.netController.SetKeys(keys)
}
// CreateManagedNetwork creates an agent network.
func (daemon *Daemon) CreateManagedNetwork(create clustertypes.NetworkCreateRequest) error {
_, err := daemon.createNetwork(create.NetworkCreateRequest, create.ID, true)
return err
}
// CreateNetwork creates a network with the given name, driver and other optional parameters
func (daemon *Daemon) CreateNetwork(create types.NetworkCreateRequest) (*types.NetworkCreateResponse, error) {
if runconfig.IsPreDefinedNetwork(create.Name) {
resp, err := daemon.createNetwork(create, "", false)
if err != nil {
return nil, err
}
return resp, err
}
func (daemon *Daemon) createNetwork(create types.NetworkCreateRequest, id string, agent bool) (*types.NetworkCreateResponse, error) {
// If an ingress network creation is already pending, wait here,
// since ingress network creation can be triggered by a node or
// task download from the manager.
if isIngressNetwork(create.Name) {
defer ingressWait()()
}
if runconfig.IsPreDefinedNetwork(create.Name) && !agent {
err := fmt.Errorf("%s is a pre-defined network and cannot be created", create.Name)
return nil, errors.NewRequestForbiddenError(err)
}
@ -134,7 +232,16 @@ func (daemon *Daemon) CreateNetwork(create types.NetworkCreateRequest) (*types.N
if create.Internal {
nwOptions = append(nwOptions, libnetwork.NetworkOptionInternalNetwork())
}
n, err := c.NewNetwork(driver, create.Name, "", nwOptions...)
if agent {
nwOptions = append(nwOptions, libnetwork.NetworkOptionDynamic())
nwOptions = append(nwOptions, libnetwork.NetworkOptionPersist(false))
}
if isIngressNetwork(create.Name) {
nwOptions = append(nwOptions, libnetwork.NetworkOptionIngress())
}
n, err := c.NewNetwork(driver, create.Name, id, nwOptions...)
if err != nil {
return nil, err
}
@ -168,6 +275,17 @@ func getIpamConfig(data []network.IPAMConfig) ([]*libnetwork.IpamConf, []*libnet
return ipamV4Cfg, ipamV6Cfg, nil
}
// UpdateContainerServiceConfig updates a service configuration.
func (daemon *Daemon) UpdateContainerServiceConfig(containerName string, serviceConfig *clustertypes.ServiceConfig) error {
container, err := daemon.GetContainer(containerName)
if err != nil {
return err
}
container.NetworkSettings.Service = serviceConfig
return nil
}
// ConnectContainerToNetwork connects the given container to the given
// network. If either cannot be found, an err is returned. If the
// network cannot be set up, an err is returned.
@ -207,18 +325,29 @@ func (daemon *Daemon) GetNetworkDriverList() map[string]bool {
driver := network.Type()
pluginList[driver] = true
}
// TODO : Replace this with proper libnetwork API
pluginList["overlay"] = true
return pluginList
}
// DeleteManagedNetwork deletes an agent network.
func (daemon *Daemon) DeleteManagedNetwork(networkID string) error {
return daemon.deleteNetwork(networkID, true)
}
// DeleteNetwork destroys a network unless it's one of docker's predefined networks.
func (daemon *Daemon) DeleteNetwork(networkID string) error {
return daemon.deleteNetwork(networkID, false)
}
func (daemon *Daemon) deleteNetwork(networkID string, dynamic bool) error {
nw, err := daemon.FindNetwork(networkID)
if err != nil {
return err
}
if runconfig.IsPreDefinedNetwork(nw.Name()) {
if runconfig.IsPreDefinedNetwork(nw.Name()) && !dynamic {
err := fmt.Errorf("%s is a pre-defined network and cannot be removed", nw.Name())
return errors.NewRequestForbiddenError(err)
}
@ -230,14 +359,7 @@ func (daemon *Daemon) DeleteNetwork(networkID string) error {
return nil
}
// FilterNetworks returns a list of networks filtered by the given arguments.
// It returns an error if the filters are not included in the list of accepted filters.
func (daemon *Daemon) FilterNetworks(netFilters filters.Args) ([]libnetwork.Network, error) {
if netFilters.Len() != 0 {
if err := netFilters.Validate(netsettings.AcceptedFilters); err != nil {
return nil, err
}
}
nwList := daemon.getAllNetworks()
return netsettings.FilterNetworks(nwList, netFilters)
// GetNetworks returns a list of all networks
func (daemon *Daemon) GetNetworks() []libnetwork.Network {
return daemon.getAllNetworks()
}

View File

@ -1,6 +1,7 @@
package network
import (
clustertypes "github.com/docker/docker/daemon/cluster/provider"
networktypes "github.com/docker/engine-api/types/network"
"github.com/docker/go-connections/nat"
)
@ -14,6 +15,7 @@ type Settings struct {
LinkLocalIPv6Address string
LinkLocalIPv6PrefixLen int
Networks map[string]*networktypes.EndpointSettings
Service *clustertypes.ServiceConfig
Ports nat.PortMap
SandboxKey string
SecondaryIPAddresses []networktypes.Address

View File

@ -1,6 +1,10 @@
package daemon
import "time"
import (
"time"
"golang.org/x/net/context"
)
// ContainerWait stops processing until the given container is
// stopped. If the container is not found, an error is returned. On a
@ -15,3 +19,14 @@ func (daemon *Daemon) ContainerWait(name string, timeout time.Duration) (int, er
return container.WaitStop(timeout)
}
// ContainerWaitWithContext returns a channel on which the container's exit
// code is sent when it stops. The wait can be cancelled through the context.
func (daemon *Daemon) ContainerWaitWithContext(ctx context.Context, name string) (<-chan int, error) {
container, err := daemon.GetContainer(name)
if err != nil {
return nil, err
}
return container.WaitWithContext(ctx), nil
}
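A minimal caller-side sketch (the `waitFor` helper and the 30-second timeout are illustrative, not part of this change), showing how the context bounds the wait:

```go
// Sketch: bound a container wait with a timeout via the context.
func waitFor(d *Daemon, name string) (int, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	ch, err := d.ContainerWaitWithContext(ctx, name)
	if err != nil {
		return 0, err // e.g. no such container
	}
	select {
	case code := <-ch:
		return code, nil
	case <-ctx.Done():
		return 0, ctx.Err()
	}
}
```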

File diff suppressed because it is too large

View File

@ -86,3 +86,23 @@ You start the Docker daemon with the command line. How you start the daemon affe
* [volume_inspect](volume_inspect.md)
* [volume_ls](volume_ls.md)
* [volume_rm](volume_rm.md)
### Swarm node commands
* [node_accept](node_accept.md)
* [node_reject](node_reject.md)
* [node_promote](node_promote.md)
* [node_demote](node_demote.md)
* [node_inspect](node_inspect.md)
* [node_update](node_update.md)
* [node_tasks](node_tasks.md)
* [node_ls](node_ls.md)
* [node_rm](node_rm.md)
### Swarm management commands
* [swarm init](swarm_init.md)
* [swarm join](swarm_join.md)
* [swarm leave](swarm_leave.md)
* [swarm update](swarm_update.md)

View File

@ -37,7 +37,7 @@ available on the volume where `/var/lib/docker` is mounted.
## Display Docker system information
Here is a sample output for a daemon running on Ubuntu, using the overlay
storage driver:
storage driver and a node that is part of a 2-node Swarm cluster:
$ docker -D info
Containers: 14
@ -53,6 +53,11 @@ storage driver:
Plugins:
Volume: local
Network: bridge null host
Swarm:
NodeID: 0gac67oclbxq7
IsManager: Yes
Managers: 2
Nodes: 2
Kernel Version: 4.4.0-21-generic
Operating System: Ubuntu 16.04 LTS
OSType: linux

View File

@ -10,15 +10,15 @@ parent = "smn_cli"
# inspect
Usage: docker inspect [OPTIONS] CONTAINER|IMAGE [CONTAINER|IMAGE...]
Usage: docker inspect [OPTIONS] CONTAINER|IMAGE|TASK [CONTAINER|IMAGE|TASK...]
Return low-level information on a container or image
Return low-level information on a container, image or task
-f, --format="" Format the output using the given go template
--help Print usage
--type=container|image Return JSON for specified type, permissible
values are "image" or "container"
-s, --size Display total file sizes if the type is container
-f, --format="" Format the output using the given go template
--help Print usage
--type=container|image|task Return JSON for specified type, permissible
values are "image" or "container" or "task"
-s, --size Display total file sizes if the type is container
By default, this will render all results in a JSON array. If the container and
image have the same name, this will return container JSON for unspecified type.
@ -47,6 +47,10 @@ straightforward manner.
$ docker inspect --format='{{.LogPath}}' $INSTANCE_ID
**Get a Task's image name:**
$ docker inspect --format='{{.Container.Spec.Image}}' $INSTANCE_ID
**List All Port Bindings:**
One can loop over arrays and maps in the results to produce simple text

View File

@ -0,0 +1,28 @@
<!--[metadata]>
+++
title = "node accept"
description = "The node accept command description and usage"
keywords = ["node, accept"]
[menu.main]
parent = "smn_cli"
+++
<![end-metadata]-->
# node accept
Usage: docker node accept NODE [NODE...]
Accept a node in the swarm
Accept a node into the swarm. This command targets a docker engine that is a manager in the swarm cluster.
```bash
$ docker node accept <node name>
```
## Related information
* [node reject](node_reject.md)
* [node promote](node_promote.md)
* [node demote](node_demote.md)

View File

@ -0,0 +1,28 @@
<!--[metadata]>
+++
title = "node demote"
description = "The node demote command description and usage"
keywords = ["node, demote"]
[menu.main]
parent = "smn_cli"
+++
<![end-metadata]-->
# node demote
Usage: docker node demote NODE [NODE...]
Demote a node from manager in the swarm
Demotes an existing manager so that it is no longer a manager. This command targets a docker engine that is a manager in the swarm cluster.
```bash
$ docker node demote <node name>
```
## Related information
* [node accept](node_accept.md)
* [node reject](node_reject.md)
* [node promote](node_promote.md)

View File

@ -0,0 +1,108 @@
<!--[metadata]>
+++
title = "node inspect"
description = "The node inspect command description and usage"
keywords = ["node, inspect"]
[menu.main]
parent = "smn_cli"
+++
<![end-metadata]-->
**Warning:** this command is part of the Swarm management feature introduced in Docker 1.12, and might be subject to non backward-compatible changes.
# node inspect
Usage: docker node inspect [OPTIONS] self|NODE [NODE...]
Return low-level information on a node
-f, --format= Format the output using the given go template.
--help Print usage
-p, --pretty Print the information in a human friendly format.
Returns information about a node. By default, this command renders all results
in a JSON array. You can specify an alternate format to execute a
given template for each result. Go's
[text/template](http://golang.org/pkg/text/template/) package describes all the
details of the format.
Example output:
$ docker node inspect swarm-manager
[
{
"ID": "0gac67oclbxq7",
"Version": {
"Index": 2028
},
"CreatedAt": "2016-06-06T20:49:32.720047494Z",
"UpdatedAt": "2016-06-07T00:23:31.207632893Z",
"Spec": {
"Role": "MANAGER",
"Membership": "ACCEPTED",
"Availability": "ACTIVE"
},
"Description": {
"Hostname": "swarm-manager",
"Platform": {
"Architecture": "x86_64",
"OS": "linux"
},
"Resources": {
"NanoCPUs": 1000000000,
"MemoryBytes": 1044250624
},
"Engine": {
"EngineVersion": "1.12.0",
"Labels": {
"provider": "virtualbox"
}
}
},
"Status": {
"State": "READY"
},
"Manager": {
"Raft": {
"RaftID": 2143745093569717375,
"Addr": "192.168.99.118:4500",
"Status": {
"Leader": true,
"Reachability": "REACHABLE"
}
}
},
"Attachment": {},
}
]
$ docker node inspect --format '{{ .Manager.Raft.Status.Leader }}' self
false
$ docker node inspect --pretty self
ID: 2otfhz83efcc7
Hostname: ad960a848573
Status:
State: Ready
Availability: Active
Manager Status:
Address: 172.17.0.2:2377
Raft status: Reachable
Leader: Yes
Platform:
Operating System: linux
Architecture: x86_64
Resources:
CPUs: 4
Memory: 7.704 GiB
Plugins:
Network: overlay, bridge, null, host, overlay
Volume: local
Engine Version: 1.12.0
## Related information
* [node update](node_update.md)
* [node tasks](node_tasks.md)
* [node ls](node_ls.md)
* [node rm](node_rm.md)

View File

@ -0,0 +1,89 @@
<!--[metadata]>
+++
title = "node ls"
description = "The node ls command description and usage"
keywords = ["node, list"]
[menu.main]
parent = "smn_cli"
+++
<![end-metadata]-->
**Warning:** this command is part of the Swarm management feature introduced in Docker 1.12, and might be subject to non backward-compatible changes.
# node ls
Usage: docker node ls [OPTIONS]
List nodes in the swarm
Aliases:
ls, list
Options:
-f, --filter value Filter output based on conditions provided
--help Print usage
-q, --quiet Only display IDs
Lists all the nodes that the Docker Swarm manager knows about. You can filter using the `-f` or `--filter` flag. Refer to the [filtering](#filtering) section for more information about available filter options.
Example output:
```bash
$ docker node ls
ID             NAME           STATUS  AVAILABILITY  MANAGER STATUS  LEADER
0gac67oclbxq   swarm-master   Ready   Active        Reachable       Yes
0pwvm3ve66q7   swarm-node-02  Ready   Active
15xwihgw71aw * swarm-node-01  Ready   Active        Reachable
```
## Filtering
The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there is more
than one filter, pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`).
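For instance, a minimal sketch combining a name filter with a label filter (the filter values here are illustrative):

```bash
$ docker node ls --filter "name=swarm" --filter "label=foo"
```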
The currently supported filters are:
* name
* id
* label
### name
The `name` filter matches on all or part of a node's name.
The following filter matches the node whose name is `swarm-master`.
```bash
$ docker node ls -f name=swarm-master
ID             NAME          STATUS  AVAILABILITY  MANAGER STATUS  LEADER
0gac67oclbxq * swarm-master  Ready   Active        Reachable       Yes
```
### id
The `id` filter matches all or part of a node's id.
```bash
$ docker node ls -f id=0
ID             NAME           STATUS  AVAILABILITY  MANAGER STATUS  LEADER
0gac67oclbxq * swarm-master   Ready   Active        Reachable       Yes
0pwvm3ve66q7   swarm-node-02  Ready   Active
```
### label
The `label` filter matches nodes based on the presence of a `label` alone or a `label` and a
value.
The following filter matches nodes with the `foo` label regardless of its value.
```bash
$ docker node ls -f "label=foo"
ID             NAME           STATUS  AVAILABILITY  MANAGER STATUS  LEADER
15xwihgw71aw * swarm-node-01  Ready   Active        Reachable
```
## Related information
* [node inspect](node_inspect.md)
* [node update](node_update.md)
* [node tasks](node_tasks.md)
* [node rm](node_rm.md)


@ -0,0 +1,28 @@
<!--[metadata]>
+++
title = "node promote"
description = "The node promote command description and usage"
keywords = ["node, promote"]
[menu.main]
parent = "smn_cli"
+++
<![end-metadata]-->
**Warning:** this command is part of the Swarm management feature introduced in Docker 1.12, and might be subject to non backward-compatible changes.
# node promote
Usage: docker node promote NODE [NODE...]
Promote a node to manager in the swarm
Promotes a node that is pending promotion to manager. This command targets a docker engine that is a manager in the swarm cluster.
```bash
$ docker node promote <node name>
```
## Related information
* [node accept](node_accept.md)
* [node reject](node_reject.md)
* [node demote](node_demote.md)


@ -0,0 +1,28 @@
<!--[metadata]>
+++
title = "node reject"
description = "The node reject command description and usage"
keywords = ["node, reject"]
[menu.main]
parent = "smn_cli"
+++
<![end-metadata]-->
**Warning:** this command is part of the Swarm management feature introduced in Docker 1.12, and might be subject to non backward-compatible changes.
# node reject
Usage: docker node reject NODE [NODE...]
Reject a node from the swarm
Reject a node from joining the swarm. This command targets a docker engine that is a manager in the swarm cluster.
```bash
$ docker node reject <node name>
```
## Related information
* [node accept](node_accept.md)
* [node promote](node_promote.md)
* [node demote](node_demote.md)


@ -0,0 +1,38 @@
<!--[metadata]>
+++
title = "node rm"
description = "The node rm command description and usage"
keywords = ["node, remove"]
[menu.main]
parent = "smn_cli"
+++
<![end-metadata]-->
**Warning:** this command is part of the Swarm management feature introduced in Docker 1.12, and might be subject to non backward-compatible changes.
# node rm
Usage: docker node rm NODE [NODE...]
Remove a node from the swarm
Aliases:
rm, remove
Options:
--help Print usage
Removes the specified nodes from the Swarm.
Example output:
```bash
$ docker node rm swarm-node-02
Node swarm-node-02 removed from Swarm
```
## Related information
* [node inspect](node_inspect.md)
* [node update](node_update.md)
* [node tasks](node_tasks.md)
* [node ls](node_ls.md)


@ -0,0 +1,94 @@
<!--[metadata]>
+++
title = "node tasks"
description = "The node tasks command description and usage"
keywords = ["node, tasks"]
[menu.main]
parent = "smn_cli"
+++
<![end-metadata]-->
**Warning:** this command is part of the Swarm management feature introduced in Docker 1.12, and might be subject to non backward-compatible changes.
# node tasks
Usage: docker node tasks [OPTIONS] NODE
List tasks running on a node
Options:
-a, --all Display all instances
-f, --filter value Filter output based on conditions provided
--help Print usage
-n, --no-resolve Do not map IDs to Names
Lists all the tasks on a node that Docker knows about. You can filter using the `-f` or `--filter` flag. Refer to the [filtering](#filtering) section for more information about available filter options.
Example output:
```bash
$ docker node tasks swarm-master
ID                         NAME     SERVICE  IMAGE        DESIRED STATE  LAST STATE       NODE
dx2g0fe3zsdb6y6q453f8dqw2  redis.1  redis    redis:3.0.6  RUNNING        RUNNING 2 hours  swarm-master
f33pcf8lwhs4c1t4kq8szwzta  redis.4  redis    redis:3.0.6  RUNNING        RUNNING 2 hours  swarm-master
5v26yzixl3one3ptjyqqbd0ro  redis.5  redis    redis:3.0.6  RUNNING        RUNNING 2 hours  swarm-master
adcaphlhsfr30d47lby6walg6  redis.8  redis    redis:3.0.6  RUNNING        RUNNING 2 hours  swarm-master
chancjvk9tex6768uzzacslq2  redis.9  redis    redis:3.0.6  RUNNING        RUNNING 2 hours  swarm-master
```
## Filtering
The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there is more
than one filter, pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`).
The currently supported filters are:
* name
* id
* label
* desired_state
### name
The `name` filter matches on all or part of a task's name.
The following filter matches all tasks with a name containing the `redis` string.
```bash
$ docker node tasks -f name=redis swarm-master
ID                         NAME     SERVICE  IMAGE        DESIRED STATE  LAST STATE       NODE
dx2g0fe3zsdb6y6q453f8dqw2  redis.1  redis    redis:3.0.6  RUNNING        RUNNING 2 hours  swarm-master
f33pcf8lwhs4c1t4kq8szwzta  redis.4  redis    redis:3.0.6  RUNNING        RUNNING 2 hours  swarm-master
5v26yzixl3one3ptjyqqbd0ro  redis.5  redis    redis:3.0.6  RUNNING        RUNNING 2 hours  swarm-master
adcaphlhsfr30d47lby6walg6  redis.8  redis    redis:3.0.6  RUNNING        RUNNING 2 hours  swarm-master
chancjvk9tex6768uzzacslq2  redis.9  redis    redis:3.0.6  RUNNING        RUNNING 2 hours  swarm-master
```
### id
The `id` filter matches a task's id.
```bash
$ docker node tasks -f id=f33pcf8lwhs4c1t4kq8szwzta swarm-master
ID                         NAME     SERVICE  IMAGE        DESIRED STATE  LAST STATE       NODE
f33pcf8lwhs4c1t4kq8szwzta  redis.4  redis    redis:3.0.6  RUNNING        RUNNING 2 hours  swarm-master
```
### label
The `label` filter matches tasks based on the presence of a `label` alone or a `label` and a
value.
The following filter matches tasks with the `usage` label regardless of its value.
```bash
$ docker node tasks -f "label=usage"
ID NAME SERVICE IMAGE DESIRED STATE LAST STATE NODE
dx2g0fe3zsdb6y6q453f8dqw2 redis.1 redis redis:3.0.6 RUNNING RUNNING 2 hours swarm-master
f33pcf8lwhs4c1t4kq8szwzta redis.4 redis redis:3.0.6 RUNNING RUNNING 2 hours swarm-master
```
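### desired_state
The `desired_state` filter matches tasks by their desired state. A minimal sketch, assuming `running` is an accepted value for this filter (the sample outputs above show tasks whose desired state is `RUNNING`):

```bash
$ docker node tasks -f desired_state=running swarm-master
```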
## Related information
* [node inspect](node_inspect.md)
* [node update](node_update.md)
* [node ls](node_ls.md)
* [node rm](node_rm.md)


@ -0,0 +1,26 @@
<!--[metadata]>
+++
title = "node update"
description = "The node update command description and usage"
keywords = ["resources, update, dynamically"]
[menu.main]
parent = "smn_cli"
+++
<![end-metadata]-->
**Warning:** this command is part of the Swarm management feature introduced in Docker 1.12, and might be subject to non backward-compatible changes.
# node update
Usage: docker node update [OPTIONS] NODE
Update a node
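For example, a node's availability can be changed with the `--availability` flag (the same operation shown in the drain-node tutorial); setting it to `drain` stops the node from receiving new tasks:

```bash
$ docker node update --availability drain worker1
worker1
```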
## Related information
* [node inspect](node_inspect.md)
* [node tasks](node_tasks.md)
* [node ls](node_ls.md)
* [node rm](node_rm.md)


@ -0,0 +1,69 @@
<!--[metadata]>
+++
title = "swarm init"
description = "The swarm init command description and usage"
keywords = ["swarm, init"]
[menu.main]
parent = "smn_cli"
+++
<![end-metadata]-->
**Warning:** this command is part of the Swarm management feature introduced in Docker 1.12, and might be subject to non backward-compatible changes.
# swarm init
Usage: docker swarm init [OPTIONS]
Initialize a Swarm.
Options:
--auto-accept value Acceptance policy (default [worker,manager])
--force-new-cluster Force create a new cluster from current state.
--help Print usage
--listen-addr value Listen address (default 0.0.0.0:2377)
--secret string Set secret value needed to accept nodes into cluster
Initialize a Swarm cluster. The docker engine targeted by this command becomes a manager
in the newly created one-node Swarm cluster.
```bash
$ docker swarm init --listen-addr 192.168.99.121:2377
Initializing a new swarm
$ docker node ls
ID NAME STATUS AVAILABILITY/MEMBERSHIP MANAGER STATUS LEADER
3l1f6uzcuoa3 * swarm-master READY ACTIVE REACHABLE Yes
```
### `--auto-accept value`
This flag controls node acceptance into the cluster. By default, both `worker` and `manager`
nodes are auto-accepted by the cluster. This can be changed by specifying what kinds of nodes
can be auto-accepted into the cluster. If auto-accept is not turned on, then
[node accept](node_accept.md) can be used to explicitly accept a node into the cluster.
For example, the following initializes a cluster with auto-acceptance of workers, but not managers:
```bash
$ docker swarm init --listen-addr 192.168.99.121:2377 --auto-accept worker
Initializing a new swarm
```
### `--force-new-cluster`
This flag forces an existing node that was part of a lost quorum to restart as a single-node manager without losing its data.
### `--listen-addr value`
The node listens for inbound Swarm manager traffic on this `IP:PORT`.
### `--secret string`
Secret value needed to accept nodes into the Swarm
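For example, a sketch of initializing a Swarm that requires a secret before nodes are accepted (the secret value here is illustrative):

```bash
$ docker swarm init --listen-addr 192.168.99.121:2377 --secret my-cluster-secret
Initializing a new swarm
```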
## Related information
* [swarm join](swarm_join.md)
* [swarm leave](swarm_leave.md)
* [swarm update](swarm_update.md)


@ -0,0 +1,68 @@
<!--[metadata]>
+++
title = "swarm join"
description = "The swarm join command description and usage"
keywords = ["swarm, join"]
[menu.main]
parent = "smn_cli"
+++
<![end-metadata]-->
**Warning:** this command is part of the Swarm management feature introduced in Docker 1.12, and might be subject to non backward-compatible changes.
# swarm join
Usage: docker swarm join [OPTIONS] HOST:PORT
Join a Swarm as a node and/or manager.
Options:
--help Print usage
--listen-addr value Listen address (default 0.0.0.0:2377)
--manager Try joining as a manager.
--secret string Secret for node acceptance
Join a node to a Swarm cluster. If the `--manager` flag is specified, the docker engine
targeted by this command becomes a `manager`. If it is not specified, it becomes a `worker`.
### Join a node to swarm as a manager
```bash
$ docker swarm join --manager --listen-addr 192.168.99.122:2377 192.168.99.121:2377
This node is attempting to join a Swarm as a manager.
$ docker node ls
ID NAME STATUS AVAILABILITY/MEMBERSHIP MANAGER STATUS LEADER
2fg70txcrde2 swarm-node-01 READY ACTIVE REACHABLE
3l1f6uzcuoa3 * swarm-master READY ACTIVE REACHABLE Yes
```
### Join a node to swarm as a worker
```bash
$ docker swarm join --listen-addr 192.168.99.123:2377 192.168.99.121:2377
This node is attempting to join a Swarm.
$ docker node ls
ID NAME STATUS AVAILABILITY/MEMBERSHIP MANAGER STATUS LEADER
04zm7ue1fd1q swarm-node-02 READY ACTIVE
2fg70txcrde2 swarm-node-01 READY ACTIVE REACHABLE
3l1f6uzcuoa3 * swarm-master READY ACTIVE REACHABLE Yes
```
### `--manager`
Joins the node as a manager
### `--listen-addr value`
The node listens for inbound Swarm manager traffic on this `IP:PORT`.
### `--secret string`
Secret value required for nodes to join the swarm
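If the Swarm was initialized with a secret, a joining node must present it. A minimal sketch (the secret value is illustrative and must match the one set with `swarm init` or `swarm update`):

```bash
$ docker swarm join --secret my-cluster-secret --listen-addr 192.168.99.123:2377 192.168.99.121:2377
This node is attempting to join a Swarm.
```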
## Related information
* [swarm init](swarm_init.md)
* [swarm leave](swarm_leave.md)
* [swarm update](swarm_update.md)


@ -0,0 +1,52 @@
<!--[metadata]>
+++
title = "swarm leave"
description = "The swarm leave command description and usage"
keywords = ["swarm, leave"]
[menu.main]
parent = "smn_cli"
+++
<![end-metadata]-->
**Warning:** this command is part of the Swarm management feature introduced in Docker 1.12, and might be subject to non backward-compatible changes.
# swarm leave
Usage: docker swarm leave
Leave the Swarm.
Options:
--help Print usage
This command causes the node to leave the swarm.
On a manager node:
```bash
$ docker node ls
ID NAME STATUS AVAILABILITY/MEMBERSHIP MANAGER STATUS LEADER
04zm7ue1fd1q swarm-node-02 READY ACTIVE
2fg70txcrde2 swarm-node-01 READY ACTIVE REACHABLE
3l1f6uzcuoa3 * swarm-master READY ACTIVE REACHABLE Yes
```
On a worker node:
```bash
$ docker swarm leave
Node left the default swarm.
```
On a manager node:
```bash
$ docker node ls
ID NAME STATUS AVAILABILITY/MEMBERSHIP MANAGER STATUS LEADER
04zm7ue1fd1q swarm-node-02 DOWN ACTIVE
2fg70txcrde2 swarm-node-01 READY ACTIVE REACHABLE
3l1f6uzcuoa3 * swarm-master READY ACTIVE REACHABLE Yes
```
## Related information
* [swarm init](swarm_init.md)
* [swarm join](swarm_join.md)
* [swarm update](swarm_update.md)


@ -0,0 +1,37 @@
<!--[metadata]>
+++
title = "swarm update"
description = "The swarm update command description and usage"
keywords = ["swarm, update"]
[menu.main]
parent = "smn_cli"
+++
<![end-metadata]-->
**Warning:** this command is part of the Swarm management feature introduced in Docker 1.12, and might be subject to non backward-compatible changes.
# swarm update
Usage: docker swarm update [OPTIONS]
Update the Swarm.
Options:
--auto-accept value Acceptance policy (default [worker,manager])
--help Print usage
--secret string Set secret value needed to accept nodes into cluster
Updates a Swarm cluster with new parameter values. This command must target a manager node.
```bash
$ docker swarm update --auto-accept manager
```
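Flags can be combined in a single update; for example, a sketch that rotates the acceptance secret while limiting auto-acceptance to workers (the secret value here is illustrative):

```bash
$ docker swarm update --secret new-cluster-secret --auto-accept worker
```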
## Related information
* [swarm init](swarm_init.md)
* [swarm join](swarm_join.md)
* [swarm leave](swarm_leave.md)

docs/swarm/index.md

@ -0,0 +1,79 @@
<!--[metadata]>
+++
title = "Swarm overview"
description = "Docker Swarm overview"
keywords = ["docker, container, cluster, swarm"]
[menu.main]
identifier="swarm_overview"
parent="engine_swarm"
weight="1"
advisory = "rc"
+++
<![end-metadata]-->
# Docker Swarm overview
To use this version of Swarm, install the Docker Engine `v1.12.0-rc1` or later
from the [Docker releases GitHub
repository](https://github.com/docker/docker/releases). Alternatively, install
the latest Docker for Mac or Docker for Windows Beta.
Docker Engine 1.12 includes Docker Swarm for natively managing a cluster of
Docker Engines called a Swarm. Use the Docker CLI to create a Swarm, deploy
application services to the Swarm, and manage the Swarm behavior.
If you're using a Docker version prior to `v1.12.0-rc1`, see [Docker
Swarm](https://docs.docker.com/swarm).
## Feature highlights
* **Cluster management integrated with Docker Engine:** Use the Docker Engine
CLI to create a Swarm of Docker Engines where you can deploy application
services. You don't need additional orchestration software to create or manage
a Swarm.
* **Decentralized design:** Instead of handling differentiation between node
roles at deployment time, Swarm handles any specialization at runtime. You can
deploy both kinds of nodes, managers and workers, using the Docker Engine.
This means you can build an entire Swarm from a single disk image.
* **Declarative service model:** Swarm uses a declarative syntax to let you
define the desired state of the various services in your application stack.
For example, you might describe an application composed of a web front end
service with message queueing services and a database backend.
* **Desired state reconciliation:** Swarm constantly monitors the cluster state
and reconciles any differences between the actual state and your expressed
desired state.
* **Multi-host networking:** You can specify an overlay network for your
application. Swarm automatically assigns addresses to the containers on the
overlay network when it initializes or updates the application.
* **Service discovery:** Swarm assigns each service a unique DNS name and load
balances running containers. Each Swarm has an embedded DNS server through
which you can query every container in the cluster.
* **Load balancing:** Using Swarm, you can expose the ports for services to an
external load balancer. Internally, Swarm lets you specify how to distribute
service containers between nodes.
* **Secure by default:** Each node in the Swarm enforces TLS mutual
authentication and encryption to secure communications between itself and all
other nodes. You have the option to use self-signed root certificates or
certificates from a custom root CA.
* **Scaling:** For each service, you can declare the number of instances you
want to run. When you scale up or down, Swarm automatically adapts by adding
or removing instances of the service to maintain the desired state.
* **Rolling updates:** At rollout time you can apply service updates to nodes
incrementally. Swarm lets you control the delay between service deployment to
different sets of nodes. If anything goes wrong, you can roll back an instance
of a service.
## What's next?
* Learn Swarm [key concepts](key-concepts.md).
* Get started with the [Swarm tutorial](swarm-tutorial/index.md).


@ -0,0 +1,85 @@
<!--[metadata]>
+++
title = "Swarm key concepts"
description = "Introducing key concepts for Docker Swarm"
keywords = ["docker, container, cluster, swarm"]
[menu.main]
identifier="swarm-concepts"
parent="engine_swarm"
weight="2"
advisory = "rc"
+++
<![end-metadata]-->
# Docker Swarm key concepts
Building upon the core features of Docker Engine, Docker Swarm enables you to
create a Swarm of Docker Engines and orchestrate services to run in the Swarm.
This topic describes key concepts to help you begin using Docker Swarm.
## Swarm
**Docker Swarm** is the name for the cluster management and orchestration features
embedded in the Docker Engine.
A **Swarm** is a cluster of Docker Engines where you deploy a set of application
services. When you deploy an application to a Swarm, you specify the desired
state of the services, such as which services to run and how many instances of
those services. The Swarm takes care of all orchestration duties required to
keep the services running in the desired state.
## Node
A **node** is an active instance of the Docker Engine in the Swarm.
When you deploy your application to a Swarm, **manager nodes** accept the
service definition that describes the Swarm's desired state. Manager nodes also
perform the orchestration and cluster management functions required to maintain
the desired state of the Swarm. For example, when a manager node receives notice
to deploy a web server, it dispatches the service tasks to worker nodes.
By default the Docker Engine starts one manager node for a Swarm, but as you
scale you can add more managers to make the cluster more fault-tolerant. If you
require high availability Swarm management, Docker recommends three or five
managers in your cluster.
Because Swarm manager nodes share data using Raft, there must be an odd number
of managers. The Swarm cluster can continue functioning in the face of up to
`(N-1)/2` failures, where `N` is the number of manager nodes; for example, a
five-manager cluster tolerates the loss of two managers. More than five
managers is likely to degrade cluster performance and is not recommended.
**Worker nodes** receive and execute tasks dispatched from manager nodes. By
default manager nodes are also worker nodes, but you can configure managers to
be manager-only nodes.
## Services and tasks
A **service** is the definition of how to run the various tasks that make up
your application. For example, you may create a service that deploys a Redis
image in your Swarm.
A **task** is the atomic scheduling unit of Swarm. For example a task may be to
schedule a Redis container to run on a worker node.
## Service types
For **replicated services**, Swarm deploys a specific number of replica tasks
based upon the scale you set in the desired state.
For **global services**, Swarm runs one task for the service on every available
node in the cluster.
## Load balancing
Swarm uses **ingress load balancing** to expose the services you want to make
available externally to the Swarm. Swarm can automatically assign the service a
**PublishedPort** or you can configure a PublishedPort for the service in the
30000-32767 range. External components, such as cloud load balancers, can access
the service on the PublishedPort of any node in the cluster, even if the node is
not currently running the service.
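As a minimal sketch of how a PublishedPort might be set when creating a service, assuming the service create command accepts a `--publish PublishedPort:TargetPort` mapping (the flag, port values, and `nginx` image here are illustrative):

```bash
$ docker service create --name my-web --publish 30000:80 nginx
```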
Swarm has an internal DNS component that automatically assigns each service in
the Swarm a DNS entry. Swarm uses **internal load balancing** to distribute
requests among services within the cluster based upon the services' DNS name.

docs/swarm/menu.md

@ -0,0 +1,21 @@
<!--[metadata]>
+++
title = "Manage a Swarm (1.12 RC)"
description = "How to use Docker Swarm to create and manage Docker Engine clusters"
keywords = [" docker, documentation, developer, "]
[menu.main]
identifier = "engine_swarm"
parent = "engine_use"
weight = 0
advisory = "rc"
+++
<![end-metadata]-->
## Use Docker Swarm to create and manage clusters of Docker Engines, called Swarms
This section contains the following topics:
* [Docker Swarm overview](index.md)
* [Docker Swarm key concepts](key-concepts.md)
* [Getting Started with Docker Swarm](swarm-tutorial/index.md)


@ -0,0 +1,64 @@
<!--[metadata]>
+++
title = "Add nodes to the Swarm"
description = "Add nodes to the Swarm"
keywords = ["tutorial, cluster management, swarm"]
[menu.main]
identifier="add-nodes"
parent="swarm-tutorial"
weight=13
advisory = "rc"
+++
<![end-metadata]-->
# Add nodes to the Swarm
Once you've [created a Swarm](create-swarm.md) with a manager node, you're ready
to add worker nodes.
1. Open a terminal and ssh into the machine where you want to run a worker node.
This tutorial uses the name `worker1`.
2. Run `docker swarm join MANAGER-IP:PORT` to create a worker node joined to the
existing Swarm. Replace MANAGER-IP with the address of the manager node and PORT
with the port where the manager listens.
In the tutorial, the following command joins `worker1` to the Swarm on `manager1`:
```
$ docker swarm join 192.168.99.100:2377
This node joined a Swarm as a worker.
```
3. Open a terminal and ssh into the machine where you want to run a second
worker node. This tutorial uses the name `worker2`.
4. Run `docker swarm join MANAGER-IP:PORT` to create a worker node joined to
the existing Swarm. Replace MANAGER-IP with the address of the manager node and
PORT with the port where the manager listens.
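The command is the same one you ran for `worker1`; with the tutorial's manager address it would be:
```
$ docker swarm join 192.168.99.100:2377
This node joined a Swarm as a worker.
```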
5. Open a terminal and ssh into the machine where the manager node runs and run
the `docker node ls` command to see the worker nodes:
```bash
$ docker node ls
ID NAME MEMBERSHIP STATUS AVAILABILITY MANAGER STATUS LEADER
09fm6su6c24q * manager1 Accepted Ready Active Reachable Yes
32ljq6xijzb9 worker1 Accepted Ready Active
38fsncz6fal9 worker2 Accepted Ready Active
```
The `MANAGER STATUS` column identifies the manager nodes in the Swarm. The empty
status in this column for `worker1` and `worker2` identifies them as worker nodes.
Swarm management commands like `docker node ls` only work on manager nodes.
## What's next?
Now your Swarm consists of a manager and two worker nodes. In the next step of
the tutorial, you [deploy a service](deploy-service.md) to the Swarm.


@ -0,0 +1,77 @@
<!--[metadata]>
+++
title = "Create a Swarm"
description = "Initialize the Swarm"
keywords = ["tutorial, cluster management, swarm"]
[menu.main]
identifier="initialize-swarm"
parent="swarm-tutorial"
weight=12
advisory = "rc"
+++
<![end-metadata]-->
# Create a Swarm
After you complete the [tutorial setup](index.md) steps, you're ready
to create a Swarm. Make sure the Docker Engine daemon is started on the host
machines.
1. Open a terminal and ssh into the machine where you want to run your manager
node. For example, the tutorial uses a machine named `manager1`.
2. Run `docker swarm init --listen-addr MANAGER-IP:PORT` to create a new Swarm.
In the tutorial, the following command creates a Swarm on the `manager1` machine:
```
$ docker swarm init --listen-addr 192.168.99.100:2377
Swarm initialized: current node (09fm6su6c24qn) is now a manager.
```
The `--listen-addr` flag configures the manager node to listen for inbound
Swarm traffic on `192.168.99.100:2377`. The other nodes in the Swarm must be
able to reach the manager at this IP address.
3. Run `docker info` to view the current state of the Swarm:
```
$ docker info
Containers: 2
Running: 0
Paused: 0
Stopped: 2
...snip...
Swarm:
NodeID: 09fm6su6c24qn
IsManager: YES
Managers: 1
Nodes: 1
...snip...
```
4. Run the `docker node ls` command to view information about nodes:
```
$ docker node ls
ID NAME MEMBERSHIP STATUS AVAILABILITY MANAGER STATUS LEADER
09fm6su6c24q * manager1 Accepted Ready Active Reachable Yes
```
The `*` next to the node ID indicates that you're currently connected to
this node.
Docker Swarm automatically names the node after the machine's hostname. The
tutorial covers other columns in later steps.
## What's next?
In the next section of the tutorial, we'll [add two more nodes](add-nodes.md) to
the cluster.


@ -0,0 +1,44 @@
<!--[metadata]>
+++
title = "Delete the service"
description = "Remove the service on the Swarm"
keywords = ["tutorial, cluster management, swarm, service"]
[menu.main]
identifier="swarm-tutorial-delete-service"
parent="swarm-tutorial"
weight=19
advisory = "rc"
+++
<![end-metadata]-->
# Delete the service running on the Swarm
The remaining steps in the tutorial don't use the `helloworld` service, so now
you can delete the service from the Swarm.
1. If you haven't already, open a terminal and ssh into the machine where you
run your manager node. For example, the tutorial uses a machine named
`manager1`.
2. Run `docker service rm helloworld` to remove the `helloworld` service.
```
$ docker service rm helloworld
helloworld
```
3. Run `docker service inspect SERVICE-ID` to verify that Swarm removed the
service. The CLI returns a message that the service is not found:
```
$ docker service inspect helloworld
[]
Error: no such service or task: helloworld
```
## What's next?
In the next step of the tutorial, you set up a new service and apply a
[rolling update](rolling-update.md).


@ -0,0 +1,50 @@
<!--[metadata]>
+++
title = "Deploy a service"
description = "Deploy the application"
keywords = ["tutorial, cluster management, swarm"]
[menu.main]
identifier="deploy-application"
parent="swarm-tutorial"
weight=16
advisory = "rc"
+++
<![end-metadata]-->
# Deploy a service to the Swarm
After you [create a Swarm](create-swarm.md), you can deploy a service to the
Swarm. For this tutorial, you also [added worker nodes](add-nodes.md), but that
is not a requirement to deploy a service.
1. Open a terminal and ssh into the machine where you run your manager node. For
example, the tutorial uses a machine named `manager1`.
2. Run the following command:
```bash
$ docker service create --scale 1 --name helloworld alpine ping docker.com
2zs4helqu64f3k3iuwywbk49w
```
* The `docker service create` command creates the service.
* The `--name` flag names the service `helloworld`.
* The `--scale` flag specifies the desired state of 1 running instance.
* The arguments `alpine ping docker.com` define the service as an Alpine
Linux container that executes the command `ping docker.com`.
3. Run `docker service ls` to see the list of running services:
```
$ docker service ls
ID NAME SCALE IMAGE COMMAND
2zs4helqu64f helloworld 1 alpine ping docker.com
```
## What's next?
Now that you've deployed a service to the Swarm, you're ready to [inspect the service](inspect-service.md).


@ -0,0 +1,129 @@
<!--[metadata]>
+++
title = "Drain a node"
description = "Drain nodes on the Swarm"
keywords = ["tutorial, cluster management, swarm, service, drain"]
[menu.main]
identifier="swarm-tutorial-drain-node"
parent="swarm-tutorial"
weight=21
+++
<![end-metadata]-->
# Drain a node on the Swarm
In earlier steps of the tutorial, all the nodes have been running with `ACTIVE`
availability. The Swarm manager can assign tasks to any `ACTIVE` node, so all
nodes have been available to receive tasks.
Sometimes, such as during planned maintenance, you need to set a node to `DRAIN`
availability. `DRAIN` availability prevents a node from receiving new tasks
from the Swarm manager. It also means the manager stops tasks running on the
node and launches replica tasks on a node with `ACTIVE` availability.
1. If you haven't already, open a terminal and ssh into the machine where you
run your manager node. For example, the tutorial uses a machine named
`manager1`.
2. Verify that all your nodes are actively available.
```
$ docker node ls
ID NAME MEMBERSHIP STATUS AVAILABILITY MANAGER STATUS LEADER
1x2bldyhie1cj worker1 Accepted Ready Active
1y3zuia1z224i worker2 Accepted Ready Active
2p5bfd34mx4op * manager1 Accepted Ready Active Reachable Yes
```
3. If you aren't still running the `redis` service from the [rolling
update](rolling-update.md) tutorial, start it now:
```bash
$ docker service create --scale 3 --name redis --update-delay 10s --update-parallelism 1 redis:3.0.6
69uh57k8o03jtqj9uvmteodbb
```
4. Run `docker service tasks redis` to see how the Swarm manager assigned the
tasks to different nodes:
```
$ docker service tasks redis
ID NAME SERVICE IMAGE LAST STATE DESIRED STATE NODE
3wfqsgxecktpwoyj2zjcrcn4r redis.1 redis redis:3.0.6 RUNNING 13 minutes RUNNING worker2
8lcm041z3v80w0gdkczbot0gg redis.2 redis redis:3.0.6 RUNNING 13 minutes RUNNING worker1
d48skceeph9lkz4nbttig1z4a redis.3 redis redis:3.0.6 RUNNING 12 minutes RUNNING manager1
```
In this case the Swarm manager distributed one task to each node. You may
see the tasks distributed differently among the nodes in your environment.
5. Run `docker node update --availability drain NODE-ID` to drain a node that
had a task assigned to it:
```bash
$ docker node update --availability drain worker1
worker1
```
6. Inspect the node to check its availability:
```
$ docker node inspect --pretty worker1
ID: 1x2bldyhie1cj
Hostname: worker1
Status:
State: READY
Availability: DRAIN
...snip...
```
The drained node shows `Drain` for `AVAILABILITY`.
7. Run `docker service tasks redis` to see how the Swarm manager updated the
task assignments for the `redis` service:
```
ID NAME SERVICE IMAGE LAST STATE DESIRED STATE NODE
3wfqsgxecktpwoyj2zjcrcn4r redis.1 redis redis:3.0.6 RUNNING 26 minutes RUNNING worker2
ah7o4u5upostw3up1ns9vbqtc redis.2 redis redis:3.0.6 RUNNING 9 minutes RUNNING manager1
d48skceeph9lkz4nbttig1z4a redis.3 redis redis:3.0.6 RUNNING 26 minutes RUNNING manager1
```
The Swarm manager maintains the desired state by ending the task on a node
with `Drain` availability and creating a new task on a node with `Active`
availability.
8. Run `docker node update --availability active NODE-ID` to return the drained
node to an active state:
```bash
$ docker node update --availability active worker1
worker1
```
9. Inspect the node to see the updated state:
```
$ docker node inspect --pretty worker1
ID: 1x2bldyhie1cj
Hostname: worker1
Status:
State: READY
Availability: ACTIVE
...snip...
```
When you set the node back to `Active` availability, it can receive new tasks:
* during a service update to scale up
* during a rolling update
* when you set another node to `Drain` availability
* when a task fails on another active node
## What's next?
The next topic in the tutorial introduces volumes.


@ -0,0 +1,87 @@
<!--[metadata]>
+++
title = "Set up for the tutorial"
description = "Getting Started tutorial for Docker Swarm"
keywords = ["tutorial, cluster management, swarm"]
[menu.main]
identifier="tutorial-setup"
parent="swarm-tutorial"
weight=11
advisory = "rc"
+++
<![end-metadata]-->
# Getting Started with Docker Swarm
This tutorial introduces you to the key features of Docker Swarm. It guides you
through the following activities:
* initializing a cluster of Docker Engines called a Swarm
* adding nodes to the Swarm
* deploying application services to the Swarm
* managing the Swarm once you have everything running
This tutorial uses Docker Engine CLI commands entered on the command line of a
terminal window. You should be able to install Docker on networked machines and
be comfortable running commands in the shell of your choice.
If you're brand new to Docker, see [About Docker Engine](../../index.md).
## Set up
To run this tutorial, you need the following:
* [three networked host machines](#three-networked-host-machines)
* [Docker Engine 1.12 or later installed](#docker-engine-1-12-or-later)
* [the IP address of the manager machine](#the-ip-address-of-the-manager-machine)
* [open ports between the hosts](#open-ports-between-the-hosts)
### Three networked host machines
The tutorial uses three networked host machines as nodes in the Swarm. These can
be virtual machines on your PC, in a data center, or on a cloud service
provider. This tutorial uses the following machine names:
* manager1
* worker1
* worker2
### Docker Engine 1.12 or later
You must install Docker Engine on each one of the host machines. To use this
version of Swarm, install the Docker Engine `v1.12.0-rc1` or later from the
[Docker releases GitHub repository](https://github.com/docker/docker/releases).
Alternatively, install the latest Docker for Mac or Docker for Windows Beta.
Verify that the Docker Engine daemon is running on each of the machines.
<!-- See the following options to install:
* [Install Docker Engine](../../installation/index.md).
* [Example: Manual install on cloud provider](../../installation/cloud/cloud-ex-aws.md).
-->
### The IP address of the manager machine
The IP address must be assigned to a network interface available to the host
operating system. All nodes in the Swarm must be able to access the manager at the IP address.
>**Tip**: You can run `ifconfig` on Linux or Mac OSX to see a list of the
available network interfaces.
The tutorial uses `manager1` : `192.168.99.100`.
### Open ports between the hosts
* **TCP port 2377** for cluster management communications
* **TCP** and **UDP port 7946** for communication among nodes
* **TCP** and **UDP port 4789** for overlay network traffic
>**Tip**: Docker recommends that every node in the cluster be on the same layer
3 (IP) subnet with all traffic permitted between nodes.
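A minimal sketch for opening these ports, assuming Ubuntu hosts with the `ufw` firewall (adapt to whatever firewall tooling your hosts use):

```bash
$ sudo ufw allow 2377/tcp   # cluster management
$ sudo ufw allow 7946       # node-to-node communication (TCP and UDP)
$ sudo ufw allow 4789       # overlay network traffic (TCP and UDP)
```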
## What's next?
After you have set up your environment, you're ready to [create a Swarm](create-swarm.md).
